file_name | prefix | suffix | middle
---|---|---|---|
phys.rs
|
//! Phys module handles kernel-level
//! interfacing for physical, on-board peripherals.
pub mod addrs;
pub mod dma;
pub mod gpio;
pub mod irq;
pub mod periodic_timers;
pub mod pins;
pub mod timer;
pub mod uart;
pub mod xbar;
pub enum Bitwise {
Or, // Or with the existing value
And, // And with the existing value
Eq, // Assign absolute value
}
pub enum
|
{
Input,
Output,
}
// Enable all physical clocks that we need
pub fn phys_clocks_en() {
periodic_timers::pit_start_clock();
uart::uart_start_clock();
gpio::gpio_start_clock();
xbar::xbar_start_clock();
dma::dma_start_clock();
}
/// Takes a memory address and does an 8-bit write
/// to the location.
///
/// This method will be deprecated in the future
/// in favor of `assign_8`.
pub fn write_byte(address: u32, value: u8) {
unsafe {
*(address as *mut u8) = value;
}
}
/// Takes a memory address and does an 8-bit write
/// to the location.
pub fn assign_8(address: u32, value: u8) {
unsafe {
*(address as *mut u8) = value;
}
}
/// Takes a memory address and does a 16-bit write
/// to the location.
pub fn assign_16(address: u32, value: u16) {
unsafe {
*(address as *mut u16) = value;
}
}
/// Takes a memory address and does a 32-bit write
/// to the location.
pub fn assign(address: u32, value: u32) {
unsafe {
*(address as *mut u32) = value;
}
}
/// Takes a memory address, an operation, and a value
/// and performs the operations against the address.
///
/// This is useful if you want to maintain existing data
/// and logically AND or logically OR an additional byte.
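///
/// A minimal usage sketch (the address below is a made-up example, not a real
/// peripheral register):
///
/// ```ignore
/// // Set bit 3 of the register at 0x4000_0000 while preserving the other bits.
/// assign_bit(0x4000_0000, Bitwise::Or, 1 << 3);
/// ```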
pub fn assign_bit(address: u32, op: Bitwise, value: u32) {
unsafe {
let original_value = *(address as *mut u32);
match op {
Bitwise::Or => {
assign(address, original_value | value);
},
Bitwise::And => {
assign(address, original_value & value);
},
Bitwise::Eq => {
assign(address, value);
}
}
}
}
/// Takes a memory address and performs a 4-byte read,
/// resulting in a u32 of the current data.
pub fn read_word(address: u32) -> u32 {
unsafe {
return *(address as *mut u32);
}
}
/// Takes a memory address and performs a 2-byte read,
/// resulting in a u16 of the current data.
pub fn read_16(address: u32) -> u16 {
unsafe {
return *(address as *mut u16);
}
}
/// Takes a value and sets a particular bit to zero,
/// returning the new value.
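///
/// For example, clearing bit 1 of `0b1010` yields `0b1000`:
///
/// ```ignore
/// assert_eq!(clear_bit(0b1010, 1), 0b1000);
/// ```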
pub fn clear_bit(number: u32, bit: u8) -> u32 {
return number & !(0x01 << bit);
}
/// Takes a value and sets a particular bit to one,
/// returning the new value.
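///
/// For example, setting bit 0 of `0b1000` yields `0b1001`:
///
/// ```ignore
/// assert_eq!(set_bit(0b1000, 0), 0b1001);
/// ```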
pub fn set_bit(number: u32, bit: u8) -> u32 {
return number | (0x01 << bit);
}
/// A structure defining a register
/// used in peripherals
pub struct Reg {
base: u32,
mask: u32,
}
|
Dir
|
group.rs
|
//! Creates a group pattern in many different ways as desired.
//!
//! This module consists of methods needed to create a group in many different ways, having different options/settings.
//! The main one is the [`group`](../struct.EasyRegex.html#method.group) method which takes an expression as argument
//! and a set of settings as ```GroupSettings```, and creates a group for it.
//! Other methods, starting with the word ```into``` followed by an underscore, make all previously chained expressions into a group.
//! They mostly take no settings arguments and are useful at the latter part of a method chain.
use crate::{
settings::{GroupSettings, Settings, base::DEFAULT},
EasyRegex,
};
impl EasyRegex {
/// Creates a group of expressions.
///
/// This method takes an expression (a segment of the entire pattern) followed
/// by a set of settings (```GroupSettings``` struct) that will be concatenated/inserted into the expression itself,
/// outputting the previous pattern followed by this group.
///
/// # Examples
///
/// ```
/// use easy_regex::{EasyRegex, settings::group::OPTIONAL_GROUP};
///
/// let result = EasyRegex::new_section().group("expression", &OPTIONAL_GROUP);
/// assert_eq!("(expression)?", result.get_regex().unwrap().as_str());
/// ```
pub fn group(self, expression: &str, group_settings: &GroupSettings) -> Self {
let mut final_result = EasyRegex::new_section();
// to make the regex itself clearer, this extra if condition is added.
if group_settings.other.flags.is_some() && group_settings.is_non_capture {
final_result.0 = format!(
"({}:{})",
group_settings.other.flags.unwrap().as_str(),
expression
);
} else {
final_result = final_result
.literal(
expression,
&Settings {
flags: group_settings.other.flags,
..Default::default()
},
)
.into_group(&Settings {
flags: None,
..group_settings.other
});
if group_settings.is_non_capture {
final_result.0.insert_str(1, "?:");
}
}
self.literal(&final_result.0, &DEFAULT)
}
/// Same as the ```group``` method with the option to add a custom name to the group.
///
/// # Examples
///
/// ```
/// use easy_regex::{EasyRegex, settings::group::OPTIONAL_GROUP};
///
/// let result = EasyRegex::new_section().named_group("my_group", "expression", &OPTIONAL_GROUP);
/// assert_eq!("(?P<my_group>expression)?", result.get_regex().unwrap().as_str());
/// ```
pub fn named_group(self, name: &str, expression: &str, group_settings: &GroupSettings) -> Self {
let final_result = format!("?P<{}>{}", name, expression);
self.group(&final_result, &group_settings)
}
/// Turns the previous expressions into a **capturing** group. It uses ```Settings``` struct for the settings parameter.
///
/// # Examples
///
/// ```
/// use easy_regex::{EasyRegex, settings::base::OPTIONAL};
///
/// let result = EasyRegex::new(r"\d{3}").into_group(&OPTIONAL);
/// assert_eq!(r"(\d{3})?", result.get_regex().unwrap().as_str());
/// ```
pub fn into_group(self, settings: &Settings) -> Self {
let raw_result = format!("({})", self.0);
let final_result = EasyRegex(String::new()).literal(&raw_result, &settings);
final_result
}
/// A variation of ```into_group``` having *name* option **(?P\<name\>RegExp)**.
pub fn into_named_group(self, name: &str, settings: &Settings) -> Self {
let raw_result = format!("(?P<{}>{})", name, self.0);
let final_result = EasyRegex(String::new()).literal(&raw_result, &settings);
final_result
}
/// A variation of ```into_group``` having *non-capturing* option **(?:RegExp)**.
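///
/// # Examples
///
/// A short illustration, mirroring the other doc examples in this module:
///
/// ```
/// use easy_regex::EasyRegex;
///
/// let result = EasyRegex::new("expression").into_non_capturing();
/// assert_eq!("(?:expression)", result.get_regex().unwrap().as_str());
/// ```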
pub fn into_non_capturing(self) -> Self {
let result = format!("(?:{})", self.0);
EasyRegex(result)
}
////////////////////////////////////////////////////////////////
/// A variation of ```into_group``` having *Insensitive* flag **(?i)**.
pub fn into_insensitive_group(self) -> Self {
let result = format!("((?i){})", self.0);
EasyRegex(result)
}
/// A variation of ```into_group``` having *Multiline* flag **(?m)**.
pub fn into_multiline_group(self) -> Self {
let result = format!("((?m){})", self.0);
EasyRegex(result)
}
/// A variation of ```into_group``` having *Dot All* flag **(?s)**.
pub fn into_dot_match_newline_group(self) -> Self {
let result = format!("((?s){})", self.0);
EasyRegex(result)
}
/// A variation of ```into_group``` ignoring *whitespaces* **(?x)**.
pub fn into_ignore_whitespace_group(self) -> Self {
|
}
////////////////////////////////////////////////////////////////
/// A variation of ```into_non_capturing``` having *Insensitive* flag **(?i)**.
pub fn into_insensitive_non_capturing(self) -> Self {
let result = format!("(?i:{})", self.0);
EasyRegex(result)
}
/// A variation of ```into_non_capturing``` having *Multiline* flag **(?m)**.
pub fn into_multiline_non_capturing(self) -> Self {
let result = format!("(?m:{})", self.0);
EasyRegex(result)
}
/// A variation of ```into_non_capturing``` having *Dot All* flag **(?s)**.
pub fn into_dot_match_newline_non_capturing(self) -> Self {
let result = format!("(?s:{})", self.0);
EasyRegex(result)
}
/// A variation of ```into_non_capturing``` ignoring *whitespaces* **(?x)**.
pub fn into_ignore_whitespace_non_capturing(self) -> Self {
let result = format!("(?x:{})", self.0);
EasyRegex(result)
}
////////////////////////////////////////////////////////////////
/// A variation of ```into_group``` having *Insensitive* flag cleared **(?-i)**.
pub fn into_sensitive_group(self) -> Self {
let result = format!("((?-i){})", self.0);
EasyRegex(result)
}
/// A variation of ```into_group``` having *Multiline* flag cleared **(?-m)**.
pub fn into_single_line_group(self) -> Self {
let result = format!("((?-m){})", self.0);
EasyRegex(result)
}
/// A variation of ```into_group``` having *Dot All* flag cleared **(?-s)**.
pub fn into_dot_dismatch_newline_group(self) -> Self {
let result = format!("((?-s){})", self.0);
EasyRegex(result)
}
/// A variation of ```into_group``` taking *whitespaces* into account **(?-x)**.
pub fn into_include_whitespace_group(self) -> Self {
let result = format!("((?-x){})", self.0);
EasyRegex(result)
}
////////////////////////////////////////////////////////////////
/// A variation of ```into_non_capturing``` having *Insensitive* flag cleared **(?-i)**.
pub fn into_sensitive_non_capturing(self) -> Self {
let result = format!("(?-i:{})", self.0);
EasyRegex(result)
}
/// A variation of ```into_non_capturing``` having *Multiline* flag cleared **(?-m)**.
pub fn into_single_line_non_capturing(self) -> Self {
let result = format!("(?-m:{})", self.0);
EasyRegex(result)
}
/// A variation of ```into_non_capturing``` having *Dot All* flag cleared **(?-s)**.
pub fn into_dot_dismatch_newline_non_capturing(self) -> Self {
let result = format!("(?-s:{})", self.0);
EasyRegex(result)
}
/// A variation of ```into_non_capturing``` taking *whitespaces* into account **(?-x)**.
pub fn into_include_whitespace_non_capturing(self) -> Self {
let result = format!("(?-x:{})", self.0);
EasyRegex(result)
}
}
#[cfg(test)]
mod tests {
use self::EasyRegex;
use super::*;
use crate::settings::group::{DEFAULT_GROUP, INSENSITIVE_GROUP, INSENSITIVE_NON_CAPTURE};
#[test]
fn group_works() {
let initial_exp = EasyRegex::new("initial_");
let result = initial_exp.group("group", &DEFAULT_GROUP);
assert_eq!("initial_(group)", result.0);
}
#[test]
fn optional_non_capture_group_works() {
let initial_exp = EasyRegex::start_of_line();
let group_settings = GroupSettings {
other: Settings {
is_optional: true,
..Default::default()
},
is_non_capture: true,
};
let result = initial_exp.group("group", &group_settings);
assert_eq!("^(?:group)?", result.0);
}
#[test]
fn insensitive_group_works() {
let result = EasyRegex::start_of_line()
.group("group", &INSENSITIVE_GROUP)
.get_regex()
.unwrap();
assert_eq!("^((?i)group)", result.as_str());
}
#[test]
fn insensitive_non_capturing_group_works() {
let result = EasyRegex::start_of_line()
.group("group", &INSENSITIVE_NON_CAPTURE)
.get_regex()
.unwrap();
assert_eq!("^(?i:group)", result.as_str());
}
#[test]
fn into_group_works() {
let initial_exp = EasyRegex::new("group");
let result = initial_exp.into_group(&DEFAULT);
assert_eq!("(group)", result.0);
}
////////////////////////////////////////////////// ERRORS /////////////////////////////////////////////////////
// #[test]
// fn into_negative_group_added_optional_exp_not_works() {
// let initial_exp = MetaFuncRegex::new("group");
// let result = initial_exp
// // .into_negative_group()
// .literal_exp(&String::new(), &OPTIONAL);
// let err = result.get_regex().unwrap_err();
// let re = regex::Regex::new("/(?!group)/").unwrap();
// // regex::Regex::is_matchbuild(&re).unwrap();
// // println!("{}", &after);
// assert_eq!(
// regex::Error::Syntax(
// "regex parse error:
// (?!group)?
// ^^^
// error: look-around, including look-ahead and look-behind, is not supported"
// .to_string()
// ),
// err
// );
// }
// #[test]
// fn optional_negative_group_not_works() {
// let initial_exp = MetaFuncRegex::new("^");
// let group_settings = GroupSettings {
// other: Settings {
// is_optional: true,
// ..Default::default()
// },
// is_non_capture: false,
// flags: None,
// };
// let result = initial_exp.group("group", &group_settings);
// let err = result.get_regex().unwrap_err();
// assert_eq!(
// regex::Error::Syntax(
// "regex parse error:
// ^(?!group)?
// ^^^
// error: look-around, including look-ahead and look-behind, is not supported"
// .to_string()
// ),
// err
// );
// }
}
|
let result = format!("((?x){})", self.0);
EasyRegex(result)
|
infix.rs
|
//! Infix expressions in gluon are initially parsed as if they were all left-
//! associative with the same precedence. Therefore we need to rebalance them
//! after the fact.
use base::ast::{walk_mut_expr, Expr, IdentEnv, MutVisitor, SpannedExpr, SpannedIdent};
use base::error::Errors;
use base::fnv::FnvMap;
use base::pos::{self, BytePos, Spanned};
use std::cmp::Ordering;
use std::error::Error as StdError;
use std::fmt;
use std::hash::Hash;
use std::marker::PhantomData;
use std::mem;
/// The fixity (associativity) of an infix operator
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Fixity {
/// Left operator associativity.
///
/// For example, when the `(~)` operator is left-associative:
///
/// ```text
/// x ~ y ~ z ≡ (x ~ y) ~ z
/// ```
Left,
/// Right operator associativity.
///
/// For example, when the `(~)` operator is right-associative:
///
/// ```text
/// x ~ y ~ z ≡ x ~ (y ~ z)
/// ```
Right,
}
impl fmt::Display for Fixity {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Fixity::Left => write!(f, "infixl"),
Fixity::Right => write!(f, "infixr"),
}
}
}
/// Metadata pertaining to an infix operator
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct OpMeta {
/// The precedence of the operator
pub precedence: i32,
/// The fixity of the operator
pub fixity: Fixity,
}
impl OpMeta {
pub fn new(precedence: i32, fixity: Fixity) -> OpMeta {
OpMeta {
precedence: precedence,
fixity: fixity,
}
}
}
impl fmt::Display for OpMeta {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.fixity, self.precedence)
}
}
/// A table of operator metadata
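///
/// A table is built from `(name, OpMeta)` pairs, for example (an illustrative
/// sketch mirroring the tests below):
///
/// ```ignore
/// let ops = OpTable::new(vec![
///     ("*".to_string(), OpMeta::new(7, Fixity::Left)),
///     ("+".to_string(), OpMeta::new(6, Fixity::Left)),
/// ]);
/// ```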
pub struct OpTable<Id> {
pub operators: FnvMap<Id, OpMeta>,
}
impl<Id> OpTable<Id> {
pub fn new<I>(ops: I) -> OpTable<Id>
where
I: IntoIterator<Item = (Id, OpMeta)>,
Id: Eq + Hash,
{
OpTable {
operators: ops.into_iter().collect(),
}
}
}
impl<Id> OpTable<Id>
where
Id: Eq + Hash + AsRef<str> + ::std::fmt::Debug,
{
fn get_at(&self, name: &SpannedIdent<Id>) -> Result<&OpMeta, Spanned<Error, BytePos>> {
self.get(&name.value.name).ok_or_else(|| {
pos::spanned(
name.span,
Error::UndefinedFixity(name.value.name.as_ref().to_string()),
)
})
}
fn get(&self, name: &Id) -> Option<&OpMeta> {
self.operators.get(name).or_else(|| {
let name = name.as_ref();
if name.starts_with('#') || name == "&&" || name == "||" {
const OPS: &[(&str, OpMeta)] = &[
(
"*",
OpMeta {
precedence: 7,
fixity: Fixity::Left,
},
),
(
"/",
OpMeta {
precedence: 7,
fixity: Fixity::Left,
},
),
(
"+",
OpMeta {
precedence: 6,
fixity: Fixity::Left,
},
),
(
"-",
OpMeta {
precedence: 6,
fixity: Fixity::Left,
},
),
(
"==",
OpMeta {
precedence: 4,
fixity: Fixity::Left,
},
),
(
"/=",
OpMeta {
precedence: 4,
fixity: Fixity::Left,
},
),
(
"<",
OpMeta {
precedence: 4,
fixity: Fixity::Left,
},
),
(
">",
OpMeta {
precedence: 4,
fixity: Fixity::Left,
},
),
(
"<=",
OpMeta {
precedence: 4,
fixity: Fixity::Left,
},
),
(
">=",
OpMeta {
precedence: 4,
fixity: Fixity::Left,
},
),
(
"&&",
OpMeta {
precedence: 3,
fixity: Fixity::Right,
},
),
(
"||",
OpMeta {
precedence: 2,
fixity: Fixity::Right,
},
),
];
let op = name
.trim_left_matches('#')
.trim_left_matches(char::is_alphanumeric);
OPS.iter().find(|t| t.0 == op).map(|t| &t.1)
} else {
None
}
})
}
}
pub struct Reparser<'s, Id: 's> {
operators: OpTable<Id>,
symbols: &'s IdentEnv<Ident = Id>,
errors: Errors<Spanned<Error, BytePos>>,
_marker: PhantomData<Id>,
}
impl<'s, Id> Reparser<'s, Id> {
pub fn new(operators: OpTable<Id>, symbols: &'s IdentEnv<Ident = Id>) -> Reparser<'s, Id> {
Reparser {
operators: operators,
symbols: symbols,
errors: Errors::new(),
_marker: PhantomData,
}
}
pub fn reparse(
&mut self,
expr: &mut SpannedExpr<Id>,
) -> Result<(), Errors<Spanned<Error, BytePos>>>
where
Id: Eq + Hash + AsRef<str> + ::std::fmt::Debug,
{
self.visit_expr(expr);
if self.errors.has_errors() {
Err(mem::replace(&mut self.errors, Errors::new()))
} else {
Ok(())
}
}
}
impl<'a, 's, Id> MutVisitor<'a> for Reparser<'s, Id>
where
Id: Eq + Hash + AsRef<str> + ::std::fmt::Debug + 'a,
{
type Ident = Id;
fn visit_expr(&mut self, e: &mut SpannedExpr<Self::Ident>) {
if let Expr::Infix { .. } = e.value {
let dummy = pos::spanned(e.span, Expr::Error(None));
let expr = mem::replace(e, dummy);
match reparse(expr, self.symbols, &self.operators) {
Ok(expr) => {
*e = expr;
}
Err((err, reconstructed_expr)) => {
info!("Infix error: {}", err);
if let Some(reconstructed_expr) = reconstructed_expr {
e.value = reconstructed_expr;
}
match err.value {
// Undefined fixity errors are reported at the definition site
Error::UndefinedFixity(_) => (),
_ => self.errors.push(err),
}
}
}
}
walk_mut_expr(self, e);
}
}
#[derive(Clone, Debug, PartialEq)]
pub enum Error {
ConflictingFixities((String, OpMeta), (String, OpMeta)),
UndefinedFixity(String),
InvalidFixity,
InvalidPrecedence,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match *self {
ConflictingFixities((ref lhs_name, lhs_meta), (ref rhs_name, rhs_meta)) => {
write!(f, "Conflicting fixities at the same precedence level. ")?;
write!(
f,
"left: `{} {}`, right: `{} {}`",
lhs_meta, lhs_name, rhs_meta, rhs_name
)
}
UndefinedFixity(ref op) => write!(f, "No fixity specified for `{}`. Fixity must be specified with the `#[infix]` attribute", op),
InvalidFixity => write!(
f,
"Only `left` or `right` is valid associativity specifications"
),
InvalidPrecedence => write!(f, "Only positive integers are valid precedences"),
}
}
}
impl StdError for Error {
fn description(&self) -> &str {
"Conflicting fixities at the same precedence level"
}
}
/// Reconstruct the infix expression using the correct associativities
/// and precedences.
///
/// Inspired by [`Language.Haskell.Infix`].
///
/// [`Language.Haskell.Infix`]: https://hackage.haskell.org/package/infix-0.1.1/docs/src/Language-Haskell-Infix.html
pub fn reparse<Id>(
expr: SpannedExpr<Id>,
symbols: &IdentEnv<Ident = Id>,
operators: &OpTable<Id>,
) -> Result<SpannedExpr<Id>, (Spanned<Error, BytePos>, Option<Expr<Id>>)>
where
Id: Eq + Hash + AsRef<str> + ::std::fmt::Debug,
{
use self::Error::*;
use base::pos;
let make_op = |lhs: Box<SpannedExpr<Id>>, op, rhs: Box<SpannedExpr<Id>>| {
let span = pos::span(lhs.span.start(), rhs.span.end());
Box::new(pos::spanned(
span,
Expr::Infix {
lhs,
op,
rhs,
implicit_args: Vec::new(),
},
))
};
let mut infixes = Infixes::new(expr);
let mut arg_stack = Vec::new();
let mut op_stack = Vec::new();
while let Some(token) = infixes.next() {
match token {
InfixToken::Arg(next_expr) => arg_stack.push(next_expr),
InfixToken::Op(next_op) => {
let stack_op = match op_stack.pop() {
Some(stack_op) => stack_op,
None => {
op_stack.push(next_op);
continue;
}
};
macro_rules! try_infix {
($expr:expr) => {
match $expr {
|
match infixes.remaining_expr {
Some(expr) => arg_stack.push(expr),
None => ()
}
op_stack.push(next_op);
op_stack.push(stack_op);
while arg_stack.len() > 1 {
let rhs = arg_stack.pop().unwrap();
let lhs = arg_stack.pop().unwrap();
let op = op_stack.pop().unwrap();
arg_stack.push(make_op(lhs, op, rhs));
}
return Err((err, arg_stack.pop().map(|original| original.value)));
}
}
};
}
let next_op_meta = *try_infix!(operators.get_at(&next_op));
let stack_op_meta = *try_infix!(operators.get_at(&stack_op));
match i32::cmp(&next_op_meta.precedence, &stack_op_meta.precedence) {
// Reduce
Ordering::Less => {
let rhs = arg_stack.pop().unwrap();
let lhs = arg_stack.pop().unwrap();
infixes.next_op = Some(next_op);
arg_stack.push(make_op(lhs, stack_op, rhs));
}
// Shift
Ordering::Greater => {
op_stack.push(stack_op);
op_stack.push(next_op);
}
Ordering::Equal => {
match (next_op_meta.fixity, stack_op_meta.fixity) {
// Reduce
(Fixity::Left, Fixity::Left) => {
let rhs = arg_stack.pop().unwrap();
let lhs = arg_stack.pop().unwrap();
infixes.next_op = Some(next_op);
arg_stack.push(make_op(lhs, stack_op, rhs));
}
// Shift
(Fixity::Right, Fixity::Right) => {
op_stack.push(stack_op);
op_stack.push(next_op);
}
// Conflicting fixities at the same precedence level!
(Fixity::Left, Fixity::Right) | (Fixity::Right, Fixity::Left) => {
let next_op_name = symbols.string(&next_op.value.name).to_string();
let stack_op_name =
symbols.string(&stack_op.value.name).to_string();
let span = pos::span(stack_op.span.start(), next_op.span.end());
let error = ConflictingFixities(
(stack_op_name, stack_op_meta),
(next_op_name, next_op_meta),
);
return Err((pos::spanned(span, error), None));
}
}
}
}
}
}
}
for op in op_stack.into_iter().rev() {
let rhs = arg_stack.pop().unwrap();
let lhs = arg_stack.pop().unwrap();
arg_stack.push(make_op(lhs, op, rhs));
}
assert_eq!(arg_stack.len(), 1);
Ok(*arg_stack.pop().unwrap())
}
#[derive(Debug, Clone, PartialEq)]
enum InfixToken<Id> {
Arg(Box<SpannedExpr<Id>>),
// TODO: Make this spanned to allow for accurate error reporting
Op(SpannedIdent<Id>),
}
/// An iterator that takes an expression that has had its operators grouped
/// with _right associativity_, and yields a sequence of `InfixToken`s. This
/// is useful for reparsing the operators with their correct associativities
/// and precedences.
///
/// For example, the expression:
///
/// ```text
/// (1 + (2 ^ (4 * (6 - 8))))
/// ```
///
/// Will result in the following iterations:
///
/// ```text
/// Arg: 1
/// Op: +
/// Arg: 2
/// Op: ^
/// Arg: 4
/// Op: *
/// Arg: 6
/// Op: -
/// Arg: 8
/// ```
struct Infixes<Id> {
/// The next part of the expression that we need to flatten
remaining_expr: Option<Box<SpannedExpr<Id>>>,
/// Cached operator from a previous iteration
next_op: Option<SpannedIdent<Id>>,
}
impl<Id> Infixes<Id> {
fn new(expr: SpannedExpr<Id>) -> Infixes<Id> {
Infixes {
remaining_expr: Some(Box::new(expr)),
next_op: None,
}
}
}
impl<Id> Iterator for Infixes<Id> {
type Item = InfixToken<Id>;
fn next(&mut self) -> Option<InfixToken<Id>> {
if let Some(op) = self.next_op.take() {
return Some(InfixToken::Op(op));
}
self.remaining_expr.take().map(|expr| {
let expr = *expr; // Workaround for http://stackoverflow.com/questions/28466809/
match expr.value {
Expr::Infix {
lhs,
op,
rhs,
implicit_args,
} => {
assert!(
implicit_args.is_empty(),
"Implicit args on infix operators is not implemented"
);
self.remaining_expr = Some(rhs);
self.next_op = Some(op);
InfixToken::Arg(lhs)
}
_ => InfixToken::Arg(Box::new(expr)), // FIXME: remove reallocation?
}
})
}
}
#[cfg(test)]
mod tests {
use base::ast::{DisplayEnv, Expr, IdentEnv, Literal, SpannedExpr, TypedIdent};
use base::pos::{self, BytePos, Spanned};
use std::marker::PhantomData;
use super::Error::*;
use super::*;
fn reparse<Id>(
expr: SpannedExpr<Id>,
symbols: &IdentEnv<Ident = Id>,
operators: &OpTable<Id>,
) -> Result<SpannedExpr<Id>, Spanned<Error, BytePos>>
where
Id: Eq + Hash + AsRef<str> + ::std::fmt::Debug,
{
super::reparse(expr, symbols, operators).map_err(|t| t.0)
}
pub struct MockEnv<T>(PhantomData<T>);
impl<T> MockEnv<T> {
pub fn new() -> MockEnv<T> {
MockEnv(PhantomData)
}
}
impl<T: AsRef<str>> DisplayEnv for MockEnv<T> {
type Ident = T;
fn string<'a>(&'a self, ident: &'a Self::Ident) -> &'a str {
ident.as_ref()
}
}
impl<T> IdentEnv for MockEnv<T>
where
T: AsRef<str> + for<'a> From<&'a str>,
{
fn from_str(&mut self, s: &str) -> Self::Ident {
T::from(s)
}
}
fn no_loc<T>(value: T) -> Spanned<T, BytePos> {
pos::spanned2(BytePos::from(0), BytePos::from(0), value)
}
fn ident(name: &str) -> TypedIdent<String> {
TypedIdent::new(name.to_string())
}
fn op(
lhs: Box<SpannedExpr<String>>,
op_str: &str,
rhs: Box<SpannedExpr<String>>,
) -> Box<SpannedExpr<String>> {
Box::new(no_loc(Expr::Infix {
lhs,
op: no_loc(ident(op_str)),
rhs,
implicit_args: Vec::new(),
}))
}
fn int(value: i64) -> Box<SpannedExpr<String>> {
Box::new(no_loc(Expr::Literal(Literal::Int(value))))
}
#[test]
fn infixes() {
let expr = op(
int(1),
"+",
op(int(2), "^", op(int(4), "*", op(int(6), "-", int(8)))),
);
let result: Vec<_> = Infixes::new(*expr).collect();
let expected = vec![
InfixToken::Arg(int(1)),
InfixToken::Op(no_loc(ident("+"))),
InfixToken::Arg(int(2)),
InfixToken::Op(no_loc(ident("^"))),
InfixToken::Arg(int(4)),
InfixToken::Op(no_loc(ident("*"))),
InfixToken::Arg(int(6)),
InfixToken::Op(no_loc(ident("-"))),
InfixToken::Arg(int(8)),
];
assert_eq!(result, expected);
}
#[test]
fn reparse_single() {
let env = MockEnv::new();
let ops = OpTable::new(vec![]);
let expr = *op(int(1), "+", int(2));
let expected = Ok(expr.clone());
assert_eq!(reparse(expr, &env, &ops), expected);
}
#[test]
fn reparse_less_precedence() {
let env = MockEnv::new();
let ops = OpTable::new(vec![
("*".to_string(), OpMeta::new(7, Fixity::Left)),
("+".to_string(), OpMeta::new(6, Fixity::Left)),
]);
// 1 + (2 * 8)
let expr = *op(int(1), "+", op(int(2), "*", int(8)));
let expected = Ok(expr.clone());
assert_eq!(reparse(expr, &env, &ops), expected);
}
#[test]
fn reparse_greater_precedence() {
let env = MockEnv::new();
let ops = OpTable::new(vec![
("*".to_string(), OpMeta::new(7, Fixity::Left)),
("+".to_string(), OpMeta::new(6, Fixity::Left)),
]);
// 1 * (2 + 8)
let expr = *op(int(1), "*", op(int(2), "+", int(8)));
// (1 * 2) + 8
let expected = Ok(*op(op(int(1), "*", int(2)), "+", int(8)));
assert_eq!(reparse(expr, &env, &ops), expected);
}
#[test]
fn reparse_equal_precedence_left_fixity() {
let env = MockEnv::new();
let ops = OpTable::new(vec![
("-".to_string(), OpMeta::new(6, Fixity::Left)),
("+".to_string(), OpMeta::new(6, Fixity::Left)),
]);
// 1 + (2 - 8)
let expr = *op(int(1), "+", op(int(2), "-", int(8)));
// (1 + 2) - 8
let expected = Ok(*op(op(int(1), "+", int(2)), "-", int(8)));
assert_eq!(reparse(expr, &env, &ops), expected);
}
#[test]
fn reparse_equal_precedence_right_fixity() {
let env = MockEnv::new();
let ops = OpTable::new(vec![
("-".to_string(), OpMeta::new(6, Fixity::Right)),
("+".to_string(), OpMeta::new(6, Fixity::Right)),
]);
// 1 + (2 - 8)
let expr = *op(int(1), "+", op(int(2), "-", int(8)));
let expected = Ok(expr.clone());
assert_eq!(reparse(expr, &env, &ops), expected);
}
#[test]
fn reparse_mixed_precedences_mixed_fixities() {
let env = MockEnv::new();
let ops = OpTable::new(vec![
("*".to_string(), OpMeta::new(7, Fixity::Left)),
("-".to_string(), OpMeta::new(6, Fixity::Left)),
("+".to_string(), OpMeta::new(6, Fixity::Left)),
]);
// 1 + (2 * (6 - 8))
let expr = *op(int(1), "+", op(int(2), "*", op(int(6), "-", int(8))));
// (1 + (2 * 6)) - 8
let expected = Ok(*op(op(int(1), "+", op(int(2), "*", int(6))), "-", int(8)));
assert_eq!(reparse(expr, &env, &ops), expected);
}
#[test]
fn reparse_equal_precedence_conflicting_fixities() {
let env = MockEnv::new();
let ops = OpTable::new(vec![
("|>".to_string(), OpMeta::new(5, Fixity::Left)),
("<|".to_string(), OpMeta::new(5, Fixity::Right)),
]);
// 1 |> (2 <| 8)
let expr = *op(int(1), "|>", op(int(2), "<|", int(8)));
let error = ConflictingFixities(
("|>".to_string(), OpMeta::new(5, Fixity::Left)),
("<|".to_string(), OpMeta::new(5, Fixity::Right)),
);
let expected = Err(no_loc(error));
assert_eq!(reparse(expr, &env, &ops), expected);
}
#[test]
fn reparse_equal_precedence_conflicting_fixities_nested() {
let env = MockEnv::new();
let ops = OpTable::new(vec![
("+".to_string(), OpMeta::new(6, Fixity::Left)),
("|>".to_string(), OpMeta::new(5, Fixity::Left)),
("<|".to_string(), OpMeta::new(5, Fixity::Right)),
]);
// 1 + (1 |> (2 <| 8))
let expr = *op(int(1), "+", op(int(1), "|>", op(int(2), "<|", int(8))));
let error = ConflictingFixities(
("|>".to_string(), OpMeta::new(5, Fixity::Left)),
("<|".to_string(), OpMeta::new(5, Fixity::Right)),
);
let expected = Err(no_loc(error));
assert_eq!(reparse(expr, &env, &ops), expected);
}
}
|
Ok(e) => e,
Err(err) => {
|
test_networks.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Ethos-N integration end-to-end network tests"""
import pytest
pytest.importorskip("tflite")
pytest.importorskip("tensorflow")
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.contrib import download
import tvm.relay.testing.tf as tf_testing
import tflite.Model
from . import infrastructure as tei
def _get_tflite_model(tflite_model_path, inputs_dict, dtype):
with open(tflite_model_path, "rb") as f:
tflite_model_buffer = f.read()
try:
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buffer, 0)
except AttributeError:
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buffer, 0)
shape_dict = {}
dtype_dict = {}
for input in inputs_dict:
input_shape = inputs_dict[input]
shape_dict[input] = input_shape
dtype_dict[input] = dtype
return relay.frontend.from_tflite(
tflite_model,
shape_dict=shape_dict,
dtype_dict=dtype_dict,
)
def _test_image_network(
model_url,
model_sub_path,
input_dict,
compile_hash,
output_count,
host_ops=0,
npu_partitions=1,
run=False,
):
"""Test an image network.
Parameters
----------
model_url : str
The URL to the model.
model_sub_path : str
The name of the model file.
input_dict : dict
The input dict.
compile_hash : str or set
The compile hash(es) to check the compilation output against.
output_count : int
The expected number of outputs.
host_ops : int
The expected number of host operators.
npu_partitions : int
The expected number of Ethos-N partitions.
run : bool
Whether or not to try running the network. If hardware isn't
available, the run will still take place but with a mocked
inference function, so the results will be incorrect. This is
therefore just to test the runtime flow is working rather than
to check the correctness/accuracy.
"""
def get_model():
if model_url[-3:] in ("tgz", "zip"):
model_path = tf_testing.get_workload_official(
model_url,
model_sub_path,
)
else:
model_path = download.download_testdata(
model_url,
model_sub_path,
)
return _get_tflite_model(model_path, input_dict, "uint8")
inputs = {}
for input_name in input_dict:
input_shape = input_dict[input_name]
inputs[input_name] = tei.get_real_image(input_shape[1], input_shape[2])
mod, params = get_model()
m = tei.build(mod, params, npu=True, expected_host_ops=host_ops, npu_partitions=npu_partitions)
tei.assert_lib_hash(m.get_lib(), compile_hash)
if run:
tei.run(m, inputs, output_count, npu=True)
@requires_ethosn
def test_mobilenet_v1():
# If this test is failing due to a hash mismatch, please notify @mbaret and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {"1fd4ef29a1ea9f3a015cab87c0b8014a"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
_compile_hash = {"b879dfbff1f907eaf6129dfd41b44ece"}
if tei.get_ethosn_api_version() == 2011:
_compile_hash = {"9c9f63b30824f5b223cdb27d2f22c857"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
_compile_hash = {"cd13279061df2319124a7aac81581d81"}
_test_image_network(
model_url="https://storage.googleapis.com/download.tensorflow.org/"
"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
model_sub_path="mobilenet_v1_1.0_224_quant.tflite",
input_dict={"input": (1, 224, 224, 3)},
compile_hash=_compile_hash,
output_count=1,
host_ops=3,
|
run=True,
)
@requires_ethosn
def test_inception_v3():
# If this test is failing due to a hash mismatch, please notify @mbaret and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {"b90ed315639c6a0e97584c2dbc42a55c"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
_compile_hash = {"5693569055695e581a8739194d0301aa"}
if tei.get_ethosn_api_version() == 2011:
_compile_hash = {"46ccafc840633633aca441645e41b444"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
_compile_hash = {"4a33f397ac3e15c0f9869f7b8286fc2f"}
_test_image_network(
model_url="https://storage.googleapis.com/download.tensorflow.org/"
"models/tflite_11_05_08/inception_v3_quant.tgz",
model_sub_path="inception_v3_quant.tflite",
input_dict={"input": (1, 299, 299, 3)},
compile_hash=_compile_hash,
output_count=1,
host_ops=0,
npu_partitions=1,
)
@requires_ethosn
def test_inception_v4():
# If this test is failing due to a hash mismatch, please notify @mbaret and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {"b36877d2386d9f9c37a11772e3c4072c"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
_compile_hash = {"b5046a6f56d78af0b4f51960bf2deeda"}
if tei.get_ethosn_api_version() == 2011:
_compile_hash = {"4a1a56393078367dd27915a188d6a6af"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
_compile_hash = {"905caf389dd6b868aeff6acbca1fecef"}
_test_image_network(
model_url="https://storage.googleapis.com/download.tensorflow.org/"
"models/inception_v4_299_quant_20181026.tgz",
model_sub_path="inception_v4_299_quant.tflite",
input_dict={"input": (1, 299, 299, 3)},
compile_hash=_compile_hash,
output_count=1,
host_ops=3,
npu_partitions=1,
)
@requires_ethosn
def test_ssd_mobilenet_v1():
# If this test is failing due to a hash mismatch, please notify @mbaret and
# @Leo-arm. The hash is there to catch any changes in the behaviour of the
# codegen, which could come about from either a change in Support Library
# version or a change in the Ethos-N codegen. To update this requires running
# on hardware that isn't available in CI.
_compile_hash = {"956caf9e7fe5cfd5c042bd17857f7407", "4313033d14328e2aa022b1bd71b27b1c"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
_compile_hash = {"dc60cc687d892cd2877873094e9dfc0b", "6b3deeec16c24c0dcef23df0db5fb162"}
if tei.get_ethosn_api_version() == 2011:
_compile_hash = {"10826406ae724e52f360a06c35ced09d", "9a484d5ecec7acb18c9d6bc6058be031"}
if tei.get_ethosn_variant() == "Ethos-N78_1TOPS_2PLE_RATIO":
_compile_hash = {"425b38830f34b6eb448fa77dbfe9ac96", "de49128643cbf1c659a9a63aad1cba62"}
_test_image_network(
model_url="https://storage.googleapis.com/download.tensorflow.org/"
"models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip",
model_sub_path="detect.tflite",
input_dict={"normalized_input_image_tensor": (1, 300, 300, 3)},
compile_hash=_compile_hash,
output_count=4,
host_ops=28,
npu_partitions=2,
)
|
npu_partitions=1,
|
utils.test.ts
|
import { BinanceClient, Balance } from '@thorchain/asgardex-binance'
import { assetToBase, assetAmount, PoolData, EMPTY_ASSET } from '@thorchain/asgardex-util'
import * as E from 'fp-ts/lib/Either'
import * as FP from 'fp-ts/lib/function'
import * as O from 'fp-ts/lib/Option'
import { PoolAsset } from '../../views/pools/types'
import { PoolDetails } from '../midgard/types'
import { BinanceClientState } from './types'
import {
hasBinanceClient,
getBinanceClient,
getBinanceClientStateForViews,
bncSymbolToAsset,
bncSymbolToAssetString,
getPoolPriceValue
} from './utils'
// Mocking non default class exports
// https://jestjs.io/docs/en/es6-class-mocks#mocking-non-default-class-exports
jest.mock('@thorchain/asgardex-binance', () => {
return {
BinanceClient: jest.fn().mockImplementation(() =>
/* return an empty object - we don't need to mock any functions in tests here */
{}
)
}
})
describe('services/binance/utils/', () => {
let mockClient: BinanceClient
beforeEach(() => {
BinanceClient.mockClear()
mockClient = new BinanceClient()
})
describe('getBinanceClient', () => {
it('returns a client if it has been created before', () => {
const state: BinanceClientState = O.some(E.right(mockClient))
const result = getBinanceClient(state)
expect(O.toNullable(result)).toEqual(mockClient)
})
it('returns none if a client has not been created before', () => {
const result = getBinanceClient(O.none)
expect(result).toBeNone()
})
it('returns none if creating a client has thrown an error before', () => {
const state: BinanceClientState = O.some(E.left(new Error('any error')))
const result = getBinanceClient(state)
expect(result).toBeNone()
})
})
describe('hasBinanceClient', () => {
it('returns true if a client has been created', () => {
const state: BinanceClientState = O.some(E.right(mockClient))
const result = hasBinanceClient(state)
expect(result).toBeTruthy()
})
it('returns false if no client has been created', () => {
const result = hasBinanceClient(O.none)
expect(result).toBeFalsy()
})
it('returns false if any errors occur', () => {
const state: BinanceClientState = O.some(E.left(new Error('any error')))
const result = hasBinanceClient(state)
expect(result).toBeFalsy()
})
})
describe('getBinanceClientStateForViews', () => {
it('returns "ready" if a client has been created', () => {
const state: BinanceClientState = O.some(E.right(mockClient))
const result = getBinanceClientStateForViews(state)
expect(result).toEqual('ready')
})
it('returns "notready" if no client has been created', () => {
const result = getBinanceClientStateForViews(O.none)
|
})
it('returns "error" if any errors occur', () => {
const state: BinanceClientState = O.some(E.left(new Error('any error')))
const result = getBinanceClientStateForViews(state)
expect(result).toEqual('error')
})
})
describe('getPoolPriceValue', () => {
const poolDetails: PoolDetails = [
{
asset: PoolAsset.BNB,
assetDepth: '1000000000',
runeDepth: '10000000000'
}
]
const usdPool: PoolData = {
assetBalance: assetToBase(assetAmount(110000)),
runeBalance: assetToBase(assetAmount(100000))
}
it('returns a price for BNB in USD', () => {
const balance: Balance = {
free: '1',
symbol: 'BNB',
locked: '',
frozen: ''
}
const result = FP.pipe(
getPoolPriceValue(balance, poolDetails, usdPool),
O.fold(
() => 'failure',
(price) => price.amount().toString()
)
)
expect(result).toEqual('1100000000')
})
it('returns a price for RUNE in USD', () => {
const balance: Balance = {
free: '1',
symbol: 'RUNE-A1A',
locked: '',
frozen: ''
}
const result = FP.pipe(
getPoolPriceValue(balance, [], usdPool),
O.fold(
() => 'failure',
(price) => price.amount().toString()
)
)
expect(result).toEqual('110000000')
})
it('returns no price if no pools are available', () => {
const balance: Balance = {
free: '1',
symbol: 'BNB',
locked: '',
frozen: ''
}
const result = getPoolPriceValue(balance, [], usdPool)
expect(result).toBeNone()
})
})
describe('bncSymbolToAssetString', () => {
it('creates a RUNE `Asset` as string', () => {
const result = bncSymbolToAssetString('RUNE-B1A')
expect(result).toEqual('BNB.RUNE-B1A')
})
})
describe('bncSymbolToAsset', () => {
it('creates a RUNE `Asset`', () => {
const result = FP.pipe(
bncSymbolToAsset('RUNE-B1A'),
O.getOrElse(() => EMPTY_ASSET)
)
expect(result).toEqual({
chain: 'BNB',
symbol: 'RUNE-B1A',
ticker: 'RUNE'
})
})
})
})
|
expect(result).toEqual('notready')
|
task.py
|
class
|
:
# create __init__ and add_money methods
def __init__(self, dollars, cents):
self.dollars = dollars
self.cents = cents
def add_money(self, deposit_dollars, deposit_cents):
self.dollars += deposit_dollars
self.cents += deposit_cents
if self.cents >= 100:
to_dollar = int(self.cents / 100)
self.dollars += to_dollar
self.cents -= to_dollar * 100
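# Illustrative usage (assuming the class is completed with the expected name, PiggyBank):
# bank = PiggyBank(1, 50)
# bank.add_money(0, 60)  # cents roll over: bank.dollars == 2, bank.cents == 10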
|
PiggyBank
|
index.js
|
'use strict';
class PolygonPoints {
/**
*
* @param {Array} vertexes - Array of objects containing x and y value.
*/
constructor(vertexes) {
this._checkVertexes(vertexes);
}
/**
*
* Get the total number of x y points that exist in polygon.
* @readonly
* @returns {Number}
*/
get pointsLength() {
return this._pointsLength || this._countPointsInPolygon();
}
/**
*
* Get the bounding box as an array of x y coordinates.
* @readonly
* @returns {Array}
*/
get boundingBox() {
return [{x: this.minX, y: this.minY}, {x: this.maxX, y: this.minY}, {x: this.maxX, y: this.maxY}, {x: this.minX, y: this.maxY}];
}
/**
*
* Get the minimum x value.
* @readonly
* @returns {Number}
*/
get minX() {
return this._minX || 0;
}
/**
*
* Get the maximum x value.
* @readonly
* @returns {Number}
*/
get maxX() {
return this._maxX || 0;
}
/**
*
* Get the minimum y value.
* @readonly
* @returns {Number}
*/
get minY() {
return this._minY || 0;
}
/**
*
* Get the maximum y value.
* @readonly
* @returns {Number}
*/
get maxY() {
return this._maxY || 0;
}
/**
* @ignore
* @param {Array} value - Array of objects containing x and y value.
*/
set vertexes(value) {
this._checkVertexes(value);
delete this._pointsLength;
}
/**
*
* Get or set the array of vertexes.
* @returns {Array}
*/
get vertexes() {
return this._vertexes || [];
}
/**
*
* @param {Array} vertexes - Array of objects containing x and y value.
* @private
*/
_checkVertexes(vertexes) {
if (!Array.isArray(vertexes) || vertexes.length < 3) {
throw new Error('Polygon needs at least 3 sets of x y vertexes.');
}
for (const vertex of vertexes) {
const x = vertex.x;
const y = vertex.y;
if (x < 0 || y < 0 || parseInt(x) !== x || parseInt(y) !== y) {
throw new Error('Each vertex of the polygon must consist of an object with an x and y unsigned integer.');
}
}
this._vertexes = vertexes;
this._vertexesLength = vertexes.length;
this._maxX = this._vertexes[0].x;
this._minX = this._vertexes[0].x;
this._maxY = this._vertexes[0].y;
this._minY = this._vertexes[0].y;
for (let i = 1; i < this._vertexesLength; i++) {
this._maxX = Math.max(this._maxX, this._vertexes[i].x);
this._minX = Math.min(this._minX, this._vertexes[i].x);
this._maxY = Math.max(this._maxY, this._vertexes[i].y);
this._minY = Math.min(this._minY, this._vertexes[i].y);
}
}
|
* @private
*/
_countPointsInPolygon() {
this._pointsLength = 0;
for (let y = this._minY; y <= this._maxY; y++) {
for (let x = this._minX; x <= this._maxX; x++) {
if (this.containsPoint(x, y) === true) {
this._pointsLength++;
}
}
}
return this._pointsLength;
}
/**
*
* Check if x y point is contained in polygon.
* @param {Number} x - x coordinate
* @param {Number} y - y coordinate
* @returns {Boolean}
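* @example
* // Illustrative: a 10 x 10 square polygon
* const square = new PolygonPoints([{x: 0, y: 0}, {x: 10, y: 0}, {x: 10, y: 10}, {x: 0, y: 10}]);
* square.containsPoint(5, 5); // true
* square.containsPoint(20, 5); // false, outside the bounding box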
*/
containsPoint(x, y) {
//algorithm based on http://www.ecse.rpi.edu/Homepages/wrf/Research/Short_Notes/pnpoly.html
if (x < this._minX || x > this._maxX || y < this._minY || y > this._maxY) {
return false;
}
let inside = false;
const length = this._vertexesLength;
const array = this._vertexes;
for (let i = 0, j = length - 1; i < length; j = i++) {
const xi = array[i].x;
const yi = array[i].y;
const xj = array[j].x;
const yj = array[j].y;
if ((xi === x && yi === y) || (xj === x && yj === y)) {
// point is on corner
return true;
}
if (Math.hypot(xj - xi, yj - yi) === Math.hypot(xj - x, yj - y) + Math.hypot(xi - x, yi - y)) {
// point is on perimeter
return true;
}
const intersect = ((yi > y) !== (yj > y)) && (x < (xj - xi) * (y - yi) / (yj - yi) + xi);
if (intersect) {
inside = !inside;
}
}
return inside;
}
/**
*
* Get a Buffer of 0's and 1's to indicate if point's index is in polygon.
* @param {Number} width - width of coordinates
* @param {Number} height - height of coordinates
* @returns {{buffer: Buffer, count: number, length: number, minX: number, maxX: number, minY: number, maxY: number}}
*/
getBitset(width, height) {
const length = width * height;
const buffer = Buffer.alloc(length, 0);
let count = 0;
let minX = width;
let maxX = 0;
let minY = height;
let maxY = 0;
for (let y = 0, i = 0; y < height; y++) {
for (let x = 0; x < width; x++, i++) {
if (this.containsPoint(x, y) === true) {
minX = Math.min(minX, x);
maxX = Math.max(maxX, x);
minY = Math.min(minY, y);
maxY = Math.max(maxY, y);
buffer[i] = 1;
count++;
}
}
}
return {buffer: buffer, count: count, length: length, minX: minX, maxX: maxX, minY: minY, maxY: maxY};
}
}
/**
*
* @type {PolygonPoints}
*/
module.exports = PolygonPoints;
|
/**
*
* @returns {Number}
|
lib.rs
|
//! `async fn(HttpRequest) -> Result<HttpResponse, Error>`
//!
//! # Overview
//!
//! `tower-http` is a library that provides HTTP-specific middlewares and utilities built on top of
//! [`tower`].
//!
//! All middleware uses the [`http`] and [`http-body`] crates as the HTTP abstractions. That means
//! they're compatible with any library or framework that also uses those crates, such as
//! [`hyper`].
//!
//! # Example server
//!
//! This example shows how to apply middlewares from `tower-http` to a [`Service`] and then run
//! that service using [`hyper`].
//!
//! ```rust,no_run
//! use tower_http::{
//! add_extension::AddExtensionLayer,
//! compression::CompressionLayer,
//! propagate_header::PropagateHeaderLayer,
//! sensitive_header::SetSensitiveRequestHeaderLayer,
//! set_header::SetResponseHeaderLayer,
//! trace::TraceLayer,
//! };
//! use tower::{ServiceBuilder, service_fn, make::Shared};
//! use http::{Request, Response, header::{HeaderName, CONTENT_LENGTH, AUTHORIZATION}};
//! use hyper::{Body, Error, server::Server, service::make_service_fn};
//! use std::{sync::Arc, net::SocketAddr, convert::Infallible};
//! # struct DatabaseConnectionPool;
//! # impl DatabaseConnectionPool {
//! # fn new() -> DatabaseConnectionPool { DatabaseConnectionPool }
//! # }
//! # fn content_length_from_response<B>(_: &http::Response<B>) -> Option<http::HeaderValue> { None }
//!
//! // Our request handler. This is where we would implement the application logic
//! // for responding to HTTP requests...
//! async fn handler(request: Request<Body>) -> Result<Response<Body>, Error> {
//! // ...
//! # todo!()
//! }
//!
//! // Shared state across all request handlers --- in this case, a pool of database connections.
//! struct State {
//! pool: DatabaseConnectionPool,
//! }
//!
//! #[tokio::main]
//! async fn main() {
//! // Construct the shared state.
//! let state = State {
//! pool: DatabaseConnectionPool::new(),
//! };
//!
//! // Use `tower`'s `ServiceBuilder` API to build a stack of `tower` middleware
//! // wrapping our request handler.
//! let service = ServiceBuilder::new()
//! // Mark the `Authorization` request header as sensitive so it doesn't show in logs
//! .layer(SetSensitiveRequestHeaderLayer::new(AUTHORIZATION))
//! // High level logging of requests and responses
//! .layer(TraceLayer::new_for_http())
//! // Share an `Arc<State>` with all requests
//! .layer(AddExtensionLayer::new(Arc::new(state)))
//! // Compress responses
//! .layer(CompressionLayer::new())
//! // Propagate `X-Request-Id`s from requests to responses
//! .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id")))
//! // If the response has a known size set the `Content-Length` header
//! .layer(SetResponseHeaderLayer::overriding(CONTENT_TYPE, content_length_from_response))
//! // Wrap a `Service` in our middleware stack
//! .service_fn(handler);
//!
//! // And run our service using `hyper`
//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
//! Server::bind(&addr)
//! .serve(Shared::new(service))
//! .await
//! .expect("server error");
//! }
//! ```
//!
//! Keep in mind that while this example uses [`hyper`], `tower-http` supports any HTTP
//! client/server implementation that uses the [`http`] and [`http-body`] crates.
//!
//! # Example client
//!
//! `tower-http` middlewares can also be applied to HTTP clients:
//!
//! ```rust,no_run
//! use tower_http::{
//! decompression::DecompressionLayer,
//! set_header::SetRequestHeaderLayer,
//! };
//! use tower::{ServiceBuilder, Service, ServiceExt};
//! use hyper::Body;
//! use http::{Request, Response, HeaderValue, header::USER_AGENT};
//!
//! #[tokio::main]
//! async fn main() {
//! let mut client = ServiceBuilder::new()
//! // Set a `User-Agent` header on all requests
//! .layer(SetRequestHeaderLayer::<_, Body>::overriding(
//! USER_AGENT,
//! HeaderValue::from_static("tower-http demo")
//! ))
//! // Decompress response bodies
//! .layer(DecompressionLayer::new())
//! // Wrap a `hyper::Client` in our middleware stack
//! .service(hyper::Client::new());
//!
//! // Make a request
//! let request = Request::builder()
//! .uri("http://example.com")
//! .body(Body::empty())
//! .unwrap();
//!
//! let response = client
//! .ready()
//! .await
//! .unwrap()
//! .call(request)
//! .await
//! .unwrap();
//! }
//! ```
//!
//! # Feature Flags
//!
//! All middleware are disabled by default and can be enabled using [cargo features].
//!
//! For example, to enable the [`Trace`] middleware, add the "trace" feature flag in
//! your `Cargo.toml`:
//!
//! ```toml
//! tower-http = { version = "0.1.0", features = ["trace"] }
//! ```
//!
//! You can use `"full"` to enable everything:
//!
//! ```toml
//! tower-http = { version = "0.1.0", features = ["full"] }
//! ```
//!
//! # Getting Help
//!
//! First, see if the answer to your question can be found in the API documentation. If the answer
//! is not there, there is an active community in the [Tower Discord channel][chat]. We would be
//! happy to try to answer your question. If that doesn't work, try opening an [issue] with the
//! question.
//!
//! [`tower`]: https://crates.io/crates/tower
//! [`http`]: https://crates.io/crates/http
//! [`http-body`]: https://crates.io/crates/http-body
//! [`hyper`]: https://crates.io/crates/hyper
//! [cargo features]: https://doc.rust-lang.org/cargo/reference/features.html
//! [`AddExtension`]: crate::add_extension::AddExtension
//! [`Service`]: https://docs.rs/tower/latest/tower/trait.Service.html
//! [chat]: https://discord.gg/tokio
//! [issue]: https://github.com/tower-rs/tower-http/issues/new
#![doc(html_root_url = "https://docs.rs/tower-http/0.1.0")]
#![warn(
clippy::all,
clippy::dbg_macro,
clippy::todo,
clippy::empty_enum,
clippy::enum_glob_use,
clippy::pub_enum_variant_names,
clippy::mem_forget,
clippy::unused_self,
clippy::filter_map_next,
clippy::needless_continue,
clippy::needless_borrow,
clippy::match_wildcard_for_single_variants,
clippy::if_let_mutex,
clippy::mismatched_target_os,
clippy::await_holding_lock,
clippy::match_on_vec_items,
clippy::imprecise_flops,
clippy::suboptimal_flops,
clippy::lossy_float_literal,
clippy::rest_pat_in_fully_bound_structs,
clippy::fn_params_excessive_bools,
clippy::exit,
clippy::inefficient_to_string,
clippy::linkedlist,
clippy::macro_use_imports,
clippy::option_option,
clippy::verbose_file_reads,
clippy::unnested_or_patterns,
rust_2018_idioms,
future_incompatible,
nonstandard_style,
missing_docs
)]
#![deny(unreachable_pub, broken_intra_doc_links, private_in_public)]
#![allow(
elided_lifetimes_in_paths,
// TODO: Remove this once the MSRV bumps to 1.42.0 or above.
clippy::match_like_matches_macro,
clippy::type_complexity
)]
#![forbid(unsafe_code)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(test, allow(clippy::float_cmp))]
#[macro_use]
pub(crate) mod macros;
#[cfg(feature = "set-header")]
#[cfg_attr(docsrs, doc(cfg(feature = "set-header")))]
pub mod set_header;
#[cfg(feature = "propagate-header")]
#[cfg_attr(docsrs, doc(cfg(feature = "propagate-header")))]
pub mod propagate_header;
#[cfg(feature = "compression")]
#[cfg_attr(docsrs, doc(cfg(feature = "compression")))]
pub mod compression;
#[cfg(feature = "add-extension")]
#[cfg_attr(docsrs, doc(cfg(feature = "add-extension")))]
pub mod add_extension;
#[cfg(feature = "sensitive-header")]
#[cfg_attr(docsrs, doc(cfg(feature = "sensitive-header")))]
pub mod sensitive_header;
#[cfg(feature = "decompression")]
#[cfg_attr(docsrs, doc(cfg(feature = "decompression")))]
pub mod decompression;
#[cfg(any(feature = "compression", feature = "decompression"))]
mod compression_utils;
#[cfg(feature = "map-response-body")]
#[cfg_attr(docsrs, doc(cfg(feature = "map-response-body")))]
pub mod map_response_body;
#[cfg(feature = "map-request-body")]
#[cfg_attr(docsrs, doc(cfg(feature = "map-request-body")))]
pub mod map_request_body;
#[cfg(feature = "trace")]
#[cfg_attr(docsrs, doc(cfg(feature = "trace")))]
pub mod trace;
#[cfg(feature = "follow-redirect")]
#[cfg_attr(docsrs, doc(cfg(feature = "follow-redirect")))]
pub mod follow_redirect;
pub mod classify;
pub mod services;
/// Error type containing either a body error or an IO error.
///
/// This type is used to combine errors produced by response bodies with compression or
/// decompression applied. The body itself can produce errors of type `E` whereas compression or
/// decompression can produce [`io::Error`]s.
///
/// [`io::Error`]: std::io::Error
#[cfg(any(feature = "compression", feature = "decompression"))]
#[cfg_attr(
docsrs,
doc(cfg(any(feature = "compression", feature = "decompression")))
)]
#[derive(Debug)]
pub enum
|
<E> {
/// Errors produced by the body.
Body(E),
/// IO errors produced by compression or decompression.
Io(std::io::Error),
}
#[cfg(any(feature = "compression", feature = "decompression"))]
impl<E> std::fmt::Display for BodyOrIoError<E>
where
E: std::fmt::Display,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BodyOrIoError::Io(inner) => inner.fmt(f),
BodyOrIoError::Body(inner) => inner.fmt(f),
}
}
}
#[cfg(any(feature = "compression", feature = "decompression"))]
impl<E> std::error::Error for BodyOrIoError<E>
where
E: std::error::Error,
{
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
BodyOrIoError::Io(inner) => inner.source(),
BodyOrIoError::Body(inner) => inner.source(),
}
}
}
/// The latency unit used to report latencies by middlewares.
#[non_exhaustive]
#[derive(Copy, Clone, Debug)]
pub enum LatencyUnit {
/// Use milliseconds.
Millis,
/// Use microseconds.
Micros,
/// Use nanoseconds.
Nanos,
}
|
BodyOrIoError
|
_non_string_enums_client_enums.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
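For example, with the enums below, ``IntEnum.four_hundred_three`` and
``IntEnum["four_hundred_three"]`` both resolve to the member defined as
``FOUR_HUNDRED_THREE``.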
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class FloatEnum(with_metaclass(_CaseInsensitiveEnumMeta, float, Enum)):
"""List of float enums
"""
TWO_HUNDRED4 = 200.4
FOUR_HUNDRED_THREE4 = 403.4
FOUR_HUNDRED_FIVE3 = 405.3
FOUR_HUNDRED_SIX2 = 406.2
FOUR_HUNDRED_TWENTY_NINE1 = 429.1
class IntEnum(with_metaclass(_CaseInsensitiveEnumMeta, int, Enum)):
"""List of integer enums
|
FOUR_HUNDRED_THREE = 403
FOUR_HUNDRED_FIVE = 405
FOUR_HUNDRED_SIX = 406
FOUR_HUNDRED_TWENTY_NINE = 429
|
"""
TWO_HUNDRED = 200
|
blinky_random.rs
|
#![deny(warnings)]
#![no_main]
#![no_std]
use cortex_m_rt::entry;
use stm32h7xx_hal::{pac, prelude::*};
#[macro_use]
mod utilities;
use log::info;
#[entry]
fn
|
() -> ! {
utilities::logger::init();
let cp = cortex_m::Peripherals::take().unwrap();
let dp = pac::Peripherals::take().expect("cannot take peripherals");
// Constrain and Freeze power
info!("Setup PWR... ");
let pwr = dp.PWR.constrain();
let pwrcfg = example_power!(pwr).freeze();
// Constrain and Freeze clock
info!("Setup RCC... ");
let rcc = dp.RCC.constrain();
let ccdr = rcc.sys_ck(100.mhz()).freeze(pwrcfg, &dp.SYSCFG);
info!("");
info!("stm32h7xx-hal example - Random Blinky");
info!("");
let gpioe = dp.GPIOE.split(ccdr.peripheral.GPIOE);
// Configure PE1 as output.
let mut led = gpioe.pe1.into_push_pull_output();
// Get the delay provider.
let mut delay = cp.SYST.delay(ccdr.clocks);
// Get true random number generator
let mut rng = dp.RNG.constrain(ccdr.peripheral.RNG, &ccdr.clocks);
let mut random_bytes = [0u16; 3];
match rng.fill(&mut random_bytes) {
Ok(()) => info!("random bytes: {:?}", random_bytes),
Err(err) => info!("RNG error: {:?}", err),
}
loop {
let random_element: Result<u32, _> = rng.gen();
match random_element {
Ok(random) => {
// NOTE: the result of the expression `random % 200`
// is biased. This bias is called "modulo-bias". It is
// acceptable here for simplicity, but may not be
// acceptable for your application.
let period = random % 200_u32;
led.toggle();
delay.delay_ms(period);
}
Err(err) => info!("RNG error: {:?}", err),
}
}
}
|
main
|
span_utils.py
|
from typing import Callable, List, Set, Tuple, TypeVar, Optional
import warnings
from allennlp.common.checks import ConfigurationError
from allennlp.data.tokenizers import Token
TypedSpan = Tuple[int, Tuple[int, int]]
TypedStringSpan = Tuple[str, Tuple[int, int]]
class InvalidTagSequence(Exception):
def __init__(self, tag_sequence=None):
super().__init__()
self.tag_sequence = tag_sequence
def __str__(self):
return " ".join(self.tag_sequence)
T = TypeVar("T", str, Token)
def enumerate_spans(
sentence: List[T],
offset: int = 0,
max_span_width: int = None,
min_span_width: int = 1,
filter_function: Callable[[List[T]], bool] = None,
) -> List[Tuple[int, int]]:
"""
Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
Additionally, you can provide a maximum and minimum span width, which will be used
to exclude spans outside of this range.
Finally, you can provide a function mapping `List[T] -> bool`, which will
be applied to every span to decide whether that span should be included. This
allows filtering by length, regex matches, pos tags or any Spacy `Token`
attributes, for example.
# Parameters
sentence : `List[T]`, required.
The sentence to generate spans for. The type is generic, as this function
can be used with strings, or Spacy `Tokens` or other sequences.
offset : `int`, optional (default = `0`)
A numeric offset to add to all span start and end indices. This is helpful
if the sentence is part of a larger structure, such as a document, which
the indices need to respect.
max_span_width : `int`, optional (default = `None`)
The maximum length of spans which should be included. Defaults to len(sentence).
min_span_width : `int`, optional (default = `1`)
The minimum length of spans which should be included. Defaults to 1.
filter_function : `Callable[[List[T]], bool]`, optional (default = `None`)
A function mapping sequences of the passed type T to a boolean value.
If `True`, the span is included in the returned spans from the
        sentence, otherwise it is excluded.
"""
max_span_width = max_span_width or len(sentence)
filter_function = filter_function or (lambda x: True)
spans: List[Tuple[int, int]] = []
for start_index in range(len(sentence)):
last_end_index = min(start_index + max_span_width, len(sentence))
first_end_index = min(start_index + min_span_width - 1, len(sentence))
for end_index in range(first_end_index, last_end_index):
start = offset + start_index
end = offset + end_index
# add 1 to end index because span indices are inclusive.
if filter_function(sentence[slice(start_index, end_index + 1)]):
spans.append((start, end))
return spans
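# Worked example (illustrative, hand-derived from the function above): for a
# three-token sentence with max_span_width=2, enumerate_spans returns the
# inclusive spans [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)].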
def bio_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to BIO tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
    Ill-formed spans are also included (i.e. those which do not start with a "B-LABEL"),
as otherwise it is possible to get a perfect precision score whilst still predicting
ill-formed spans in addition to the correct spans. This function works properly when
the spans are unlabeled (i.e., your labels are simply "B", "I", and "O").
# Parameters
tag_sequence : `List[str]`, required.
The integer class labels for a sequence.
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
Note that the label `does not` contain any BIO tag prefixes.
"""
classes_to_ignore = classes_to_ignore or []
spans: Set[Tuple[str, Tuple[int, int]]] = set()
span_start = 0
span_end = 0
active_conll_tag = None
for index, string_tag in enumerate(tag_sequence):
# Actual BIO tag.
bio_tag = string_tag[0]
if bio_tag not in ["B", "I", "O"]:
raise InvalidTagSequence(tag_sequence)
conll_tag = string_tag[2:]
if bio_tag == "O" or conll_tag in classes_to_ignore:
# The span has ended.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = None
# We don't care about tags we are
# told to ignore, so we do nothing.
continue
elif bio_tag == "B":
# We are entering a new span; reset indices
# and active tag to new span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = conll_tag
span_start = index
span_end = index
elif bio_tag == "I" and conll_tag == active_conll_tag:
# We're inside a span.
span_end += 1
else:
# This is the case the bio label is an "I", but either:
# 1) the span hasn't started - i.e. an ill formed span.
# 2) The span is an I tag for a different conll annotation.
# We'll process the previous span if it exists, but also
# include this span. This is important, because otherwise,
# a model may get a perfect F1 score whilst still including
# false positive ill-formed spans.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = conll_tag
span_start = index
span_end = index
# Last token might have been a part of a valid span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
return list(spans)
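# Worked example (illustrative, hand-derived from the function above):
# bio_tags_to_spans(["B-PER", "I-PER", "O", "B-LOC"]) yields the spans
# ("PER", (0, 1)) and ("LOC", (3, 3)), returned as a list in unspecified order
# because they are collected in a set.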
def iob1_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to IOB1 tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
Ill-formed spans are also included (i.e., those where "B-LABEL" is not preceded
by "I-LABEL" or "B-LABEL").
# Parameters
tag_sequence : `List[str]`, required.
The integer class labels for a sequence.
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
Note that the label `does not` contain any BIO tag prefixes.
"""
classes_to_ignore = classes_to_ignore or []
spans: Set[Tuple[str, Tuple[int, int]]] = set()
span_start = 0
span_end = 0
active_conll_tag = None
prev_bio_tag = None
prev_conll_tag = None
for index, string_tag in enumerate(tag_sequence):
curr_bio_tag = string_tag[0]
curr_conll_tag = string_tag[2:]
if curr_bio_tag not in ["B", "I", "O"]:
raise InvalidTagSequence(tag_sequence)
if curr_bio_tag == "O" or curr_conll_tag in classes_to_ignore:
# The span has ended.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = None
elif _iob1_start_of_chunk(prev_bio_tag, prev_conll_tag, curr_bio_tag, curr_conll_tag):
# We are entering a new span; reset indices
# and active tag to new span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
active_conll_tag = curr_conll_tag
span_start = index
span_end = index
else:
# bio_tag == "I" and curr_conll_tag == active_conll_tag
# We're continuing a span.
span_end += 1
prev_bio_tag = string_tag[0]
prev_conll_tag = string_tag[2:]
# Last token might have been a part of a valid span.
if active_conll_tag is not None:
spans.add((active_conll_tag, (span_start, span_end)))
return list(spans)
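# Worked example (illustrative, hand-derived from the function above):
# iob1_tags_to_spans(["I-PER", "I-PER", "B-PER", "O"]) yields ("PER", (0, 1))
# and ("PER", (2, 2)); in IOB1 a "B-" tag only marks a span that immediately
# follows another span of the same type.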
def _iob1_start_of_chunk(
prev_bio_tag: Optional[str],
prev_conll_tag: Optional[str],
curr_bio_tag: str,
curr_conll_tag: str,
) -> bool:
if curr_bio_tag == "B":
return True
if curr_bio_tag == "I" and prev_bio_tag == "O":
return True
if curr_bio_tag != "O" and prev_conll_tag != curr_conll_tag:
return True
return False
def bioul_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to BIOUL tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
Ill-formed spans are not allowed and will raise `InvalidTagSequence`.
This function works properly when the spans are unlabeled (i.e., your labels are
simply "B", "I", "O", "U", and "L").
# Parameters
tag_sequence : `List[str]`, required.
The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
"""
spans = []
classes_to_ignore = classes_to_ignore or []
index = 0
while index < len(tag_sequence):
label = tag_sequence[index]
if label[0] == "U":
spans.append((label.partition("-")[2], (index, index)))
elif label[0] == "B":
start = index
while label[0] != "L":
index += 1
if index >= len(tag_sequence):
raise InvalidTagSequence(tag_sequence)
label = tag_sequence[index]
if not (label[0] == "I" or label[0] == "L"):
raise InvalidTagSequence(tag_sequence)
spans.append((label.partition("-")[2], (start, index)))
else:
if label != "O":
raise InvalidTagSequence(tag_sequence)
index += 1
return [span for span in spans if span[0] not in classes_to_ignore]
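# Worked example (illustrative, hand-derived from the function above):
# bioul_tags_to_spans(["B-PER", "L-PER", "O", "U-LOC"]) returns
# [("PER", (0, 1)), ("LOC", (3, 3))].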
def
|
(tag_sequence: List[str]) -> List[str]:
warnings.warn(
"iob1_to_bioul has been replaced with 'to_bioul' to allow more encoding options.",
FutureWarning,
)
return to_bioul(tag_sequence)
def to_bioul(tag_sequence: List[str], encoding: str = "IOB1") -> List[str]:
"""
Given a tag sequence encoded with IOB1 labels, recode to BIOUL.
In the IOB1 scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of span immediately following another
span of the same type.
In the BIO scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of a span.
# Parameters
tag_sequence : `List[str]`, required.
The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
encoding : `str`, optional, (default = `"IOB1"`).
The encoding type to convert from. Must be either "IOB1" or "BIO".
# Returns
bioul_sequence : `List[str]`
        The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
"""
if encoding not in {"IOB1", "BIO"}:
raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.")
def replace_label(full_label, new_label):
# example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
parts = list(full_label.partition("-"))
parts[0] = new_label
return "".join(parts)
def pop_replace_append(in_stack, out_stack, new_label):
# pop the last element from in_stack, replace the label, append
# to out_stack
tag = in_stack.pop()
new_tag = replace_label(tag, new_label)
out_stack.append(new_tag)
def process_stack(stack, out_stack):
# process a stack of labels, add them to out_stack
if len(stack) == 1:
# just a U token
pop_replace_append(stack, out_stack, "U")
else:
# need to code as BIL
recoded_stack = []
pop_replace_append(stack, recoded_stack, "L")
while len(stack) >= 2:
pop_replace_append(stack, recoded_stack, "I")
pop_replace_append(stack, recoded_stack, "B")
recoded_stack.reverse()
out_stack.extend(recoded_stack)
# Process the tag_sequence one tag at a time, adding spans to a stack,
# then recode them.
bioul_sequence = []
stack: List[str] = []
for label in tag_sequence:
# need to make a dict like
# token = {'token': 'Matt', "labels": {'conll2003': "B-PER"}
# 'gold': 'I-PER'}
# where 'gold' is the raw value from the CoNLL data set
if label == "O" and len(stack) == 0:
bioul_sequence.append(label)
elif label == "O" and len(stack) > 0:
# need to process the entries on the stack plus this one
process_stack(stack, bioul_sequence)
bioul_sequence.append(label)
elif label[0] == "I":
# check if the previous type is the same as this one
# if it is then append to stack
            # otherwise this starts a new entity if the type
# is different
if len(stack) == 0:
if encoding == "BIO":
raise InvalidTagSequence(tag_sequence)
stack.append(label)
else:
# check if the previous type is the same as this one
this_type = label.partition("-")[2]
prev_type = stack[-1].partition("-")[2]
if this_type == prev_type:
stack.append(label)
else:
if encoding == "BIO":
raise InvalidTagSequence(tag_sequence)
# a new entity
process_stack(stack, bioul_sequence)
stack.append(label)
elif label[0] == "B":
if len(stack) > 0:
process_stack(stack, bioul_sequence)
stack.append(label)
else:
raise InvalidTagSequence(tag_sequence)
# process the stack
if len(stack) > 0:
process_stack(stack, bioul_sequence)
return bioul_sequence
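# Worked example (illustrative, hand-derived from the function above):
# to_bioul(["I-PER", "I-PER", "O", "I-LOC"], encoding="IOB1") returns
# ["B-PER", "L-PER", "O", "U-LOC"].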
def bmes_tags_to_spans(
tag_sequence: List[str], classes_to_ignore: List[str] = None
) -> List[TypedStringSpan]:
"""
Given a sequence corresponding to BMES tags, extracts spans.
Spans are inclusive and can be of zero length, representing a single word span.
    Ill-formed spans are also included (i.e. those which do not start with a "B-LABEL"),
as otherwise it is possible to get a perfect precision score whilst still predicting
ill-formed spans in addition to the correct spans.
This function works properly when the spans are unlabeled (i.e., your labels are
simply "B", "M", "E" and "S").
# Parameters
tag_sequence : `List[str]`, required.
The integer class labels for a sequence.
classes_to_ignore : `List[str]`, optional (default = `None`).
A list of string class labels `excluding` the bio tag
which should be ignored when extracting spans.
# Returns
spans : `List[TypedStringSpan]`
The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
Note that the label `does not` contain any BIO tag prefixes.
"""
def extract_bmes_tag_label(text):
bmes_tag = text[0]
label = text[2:]
return bmes_tag, label
spans: List[Tuple[str, List[int]]] = []
prev_bmes_tag: Optional[str] = None
for index, tag in enumerate(tag_sequence):
bmes_tag, label = extract_bmes_tag_label(tag)
if bmes_tag in ("B", "S"):
# Regardless of tag, we start a new span when reaching B & S.
spans.append((label, [index, index]))
elif bmes_tag in ("M", "E") and prev_bmes_tag in ("B", "M") and spans[-1][0] == label:
# Only expand the span if
# 1. Valid transition: B/M -> M/E.
# 2. Matched label.
spans[-1][1][1] = index
else:
# Best effort split for invalid span.
spans.append((label, [index, index]))
# update previous BMES tag.
prev_bmes_tag = bmes_tag
classes_to_ignore = classes_to_ignore or []
return [
# to tuple.
(span[0], (span[1][0], span[1][1]))
for span in spans
if span[0] not in classes_to_ignore
]
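# Worked example (illustrative, hand-derived from the function above):
# bmes_tags_to_spans(["B-PER", "M-PER", "E-PER", "S-LOC"]) returns
# [("PER", (0, 2)), ("LOC", (3, 3))].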
|
iob1_to_bioul
|
paths.rs
|
use std::env;
use std::ffi::{OsStr, OsString};
use std::fs::{self, File, OpenOptions};
use std::io;
use std::io::prelude::*;
use std::iter;
use std::path::{Component, Path, PathBuf};
use filetime::FileTime;
use crate::util::errors::{CargoError, CargoResult, CargoResultExt, Internal};
pub fn join_paths<T: AsRef<OsStr>>(paths: &[T], env: &str) -> CargoResult<OsString> {
let err = match env::join_paths(paths.iter()) {
Ok(paths) => return Ok(paths),
Err(e) => e,
};
let paths = paths.iter().map(Path::new).collect::<Vec<_>>();
let err = CargoError::from(err);
let explain = Internal::new(format_err!("failed to join path array: {:?}", paths));
let err = CargoError::from(err.context(explain));
let more_explain = format!(
"failed to join search paths together\n\
Does ${} have an unterminated quote character?",
env
);
Err(err.context(more_explain).into())
}
pub fn dylib_path_envvar() -> &'static str {
if cfg!(windows) {
"PATH"
} else if cfg!(target_os = "macos") {
"DYLD_LIBRARY_PATH"
} else {
"LD_LIBRARY_PATH"
}
}
pub fn dylib_path() -> Vec<PathBuf> {
match env::var_os(dylib_path_envvar()) {
Some(var) => env::split_paths(&var).collect(),
None => Vec::new(),
}
}
pub fn normalize_path(path: &Path) -> PathBuf {
let mut components = path.components().peekable();
let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {
components.next();
PathBuf::from(c.as_os_str())
} else {
PathBuf::new()
};
for component in components {
match component {
Component::Prefix(..) => unreachable!(),
Component::RootDir => {
ret.push(component.as_os_str());
}
Component::CurDir => {}
Component::ParentDir => {
ret.pop();
}
Component::Normal(c) => {
ret.push(c);
}
}
}
ret
}
pub fn without_prefix<'a>(long_path: &'a Path, prefix: &'a Path) -> Option<&'a Path> {
let mut a = long_path.components();
let mut b = prefix.components();
loop {
match b.next() {
Some(y) => match a.next() {
Some(x) if x == y => continue,
_ => return None,
},
None => return Some(a.as_path()),
}
}
}
pub fn resolve_executable(exec: &Path) -> CargoResult<PathBuf> {
if exec.components().count() == 1 {
let paths = env::var_os("PATH").ok_or_else(|| format_err!("no PATH"))?;
let candidates = env::split_paths(&paths).flat_map(|path| {
let candidate = path.join(&exec);
let with_exe = if env::consts::EXE_EXTENSION == "" {
None
} else {
Some(candidate.with_extension(env::consts::EXE_EXTENSION))
};
iter::once(candidate).chain(with_exe)
});
for candidate in candidates {
if candidate.is_file() {
// PATH may have a component like "." in it, so we still need to
// canonicalize.
return Ok(candidate.canonicalize()?);
}
}
bail!("no executable for `{}` found in PATH", exec.display())
} else {
Ok(exec.canonicalize()?)
}
}
pub fn read(path: &Path) -> CargoResult<String> {
match String::from_utf8(read_bytes(path)?) {
Ok(s) => Ok(s),
Err(_) => bail!("path at `{}` was not valid utf-8", path.display()),
}
}
pub fn read_bytes(path: &Path) -> CargoResult<Vec<u8>> {
let res = (|| -> CargoResult<_> {
let mut ret = Vec::new();
let mut f = File::open(path)?;
if let Ok(m) = f.metadata() {
ret.reserve(m.len() as usize + 1);
}
f.read_to_end(&mut ret)?;
Ok(ret)
})()
.chain_err(|| format!("failed to read `{}`", path.display()))?;
Ok(res)
}
pub fn write(path: &Path, contents: &[u8]) -> CargoResult<()> {
(|| -> CargoResult<()> {
let mut f = File::create(path)?;
f.write_all(contents)?;
Ok(())
})()
.chain_err(|| format!("failed to write `{}`", path.display()))?;
Ok(())
}
pub fn write_if_changed<P: AsRef<Path>, C: AsRef<[u8]>>(path: P, contents: C) -> CargoResult<()> {
(|| -> CargoResult<()> {
let contents = contents.as_ref();
let mut f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
let mut orig = Vec::new();
f.read_to_end(&mut orig)?;
if orig != contents {
f.set_len(0)?;
f.seek(io::SeekFrom::Start(0))?;
f.write_all(contents)?;
}
Ok(())
})()
.chain_err(|| format!("failed to write `{}`", path.as_ref().display()))?;
Ok(())
}
pub fn append(path: &Path, contents: &[u8]) -> CargoResult<()> {
(|| -> CargoResult<()> {
let mut f = OpenOptions::new()
.write(true)
.append(true)
.create(true)
.open(path)?;
f.write_all(contents)?;
Ok(())
})()
.chain_err(|| format!("failed to write `{}`", path.display()))?;
Ok(())
}
pub fn mtime(path: &Path) -> CargoResult<FileTime> {
let meta = fs::metadata(path).chain_err(|| format!("failed to stat `{}`", path.display()))?;
Ok(FileTime::from_last_modification_time(&meta))
}
#[cfg(unix)]
pub fn path2bytes(path: &Path) -> CargoResult<&[u8]> {
use std::os::unix::prelude::*;
Ok(path.as_os_str().as_bytes())
}
#[cfg(windows)]
pub fn path2bytes(path: &Path) -> CargoResult<&[u8]>
|
#[cfg(unix)]
pub fn bytes2path(bytes: &[u8]) -> CargoResult<PathBuf> {
use std::ffi::OsStr;
use std::os::unix::prelude::*;
Ok(PathBuf::from(OsStr::from_bytes(bytes)))
}
#[cfg(windows)]
pub fn bytes2path(bytes: &[u8]) -> CargoResult<PathBuf> {
use std::str;
match str::from_utf8(bytes) {
Ok(s) => Ok(PathBuf::from(s)),
Err(..) => Err(format_err!("invalid non-unicode path")),
}
}
pub fn ancestors(path: &Path) -> PathAncestors {
PathAncestors::new(path)
}
pub struct PathAncestors<'a> {
current: Option<&'a Path>,
stop_at: Option<PathBuf>,
}
impl<'a> PathAncestors<'a> {
fn new(path: &Path) -> PathAncestors {
PathAncestors {
current: Some(path),
//HACK: avoid reading `~/.cargo/config` when testing Cargo itself.
stop_at: env::var("__CARGO_TEST_ROOT").ok().map(PathBuf::from),
}
}
}
impl<'a> Iterator for PathAncestors<'a> {
type Item = &'a Path;
fn next(&mut self) -> Option<&'a Path> {
if let Some(path) = self.current {
self.current = path.parent();
if let Some(ref stop_at) = self.stop_at {
if path == stop_at {
self.current = None;
}
}
Some(path)
} else {
None
}
}
}
pub fn remove_dir_all<P: AsRef<Path>>(p: P) -> CargoResult<()> {
_remove_dir_all(p.as_ref())
}
fn _remove_dir_all(p: &Path) -> CargoResult<()> {
if p.symlink_metadata()?.file_type().is_symlink() {
return remove_file(p);
}
let entries = p
.read_dir()
.chain_err(|| format!("failed to read directory `{}`", p.display()))?;
for entry in entries {
let entry = entry?;
let path = entry.path();
if entry.file_type()?.is_dir() {
remove_dir_all(&path)?;
} else {
remove_file(&path)?;
}
}
remove_dir(&p)
}
pub fn remove_dir<P: AsRef<Path>>(p: P) -> CargoResult<()> {
_remove_dir(p.as_ref())
}
fn _remove_dir(p: &Path) -> CargoResult<()> {
fs::remove_dir(p).chain_err(|| format!("failed to remove directory `{}`", p.display()))?;
Ok(())
}
pub fn remove_file<P: AsRef<Path>>(p: P) -> CargoResult<()> {
_remove_file(p.as_ref())
}
fn _remove_file(p: &Path) -> CargoResult<()> {
let mut err = match fs::remove_file(p) {
Ok(()) => return Ok(()),
Err(e) => e,
};
if err.kind() == io::ErrorKind::PermissionDenied && set_not_readonly(p).unwrap_or(false) {
match fs::remove_file(p) {
Ok(()) => return Ok(()),
Err(e) => err = e,
}
}
Err(err).chain_err(|| format!("failed to remove file `{}`", p.display()))?;
Ok(())
}
fn set_not_readonly(p: &Path) -> io::Result<bool> {
let mut perms = p.metadata()?.permissions();
if !perms.readonly() {
return Ok(false);
}
perms.set_readonly(false);
fs::set_permissions(p, perms)?;
Ok(true)
}
|
{
match path.as_os_str().to_str() {
Some(s) => Ok(s.as_bytes()),
None => Err(format_err!("invalid non-unicode path: {}", path.display())),
}
}
|
index.ts
|
export {ChartReport} from './chartReport';
|
||
default.rs
|
// Copyright (C) 2021 Paolo Jovon <[email protected]>
// SPDX-License-Identifier: Apache-2.0
use libktx_rs::{
sources::{Ktx1CreateInfo, Ktx2CreateInfo},
Texture,
};
#[test]
fn create_default_ktx1() {
let texture = Texture::new(Ktx1CreateInfo::default()).expect("a default KTX1 texture");
// 1x1 RGBA8 texel
assert_eq!(texture.element_size(), 4);
assert_eq!(texture.row_pitch(0), 4);
assert_eq!(texture.data_size(), 4);
texture
.iterate_levels(|mip, face, width, height, depth, pixel_data| {
dbg!(mip, face, width, height, depth, pixel_data);
Ok(())
})
.expect("mip/face iteration to succeed");
}
#[test]
fn create_default_ktx2() {
let texture = Texture::new(Ktx2CreateInfo::default()).expect("a default KTX2 texture");
// 1x1 RGBA8 texel
assert_eq!(texture.element_size(), 4);
assert_eq!(texture.row_pitch(0), 4);
assert_eq!(texture.data_size(), 4);
texture
.iterate_levels(|mip, face, width, height, depth, pixel_data| {
dbg!(mip, face, width, height, depth, pixel_data);
Ok(())
})
.expect("mip/face iteration to succeed");
|
}
| |
move-4-unique.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax)]
struct Triple {a: isize, b: isize, c: isize}
fn test(foo: Box<Triple>) -> Box<Triple> {
let foo = foo;
let bar = foo;
let baz = bar;
let quux = baz;
return quux;
|
let y = test(x);
assert!((y.c == 3));
}
|
}
pub fn main() {
let x = box Triple{a: 1, b: 2, c: 3};
|
shield_check.rs
|
use seed::{prelude::*, *};
use super::{outline_trait_private::OutlinePrivate, Outline};
pub struct ShieldCheck;
impl OutlinePrivate for ShieldCheck {
fn base<T>(classes: impl ToClasses) -> Node<T> {
svg![
C![classes],
|
),
path![attrs!(
At::from("d") => "M9 12l2 2 4-4m5.618-4.016A11.955 11.955 0 0112 2.944a11.955 11.955 0 01-8.618 3.04A12.02 12.02 0 003 9c0 5.591 3.824 10.29 9 11.622 5.176-1.332 9-6.03 9-11.622 0-1.042-.133-2.052-.382-3.016z",
At::from("stroke-linecap") => "round",
At::from("stroke-linejoin") => "round",
At::from("stroke-width") => "2",
),],
]
}
}
impl Outline for ShieldCheck {}
|
attrs!(
At::from("fill") => "none",
At::from("stroke") => "currentColor",
At::from("viewBox") => "0 0 24 24",
|
__init__.py
|
import dataclasses
import enum
import inspect
import json
import struct
import sys
import typing
from abc import ABC
from base64 import b64decode, b64encode
from datetime import datetime, timedelta, timezone
from dateutil.parser import isoparse
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Optional,
Set,
Tuple,
Type,
Union,
get_type_hints,
)
from ._types import T
from .casing import camel_case, safe_snake_case, snake_case
from .grpc.grpclib_client import ServiceStub
# Proto 3 data types
TYPE_ENUM = "enum"
TYPE_BOOL = "bool"
TYPE_INT32 = "int32"
TYPE_INT64 = "int64"
TYPE_UINT32 = "uint32"
TYPE_UINT64 = "uint64"
TYPE_SINT32 = "sint32"
TYPE_SINT64 = "sint64"
TYPE_FLOAT = "float"
TYPE_DOUBLE = "double"
TYPE_FIXED32 = "fixed32"
TYPE_SFIXED32 = "sfixed32"
TYPE_FIXED64 = "fixed64"
TYPE_SFIXED64 = "sfixed64"
TYPE_STRING = "string"
TYPE_BYTES = "bytes"
TYPE_MESSAGE = "message"
TYPE_MAP = "map"
# Fields that use a fixed amount of space (4 or 8 bytes)
FIXED_TYPES = [
TYPE_FLOAT,
TYPE_DOUBLE,
TYPE_FIXED32,
TYPE_SFIXED32,
TYPE_FIXED64,
TYPE_SFIXED64,
]
# Fields that are numerical 64-bit types
INT_64_TYPES = [TYPE_INT64, TYPE_UINT64, TYPE_SINT64, TYPE_FIXED64, TYPE_SFIXED64]
# Fields that are efficiently packed when serialized as repeated fields
PACKED_TYPES = [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
TYPE_SINT32,
TYPE_SINT64,
TYPE_FLOAT,
TYPE_DOUBLE,
TYPE_FIXED32,
TYPE_SFIXED32,
TYPE_FIXED64,
TYPE_SFIXED64,
]
# Wire types
# https://developers.google.com/protocol-buffers/docs/encoding#structure
WIRE_VARINT = 0
WIRE_FIXED_64 = 1
WIRE_LEN_DELIM = 2
WIRE_FIXED_32 = 5
# Mappings of which Proto 3 types correspond to which wire types.
WIRE_VARINT_TYPES = [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
TYPE_SINT32,
TYPE_SINT64,
]
WIRE_FIXED_32_TYPES = [TYPE_FLOAT, TYPE_FIXED32, TYPE_SFIXED32]
WIRE_FIXED_64_TYPES = [TYPE_DOUBLE, TYPE_FIXED64, TYPE_SFIXED64]
WIRE_LEN_DELIM_TYPES = [TYPE_STRING, TYPE_BYTES, TYPE_MESSAGE, TYPE_MAP]
# Protobuf datetimes start at the Unix Epoch in 1970 in UTC.
def datetime_default_gen() -> datetime:
return datetime(1970, 1, 1, tzinfo=timezone.utc)
DATETIME_ZERO = datetime_default_gen()
class Casing(enum.Enum):
"""Casing constants for serialization."""
    CAMEL = camel_case #: A camelCase serialization function.
    SNAKE = snake_case #: A snake_case serialization function.
PLACEHOLDER: Any = object()
@dataclasses.dataclass(frozen=True)
class FieldMetadata:
"""Stores internal metadata used for parsing & serialization."""
# Protobuf field number
number: int
# Protobuf type name
proto_type: str
# Map information if the proto_type is a map
map_types: Optional[Tuple[str, str]] = None
# Groups several "one-of" fields together
group: Optional[str] = None
# Describes the wrapped type (e.g. when using google.protobuf.BoolValue)
wraps: Optional[str] = None
@staticmethod
def get(field: dataclasses.Field) -> "FieldMetadata":
"""Returns the field metadata for a dataclass field."""
return field.metadata["betterproto"]
def dataclass_field(
number: int,
proto_type: str,
*,
map_types: Optional[Tuple[str, str]] = None,
group: Optional[str] = None,
wraps: Optional[str] = None,
) -> dataclasses.Field:
"""Creates a dataclass field with attached protobuf metadata."""
return dataclasses.field(
default=PLACEHOLDER,
metadata={
"betterproto": FieldMetadata(number, proto_type, map_types, group, wraps)
},
)
# Note: the fields below return `Any` to prevent type errors in the generated
# data classes since the types won't match with `Field` and they get swapped
# out at runtime. The generated dataclass variables are still typed correctly.
def enum_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_ENUM, group=group)
def bool_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_BOOL, group=group)
def int32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_INT32, group=group)
def int64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_INT64, group=group)
def uint32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_UINT32, group=group)
def uint64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_UINT64, group=group)
def sint32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SINT32, group=group)
def sint64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SINT64, group=group)
def float_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FLOAT, group=group)
def double_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_DOUBLE, group=group)
def fixed32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FIXED32, group=group)
def fixed64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_FIXED64, group=group)
def sfixed32_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SFIXED32, group=group)
def sfixed64_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_SFIXED64, group=group)
def string_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_STRING, group=group)
def bytes_field(number: int, group: Optional[str] = None) -> Any:
return dataclass_field(number, TYPE_BYTES, group=group)
def message_field(
number: int, group: Optional[str] = None, wraps: Optional[str] = None
) -> Any:
return dataclass_field(number, TYPE_MESSAGE, group=group, wraps=wraps)
def map_field(
number: int, key_type: str, value_type: str, group: Optional[str] = None
) -> Any:
return dataclass_field(
number, TYPE_MAP, map_types=(key_type, value_type), group=group
)
class Enum(enum.IntEnum):
"""
    The base class for protobuf enumerations; all generated enumerations will inherit
    from this. Bases :class:`enum.IntEnum`.
"""
@classmethod
def from_string(cls, name: str) -> "Enum":
"""Return the value which corresponds to the string name.
Parameters
-----------
name: :class:`str`
The name of the enum member to get
Raises
-------
:exc:`ValueError`
The member was not found in the Enum.
"""
try:
return cls._member_map_[name]
except KeyError as e:
raise ValueError(f"Unknown value {name} for enum {cls.__name__}") from e
def _pack_fmt(proto_type: str) -> str:
"""Returns a little-endian format string for reading/writing binary."""
return {
TYPE_DOUBLE: "<d",
TYPE_FLOAT: "<f",
TYPE_FIXED32: "<I",
TYPE_FIXED64: "<Q",
TYPE_SFIXED32: "<i",
TYPE_SFIXED64: "<q",
}[proto_type]
def encode_varint(value: int) -> bytes:
"""Encodes a single varint value for serialization."""
b: List[int] = []
if value < 0:
value += 1 << 64
bits = value & 0x7F
value >>= 7
while value:
b.append(0x80 | bits)
bits = value & 0x7F
value >>= 7
return bytes(b + [bits])
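# Worked example (illustrative, hand-derived from the function above):
# encode_varint(300) == b"\xac\x02": the low 7 bits (0x2c) are emitted first
# with the continuation bit set, followed by the remaining bits (0x02).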
def _preprocess_single(proto_type: str, wraps: str, value: Any) -> bytes:
"""Adjusts values before serialization."""
if proto_type in [
TYPE_ENUM,
TYPE_BOOL,
TYPE_INT32,
TYPE_INT64,
TYPE_UINT32,
TYPE_UINT64,
]:
return encode_varint(value)
elif proto_type in [TYPE_SINT32, TYPE_SINT64]:
# Handle zig-zag encoding.
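        # (Illustrative: zig-zag maps 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...,
        # so signed values with small magnitude stay small on the wire.)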
return encode_varint(value << 1 if value >= 0 else (value << 1) ^ (~0))
elif proto_type in FIXED_TYPES:
return struct.pack(_pack_fmt(proto_type), value)
elif proto_type == TYPE_STRING:
return value.encode("utf-8")
elif proto_type == TYPE_MESSAGE:
if isinstance(value, datetime):
# Convert the `datetime` to a timestamp message.
seconds = int(value.timestamp())
nanos = int(value.microsecond * 1e3)
value = _Timestamp(seconds=seconds, nanos=nanos)
elif isinstance(value, timedelta):
# Convert the `timedelta` to a duration message.
total_ms = value // timedelta(microseconds=1)
seconds = int(total_ms / 1e6)
nanos = int((total_ms % 1e6) * 1e3)
value = _Duration(seconds=seconds, nanos=nanos)
elif wraps:
if value is None:
return b""
value = _get_wrapper(wraps)(value=value)
return bytes(value)
return value
def _serialize_single(
field_number: int,
proto_type: str,
value: Any,
*,
serialize_empty: bool = False,
wraps: str = "",
) -> bytes:
"""Serializes a single field and value."""
value = _preprocess_single(proto_type, wraps, value)
output = bytearray()
if proto_type in WIRE_VARINT_TYPES:
key = encode_varint(field_number << 3)
output += key + value
elif proto_type in WIRE_FIXED_32_TYPES:
key = encode_varint((field_number << 3) | 5)
output += key + value
elif proto_type in WIRE_FIXED_64_TYPES:
key = encode_varint((field_number << 3) | 1)
output += key + value
elif proto_type in WIRE_LEN_DELIM_TYPES:
if len(value) or serialize_empty or wraps:
key = encode_varint((field_number << 3) | 2)
output += key + encode_varint(len(value)) + value
else:
raise NotImplementedError(proto_type)
return bytes(output)
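# Worked example (illustrative, hand-derived from the function above):
# _serialize_single(1, TYPE_INT32, 150) == b"\x08\x96\x01", where the key
# 0x08 is (field number 1 << 3) | wire type 0 and b"\x96\x01" is varint(150).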
def decode_varint(buffer: bytes, pos: int) -> Tuple[int, int]:
"""
Decode a single varint value from a byte buffer. Returns the value and the
new position in the buffer.
"""
result = 0
shift = 0
while 1:
b = buffer[pos]
result |= (b & 0x7F) << shift
pos += 1
if not (b & 0x80):
return result, pos
shift += 7
if shift >= 64:
raise ValueError("Too many bytes when decoding varint.")
@dataclasses.dataclass(frozen=True)
class ParsedField:
number: int
wire_type: int
value: Any
raw: bytes
def parse_fields(value: bytes) -> Generator[ParsedField, None, None]:
i = 0
while i < len(value):
start = i
num_wire, i = decode_varint(value, i)
number = num_wire >> 3
wire_type = num_wire & 0x7
decoded: Any = None
if wire_type == WIRE_VARINT:
decoded, i = decode_varint(value, i)
elif wire_type == WIRE_FIXED_64:
decoded, i = value[i : i + 8], i + 8
elif wire_type == WIRE_LEN_DELIM:
length, i = decode_varint(value, i)
decoded = value[i : i + length]
i += length
elif wire_type == WIRE_FIXED_32:
decoded, i = value[i : i + 4], i + 4
yield ParsedField(
number=number, wire_type=wire_type, value=decoded, raw=value[start:i]
)
class ProtoClassMetadata:
__slots__ = (
"oneof_group_by_field",
"oneof_field_by_group",
"default_gen",
"cls_by_field",
"field_name_by_number",
"meta_by_field_name",
"sorted_field_names",
)
oneof_group_by_field: Dict[str, str]
oneof_field_by_group: Dict[str, Set[dataclasses.Field]]
field_name_by_number: Dict[int, str]
meta_by_field_name: Dict[str, FieldMetadata]
sorted_field_names: Tuple[str, ...]
default_gen: Dict[str, Callable[[], Any]]
cls_by_field: Dict[str, Type]
def __init__(self, cls: Type["Message"]):
by_field = {}
by_group: Dict[str, Set] = {}
by_field_name = {}
by_field_number = {}
fields = dataclasses.fields(cls)
for field in fields:
meta = FieldMetadata.get(field)
if meta.group:
# This is part of a one-of group.
by_field[field.name] = meta.group
by_group.setdefault(meta.group, set()).add(field)
by_field_name[field.name] = meta
by_field_number[meta.number] = field.name
self.oneof_group_by_field = by_field
self.oneof_field_by_group = by_group
self.field_name_by_number = by_field_number
self.meta_by_field_name = by_field_name
self.sorted_field_names = tuple(
by_field_number[number] for number in sorted(by_field_number)
)
self.default_gen = self._get_default_gen(cls, fields)
self.cls_by_field = self._get_cls_by_field(cls, fields)
@staticmethod
def _get_default_gen(
cls: Type["Message"], fields: List[dataclasses.Field]
) -> Dict[str, Callable[[], Any]]:
return {field.name: cls._get_field_default_gen(field) for field in fields}
@staticmethod
def _get_cls_by_field(
cls: Type["Message"], fields: List[dataclasses.Field]
) -> Dict[str, Type]:
field_cls = {}
for field in fields:
meta = FieldMetadata.get(field)
if meta.proto_type == TYPE_MAP:
assert meta.map_types
kt = cls._cls_for(field, index=0)
vt = cls._cls_for(field, index=1)
field_cls[field.name] = dataclasses.make_dataclass(
"Entry",
[
("key", kt, dataclass_field(1, meta.map_types[0])),
("value", vt, dataclass_field(2, meta.map_types[1])),
],
bases=(Message,),
)
field_cls[f"{field.name}.value"] = vt
else:
field_cls[field.name] = cls._cls_for(field)
return field_cls
class Message(ABC):
"""
    The base class for protobuf messages; all generated messages will inherit from
this. This class registers the message fields which are used by the serializers and
parsers to go between the Python, binary and JSON representations of the message.
.. container:: operations
.. describe:: bytes(x)
Calls :meth:`__bytes__`.
.. describe:: bool(x)
Calls :meth:`__bool__`.
"""
_serialized_on_wire: bool
_unknown_fields: bytes
_group_current: Dict[str, str]
def __post_init__(self) -> None:
# Keep track of whether every field was default
all_sentinel = True
# Set current field of each group after `__init__` has already been run.
group_current: Dict[str, Optional[str]] = {}
for field_name, meta in self._betterproto.meta_by_field_name.items():
if meta.group:
group_current.setdefault(meta.group)
if self.__raw_get(field_name) != PLACEHOLDER:
# Found a non-sentinel value
all_sentinel = False
if meta.group:
# This was set, so make it the selected value of the one-of.
group_current[meta.group] = field_name
# Now that all the defaults are set, reset it!
self.__dict__["_serialized_on_wire"] = not all_sentinel
self.__dict__["_unknown_fields"] = b""
self.__dict__["_group_current"] = group_current
def __raw_get(self, name: str) -> Any:
return super().__getattribute__(name)
def __eq__(self, other) -> bool:
if type(self) is not type(other):
return False
for field_name in self._betterproto.meta_by_field_name:
self_val = self.__raw_get(field_name)
other_val = other.__raw_get(field_name)
if self_val is PLACEHOLDER:
if other_val is PLACEHOLDER:
continue
self_val = self._get_field_default(field_name)
elif other_val is PLACEHOLDER:
other_val = other._get_field_default(field_name)
if self_val != other_val:
return False
return True
def __repr__(self) -> str:
parts = [
f"{field_name}={value!r}"
for field_name in self._betterproto.sorted_field_names
for value in (self.__raw_get(field_name),)
if value is not PLACEHOLDER
]
return f"{self.__class__.__name__}({', '.join(parts)})"
def __getattribute__(self, name: str) -> Any:
"""
Lazily initialize default values to avoid infinite recursion for recursive
message types
"""
value = super().__getattribute__(name)
if value is not PLACEHOLDER:
return value
value = self._get_field_default(name)
super().__setattr__(name, value)
return value
def __setattr__(self, attr: str, value: Any) -> None:
if attr != "_serialized_on_wire":
# Track when a field has been set.
self.__dict__["_serialized_on_wire"] = True
if hasattr(self, "_group_current"): # __post_init__ had already run
if attr in self._betterproto.oneof_group_by_field:
group = self._betterproto.oneof_group_by_field[attr]
for field in self._betterproto.oneof_field_by_group[group]:
|
if field.name == attr:
self._group_current[group] = field.name
else:
super().__setattr__(field.name, PLACEHOLDER)
super().__setattr__(attr, value)
def __bool__(self) -> bool:
"""True if the Message has any fields with non-default values."""
return any(
self.__raw_get(field_name)
not in (PLACEHOLDER, self._get_field_default(field_name))
for field_name in self._betterproto.meta_by_field_name
)
@property
def _betterproto(self) -> ProtoClassMetadata:
"""
Lazy initialize metadata for each protobuf class.
It may be initialized multiple times in a multi-threaded environment,
but that won't affect the correctness.
"""
meta = getattr(self.__class__, "_betterproto_meta", None)
if not meta:
meta = ProtoClassMetadata(self.__class__)
self.__class__._betterproto_meta = meta
return meta
def __bytes__(self) -> bytes:
"""
Get the binary encoded Protobuf representation of this message instance.
"""
output = bytearray()
for field_name, meta in self._betterproto.meta_by_field_name.items():
value = getattr(self, field_name)
if value is None:
# Optional items should be skipped. This is used for the Google
# wrapper types.
continue
            # Being selected in a group means this field is the one that is
# currently set in a `oneof` group, so it must be serialized even
# if the value is the default zero value.
selected_in_group = (
meta.group and self._group_current[meta.group] == field_name
)
# Empty messages can still be sent on the wire if they were
# set (or received empty).
serialize_empty = isinstance(value, Message) and value._serialized_on_wire
include_default_value_for_oneof = self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
if value == self._get_field_default(field_name) and not (
selected_in_group or serialize_empty or include_default_value_for_oneof
):
# Default (zero) values are not serialized. Two exceptions are
# if this is the selected oneof item or if we know we have to
# serialize an empty message (i.e. zero value was explicitly
# set by the user).
continue
if isinstance(value, list):
if meta.proto_type in PACKED_TYPES:
# Packed lists look like a length-delimited field. First,
# preprocess/encode each value into a buffer and then
# treat it like a field of raw bytes.
buf = bytearray()
for item in value:
buf += _preprocess_single(meta.proto_type, "", item)
output += _serialize_single(meta.number, TYPE_BYTES, buf)
else:
for item in value:
output += _serialize_single(
meta.number, meta.proto_type, item, wraps=meta.wraps or ""
)
elif isinstance(value, dict):
for k, v in value.items():
assert meta.map_types
sk = _serialize_single(1, meta.map_types[0], k)
sv = _serialize_single(2, meta.map_types[1], v)
output += _serialize_single(meta.number, meta.proto_type, sk + sv)
else:
# If we have an empty string and we're including the default value for
# a oneof, make sure we serialize it. This ensures that the byte string
# output isn't simply an empty string. This also ensures that round trip
# serialization will keep `which_one_of` calls consistent.
if (
isinstance(value, str)
and value == ""
and include_default_value_for_oneof
):
serialize_empty = True
output += _serialize_single(
meta.number,
meta.proto_type,
value,
serialize_empty=serialize_empty,
wraps=meta.wraps or "",
)
output += self._unknown_fields
return bytes(output)
# For compatibility with other libraries
def SerializeToString(self: T) -> bytes:
"""
Get the binary encoded Protobuf representation of this message instance.
.. note::
This is a method for compatibility with other libraries,
you should really use ``bytes(x)``.
Returns
--------
:class:`bytes`
The binary encoded Protobuf representation of this message instance
"""
return bytes(self)
@classmethod
def _type_hint(cls, field_name: str) -> Type:
return cls._type_hints()[field_name]
@classmethod
def _type_hints(cls) -> Dict[str, Type]:
module = sys.modules[cls.__module__]
return get_type_hints(cls, vars(module))
@classmethod
def _cls_for(cls, field: dataclasses.Field, index: int = 0) -> Type:
"""Get the message class for a field from the type hints."""
field_cls = cls._type_hint(field.name)
if hasattr(field_cls, "__args__") and index >= 0:
if field_cls.__args__ is not None:
field_cls = field_cls.__args__[index]
return field_cls
def _get_field_default(self, field_name: str) -> Any:
return self._betterproto.default_gen[field_name]()
@classmethod
def _get_field_default_gen(cls, field: dataclasses.Field) -> Any:
t = cls._type_hint(field.name)
if hasattr(t, "__origin__"):
if t.__origin__ in (dict, Dict):
# This is some kind of map (dict in Python).
return dict
elif t.__origin__ in (list, List):
# This is some kind of list (repeated) field.
return list
elif t.__origin__ is Union and t.__args__[1] is type(None):
# This is an optional (wrapped) field. For setting the default we
# really don't care what kind of field it is.
return type(None)
else:
return t
elif issubclass(t, Enum):
# Enums always default to zero.
return int
elif t is datetime:
# Offsets are relative to 1970-01-01T00:00:00Z
return datetime_default_gen
else:
# This is either a primitive scalar or another message type. Calling
# it should result in its zero value.
return t
def _postprocess_single(
self, wire_type: int, meta: FieldMetadata, field_name: str, value: Any
) -> Any:
"""Adjusts values after parsing."""
if wire_type == WIRE_VARINT:
if meta.proto_type in [TYPE_INT32, TYPE_INT64]:
bits = int(meta.proto_type[3:])
value = value & ((1 << bits) - 1)
signbit = 1 << (bits - 1)
value = int((value ^ signbit) - signbit)
elif meta.proto_type in [TYPE_SINT32, TYPE_SINT64]:
# Undo zig-zag encoding
value = (value >> 1) ^ (-(value & 1))
elif meta.proto_type == TYPE_BOOL:
# Booleans use a varint encoding, so convert it to true/false.
value = value > 0
elif wire_type in [WIRE_FIXED_32, WIRE_FIXED_64]:
fmt = _pack_fmt(meta.proto_type)
value = struct.unpack(fmt, value)[0]
elif wire_type == WIRE_LEN_DELIM:
if meta.proto_type == TYPE_STRING:
value = value.decode("utf-8")
elif meta.proto_type == TYPE_MESSAGE:
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
value = _Timestamp().parse(value).to_datetime()
elif cls == timedelta:
value = _Duration().parse(value).to_timedelta()
elif meta.wraps:
# This is a Google wrapper value message around a single
# scalar type.
value = _get_wrapper(meta.wraps)().parse(value).value
else:
value = cls().parse(value)
value._serialized_on_wire = True
elif meta.proto_type == TYPE_MAP:
value = self._betterproto.cls_by_field[field_name]().parse(value)
return value
def _include_default_value_for_oneof(
self, field_name: str, meta: FieldMetadata
) -> bool:
return (
meta.group is not None and self._group_current.get(meta.group) == field_name
)
def parse(self: T, data: bytes) -> T:
"""
Parse the binary encoded Protobuf into this message instance. This
returns the instance itself and is therefore assignable and chainable.
Parameters
-----------
data: :class:`bytes`
The data to parse the protobuf from.
Returns
--------
:class:`Message`
The initialized message.
"""
# Got some data over the wire
self._serialized_on_wire = True
proto_meta = self._betterproto
for parsed in parse_fields(data):
field_name = proto_meta.field_name_by_number.get(parsed.number)
if not field_name:
self._unknown_fields += parsed.raw
continue
meta = proto_meta.meta_by_field_name[field_name]
value: Any
if parsed.wire_type == WIRE_LEN_DELIM and meta.proto_type in PACKED_TYPES:
# This is a packed repeated field.
pos = 0
value = []
while pos < len(parsed.value):
if meta.proto_type in [TYPE_FLOAT, TYPE_FIXED32, TYPE_SFIXED32]:
decoded, pos = parsed.value[pos : pos + 4], pos + 4
wire_type = WIRE_FIXED_32
elif meta.proto_type in [TYPE_DOUBLE, TYPE_FIXED64, TYPE_SFIXED64]:
decoded, pos = parsed.value[pos : pos + 8], pos + 8
wire_type = WIRE_FIXED_64
else:
decoded, pos = decode_varint(parsed.value, pos)
wire_type = WIRE_VARINT
decoded = self._postprocess_single(
wire_type, meta, field_name, decoded
)
value.append(decoded)
else:
value = self._postprocess_single(
parsed.wire_type, meta, field_name, parsed.value
)
current = getattr(self, field_name)
if meta.proto_type == TYPE_MAP:
# Value represents a single key/value pair entry in the map.
current[value.key] = value.value
elif isinstance(current, list) and not isinstance(value, list):
current.append(value)
else:
setattr(self, field_name, value)
return self
# For compatibility with other libraries.
@classmethod
def FromString(cls: Type[T], data: bytes) -> T:
"""
Parse the binary encoded Protobuf into this message instance. This
returns the instance itself and is therefore assignable and chainable.
.. note::
This is a method for compatibility with other libraries,
you should really use :meth:`parse`.
Parameters
-----------
data: :class:`bytes`
The data to parse the protobuf from.
Returns
--------
:class:`Message`
The initialized message.
"""
return cls().parse(data)
def to_dict(
self, casing: Casing = Casing.CAMEL, include_default_values: bool = False
) -> Dict[str, Any]:
"""
Returns a JSON serializable dict representation of this object.
Parameters
-----------
casing: :class:`Casing`
The casing to use for key values. Default is :attr:`Casing.CAMEL` for
compatibility purposes.
include_default_values: :class:`bool`
If ``True`` will include the default values of fields. Default is ``False``.
E.g. an ``int32`` field will be included with a value of ``0`` if this is
set to ``True``, otherwise this would be ignored.
Returns
--------
Dict[:class:`str`, Any]
The JSON serializable dict representation of this object.
"""
output: Dict[str, Any] = {}
field_types = self._type_hints()
defaults = self._betterproto.default_gen
for field_name, meta in self._betterproto.meta_by_field_name.items():
field_is_repeated = defaults[field_name] is list
value = getattr(self, field_name)
cased_name = casing(field_name).rstrip("_") # type: ignore
if meta.proto_type == TYPE_MESSAGE:
if isinstance(value, datetime):
if (
value != DATETIME_ZERO
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = _Timestamp.timestamp_to_json(value)
elif isinstance(value, timedelta):
if (
value != timedelta(0)
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = _Duration.delta_to_json(value)
elif meta.wraps:
if value is not None or include_default_values:
output[cased_name] = value
elif field_is_repeated:
# Convert each item.
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
value = [_Timestamp.timestamp_to_json(i) for i in value]
elif cls == timedelta:
value = [_Duration.delta_to_json(i) for i in value]
else:
value = [
i.to_dict(casing, include_default_values) for i in value
]
if value or include_default_values:
output[cased_name] = value
elif (
value._serialized_on_wire
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
output[cased_name] = value.to_dict(casing, include_default_values)
elif meta.proto_type == TYPE_MAP:
for k in value:
if hasattr(value[k], "to_dict"):
value[k] = value[k].to_dict(casing, include_default_values)
if value or include_default_values:
output[cased_name] = value
elif (
value != self._get_field_default(field_name)
or include_default_values
or self._include_default_value_for_oneof(
field_name=field_name, meta=meta
)
):
if meta.proto_type in INT_64_TYPES:
if field_is_repeated:
output[cased_name] = [str(n) for n in value]
else:
output[cased_name] = str(value)
elif meta.proto_type == TYPE_BYTES:
if field_is_repeated:
output[cased_name] = [
b64encode(b).decode("utf8") for b in value
]
else:
output[cased_name] = b64encode(value).decode("utf8")
elif meta.proto_type == TYPE_ENUM:
if field_is_repeated:
enum_class: Type[Enum] = field_types[field_name].__args__[0]
if isinstance(value, typing.Iterable) and not isinstance(
value, str
):
output[cased_name] = [enum_class(el).name for el in value]
else:
# transparently upgrade single value to repeated
output[cased_name] = [enum_class(value).name]
else:
enum_class: Type[Enum] = field_types[field_name] # noqa
output[cased_name] = enum_class(value).name
else:
output[cased_name] = value
return output
def from_dict(self: T, value: Dict[str, Any]) -> T:
"""
Parse the key/value pairs into the current message instance. This returns the
instance itself and is therefore assignable and chainable.
Parameters
-----------
value: Dict[:class:`str`, Any]
The dictionary to parse from.
Returns
--------
:class:`Message`
The initialized message.
"""
self._serialized_on_wire = True
for key in value:
field_name = safe_snake_case(key)
meta = self._betterproto.meta_by_field_name.get(field_name)
if not meta:
continue
if value[key] is not None:
if meta.proto_type == TYPE_MESSAGE:
v = getattr(self, field_name)
if isinstance(v, list):
cls = self._betterproto.cls_by_field[field_name]
if cls == datetime:
v = [isoparse(item) for item in value[key]]
elif cls == timedelta:
v = [
timedelta(seconds=float(item[:-1]))
for item in value[key]
]
else:
v = [cls().from_dict(item) for item in value[key]]
elif isinstance(v, datetime):
v = isoparse(value[key])
setattr(self, field_name, v)
elif isinstance(v, timedelta):
v = timedelta(seconds=float(value[key][:-1]))
setattr(self, field_name, v)
elif meta.wraps:
setattr(self, field_name, value[key])
else:
# NOTE: `from_dict` mutates the underlying message, so no
# assignment here is necessary.
v.from_dict(value[key])
elif meta.map_types and meta.map_types[1] == TYPE_MESSAGE:
v = getattr(self, field_name)
cls = self._betterproto.cls_by_field[f"{field_name}.value"]
for k in value[key]:
v[k] = cls().from_dict(value[key][k])
else:
v = value[key]
if meta.proto_type in INT_64_TYPES:
if isinstance(value[key], list):
v = [int(n) for n in value[key]]
else:
v = int(value[key])
elif meta.proto_type == TYPE_BYTES:
if isinstance(value[key], list):
v = [b64decode(n) for n in value[key]]
else:
v = b64decode(value[key])
elif meta.proto_type == TYPE_ENUM:
enum_cls = self._betterproto.cls_by_field[field_name]
if isinstance(v, list):
v = [enum_cls.from_string(e) for e in v]
elif isinstance(v, str):
v = enum_cls.from_string(v)
if v is not None:
setattr(self, field_name, v)
return self
def to_json(self, indent: Union[None, int, str] = None) -> str:
"""A helper function to parse the message instance into its JSON
representation.
This is equivalent to::
json.dumps(message.to_dict(), indent=indent)
Parameters
-----------
indent: Optional[Union[:class:`int`, :class:`str`]]
The indent to pass to :func:`json.dumps`.
Returns
--------
:class:`str`
The JSON representation of the message.
"""
return json.dumps(self.to_dict(), indent=indent)
def from_json(self: T, value: Union[str, bytes]) -> T:
"""A helper function to return the message instance from its JSON
representation. This returns the instance itself and is therefore assignable
and chainable.
This is equivalent to::
return message.from_dict(json.loads(value))
Parameters
-----------
value: Union[:class:`str`, :class:`bytes`]
The value to pass to :func:`json.loads`.
Returns
--------
:class:`Message`
The initialized message.
"""
return self.from_dict(json.loads(value))
def serialized_on_wire(message: Message) -> bool:
"""
If this message was or should be serialized on the wire. This can be used to detect
presence (e.g. optional wrapper message) and is used internally during
parsing/serialization.
Returns
--------
:class:`bool`
Whether this message was or should be serialized on the wire.
"""
return message._serialized_on_wire
def which_one_of(message: Message, group_name: str) -> Tuple[str, Optional[Any]]:
"""
Return the name and value of a message's one-of field group.
Returns
--------
Tuple[:class:`str`, Any]
The field name and the value for that field.
"""
field_name = message._group_current.get(group_name)
if not field_name:
return "", None
return field_name, getattr(message, field_name)
# Circular import workaround: google.protobuf depends on base classes defined above.
from .lib.google.protobuf import ( # noqa
BoolValue,
BytesValue,
DoubleValue,
Duration,
FloatValue,
Int32Value,
Int64Value,
StringValue,
Timestamp,
UInt32Value,
UInt64Value,
)
class _Duration(Duration):
def to_timedelta(self) -> timedelta:
return timedelta(seconds=self.seconds, microseconds=self.nanos / 1e3)
@staticmethod
def delta_to_json(delta: timedelta) -> str:
parts = str(delta.total_seconds()).split(".")
if len(parts) > 1:
while len(parts[1]) not in [3, 6, 9]:
parts[1] = f"{parts[1]}0"
return f"{'.'.join(parts)}s"
class _Timestamp(Timestamp):
def to_datetime(self) -> datetime:
ts = self.seconds + (self.nanos / 1e9)
return datetime.fromtimestamp(ts, tz=timezone.utc)
@staticmethod
def timestamp_to_json(dt: datetime) -> str:
nanos = dt.microsecond * 1e3
copy = dt.replace(microsecond=0, tzinfo=None)
result = copy.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return f"{result}Z"
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return f"{result}.{int(nanos // 1e6) :03d}Z"
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return f"{result}.{int(nanos // 1e3) :06d}Z"
# Serialize 9 fractional digits.
return f"{result}.{nanos:09d}"
class _WrappedMessage(Message):
"""
Google protobuf wrapper types base class. JSON representation is just the
value itself.
"""
value: Any
def to_dict(self, casing: Casing = Casing.CAMEL) -> Any:
return self.value
def from_dict(self: T, value: Any) -> T:
if value is not None:
self.value = value
return self
def _get_wrapper(proto_type: str) -> Type:
"""Get the wrapper message class for a wrapped type."""
return {
TYPE_BOOL: BoolValue,
TYPE_INT32: Int32Value,
TYPE_UINT32: UInt32Value,
TYPE_INT64: Int64Value,
TYPE_UINT64: UInt64Value,
TYPE_FLOAT: FloatValue,
TYPE_DOUBLE: DoubleValue,
TYPE_STRING: StringValue,
TYPE_BYTES: BytesValue,
}[proto_type]
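# Note: wrapped scalar fields serialize to the bare value in JSON (see
# _WrappedMessage above), e.g. an Int32Value holding 42 is emitted simply as 42;
# _get_wrapper(TYPE_INT32) returns the Int32Value class used for such a field.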
| |
trace_table.rs
|
use super::{
inputs::{Claim, Witness},
pedersen_points::{PEDERSEN_POINTS, SHIFT_POINT},
};
use std::prelude::v1::*;
use zkp_elliptic_curve::Affine;
use zkp_primefield::FieldElement;
use zkp_stark::TraceTable;
use zkp_u256::U256;
// TODO: Naming
#[allow(clippy::module_name_repetitions)]
pub fn get_trace_table(claim: &Claim, witness: &Witness) -> TraceTable {
let num_columns = 8;
let mut trace = TraceTable::new(claim.path_length * 256, num_columns);
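// Column layout per row: columns 0..=3 hold (source, slope, x, y) for the
// left input and columns 4..=7 hold the same for the right input; see the
// writes into `trace` below.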
let mut row: Row = Row::default();
row.right.point = Affine::Point {
x: claim.leaf.clone(),
y: FieldElement::ZERO,
};
for path_index in 0..claim.path_length {
for bit_index in 0..256 {
if bit_index % 256 == 0
|
else {
row = hash_next_bit(&row, bit_index);
}
let row_index = path_index * 256 + bit_index;
let (left_x, left_y) = get_coordinates(&row.left.point);
trace[(row_index, 0)] = FieldElement::from(row.left.source.clone());
trace[(row_index, 1)] = row.left.slope.clone();
trace[(row_index, 2)] = left_x.clone();
trace[(row_index, 3)] = left_y.clone();
let (right_x, right_y) = get_coordinates(&row.right.point);
trace[(row_index, 4)] = FieldElement::from(row.right.source.clone());
trace[(row_index, 5)] = row.right.slope.clone();
trace[(row_index, 6)] = right_x.clone();
trace[(row_index, 7)] = right_y.clone();
}
}
trace
}
fn initialize_hash(left_source: U256, right_source: U256) -> Row {
let mut row: Row = Row::default();
row.left.source = left_source;
row.right.source = right_source;
row.right.point = SHIFT_POINT;
row
}
fn hash_next_bit(row: &Row, bit_index: usize) -> Row {
let mut next_row = Row {
left: Subrow {
source: row.left.source.clone() >> 1,
point: row.right.point.clone(),
..Subrow::default()
},
right: Subrow {
source: row.right.source.clone() >> 1,
..Subrow::default()
},
};
if row.left.source.bit(0) {
let p = &PEDERSEN_POINTS[bit_index];
next_row.left.slope = get_slope(&next_row.left.point, &p);
next_row.left.point += p;
}
next_row.right.point = next_row.left.point.clone();
if row.right.source.bit(0) {
let p = &PEDERSEN_POINTS[bit_index + 252];
next_row.right.slope = get_slope(&next_row.right.point, &p);
next_row.right.point += p;
}
next_row
}
#[derive(Default)]
struct Row {
left: Subrow,
right: Subrow,
}
struct Subrow {
source: U256,
slope: FieldElement,
point: Affine,
}
impl Default for Subrow {
fn default() -> Self {
Self {
source: U256::ZERO,
slope: FieldElement::ZERO,
point: Affine::Point {
x: FieldElement::ZERO,
y: FieldElement::ZERO,
},
}
}
}
fn get_slope(p_1: &Affine, p_2: &Affine) -> FieldElement {
let (x_1, y_1) = get_coordinates(p_1);
let (x_2, y_2) = get_coordinates(p_2);
(y_1 - y_2) / (x_1 - x_2)
}
fn get_coordinates(p: &Affine) -> (&FieldElement, &FieldElement) {
match p {
Affine::Zero => panic!(),
Affine::Point { x, y } => (x, y),
}
}
#[cfg(test)]
mod tests {
use super::{
super::inputs::{short_witness, SHORT_CLAIM},
*,
};
#[test]
fn short_inputs_consistent() {
let trace = get_trace_table(&SHORT_CLAIM, &short_witness());
assert_eq!(trace[(trace.num_rows() - 1, 6)], SHORT_CLAIM.root);
}
}
|
{
let other_hash = U256::from(&witness.path[path_index]);
let (x, _) = get_coordinates(&row.right.point);
if witness.directions[path_index] {
row = initialize_hash(other_hash, U256::from(x));
} else {
row = initialize_hash(U256::from(x), other_hash);
}
}
|
Testimonials.tsx
|
import React, { useEffect } from 'react';
import i18next from 'i18next';
// eslint-disable-next-line no-unused-vars
import { withTranslation, WithTranslation } from 'react-i18next';
import {
Typography, Grid, Container, Chip,
} from '@material-ui/core';
import RemoveIcon from '@material-ui/icons/Remove';
import { useStateValue } from '../../../Initial/Context/StateProvider';
import useStyles from './Styles';
import './Language';
import LanguagePT from './LanguagePT';
import LanguageENG from './LanguageENG';
export default withTranslation()(
(props: WithTranslation): React.ReactElement<WithTranslation> => {
const { t } = props;
const [{ Language }] = useStateValue();
const classes = useStyles({});
useEffect(() => {
if (!i18next.hasResourceBundle('PT', 'HomeTestimonials')) {
i18next.addResourceBundle('PT', 'HomeTestimonials', LanguagePT);
}
if (!i18next.hasResourceBundle('ENG', 'HomeTestimonials')) {
i18next.addResourceBundle('ENG', 'HomeTestimonials', LanguageENG);
}
i18next.changeLanguage(Language);
// Clean up on unmount: remove the resource bundles registered above.
return (): void => {
i18next.removeResourceBundle('PT', 'HomeTestimonials');
i18next.removeResourceBundle('ENG', 'HomeTestimonials');
};
}, []);
return (
<Container maxWidth="lg" className={classes.main}>
<Grid
container
spacing={6}
alignItems="flex-start"
>
<Grid item sm={12} md={4} className={classes.divider}>
<Typography variant="overline" className={classes.text} component="p">
{t('HomeTestimonials:text1.text')}
</Typography>
<Chip
icon={<RemoveIcon />}
label={t('HomeTestimonials:text1.author')}
/>
</Grid>
<Grid item sm={12} md={4} className={classes.divider}>
<Typography variant="overline" className={classes.text} component="p">
{t('HomeTestimonials:text2.text')}
</Typography>
<Chip
icon={<RemoveIcon />}
label={t('HomeTestimonials:text2.author')}
/>
</Grid>
<Grid item sm={12} md={4}>
<Typography variant="overline" className={classes.text} component="p">
{t('HomeTestimonials:text3.text')}
</Typography>
<Chip
icon={<RemoveIcon />}
label={t('HomeTestimonials:text3.author')}
/>
</Grid>
</Grid>
</Container>
|
},
);
|
);
|
register.py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.codegen import export_codegen_goal
from pants.backend.docker.goals.tailor import rules as tailor_rules
from pants.backend.docker.rules import rules as docker_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.python.util_rules.pex import rules as pex_rules
def rules():
return (
*docker_rules(),
*export_codegen_goal.rules(),
*pex_rules(),
*tailor_rules(),
)
def
|
():
return (DockerImageTarget,)
|
target_types
|
crate-method-reexport-grrrrrrr2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
#![crate_id="crate_method_reexport_grrrrrrr2"]
pub use name_pool::add;
pub mod name_pool {
pub type name_pool = ();
pub trait add {
fn add(&self, s: ~str);
}
impl add for name_pool {
fn add(&self, _s: ~str) {
}
}
}
pub mod rust {
pub use name_pool::add;
pub type rt = @();
pub trait cx {
fn cx(&self);
}
impl cx for rt {
fn cx(&self)
|
}
}
|
{
}
|
overlays_message.rs
|
use crate::message_prelude::*;
use graphene::Operation as DocumentOperation;
use serde::{Deserialize, Serialize};
#[remain::sorted]
#[impl_message(Message, DocumentMessage, Overlays)]
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum OverlaysMessage {
// Sub-messages
#[remain::unsorted]
DispatchOperation(Box<DocumentOperation>),
// Messages
ClearAllOverlays,
Rerender,
}
|
}
}
|
impl From<DocumentOperation> for OverlaysMessage {
fn from(operation: DocumentOperation) -> OverlaysMessage {
Self::DispatchOperation(Box::new(operation))
|
spring_source_row.rs
|
// This file is part of https://github.com/SpringQL/SpringQL-client-c which is licensed under MIT OR Apache-2.0. See file LICENSE-MIT or LICENSE-APACHE for full license details.
use ::springql::SpringSourceRow as SourceRow;
use std::{ffi::c_void, mem};
/// Row object to push into an in memory queue.
#[non_exhaustive]
#[repr(transparent)]
pub struct SpringSourceRow(*mut c_void);
impl SpringSourceRow {
pub fn
|
(source_row: SourceRow) -> Self {
SpringSourceRow(unsafe { mem::transmute(Box::new(source_row)) })
}
pub fn to_row(&self) -> SourceRow {
unsafe { &*(self.0 as *const SourceRow) }.clone()
}
pub fn drop(ptr: *mut SpringSourceRow) {
let outer = unsafe { Box::from_raw(ptr) };
let inner = unsafe { Box::from_raw(outer.0) };
drop(inner);
drop(outer);
}
pub fn into_ptr(self) -> *mut SpringSourceRow {
Box::into_raw(Box::new(self))
}
}
|
new
|
collect.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Collection" is the process of determining the type and other external
//! details of each item in Rust. Collection is specifically concerned
//! with *interprocedural* things -- for example, for a function
//! definition, collection will figure out the type and signature of the
//! function, but it will not visit the *body* of the function in any way,
//! nor examine type annotations on local variables (that's the job of
//! type *checking*).
//!
//! Collecting is ultimately defined by a bundle of queries that
//! inquire after various facts about the items in the crate (e.g.,
//! `type_of`, `generics_of`, `predicates_of`, etc). See the `provide` function
//! for the full set.
//!
//! At present, however, we do run collection across all items in the
//! crate as a kind of pass. This should eventually be factored away.
use astconv::{AstConv, Bounds};
use lint;
use constrained_type_params as ctp;
use middle::lang_items::SizedTraitLangItem;
use middle::const_val::ConstVal;
use middle::resolve_lifetime as rl;
use rustc::traits::Reveal;
use rustc::ty::subst::Substs;
use rustc::ty::{ToPredicate, ReprOptions};
use rustc::ty::{self, AdtKind, ToPolyTraitRef, Ty, TyCtxt};
use rustc::ty::maps::Providers;
use rustc::ty::util::IntTypeExt;
use util::nodemap::FxHashMap;
use rustc_const_math::ConstInt;
use syntax::{abi, ast};
use syntax::codemap::Spanned;
use syntax::symbol::{Symbol, keywords};
use syntax_pos::{Span, DUMMY_SP};
use rustc::hir::{self, map as hir_map};
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use rustc::hir::def::{Def, CtorKind};
use rustc::hir::def_id::DefId;
///////////////////////////////////////////////////////////////////////////
// Main entry point
pub fn collect_item_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut visitor = CollectItemTypesVisitor { tcx: tcx };
tcx.hir.krate().visit_all_item_likes(&mut visitor.as_deep_visitor());
}
pub fn provide(providers: &mut Providers) {
*providers = Providers {
type_of,
generics_of,
predicates_of,
super_predicates_of,
type_param_predicates,
trait_def,
adt_def,
fn_sig,
impl_trait_ref,
impl_polarity,
is_foreign_item,
..*providers
};
}
///////////////////////////////////////////////////////////////////////////
/// Context specific to some particular item. This is what implements
/// AstConv. It has information about the predicates that are defined
/// on the trait. Unfortunately, this predicate information is
/// available in various different forms at various points in the
/// process. So we can't just store a pointer to e.g. the AST or the
/// parsed ty form, we have to be more flexible. To this end, the
/// `ItemCtxt` is parameterized by a `DefId` that it uses to satisfy
/// `get_type_parameter_bounds` requests, drawing the information from
/// the AST (`hir::Generics`), recursively.
pub struct ItemCtxt<'a,'tcx:'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
item_def_id: DefId,
}
///////////////////////////////////////////////////////////////////////////
struct CollectItemTypesVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>
}
impl<'a, 'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::OnlyBodies(&self.tcx.hir)
}
fn visit_item(&mut self, item: &'tcx hir::Item) {
convert_item(self.tcx, item.id);
intravisit::walk_item(self, item);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics) {
for param in generics.ty_params() {
if param.default.is_some() {
let def_id = self.tcx.hir.local_def_id(param.id);
self.tcx.type_of(def_id);
}
}
intravisit::walk_generics(self, generics);
}
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
if let hir::ExprClosure(..) = expr.node {
let def_id = self.tcx.hir.local_def_id(expr.id);
self.tcx.generics_of(def_id);
self.tcx.type_of(def_id);
}
intravisit::walk_expr(self, expr);
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
if let hir::TyImplTraitExistential(..) = ty.node {
let def_id = self.tcx.hir.local_def_id(ty.id);
self.tcx.generics_of(def_id);
self.tcx.predicates_of(def_id);
}
intravisit::walk_ty(self, ty);
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) {
convert_trait_item(self.tcx, trait_item.id);
intravisit::walk_trait_item(self, trait_item);
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) {
convert_impl_item(self.tcx, impl_item.id);
intravisit::walk_impl_item(self, impl_item);
}
}
///////////////////////////////////////////////////////////////////////////
// Utility types and common code for the above passes.
impl<'a, 'tcx> ItemCtxt<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_def_id: DefId)
-> ItemCtxt<'a,'tcx> {
ItemCtxt {
tcx,
item_def_id,
}
}
}
impl<'a,'tcx> ItemCtxt<'a,'tcx> {
pub fn to_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> {
AstConv::ast_ty_to_ty(self, ast_ty)
}
}
impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { self.tcx }
fn
|
(&self,
span: Span,
def_id: DefId)
-> ty::GenericPredicates<'tcx>
{
self.tcx.at(span).type_param_predicates((self.item_def_id, def_id))
}
fn re_infer(&self, _span: Span, _def: Option<&ty::RegionParameterDef>)
-> Option<ty::Region<'tcx>> {
None
}
fn ty_infer(&self, span: Span) -> Ty<'tcx> {
struct_span_err!(
self.tcx().sess,
span,
E0121,
"the type placeholder `_` is not allowed within types on item signatures"
).span_label(span, "not allowed in type signatures")
.emit();
self.tcx().types.err
}
fn projected_ty_from_poly_trait_ref(&self,
span: Span,
item_def_id: DefId,
poly_trait_ref: ty::PolyTraitRef<'tcx>)
-> Ty<'tcx>
{
if let Some(trait_ref) = poly_trait_ref.no_late_bound_regions() {
self.tcx().mk_projection(item_def_id, trait_ref.substs)
} else {
// the binder has late-bound regions, so we cannot project an associated type here
span_err!(self.tcx().sess, span, E0212,
"cannot extract an associated type from a higher-ranked trait bound \
in this context");
self.tcx().types.err
}
}
fn normalize_ty(&self, _span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
// types in item signatures are not normalized, to avoid undue
// dependencies.
ty
}
fn set_tainted_by_errors(&self) {
// no obvious place to track this, just let it go
}
fn record_ty(&self, _hir_id: hir::HirId, _ty: Ty<'tcx>, _span: Span) {
// no place to record types from signatures?
}
}
fn type_param_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
(item_def_id, def_id): (DefId, DefId))
-> ty::GenericPredicates<'tcx> {
use rustc::hir::map::*;
use rustc::hir::*;
// In the AST, bounds can derive from two places. Either
// written inline like `<T:Foo>` or in a where clause like
// `where T:Foo`.
let param_id = tcx.hir.as_local_node_id(def_id).unwrap();
let param_owner = tcx.hir.ty_param_owner(param_id);
let param_owner_def_id = tcx.hir.local_def_id(param_owner);
let generics = tcx.generics_of(param_owner_def_id);
let index = generics.type_param_to_index[&def_id];
let ty = tcx.mk_param(index, tcx.hir.ty_param_name(param_id));
// Don't look for bounds where the type parameter isn't in scope.
let parent = if item_def_id == param_owner_def_id {
None
} else {
tcx.generics_of(item_def_id).parent
};
let mut result = parent.map_or(ty::GenericPredicates {
parent: None,
predicates: vec![]
}, |parent| {
let icx = ItemCtxt::new(tcx, parent);
icx.get_type_parameter_bounds(DUMMY_SP, def_id)
});
let item_node_id = tcx.hir.as_local_node_id(item_def_id).unwrap();
let ast_generics = match tcx.hir.get(item_node_id) {
NodeTraitItem(item) => &item.generics,
NodeImplItem(item) => &item.generics,
NodeItem(item) => {
match item.node {
ItemFn(.., ref generics, _) |
ItemImpl(_, _, _, ref generics, ..) |
ItemTy(_, ref generics) |
ItemEnum(_, ref generics) |
ItemStruct(_, ref generics) |
ItemUnion(_, ref generics) => generics,
ItemTrait(_, _, ref generics, ..) => {
// Implied `Self: Trait` and supertrait bounds.
if param_id == item_node_id {
result.predicates.push(ty::TraitRef {
def_id: item_def_id,
substs: Substs::identity_for_item(tcx, item_def_id)
}.to_predicate());
}
generics
}
_ => return result
}
}
NodeForeignItem(item) => {
match item.node {
ForeignItemFn(_, _, ref generics) => generics,
_ => return result
}
}
_ => return result
};
let icx = ItemCtxt::new(tcx, item_def_id);
result.predicates.extend(
icx.type_parameter_bounds_in_generics(ast_generics, param_id, ty));
result
}
impl<'a, 'tcx> ItemCtxt<'a, 'tcx> {
/// Find bounds from hir::Generics. This requires scanning through the
/// AST. We do this to avoid having to convert *all* the bounds, which
/// would create artificial cycles. Instead we can only convert the
/// bounds for a type parameter `X` if `X::Foo` is used.
fn type_parameter_bounds_in_generics(&self,
ast_generics: &hir::Generics,
param_id: ast::NodeId,
ty: Ty<'tcx>)
-> Vec<ty::Predicate<'tcx>>
{
let from_ty_params =
ast_generics.ty_params()
.filter(|p| p.id == param_id)
.flat_map(|p| p.bounds.iter())
.flat_map(|b| predicates_from_bound(self, ty, b));
let from_where_clauses =
ast_generics.where_clause
.predicates
.iter()
.filter_map(|wp| match *wp {
hir::WherePredicate::BoundPredicate(ref bp) => Some(bp),
_ => None
})
.filter(|bp| is_param(self.tcx, &bp.bounded_ty, param_id))
.flat_map(|bp| bp.bounds.iter())
.flat_map(|b| predicates_from_bound(self, ty, b));
from_ty_params.chain(from_where_clauses).collect()
}
}
/// Tests whether this is the AST for a reference to the type
/// parameter with id `param_id`. We use this so as to avoid running
/// `ast_ty_to_ty`, because we want to avoid triggering an all-out
/// conversion of the type to avoid inducing unnecessary cycles.
fn is_param<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ast_ty: &hir::Ty,
param_id: ast::NodeId)
-> bool
{
if let hir::TyPath(hir::QPath::Resolved(None, ref path)) = ast_ty.node {
match path.def {
Def::SelfTy(Some(def_id), None) |
Def::TyParam(def_id) => {
def_id == tcx.hir.local_def_id(param_id)
}
_ => false
}
} else {
false
}
}
fn ensure_no_param_bounds(tcx: TyCtxt,
span: Span,
generics: &hir::Generics,
thing: &'static str) {
let mut warn = false;
for ty_param in generics.ty_params() {
if !ty_param.bounds.is_empty() {
warn = true;
}
}
for lft_param in generics.lifetimes() {
if !lft_param.bounds.is_empty() {
warn = true;
}
}
if !generics.where_clause.predicates.is_empty() {
warn = true;
}
if warn {
// According to accepted RFC #XXX, we should
// eventually accept these, but it will not be
// part of this PR. Still, convert to warning to
// make bootstrapping easier.
span_warn!(tcx.sess, span, E0122,
"generic bounds are ignored in {}",
thing);
}
}
fn convert_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId) {
let it = tcx.hir.expect_item(item_id);
debug!("convert: item {} with id {}", it.name, it.id);
let def_id = tcx.hir.local_def_id(item_id);
match it.node {
// These don't define types.
hir::ItemExternCrate(_) |
hir::ItemUse(..) |
hir::ItemMod(_) |
hir::ItemGlobalAsm(_) => {}
hir::ItemForeignMod(ref foreign_mod) => {
for item in &foreign_mod.items {
let def_id = tcx.hir.local_def_id(item.id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
if let hir::ForeignItemFn(..) = item.node {
tcx.fn_sig(def_id);
}
}
}
hir::ItemEnum(ref enum_definition, _) => {
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
convert_enum_variant_types(tcx, def_id, &enum_definition.variants);
},
hir::ItemImpl(..) => {
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.impl_trait_ref(def_id);
tcx.predicates_of(def_id);
},
hir::ItemTrait(..) => {
tcx.generics_of(def_id);
tcx.trait_def(def_id);
tcx.at(it.span).super_predicates_of(def_id);
tcx.predicates_of(def_id);
},
hir::ItemTraitAlias(..) => {
span_err!(tcx.sess, it.span, E0645,
"trait aliases are not yet implemented (see issue #41517)");
},
hir::ItemStruct(ref struct_def, _) |
hir::ItemUnion(ref struct_def, _) => {
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
for f in struct_def.fields() {
let def_id = tcx.hir.local_def_id(f.id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
}
if !struct_def.is_struct() {
convert_variant_ctor(tcx, struct_def.id());
}
},
hir::ItemTy(_, ref generics) => {
ensure_no_param_bounds(tcx, it.span, generics, "type aliases");
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
}
hir::ItemStatic(..) | hir::ItemConst(..) | hir::ItemFn(..) => {
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
if let hir::ItemFn(..) = it.node {
tcx.fn_sig(def_id);
}
}
}
}
fn convert_trait_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_item_id: ast::NodeId) {
let trait_item = tcx.hir.expect_trait_item(trait_item_id);
let def_id = tcx.hir.local_def_id(trait_item.id);
tcx.generics_of(def_id);
match trait_item.node {
hir::TraitItemKind::Const(..) |
hir::TraitItemKind::Type(_, Some(_)) |
hir::TraitItemKind::Method(..) => {
tcx.type_of(def_id);
if let hir::TraitItemKind::Method(..) = trait_item.node {
tcx.fn_sig(def_id);
}
}
hir::TraitItemKind::Type(_, None) => {}
};
tcx.predicates_of(def_id);
}
fn convert_impl_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_item_id: ast::NodeId) {
let def_id = tcx.hir.local_def_id(impl_item_id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
if let hir::ImplItemKind::Method(..) = tcx.hir.expect_impl_item(impl_item_id).node {
tcx.fn_sig(def_id);
}
}
fn convert_variant_ctor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ctor_id: ast::NodeId) {
let def_id = tcx.hir.local_def_id(ctor_id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
}
fn convert_enum_variant_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
variants: &[hir::Variant]) {
let param_env = ty::ParamEnv::empty(Reveal::UserFacing);
let def = tcx.adt_def(def_id);
let repr_type = def.repr.discr_type();
let initial = repr_type.initial_discriminant(tcx);
let mut prev_discr = None::<ConstInt>;
// fill the discriminant values and field types
for variant in variants {
let wrapped_discr = prev_discr.map_or(initial, |d| d.wrap_incr());
prev_discr = Some(if let Some(e) = variant.node.disr_expr {
let expr_did = tcx.hir.local_def_id(e.node_id);
let substs = Substs::identity_for_item(tcx, expr_did);
let result = tcx.at(variant.span).const_eval(param_env.and((expr_did, substs)));
// enum variant evaluation happens before the global constant check
// so we need to report the real error
if let Err(ref err) = result {
err.report(tcx, variant.span, "enum discriminant");
}
match result {
Ok(&ty::Const { val: ConstVal::Integral(x), .. }) => Some(x),
_ => None
}
} else if let Some(discr) = repr_type.disr_incr(tcx, prev_discr) {
Some(discr)
} else {
struct_span_err!(tcx.sess, variant.span, E0370,
"enum discriminant overflowed")
.span_label(variant.span, format!("overflowed on value after {}",
prev_discr.unwrap()))
.note(&format!("explicitly set `{} = {}` if that is desired outcome",
variant.node.name, wrapped_discr))
.emit();
None
}.unwrap_or(wrapped_discr));
for f in variant.node.data.fields() {
let def_id = tcx.hir.local_def_id(f.id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
}
// Convert the ctor, if any. This also registers the variant as
// an item.
convert_variant_ctor(tcx, variant.node.data.id());
}
}
fn convert_struct_variant<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
did: DefId,
name: ast::Name,
discr: ty::VariantDiscr,
def: &hir::VariantData)
-> ty::VariantDef {
let mut seen_fields: FxHashMap<ast::Name, Span> = FxHashMap();
let node_id = tcx.hir.as_local_node_id(did).unwrap();
let fields = def.fields().iter().map(|f| {
let fid = tcx.hir.local_def_id(f.id);
let dup_span = seen_fields.get(&f.name).cloned();
if let Some(prev_span) = dup_span {
struct_span_err!(tcx.sess, f.span, E0124,
"field `{}` is already declared",
f.name)
.span_label(f.span, "field already declared")
.span_label(prev_span, format!("`{}` first declared here", f.name))
.emit();
} else {
seen_fields.insert(f.name, f.span);
}
ty::FieldDef {
did: fid,
name: f.name,
vis: ty::Visibility::from_hir(&f.vis, node_id, tcx)
}
}).collect();
ty::VariantDef {
did,
name,
discr,
fields,
ctor_kind: CtorKind::from_hir(def),
}
}
fn adt_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> &'tcx ty::AdtDef {
use rustc::hir::map::*;
use rustc::hir::*;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let item = match tcx.hir.get(node_id) {
NodeItem(item) => item,
_ => bug!()
};
let repr = ReprOptions::new(tcx, def_id);
let (kind, variants) = match item.node {
ItemEnum(ref def, _) => {
let mut distance_from_explicit = 0;
(AdtKind::Enum, def.variants.iter().map(|v| {
let did = tcx.hir.local_def_id(v.node.data.id());
let discr = if let Some(e) = v.node.disr_expr {
distance_from_explicit = 0;
ty::VariantDiscr::Explicit(tcx.hir.local_def_id(e.node_id))
} else {
ty::VariantDiscr::Relative(distance_from_explicit)
};
distance_from_explicit += 1;
convert_struct_variant(tcx, did, v.node.name, discr, &v.node.data)
}).collect())
}
ItemStruct(ref def, _) => {
// Use separate constructor id for unit/tuple structs and reuse did for braced structs.
let ctor_id = if !def.is_struct() {
Some(tcx.hir.local_def_id(def.id()))
} else {
None
};
(AdtKind::Struct, vec![
convert_struct_variant(tcx, ctor_id.unwrap_or(def_id), item.name,
ty::VariantDiscr::Relative(0), def)
])
}
ItemUnion(ref def, _) => {
(AdtKind::Union, vec![
convert_struct_variant(tcx, def_id, item.name,
ty::VariantDiscr::Relative(0), def)
])
}
_ => bug!()
};
tcx.alloc_adt_def(def_id, kind, variants, repr)
}
/// Ensures that the super-predicates of the trait with def-id
/// trait_def_id are converted and stored. This also ensures that
/// the transitive super-predicates are converted;
fn super_predicates_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
trait_def_id: DefId)
-> ty::GenericPredicates<'tcx> {
debug!("super_predicates(trait_def_id={:?})", trait_def_id);
let trait_node_id = tcx.hir.as_local_node_id(trait_def_id).unwrap();
let item = match tcx.hir.get(trait_node_id) {
hir_map::NodeItem(item) => item,
_ => bug!("trait_node_id {} is not an item", trait_node_id)
};
let (generics, bounds) = match item.node {
hir::ItemTrait(.., ref generics, ref supertraits, _) => (generics, supertraits),
hir::ItemTraitAlias(ref generics, ref supertraits) => (generics, supertraits),
_ => span_bug!(item.span,
"super_predicates invoked on non-trait"),
};
let icx = ItemCtxt::new(tcx, trait_def_id);
// Convert the bounds that follow the colon, e.g. `Bar+Zed` in `trait Foo : Bar+Zed`.
let self_param_ty = tcx.mk_self_type();
let superbounds1 = compute_bounds(&icx,
self_param_ty,
bounds,
SizedByDefault::No,
item.span);
let superbounds1 = superbounds1.predicates(tcx, self_param_ty);
// Convert any explicit superbounds in the where clause,
// e.g. `trait Foo where Self : Bar`:
let superbounds2 = icx.type_parameter_bounds_in_generics(generics, item.id, self_param_ty);
// Combine the two lists to form the complete set of superbounds:
let superbounds: Vec<_> = superbounds1.into_iter().chain(superbounds2).collect();
// Now require that immediate supertraits are converted,
// which will, in turn, reach indirect supertraits.
for bound in superbounds.iter().filter_map(|p| p.to_opt_poly_trait_ref()) {
tcx.at(item.span).super_predicates_of(bound.def_id());
}
ty::GenericPredicates {
parent: None,
predicates: superbounds
}
}
fn trait_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> &'tcx ty::TraitDef {
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let item = tcx.hir.expect_item(node_id);
let (is_auto, unsafety) = match item.node {
hir::ItemTrait(is_auto, unsafety, ..) => (is_auto == hir::IsAuto::Yes, unsafety),
hir::ItemTraitAlias(..) => (false, hir::Unsafety::Normal),
_ => span_bug!(item.span, "trait_def_of_item invoked on non-trait"),
};
let paren_sugar = tcx.has_attr(def_id, "rustc_paren_sugar");
if paren_sugar && !tcx.features().unboxed_closures {
let mut err = tcx.sess.struct_span_err(
item.span,
"the `#[rustc_paren_sugar]` attribute is a temporary means of controlling \
which traits can use parenthetical notation");
help!(&mut err,
"add `#![feature(unboxed_closures)]` to \
the crate attributes to use it");
err.emit();
}
let def_path_hash = tcx.def_path_hash(def_id);
let def = ty::TraitDef::new(def_id,
unsafety,
paren_sugar,
is_auto,
def_path_hash);
tcx.alloc_trait_def(def)
}
fn has_late_bound_regions<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
node: hir_map::Node<'tcx>)
-> Option<Span> {
struct LateBoundRegionsDetector<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
binder_depth: u32,
has_late_bound_regions: Option<Span>,
}
impl<'a, 'tcx> Visitor<'tcx> for LateBoundRegionsDetector<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
if self.has_late_bound_regions.is_some() { return }
match ty.node {
hir::TyBareFn(..) => {
self.binder_depth += 1;
intravisit::walk_ty(self, ty);
self.binder_depth -= 1;
}
_ => intravisit::walk_ty(self, ty)
}
}
fn visit_poly_trait_ref(&mut self,
tr: &'tcx hir::PolyTraitRef,
m: hir::TraitBoundModifier) {
if self.has_late_bound_regions.is_some() { return }
self.binder_depth += 1;
intravisit::walk_poly_trait_ref(self, tr, m);
self.binder_depth -= 1;
}
fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) {
if self.has_late_bound_regions.is_some() { return }
let hir_id = self.tcx.hir.node_to_hir_id(lt.id);
match self.tcx.named_region(hir_id) {
Some(rl::Region::Static) | Some(rl::Region::EarlyBound(..)) => {}
Some(rl::Region::LateBound(debruijn, _, _)) |
Some(rl::Region::LateBoundAnon(debruijn, _))
if debruijn.depth < self.binder_depth => {}
_ => self.has_late_bound_regions = Some(lt.span),
}
}
}
fn has_late_bound_regions<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
generics: &'tcx hir::Generics,
decl: &'tcx hir::FnDecl)
-> Option<Span> {
let mut visitor = LateBoundRegionsDetector {
tcx, binder_depth: 1, has_late_bound_regions: None
};
for lifetime in generics.lifetimes() {
let hir_id = tcx.hir.node_to_hir_id(lifetime.lifetime.id);
if tcx.is_late_bound(hir_id) {
return Some(lifetime.lifetime.span);
}
}
visitor.visit_fn_decl(decl);
visitor.has_late_bound_regions
}
match node {
hir_map::NodeTraitItem(item) => match item.node {
hir::TraitItemKind::Method(ref sig, _) =>
has_late_bound_regions(tcx, &item.generics, &sig.decl),
_ => None,
},
hir_map::NodeImplItem(item) => match item.node {
hir::ImplItemKind::Method(ref sig, _) =>
has_late_bound_regions(tcx, &item.generics, &sig.decl),
_ => None,
},
hir_map::NodeForeignItem(item) => match item.node {
hir::ForeignItemFn(ref fn_decl, _, ref generics) =>
has_late_bound_regions(tcx, generics, fn_decl),
_ => None,
},
hir_map::NodeItem(item) => match item.node {
hir::ItemFn(ref fn_decl, .., ref generics, _) =>
has_late_bound_regions(tcx, generics, fn_decl),
_ => None,
},
_ => None
}
}
fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> &'tcx ty::Generics {
use rustc::hir::map::*;
use rustc::hir::*;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let node = tcx.hir.get(node_id);
let parent_def_id = match node {
NodeImplItem(_) |
NodeTraitItem(_) |
NodeVariant(_) |
NodeStructCtor(_) |
NodeField(_) => {
let parent_id = tcx.hir.get_parent(node_id);
Some(tcx.hir.local_def_id(parent_id))
}
NodeExpr(&hir::Expr { node: hir::ExprClosure(..), .. }) => {
Some(tcx.closure_base_def_id(def_id))
}
NodeTy(&hir::Ty { node: hir::TyImplTraitExistential(..), .. }) => {
let mut parent_id = node_id;
loop {
match tcx.hir.get(parent_id) {
NodeItem(_) | NodeImplItem(_) | NodeTraitItem(_) => break,
_ => {
parent_id = tcx.hir.get_parent_node(parent_id);
}
}
}
Some(tcx.hir.local_def_id(parent_id))
}
_ => None
};
let mut opt_self = None;
let mut allow_defaults = false;
let no_generics = hir::Generics::empty();
let ast_generics = match node {
NodeTraitItem(item) => &item.generics,
NodeImplItem(item) => &item.generics,
NodeItem(item) => {
match item.node {
ItemFn(.., ref generics, _) |
ItemImpl(_, _, _, ref generics, ..) => generics,
ItemTy(_, ref generics) |
ItemEnum(_, ref generics) |
ItemStruct(_, ref generics) |
ItemUnion(_, ref generics) => {
allow_defaults = true;
generics
}
ItemTrait(_, _, ref generics, ..) | ItemTraitAlias(ref generics, ..) => {
// Add in the self type parameter.
//
// Something of a hack: use the node id for the trait, also as
// the node id for the Self type parameter.
let param_id = item.id;
opt_self = Some(ty::TypeParameterDef {
index: 0,
name: keywords::SelfType.name(),
def_id: tcx.hir.local_def_id(param_id),
has_default: false,
object_lifetime_default: rl::Set1::Empty,
pure_wrt_drop: false,
synthetic: None,
});
allow_defaults = true;
generics
}
_ => &no_generics,
}
}
NodeForeignItem(item) => {
match item.node {
ForeignItemStatic(..) => &no_generics,
ForeignItemFn(_, _, ref generics) => generics,
ForeignItemType => &no_generics,
}
}
NodeTy(&hir::Ty { node: hir::TyImplTraitExistential(ref exist_ty, _), .. }) => {
&exist_ty.generics
}
_ => &no_generics,
};
let has_self = opt_self.is_some();
let mut parent_has_self = false;
let mut own_start = has_self as u32;
let (parent_regions, parent_types) = parent_def_id.map_or((0, 0), |def_id| {
let generics = tcx.generics_of(def_id);
assert_eq!(has_self, false);
parent_has_self = generics.has_self;
own_start = generics.count() as u32;
(generics.parent_regions + generics.regions.len() as u32,
generics.parent_types + generics.types.len() as u32)
});
let early_lifetimes = early_bound_lifetimes_from_generics(tcx, ast_generics);
let regions = early_lifetimes.enumerate().map(|(i, l)| {
ty::RegionParameterDef {
name: l.lifetime.name.name(),
index: own_start + i as u32,
def_id: tcx.hir.local_def_id(l.lifetime.id),
pure_wrt_drop: l.pure_wrt_drop,
}
}).collect::<Vec<_>>();
let hir_id = tcx.hir.node_to_hir_id(node_id);
let object_lifetime_defaults = tcx.object_lifetime_defaults(hir_id);
// Now create the real type parameters.
let type_start = own_start + regions.len() as u32;
let types = ast_generics.ty_params().enumerate().map(|(i, p)| {
if p.name == keywords::SelfType.name() {
span_bug!(p.span, "`Self` should not be the name of a regular parameter");
}
if !allow_defaults && p.default.is_some() {
if !tcx.features().default_type_parameter_fallback {
tcx.lint_node(
lint::builtin::INVALID_TYPE_PARAM_DEFAULT,
p.id,
p.span,
&format!("defaults for type parameters are only allowed in `struct`, \
`enum`, `type`, or `trait` definitions."));
}
}
ty::TypeParameterDef {
index: type_start + i as u32,
name: p.name,
def_id: tcx.hir.local_def_id(p.id),
has_default: p.default.is_some(),
object_lifetime_default:
object_lifetime_defaults.as_ref().map_or(rl::Set1::Empty, |o| o[i]),
pure_wrt_drop: p.pure_wrt_drop,
synthetic: p.synthetic,
}
});
let mut types: Vec<_> = opt_self.into_iter().chain(types).collect();
// provide junk type parameter defs - the only place that
// cares about anything but the length is instantiation,
// and we don't do that for closures.
if let NodeExpr(&hir::Expr { node: hir::ExprClosure(..), .. }) = node {
// add a dummy parameter for the closure kind
types.push(ty::TypeParameterDef {
index: type_start,
name: Symbol::intern("<closure_kind>"),
def_id,
has_default: false,
object_lifetime_default: rl::Set1::Empty,
pure_wrt_drop: false,
synthetic: None,
});
// add a dummy parameter for the closure signature
types.push(ty::TypeParameterDef {
index: type_start + 1,
name: Symbol::intern("<closure_signature>"),
def_id,
has_default: false,
object_lifetime_default: rl::Set1::Empty,
pure_wrt_drop: false,
synthetic: None,
});
tcx.with_freevars(node_id, |fv| {
types.extend(fv.iter().zip(2..).map(|(_, i)| ty::TypeParameterDef {
index: type_start + i,
name: Symbol::intern("<upvar>"),
def_id,
has_default: false,
object_lifetime_default: rl::Set1::Empty,
pure_wrt_drop: false,
synthetic: None,
}));
});
}
let type_param_to_index = types.iter()
.map(|param| (param.def_id, param.index))
.collect();
tcx.alloc_generics(ty::Generics {
parent: parent_def_id,
parent_regions,
parent_types,
regions,
types,
type_param_to_index,
has_self: has_self || parent_has_self,
has_late_bound_regions: has_late_bound_regions(tcx, node),
})
}
fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> Ty<'tcx> {
use rustc::hir::map::*;
use rustc::hir::*;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let icx = ItemCtxt::new(tcx, def_id);
match tcx.hir.get(node_id) {
NodeTraitItem(item) => {
match item.node {
TraitItemKind::Method(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
TraitItemKind::Const(ref ty, _) |
TraitItemKind::Type(_, Some(ref ty)) => icx.to_ty(ty),
TraitItemKind::Type(_, None) => {
span_bug!(item.span, "associated type missing default");
}
}
}
NodeImplItem(item) => {
match item.node {
ImplItemKind::Method(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
ImplItemKind::Const(ref ty, _) => icx.to_ty(ty),
ImplItemKind::Type(ref ty) => {
if tcx.impl_trait_ref(tcx.hir.get_parent_did(node_id)).is_none() {
span_err!(tcx.sess, item.span, E0202,
"associated types are not allowed in inherent impls");
}
icx.to_ty(ty)
}
}
}
NodeItem(item) => {
match item.node {
ItemStatic(ref t, ..) | ItemConst(ref t, _) |
ItemTy(ref t, _) | ItemImpl(.., ref t, _) => {
icx.to_ty(t)
}
ItemFn(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
ItemEnum(..) |
ItemStruct(..) |
ItemUnion(..) => {
let def = tcx.adt_def(def_id);
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_adt(def, substs)
}
ItemTrait(..) | ItemTraitAlias(..) |
ItemMod(..) |
ItemForeignMod(..) |
ItemGlobalAsm(..) |
ItemExternCrate(..) |
ItemUse(..) => {
span_bug!(
item.span,
"compute_type_of_item: unexpected item type: {:?}",
item.node);
}
}
}
NodeForeignItem(foreign_item) => {
match foreign_item.node {
ForeignItemFn(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
ForeignItemStatic(ref t, _) => icx.to_ty(t),
ForeignItemType => tcx.mk_foreign(def_id),
}
}
NodeStructCtor(&ref def) |
NodeVariant(&Spanned { node: hir::Variant_ { data: ref def, .. }, .. }) => {
match *def {
VariantData::Unit(..) | VariantData::Struct(..) => {
tcx.type_of(tcx.hir.get_parent_did(node_id))
}
VariantData::Tuple(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
}
}
NodeField(field) => icx.to_ty(&field.ty),
NodeExpr(&hir::Expr { node: hir::ExprClosure(.., gen), .. }) => {
if gen.is_some() {
let hir_id = tcx.hir.node_to_hir_id(node_id);
return tcx.typeck_tables_of(def_id).node_id_to_type(hir_id);
}
let substs = ty::ClosureSubsts {
substs: Substs::for_item(
tcx,
def_id,
|def, _| {
let region = def.to_early_bound_region_data();
tcx.mk_region(ty::ReEarlyBound(region))
},
|def, _| tcx.mk_param_from_def(def)
)
};
tcx.mk_closure(def_id, substs)
}
NodeExpr(_) => match tcx.hir.get(tcx.hir.get_parent_node(node_id)) {
NodeTy(&hir::Ty { node: TyArray(_, body), .. }) |
NodeTy(&hir::Ty { node: TyTypeof(body), .. }) |
NodeExpr(&hir::Expr { node: ExprRepeat(_, body), .. })
if body.node_id == node_id => tcx.types.usize,
NodeVariant(&Spanned { node: Variant_ { disr_expr: Some(e), .. }, .. })
if e.node_id == node_id => {
tcx.adt_def(tcx.hir.get_parent_did(node_id))
.repr.discr_type().to_ty(tcx)
}
x => {
bug!("unexpected expr parent in type_of_def_id(): {:?}", x);
}
},
NodeTyParam(&hir::TyParam { default: Some(ref ty), .. }) => {
icx.to_ty(ty)
}
NodeTy(&hir::Ty { node: TyImplTraitExistential(..), .. }) => {
let owner = tcx.hir.get_parent_did(node_id);
let hir_id = tcx.hir.node_to_hir_id(node_id);
tcx.typeck_tables_of(owner).node_id_to_type(hir_id)
}
x => {
bug!("unexpected sort of node in type_of_def_id(): {:?}", x);
}
}
}
fn fn_sig<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> ty::PolyFnSig<'tcx> {
use rustc::hir::map::*;
use rustc::hir::*;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let icx = ItemCtxt::new(tcx, def_id);
match tcx.hir.get(node_id) {
NodeTraitItem(&hir::TraitItem { node: TraitItemKind::Method(ref sig, _), .. }) |
NodeImplItem(&hir::ImplItem { node: ImplItemKind::Method(ref sig, _), .. }) => {
AstConv::ty_of_fn(&icx, sig.unsafety, sig.abi, &sig.decl)
}
NodeItem(&hir::Item { node: ItemFn(ref decl, unsafety, _, abi, _, _), .. }) => {
AstConv::ty_of_fn(&icx, unsafety, abi, decl)
}
NodeForeignItem(&hir::ForeignItem { node: ForeignItemFn(ref fn_decl, _, _), .. }) => {
let abi = tcx.hir.get_foreign_abi(node_id);
compute_sig_of_foreign_fn_decl(tcx, def_id, fn_decl, abi)
}
NodeStructCtor(&VariantData::Tuple(ref fields, _)) |
NodeVariant(&Spanned { node: hir::Variant_ {
data: VariantData::Tuple(ref fields, _), ..
}, .. }) => {
let ty = tcx.type_of(tcx.hir.get_parent_did(node_id));
let inputs = fields.iter().map(|f| {
tcx.type_of(tcx.hir.local_def_id(f.id))
});
ty::Binder(tcx.mk_fn_sig(
inputs,
ty,
false,
hir::Unsafety::Normal,
abi::Abi::Rust
))
}
NodeExpr(&hir::Expr { node: hir::ExprClosure(..), .. }) => {
// Closure signatures are not like other function
// signatures and cannot be accessed through `fn_sig`. For
// example, a closure signature excludes the `self`
// argument. In any case they are embedded within the
// closure type as part of the `ClosureSubsts`.
//
// To get
// the signature of a closure, you should use the
// `closure_sig` method on the `ClosureSubsts`:
//
// closure_substs.closure_sig(def_id, tcx)
//
// or, inside of an inference context, you can use
//
// infcx.closure_sig(def_id, closure_substs)
bug!("to get the signature of a closure, use `closure_sig()` not `fn_sig()`");
}
x => {
bug!("unexpected sort of node in fn_sig(): {:?}", x);
}
}
}
fn impl_trait_ref<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> Option<ty::TraitRef<'tcx>> {
let icx = ItemCtxt::new(tcx, def_id);
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
match tcx.hir.expect_item(node_id).node {
hir::ItemImpl(.., ref opt_trait_ref, _, _) => {
opt_trait_ref.as_ref().map(|ast_trait_ref| {
let selfty = tcx.type_of(def_id);
AstConv::instantiate_mono_trait_ref(&icx, ast_trait_ref, selfty)
})
}
_ => bug!()
}
}
fn impl_polarity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> hir::ImplPolarity {
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
match tcx.hir.expect_item(node_id).node {
hir::ItemImpl(_, polarity, ..) => polarity,
ref item => bug!("impl_polarity: {:?} not an impl", item)
}
}
// Is it marked with ?Sized
fn is_unsized<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>,
ast_bounds: &[hir::TyParamBound],
span: Span) -> bool
{
let tcx = astconv.tcx();
// Try to find an unbound in bounds.
let mut unbound = None;
for ab in ast_bounds {
if let &hir::TraitTyParamBound(ref ptr, hir::TraitBoundModifier::Maybe) = ab {
if unbound.is_none() {
unbound = Some(ptr.trait_ref.clone());
} else {
span_err!(tcx.sess, span, E0203,
"type parameter has more than one relaxed default \
bound, only one is supported");
}
}
}
let kind_id = tcx.lang_items().require(SizedTraitLangItem);
match unbound {
Some(ref tpb) => {
// FIXME(#8559) currently requires the unbound to be built-in.
if let Ok(kind_id) = kind_id {
if tpb.path.def != Def::Trait(kind_id) {
tcx.sess.span_warn(span,
"default bound relaxed for a type parameter, but \
this does nothing because the given bound is not \
a default. Only `?Sized` is supported");
}
}
}
_ if kind_id.is_ok() => {
return false;
}
// No lang item for Sized, so we can't add it as a bound.
None => {}
}
true
}
/// Returns the early-bound lifetimes declared in this generics
/// listing. For anything other than fns/methods, this is just all
/// the lifetimes that are declared. For fns or methods, we have to
/// screen out those that do not appear in any where-clauses etc using
/// `resolve_lifetime::early_bound_lifetimes`.
fn early_bound_lifetimes_from_generics<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
ast_generics: &'a hir::Generics)
-> impl Iterator<Item=&'a hir::LifetimeDef>
{
ast_generics
.lifetimes()
.filter(move |l| {
let hir_id = tcx.hir.node_to_hir_id(l.lifetime.id);
!tcx.is_late_bound(hir_id)
})
}
fn predicates_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> ty::GenericPredicates<'tcx> {
let explicit = explicit_predicates_of(tcx, def_id);
ty::GenericPredicates {
parent: explicit.parent,
predicates: [&explicit.predicates[..], &tcx.inferred_outlives_of(def_id)[..]].concat()
}
}
fn explicit_predicates_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> ty::GenericPredicates<'tcx> {
use rustc::hir::map::*;
use rustc::hir::*;
debug!("explicit_predicates_of(def_id={:?})", def_id);
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let node = tcx.hir.get(node_id);
let mut is_trait = None;
let mut is_default_impl_trait = None;
let icx = ItemCtxt::new(tcx, def_id);
let no_generics = hir::Generics::empty();
let ast_generics = match node {
NodeTraitItem(item) => &item.generics,
NodeImplItem(item) => &item.generics,
NodeItem(item) => {
match item.node {
ItemImpl(_, _, defaultness, ref generics, ..) => {
if defaultness.is_default() {
is_default_impl_trait = tcx.impl_trait_ref(def_id);
}
generics
}
ItemFn(.., ref generics, _) |
ItemTy(_, ref generics) |
ItemEnum(_, ref generics) |
ItemStruct(_, ref generics) |
ItemUnion(_, ref generics) => generics,
ItemTrait(_, _, ref generics, .., ref items) => {
is_trait = Some((ty::TraitRef {
def_id,
substs: Substs::identity_for_item(tcx, def_id)
}, items));
generics
}
_ => &no_generics,
}
}
NodeForeignItem(item) => {
match item.node {
ForeignItemStatic(..) => &no_generics,
ForeignItemFn(_, _, ref generics) => generics,
ForeignItemType => &no_generics,
}
}
NodeTy(&Ty { node: TyImplTraitExistential(ref exist_ty, _), span, .. }) => {
let substs = Substs::identity_for_item(tcx, def_id);
let anon_ty = tcx.mk_anon(def_id, substs);
debug!("explicit_predicates_of: anon_ty={:?}", anon_ty);
// Collect the bounds, i.e. the `A+B+'c` in `impl A+B+'c`.
let bounds = compute_bounds(&icx,
anon_ty,
&exist_ty.bounds,
SizedByDefault::Yes,
span);
debug!("explicit_predicates_of: bounds={:?}", bounds);
let predicates = bounds.predicates(tcx, anon_ty);
debug!("explicit_predicates_of: predicates={:?}", predicates);
return ty::GenericPredicates {
parent: None,
predicates: predicates
};
}
_ => &no_generics,
};
let generics = tcx.generics_of(def_id);
let parent_count = generics.parent_count() as u32;
let has_own_self = generics.has_self && parent_count == 0;
let mut predicates = vec![];
// Below we'll consider the bounds on the type parameters (including `Self`)
// and the explicit where-clauses, but to get the full set of predicates
// on a trait we need to add in the supertrait bounds and bounds found on
// associated types.
if let Some((trait_ref, _)) = is_trait {
predicates = tcx.super_predicates_of(def_id).predicates;
// Add in a predicate that `Self:Trait` (where `Trait` is the
// current trait). This is needed for builtin bounds.
predicates.push(trait_ref.to_poly_trait_ref().to_predicate());
}
// In default impls, we can assume that the self type implements
// the trait. So in:
//
// default impl Foo for Bar { .. }
//
// we add a default where clause `Foo: Bar`. We do a similar thing for traits
// (see below). Recall that a default impl is not itself an impl, but rather a
// set of defaults that can be incorporated into another impl.
if let Some(trait_ref) = is_default_impl_trait {
predicates.push(trait_ref.to_poly_trait_ref().to_predicate());
}
// Collect the region predicates that were declared inline as
// well. In the case of parameters declared on a fn or method, we
// have to be careful to only iterate over early-bound regions.
let mut index = parent_count + has_own_self as u32;
for param in early_bound_lifetimes_from_generics(tcx, ast_generics) {
let region = tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion {
def_id: tcx.hir.local_def_id(param.lifetime.id),
index,
name: param.lifetime.name.name(),
}));
index += 1;
for bound in &param.bounds {
let bound_region = AstConv::ast_region_to_region(&icx, bound, None);
let outlives = ty::Binder(ty::OutlivesPredicate(region, bound_region));
predicates.push(outlives.to_predicate());
}
}
// Collect the predicates that were written inline by the user on each
// type parameter (e.g., `<T:Foo>`).
for param in ast_generics.ty_params() {
let param_ty = ty::ParamTy::new(index, param.name).to_ty(tcx);
index += 1;
let bounds = compute_bounds(&icx,
param_ty,
&param.bounds,
SizedByDefault::Yes,
param.span);
predicates.extend(bounds.predicates(tcx, param_ty));
}
// Add in the bounds that appear in the where-clause
let where_clause = &ast_generics.where_clause;
for predicate in &where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(ref bound_pred) => {
let ty = icx.to_ty(&bound_pred.bounded_ty);
for bound in bound_pred.bounds.iter() {
match bound {
&hir::TyParamBound::TraitTyParamBound(ref poly_trait_ref, _) => {
let mut projections = Vec::new();
let trait_ref =
AstConv::instantiate_poly_trait_ref(&icx,
poly_trait_ref,
ty,
&mut projections);
predicates.push(trait_ref.to_predicate());
for projection in &projections {
predicates.push(projection.to_predicate());
}
}
&hir::TyParamBound::RegionTyParamBound(ref lifetime) => {
let region = AstConv::ast_region_to_region(&icx,
lifetime,
None);
let pred = ty::Binder(ty::OutlivesPredicate(ty, region));
predicates.push(ty::Predicate::TypeOutlives(pred))
}
}
}
}
&hir::WherePredicate::RegionPredicate(ref region_pred) => {
let r1 = AstConv::ast_region_to_region(&icx, &region_pred.lifetime, None);
for bound in &region_pred.bounds {
let r2 = AstConv::ast_region_to_region(&icx, bound, None);
let pred = ty::Binder(ty::OutlivesPredicate(r1, r2));
predicates.push(ty::Predicate::RegionOutlives(pred))
}
}
&hir::WherePredicate::EqPredicate(..) => {
// FIXME(#20041)
}
}
}
// Add predicates from associated type bounds.
if let Some((self_trait_ref, trait_items)) = is_trait {
predicates.extend(trait_items.iter().flat_map(|trait_item_ref| {
let trait_item = tcx.hir.trait_item(trait_item_ref.id);
let bounds = match trait_item.node {
hir::TraitItemKind::Type(ref bounds, _) => bounds,
_ => {
return vec![].into_iter();
}
};
let assoc_ty = tcx.mk_projection(
tcx.hir.local_def_id(trait_item.id),
self_trait_ref.substs,
);
let bounds = compute_bounds(&ItemCtxt::new(tcx, def_id),
assoc_ty,
bounds,
SizedByDefault::Yes,
trait_item.span);
bounds.predicates(tcx, assoc_ty).into_iter()
}))
}
// Subtle: before we store the predicates into the tcx, we
// sort them so that predicates like `T: Foo<Item=U>` come
// before uses of `U`. This avoids false ambiguity errors
// in trait checking. See `setup_constraining_predicates`
// for details.
if let NodeItem(&Item { node: ItemImpl(..), .. }) = node {
let self_ty = tcx.type_of(def_id);
let trait_ref = tcx.impl_trait_ref(def_id);
ctp::setup_constraining_predicates(tcx,
&mut predicates,
trait_ref,
&mut ctp::parameters_for_impl(self_ty, trait_ref));
}
ty::GenericPredicates {
parent: generics.parent,
predicates,
}
}
pub enum SizedByDefault { Yes, No, }
/// Translate the AST's notion of ty param bounds (which are an enum consisting of a newtyped Ty or
/// a region) to ty's notion of ty param bounds, which can either be user-defined traits, or the
/// built-in trait (formerly known as kind): Send.
pub fn compute_bounds<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>,
param_ty: Ty<'tcx>,
ast_bounds: &[hir::TyParamBound],
sized_by_default: SizedByDefault,
span: Span)
-> Bounds<'tcx>
{
let mut region_bounds = vec![];
let mut trait_bounds = vec![];
for ast_bound in ast_bounds {
match *ast_bound {
hir::TraitTyParamBound(ref b, hir::TraitBoundModifier::None) => {
trait_bounds.push(b);
}
hir::TraitTyParamBound(_, hir::TraitBoundModifier::Maybe) => {}
hir::RegionTyParamBound(ref l) => {
region_bounds.push(l);
}
}
}
let mut projection_bounds = vec![];
let mut trait_bounds: Vec<_> = trait_bounds.iter().map(|&bound| {
astconv.instantiate_poly_trait_ref(bound,
param_ty,
&mut projection_bounds)
}).collect();
let region_bounds = region_bounds.into_iter().map(|r| {
astconv.ast_region_to_region(r, None)
}).collect();
trait_bounds.sort_by(|a,b| a.def_id().cmp(&b.def_id()));
let implicitly_sized = if let SizedByDefault::Yes = sized_by_default {
!is_unsized(astconv, ast_bounds, span)
} else {
false
};
Bounds {
region_bounds,
implicitly_sized,
trait_bounds,
projection_bounds,
}
}
/// Converts a specific TyParamBound from the AST into a set of
/// predicates that apply to the self-type. A vector is returned
/// because this can be anywhere from 0 predicates (`T:?Sized` adds no
/// predicates) to 1 (`T:Foo`) to many (`T:Bar<X=i32>` adds `T:Bar`
/// and `<T as Bar>::X == i32`).
fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx, 'tcx>,
param_ty: Ty<'tcx>,
bound: &hir::TyParamBound)
-> Vec<ty::Predicate<'tcx>>
{
match *bound {
hir::TraitTyParamBound(ref tr, hir::TraitBoundModifier::None) => {
let mut projections = Vec::new();
let pred = astconv.instantiate_poly_trait_ref(tr,
param_ty,
&mut projections);
projections.into_iter()
.map(|p| p.to_predicate())
.chain(Some(pred.to_predicate()))
.collect()
}
hir::RegionTyParamBound(ref lifetime) => {
let region = astconv.ast_region_to_region(lifetime, None);
let pred = ty::Binder(ty::OutlivesPredicate(param_ty, region));
vec![ty::Predicate::TypeOutlives(pred)]
}
hir::TraitTyParamBound(_, hir::TraitBoundModifier::Maybe) => {
Vec::new()
}
}
}
fn compute_sig_of_foreign_fn_decl<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
decl: &hir::FnDecl,
abi: abi::Abi)
-> ty::PolyFnSig<'tcx>
{
let fty = AstConv::ty_of_fn(&ItemCtxt::new(tcx, def_id), hir::Unsafety::Unsafe, abi, decl);
// feature gate SIMD types in FFI, since I (huonw) am not sure the
// ABIs are handled at all correctly.
if abi != abi::Abi::RustIntrinsic && abi != abi::Abi::PlatformIntrinsic
&& !tcx.features().simd_ffi {
let check = |ast_ty: &hir::Ty, ty: Ty| {
if ty.is_simd() {
tcx.sess.struct_span_err(ast_ty.span,
&format!("use of SIMD type `{}` in FFI is highly experimental and \
may result in invalid code",
tcx.hir.node_to_pretty_string(ast_ty.id)))
.help("add #![feature(simd_ffi)] to the crate attributes to enable")
.emit();
}
};
for (input, ty) in decl.inputs.iter().zip(*fty.inputs().skip_binder()) {
check(&input, ty)
}
if let hir::Return(ref ty) = decl.output {
check(&ty, *fty.output().skip_binder())
}
}
fty
}
fn is_foreign_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> bool {
match tcx.hir.get_if_local(def_id) {
Some(hir_map::NodeForeignItem(..)) => true,
Some(_) => false,
_ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id)
}
}
|
get_type_parameter_bounds
|
iso.py
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <[email protected]>
# Angelos Tzotsos <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
""" ISO metadata parser """
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["gco","gmd","gml","gml32","gmx","gts","srv","xlink"])
ns[None] = n.get_namespace("gmd")
return ns
namespaces = get_namespaces()
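# A minimal usage sketch (the file name is illustrative): parse an ISO 19139
# record from disk and wrap it in MD_Metadata, defined below.
#
#     md = MD_Metadata(etree.parse('record.xml'))
#     print(md.identifier, md.datestamp, md.hierarchy)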
class MD_Metadata(object):
""" Process gmd:MD_Metadata """
def __init__(self, md=None):
if md is None:
self.xml = None
self.identifier = None
self.parentidentifier = None
self.language = None
self.dataseturi = None
self.languagecode = None
self.datestamp = None
self.charset = None
self.hierarchy = None
self.contact = []
self.datetimestamp = None
self.stdname = None
self.stdver = None
self.referencesystem = None
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
self.distribution = None
self.dataquality = None
else:
if hasattr(md, 'getroot'): # standalone document
self.xml = etree.tostring(md.getroot())
else: # part of a larger document
self.xml = etree.tostring(md)
val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
self.identifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
self.parentidentifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
self.language = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
self.dataseturi = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
self.languagecode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
self.datestamp = util.testXMLValue(val)
if not self.datestamp:
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datestamp = util.testXMLValue(val)
self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datetimestamp = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
self.stdname = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
self.stdver = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
if val is not None:
self.referencesystem = MD_ReferenceSystem(val)
else:
self.referencesystem = None
# TODO: merge .identificationinfo into .identification
#warnings.warn(
# 'the .identification and .serviceidentification properties will merge into '
# '.identification being a list of properties. This is currently implemented '
# 'in .identificationinfo. '
# 'Please see https://github.com/geopython/OWSLib/issues/38 for more information',
# FutureWarning)
val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))
if val is not None:
self.identification = MD_DataIdentification(val, 'dataset')
self.serviceidentification = None
elif val2 is not None:
self.identification = MD_DataIdentification(val2, 'service')
self.serviceidentification = SV_ServiceIdentification(val2)
else:
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
val = list(idinfo)[0]
tagval = util.xmltag_split(val.tag)
if tagval == 'MD_DataIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
elif tagval == 'MD_ServiceIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'service'))
elif tagval == 'SV_ServiceIdentification':
self.identificationinfo.append(SV_ServiceIdentification(val))
val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
if val is not None:
self.distribution = MD_Distribution(val)
else:
self.distribution = None
val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
if val is not None:
self.dataquality = DQ_DataQuality(val)
else:
self.dataquality = None
class CI_Date(object):
""" process CI_Date """
def __init__(self, md=None):
if md is None:
self.date = None
self.type = None
else:
val = md.find(util.nspath_eval('gmd:date/gco:Date', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
val = md.find(util.nspath_eval('gmd:date/gco:DateTime', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
self.date = None
val = md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces))
self.type = _testCodeListValue(val)
class CI_ResponsibleParty(object):
""" process CI_ResponsibleParty """
def __init__(self, md=None):
if md is None:
self.name = None
self.organization = None
self.position = None
self.phone = None
self.fax = None
self.address = None
self.city = None
self.region = None
self.postcode = None
self.country = None
self.email = None
self.onlineresource = None
self.role = None
else:
val = md.find(util.nspath_eval('gmd:individualName/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:organisationName/gco:CharacterString', namespaces))
self.organization = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:positionName/gco:CharacterString', namespaces))
self.position = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:voice/gco:CharacterString', namespaces))
self.phone = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:facsimile/gco:CharacterString', namespaces))
self.fax = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:deliveryPoint/gco:CharacterString', namespaces))
self.address = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:city/gco:CharacterString', namespaces))
self.city = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:administrativeArea/gco:CharacterString', namespaces))
self.region = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:postalCode/gco:CharacterString', namespaces))
self.postcode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:country/gco:CharacterString', namespaces))
self.country = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:electronicMailAddress/gco:CharacterString', namespaces))
self.email = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:onlineResource/gmd:CI_OnlineResource', namespaces))
if val is not None:
self.onlineresource = CI_OnlineResource(val)
else:
|
self.role = _testCodeListValue(md.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces)))
class MD_DataIdentification(object):
""" process MD_DataIdentification """
def __init__(self, md=None, identtype=None):
if md is None:
self.identtype = None
self.title = None
self.alternatetitle = None
self.aggregationinfo = None
self.uricode = []
self.uricodespace = []
self.date = []
self.datetype = []
self.uselimitation = []
self.accessconstraints = []
self.classification = []
self.otherconstraints = []
self.securityconstraints = []
self.useconstraints = []
self.denominators = []
self.distance = []
self.uom = []
self.resourcelanguage = []
self.creator = None
self.publisher = None
self.originator = None
self.edition = None
self.abstract = None
self.purpose = None
self.status = None
self.contact = []
self.keywords = []
self.topiccategory = []
self.supplementalinformation = None
self.extent = None
self.bbox = None
self.temporalextent_start = None
self.temporalextent_end = None
else:
self.identtype = identtype
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.title = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', namespaces))
self.alternatetitle = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:aggregationInfo', namespaces))
self.aggregationinfo = util.testXMLValue(val)
self.uricode = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricode.append(val)
self.uricodespace = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricodespace.append(val)
self.date = []
self.datetype = []
for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
self.date.append(CI_Date(i))
self.uselimitation = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uselimitation.append(val)
self.accessconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.accessconstraints.append(val)
self.classification = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.classification.append(val)
self.otherconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.otherconstraints.append(val)
self.securityconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:useLimitation', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.securityconstraints.append(val)
self.useconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useConstraints/gmd:MD_RestrictionCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.useconstraints.append(val)
self.denominators = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.denominators.append(val)
self.distance = []
self.uom = []
for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.distance.append(val)
self.uom.append(i.get("uom"))
self.resourcelanguage = []
for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.resourcelanguage.append(val)
val = md.find(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName', namespaces))
if val is not None:
val2 = val.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces))
if val2 is not None:
clv = _testCodeListValue(val)
if clv == 'originator':
self.creator = util.testXMLValue(val)
elif clv == 'publisher':
self.publisher = util.testXMLValue(val)
elif clv == 'contributor':
self.originator = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:edition/gco:CharacterString', namespaces))
self.edition = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
self.abstract = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:purpose/gco:CharacterString', namespaces))
self.purpose = util.testXMLValue(val)
self.status = _testCodeListValue(md.find(util.nspath_eval('gmd:status/gmd:MD_ProgressCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
self.keywords = []
for i in md.findall(util.nspath_eval('gmd:descriptiveKeywords', namespaces)):
mdkw = {}
mdkw['type'] = _testCodeListValue(i.find(util.nspath_eval('gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', namespaces)))
mdkw['thesaurus'] = {}
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
mdkw['thesaurus']['title'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
mdkw['thesaurus']['date'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
mdkw['thesaurus']['datetype'] = util.testXMLValue(val)
mdkw['keywords'] = []
for k in i.findall(util.nspath_eval('gmd:MD_Keywords/gmd:keyword', namespaces)):
val = k.find(util.nspath_eval('gco:CharacterString', namespaces))
if val is not None:
val2 = util.testXMLValue(val)
if val2 is not None:
mdkw['keywords'].append(val2)
self.keywords.append(mdkw)
self.topiccategory = []
for i in md.findall(util.nspath_eval('gmd:topicCategory/gmd:MD_TopicCategoryCode', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.topiccategory.append(val)
val = md.find(util.nspath_eval('gmd:supplementalInformation/gco:CharacterString', namespaces))
self.supplementalinformation = util.testXMLValue(val)
# There may be multiple geographicElement, create an extent
# from the one containing either an EX_GeographicBoundingBox or EX_BoundingPolygon.
# The schema also specifies an EX_GeographicDescription. This is not implemented yet.
val = None
val2 = None
val3 = None
extents = md.findall(util.nspath_eval('gmd:extent', namespaces))
extents.extend(md.findall(util.nspath_eval('srv:extent', namespaces)))
for extent in extents:
if val is None:
for e in extent.findall(util.nspath_eval('gmd:EX_Extent/gmd:geographicElement', namespaces)):
if e.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces)) is not None or e.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces)) is not None:
val = e
break
self.extent = EX_Extent(val)
self.bbox = self.extent.boundingBox # for backwards compatibility
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', namespaces))
if val2 is None:
val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition', namespaces))
self.temporalextent_start = util.testXMLValue(val2)
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', namespaces))
if val3 is None:
val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition', namespaces))
self.temporalextent_end = util.testXMLValue(val3)
class MD_Distributor(object):
""" process MD_Distributor """
def __init__(self, md=None):
if md is None:
self.contact = None
self.online = []
else:
self.contact = None
val = md.find(util.nspath_eval('gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty', namespaces))
if val is not None:
self.contact = CI_ResponsibleParty(val)
self.online = []
for ol in md.findall(util.nspath_eval('gmd:MD_Distributor/gmd:distributorTransferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class MD_Distribution(object):
""" process MD_Distribution """
def __init__(self, md=None):
if md is None:
self.format = None
self.version = None
self.distributor = []
self.online = []
pass
else:
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', namespaces))
self.format = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:version/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
self.distributor = []
for dist in md.findall(util.nspath_eval('gmd:distributor', namespaces)):
self.distributor.append(MD_Distributor(dist))
self.online = []
for ol in md.findall(util.nspath_eval('gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)):
self.online.append(CI_OnlineResource(ol))
class DQ_DataQuality(object):
''' process DQ_DataQuality'''
def __init__(self, md=None):
if md is None:
self.conformancetitle = []
self.conformancedate = []
self.conformancedatetype = []
self.conformancedegree = []
self.lineage = None
self.specificationtitle = None
self.specificationdate = []
else:
self.conformancetitle = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancetitle.append(val)
self.conformancedate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedate.append(val)
self.conformancedatetype = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces)):
val = _testCodeListValue(i)
if val is not None:
self.conformancedatetype.append(val)
self.conformancedegree = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.conformancedegree.append(val)
val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', namespaces))
self.lineage = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
self.specificationtitle = util.testXMLValue(val)
self.specificationdate = []
for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.specificationdate.append(val)
class SV_ServiceIdentification(object):
""" process SV_ServiceIdentification """
def __init__(self, md=None):
if md is None:
self.identtype = 'service'
self.type = None
self.version = None
self.fees = None
self.bbox = None
self.couplingtype = None
self.operations = []
self.operateson = []
else:
self.identtype = 'service'
val = md.find(util.nspath_eval('srv:serviceType/gco:LocalName', namespaces))
self.type = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:serviceTypeVersion/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:accessProperties/gmd:MD_StandardOrderProcess/gmd:fees/gco:CharacterString', namespaces))
self.fees = util.testXMLValue(val)
val = md.find(util.nspath_eval('srv:extent/gmd:EX_Extent', namespaces))
if val is not None:
self.bbox = EX_Extent(val)
else:
self.bbox = None
self.couplingtype = _testCodeListValue(md.find(util.nspath_eval('gmd:couplingType/gmd:SV_CouplingType', namespaces)))
self.operations = []
for i in md.findall(util.nspath_eval('srv:containsOperations', namespaces)):
tmp = {}
val = i.find(util.nspath_eval('srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', namespaces))
tmp['name'] = util.testXMLValue(val)
tmp['dcplist'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:DCP', namespaces)):
tmp2 = _testCodeListValue(d.find(util.nspath_eval('srv:DCPList', namespaces)))
tmp['dcplist'].append(tmp2)
tmp['connectpoint'] = []
for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:connectPoint', namespaces)):
tmp3 = d.find(util.nspath_eval('gmd:CI_OnlineResource', namespaces))
tmp['connectpoint'].append(CI_OnlineResource(tmp3))
self.operations.append(tmp)
self.operateson = []
for i in md.findall(util.nspath_eval('srv:operatesOn', namespaces)):
tmp = {}
tmp['uuidref'] = i.attrib.get('uuidref')
tmp['href'] = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
tmp['title'] = i.attrib.get(util.nspath_eval('xlink:title', namespaces))
self.operateson.append(tmp)
class CI_OnlineResource(object):
""" process CI_OnlineResource """
def __init__(self,md=None):
if md is None:
self.url = None
self.protocol = None
self.name = None
self.description = None
self.function = None
else:
val = md.find(util.nspath_eval('gmd:linkage/gmd:URL', namespaces))
self.url = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:protocol/gco:CharacterString', namespaces))
self.protocol = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:description/gco:CharacterString', namespaces))
self.description = util.testXMLValue(val)
self.function = _testCodeListValue(md.find(util.nspath_eval('gmd:function/gmd:CI_OnLineFunctionCode', namespaces)))
class EX_GeographicBoundingBox(object):
def __init__(self, md=None):
if md is None:
self.minx = None
self.maxx = None
self.miny = None
self.maxy = None
else:
val = md.find(util.nspath_eval('gmd:westBoundLongitude/gco:Decimal', namespaces))
self.minx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:eastBoundLongitude/gco:Decimal', namespaces))
self.maxx = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:southBoundLatitude/gco:Decimal', namespaces))
self.miny = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:northBoundLatitude/gco:Decimal', namespaces))
self.maxy = util.testXMLValue(val)
class EX_Polygon(object):
def __init__(self, md=None):
if md is None:
self.exterior_ring = None
self.interior_rings = []
else:
linear_ring = md.find(util.nspath_eval('gml32:Polygon/gml32:exterior/gml32:LinearRing', namespaces))
if linear_ring is not None:
self.exterior_ring = self._coordinates_for_ring(linear_ring)
interior_ring_elements = md.findall(util.nspath_eval('gml32:Polygon/gml32:interior', namespaces))
self.interior_rings = []
for iring_element in interior_ring_elements:
linear_ring = iring_element.find(util.nspath_eval('gml32:LinearRing', namespaces))
self.interior_rings.append(self._coordinates_for_ring(linear_ring))
def _coordinates_for_ring(self, linear_ring):
coordinates = []
positions = linear_ring.findall(util.nspath_eval('gml32:pos', namespaces))
for pos in positions:
tokens = pos.text.split()
coords = tuple([float(t) for t in tokens])
coordinates.append(coords)
return coordinates
class EX_GeographicBoundingPolygon(object):
def __init__(self, md=None):
if md is None:
self.is_extent = None
self.polygons = []
else:
val = md.find(util.nspath_eval('gmd:extentTypeCode', namespaces))
self.is_extent = util.testXMLValue(val)
md_polygons = md.findall(util.nspath_eval('gmd:polygon', namespaces))
self.polygons = []
for val in md_polygons:
self.polygons.append(EX_Polygon(val))
class EX_Extent(object):
""" process EX_Extent """
def __init__(self, md=None):
if md is None:
self.boundingBox = None
self.boundingPolygon = None
self.description_code = None
else:
self.boundingBox = None
self.boundingPolygon = None
if md is not None:
bboxElement = md.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces))
if bboxElement is not None:
self.boundingBox = EX_GeographicBoundingBox(bboxElement)
polygonElement = md.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces))
if polygonElement is not None:
self.boundingPolygon = EX_GeographicBoundingPolygon(polygonElement)
val = md.find(util.nspath_eval('gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces))
self.description_code = util.testXMLValue(val)
class MD_ReferenceSystem(object):
""" process MD_ReferenceSystem """
def __init__(self, md):
if md is None:
pass
else:
val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces))
self.code = util.testXMLValue(val)
def _testCodeListValue(elpath):
""" get gco:CodeListValue_Type attribute, else get text content """
if elpath is not None: # try to get @codeListValue
val = util.testXMLValue(elpath.attrib.get('codeListValue'), True)
if val is not None:
return val
else: # see if there is element text
return util.testXMLValue(elpath)
else:
return None
class CodelistCatalogue(object):
""" process CT_CodelistCatalogue """
def __init__(self, ct):
val = ct.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
self.name = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:scope/gco:CharacterString', namespaces))
self.scope = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:fieldOfApplication/gco:CharacterString', namespaces))
self.fieldapp = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionNumber/gco:CharacterString', namespaces))
self.version = util.testXMLValue(val)
val = ct.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
self.date = util.testXMLValue(val)
self.dictionaries = {}
for i in ct.findall(util.nspath_eval('gmx:codelistItem/gmx:CodeListDictionary', namespaces)):
id = i.attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id] = {}
val = i.find(util.nspath_eval('gml32:description', namespaces))
self.dictionaries[id]['description'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gml32:identifier', namespaces))
self.dictionaries[id]['identifier'] = util.testXMLValue(val)
self.dictionaries[id]['entries'] = {}
for j in i.findall(util.nspath_eval('gmx:codeEntry', namespaces)):
id2 = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get(util.nspath_eval('gml32:id', namespaces))
self.dictionaries[id]['entries'][id2] = {}
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:description', namespaces))
self.dictionaries[id]['entries'][id2]['description'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:identifier', namespaces))
self.dictionaries[id]['entries'][id2]['identifier'] = util.testXMLValue(val)
val = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get('codeSpace')
self.dictionaries[id]['entries'][id2]['codespace'] = util.testXMLValue(val, True)
def getcodelistdictionaries(self):
return self.dictionaries.keys()
def getcodedefinitionidentifiers(self, cdl):
        if cdl in self.dictionaries:
ids = []
for i in self.dictionaries[cdl]['entries']:
ids.append(self.dictionaries[cdl]['entries'][i]['identifier'])
return ids
else:
return None
|
self.onlineresource = None
|
company.service.ts
|
import { Injectable, BadRequestException } from '@nestjs/common';
import { InjectRepository } from '@nestjs/typeorm';
import { CompanyRepository } from './company.repository';
import { CarrierService } from '../carrier/carrier.service';
@Injectable()
export class
|
{
constructor(
private readonly carrierService: CarrierService,
@InjectRepository(CompanyRepository)
private readonly companyRepository: CompanyRepository,
) {}
async getCompanies() {
const data = await this.companyRepository.find();
return { data };
}
getCompany(companyID: number) {
return this.companyRepository.findOne(companyID);
}
getCarriersByCompany(companyID: number) {
return this.carrierService.getCarriersByCompany(companyID);
}
}
|
CompanyService
|
App.test.js
|
import React from 'react';
import ReactDOM from 'react-dom';
import { Provider } from 'react-redux';
import { MemoryRouter } from 'react-router-dom';
import App from './App';
it('renders without crashing', () => {
const storeFake = (state) => ({
default: () => { },
subscribe: () => { },
dispatch: () => { },
exchangeslist: () => { },
getState: () => ({ ...state })
|
});
const store = storeFake({});
const div = document.createElement('div');
ReactDOM.render(
<Provider store={store}>
<MemoryRouter>
<App />
</MemoryRouter>
</Provider>, div);
});
| |
__init__.py
|
import os
from dotenv import load_dotenv
# Load environment variables
|
# Set hosting environment, if not set, default to production for security
HOSTING_ENV = os.getenv("HOSTING_ENV", "production")
if HOSTING_ENV == "dev":
from .dev import *
else:
from .production import *
|
load_dotenv()
|
mask.rs
|
//! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs)
use std::cmp::min;
use std::mem::uninitialized;
use std::ptr::copy_nonoverlapping;
/// Mask/unmask a frame.
#[inline]
pub fn
|
(buf: &mut [u8], mask: u32) {
apply_mask_fast32(buf, mask)
}
/// A safe unoptimized mask application.
#[inline]
#[allow(dead_code)]
fn apply_mask_fallback(buf: &mut [u8], mask: &[u8; 4]) {
for (i, byte) in buf.iter_mut().enumerate() {
*byte ^= mask[i & 3];
}
}
/// Faster version of `apply_mask()` which operates on 8-byte blocks.
#[inline]
#[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
fn apply_mask_fast32(buf: &mut [u8], mask_u32: u32) {
let mut ptr = buf.as_mut_ptr();
let mut len = buf.len();
// Possible first unaligned block.
let head = min(len, (8 - (ptr as usize & 0x7)) & 0x3);
let mask_u32 = if head > 0 {
let n = if head > 4 { head - 4 } else { head };
let mask_u32 = if n > 0 {
unsafe {
xor_mem(ptr, mask_u32, n);
ptr = ptr.offset(head as isize);
}
len -= n;
if cfg!(target_endian = "big") {
mask_u32.rotate_left(8 * n as u32)
} else {
mask_u32.rotate_right(8 * n as u32)
}
} else {
mask_u32
};
if head > 4 {
unsafe {
*(ptr as *mut u32) ^= mask_u32;
ptr = ptr.offset(4);
len -= 4;
}
}
mask_u32
} else {
mask_u32
};
if len > 0 {
debug_assert_eq!(ptr as usize % 4, 0);
}
// Properly aligned middle of the data.
if len >= 8 {
let mut mask_u64 = mask_u32 as u64;
mask_u64 = mask_u64 << 32 | mask_u32 as u64;
while len >= 8 {
unsafe {
*(ptr as *mut u64) ^= mask_u64;
ptr = ptr.offset(8);
len -= 8;
}
}
}
while len >= 4 {
unsafe {
*(ptr as *mut u32) ^= mask_u32;
ptr = ptr.offset(4);
len -= 4;
}
}
// Possible last block.
if len > 0 {
unsafe {
xor_mem(ptr, mask_u32, len);
}
}
}
#[inline]
// TODO: copy_nonoverlapping here compiles to call memcpy. While it is not so
// inefficient, it could be done better. The compiler does not see that len is
// limited to 3.
unsafe fn xor_mem(ptr: *mut u8, mask: u32, len: usize) {
let mut b: u32 = uninitialized();
#[allow(trivial_casts)]
copy_nonoverlapping(ptr, &mut b as *mut _ as *mut u8, len);
b ^= mask;
#[allow(trivial_casts)]
copy_nonoverlapping(&b as *const _ as *const u8, ptr, len);
}
#[cfg(test)]
mod tests {
use super::{apply_mask_fallback, apply_mask_fast32};
use std::ptr;
#[test]
fn test_apply_mask() {
let mask = [0x6d, 0xb6, 0xb2, 0x80];
let mask_u32: u32 = unsafe { ptr::read_unaligned(mask.as_ptr() as *const u32) };
let unmasked = vec![
0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17,
0x74, 0xf9, 0x12, 0x03,
];
// Check masking with proper alignment.
{
let mut masked = unmasked.clone();
apply_mask_fallback(&mut masked, &mask);
let mut masked_fast = unmasked.clone();
apply_mask_fast32(&mut masked_fast, mask_u32);
assert_eq!(masked, masked_fast);
}
// Check masking without alignment.
{
let mut masked = unmasked.clone();
apply_mask_fallback(&mut masked[1..], &mask);
let mut masked_fast = unmasked.clone();
apply_mask_fast32(&mut masked_fast[1..], mask_u32);
assert_eq!(masked, masked_fast);
}
}
}
|
apply_mask
|
0005_friend_current_user.py
|
# Generated by Django 2.1.15 on 2020-09-01 19:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
|
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('home', '0004_friend'),
]
operations = [
migrations.AddField(
model_name='friend',
name='current_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='owner', to=settings.AUTH_USER_MODEL),
),
]
|
|
mod.rs
|
use crate::internal::*;
use std::collections::HashSet;
use std::fmt::Debug;
use tract_itertools::Itertools;
pub mod change_axes;
mod op_optim;
mod prop_const;
mod push_split_down;
use self::change_axes::ChangeAxes;
use self::prop_const::PropConst;
use self::push_split_down::PushSplitDown;
use op_optim::OpOptim;
pub trait TypedPass: Debug + Send + Sync + dyn_clone::DynClone {
fn reset(&mut self) -> TractResult<()>;
fn next(
&mut self,
session: &mut OptimizerSession,
model: &TypedModel,
) -> TractResult<Option<TypedModelPatch>>;
}
dyn_clone::clone_trait_object!(TypedPass);
#[derive(Debug)]
pub struct Optimizer {
passes: Vec<Box<dyn TypedPass>>,
steps: Option<usize>,
}
impl Optimizer {
fn passes(passes: Vec<Box<dyn TypedPass>>) -> Optimizer {
Optimizer { passes, steps: None }
}
pub fn stopping_at(self, steps: usize) -> Optimizer {
Optimizer { steps: Some(steps), ..self }
}
pub fn declutter() -> Optimizer {
Optimizer::passes(vec![
Box::new(PropConst),
Box::new(OpOptim("declutter", TypedOp::declutter_with_session, 0)),
Box::new(PushSplitDown),
Box::new(ChangeAxes),
])
}
pub fn codegen() -> Optimizer {
Optimizer::passes(vec![
Box::new(PropConst),
Box::new(OpOptim(
"codegen",
|op, _session, model, node| TypedOp::codegen(op, model, node),
0,
)),
Box::new(OpOptim("declutter", TypedOp::declutter_with_session, 0)),
Box::new(PushSplitDown),
Box::new(OpOptim(
"fuse",
|op, _session, model, node| TypedOp::fuse(op, model, node),
0,
)),
])
}
pub fn optimize(&self, model: &mut TypedModel) -> TractResult<()> {
#[cfg(all(debug_assertions, feature = "paranoid_assertions"))]
{
model.check_consistent_facts()?;
}
model.compact()?;
self.session().optimize(model)
}
pub fn session(&self) -> OptimizerSession {
OptimizerSession { optimizer: self, counter: 0, seen: Default::default() }
}
}
#[derive(Debug)]
pub struct OptimizerSession<'o> {
optimizer: &'o Optimizer,
counter: usize,
seen: HashSet<String>,
}
impl<'o> OptimizerSession<'o> {
pub fn optimize(&mut self, model: &mut TypedModel) -> TractResult<()> {
for i in 0.. {
let old = self.counter;
self.run_all_passes(i, model)?;
if old == self.counter {
return Ok(());
}
model.compact()?;
}
unreachable!()
}
pub fn run_all_passes(&mut self, i: usize, model: &mut TypedModel) -> TractResult<()> {
let mut passes = self.optimizer.passes.clone();
for p in passes.iter_mut() {
self.run_one_pass_outer(i, p.as_mut(), model)?;
model.compact()?;
}
Ok(())
}
pub fn run_one_pass_outer(
&mut self,
i: usize,
p: &mut dyn TypedPass,
model: &mut TypedModel,
) -> TractResult<()> {
loop {
let old_counter = self.counter;
self.run_one_pass_inner(i, p, model)?;
if self.counter == old_counter {
return Ok(());
}
model.compact().with_context(|| format!("after pass {:?}", p))?;
}
}
pub fn
|
(
&mut self,
i: usize,
p: &mut dyn TypedPass,
model: &mut TypedModel,
) -> TractResult<()> {
p.reset()?;
if let Some(steps) = self.optimizer.steps {
if self.counter >= steps {
return Ok(());
}
}
while let Some(mut patch) = p.next(self, &model)? {
patch.push_context(format!("{:?}/{}", p, i));
#[cfg(all(debug_assertions, feature = "paranoid_assertions"))]
{
patch.model.check_consistent_facts()?;
model.check_consistent_facts()?;
patch.model.invariants()?;
model.invariants()?;
}
if let Some(watchdog) = patch.dont_apply_twice.take() {
if self.seen.contains(&watchdog) {
debug!("Loop detected: {} seen before", watchdog);
continue;
} else {
self.seen.insert(watchdog);
}
}
debug!("applying patch #{}: {}", self.counter, patch.context.iter().rev().join(" >> "),);
patch.apply(model)?;
self.counter += 1;
if let Some(steps) = self.optimizer.steps {
if self.counter >= steps {
return Ok(());
}
}
}
#[cfg(all(debug_assertions, feature = "paranoid_assertions"))]
{
model.check_edges().with_context(|| format!("after declutter pass {:?}", p))?;
model
.check_consistent_facts()
.with_context(|| format!("after declutter pass {:?}", p))?
}
Ok(())
}
}
|
run_one_pass_inner
|
SimpleRankAggregation.py
|
# RUN WITH /usr/bin/python3 minet.py (python 3.6)
import sys
import numpy as np
from sklearn.metrics import roc_curve, auc
import pandas as pd
def compute_aggregated_matrix(matrixfiles_num, matrixfiles, savematrixfile, saveresultfile, coeffs=[1, 1, 1, 1]):
# matrixfiles_num = int(sys.argv[1])
# matrixfiles = [sys.argv[i] for i in range(2, matrixfiles_num + 2)]
# savematrixfile = sys.argv[matrixfiles_num + 2]
# saveresultfile = sys.argv[matrixfiles_num + 3]
|
matricesdirname = "/home/user/Sirius/gene_network_sirius_2019/Matrices_1"
savematricesdirname = "/home/user/Sirius/gene_network_sirius_2019/Matrices_6"
predictedfilename = matricesdirname + "/{1}_{0}_predicted.txt"
truefilename = matricesdirname + "/{1}_{0}_true.txt"
savematricesfilename = savematricesdirname + "/{0}_predicted.txt"
# datalist = ['exps_10', 'exps_10_2', 'exps_10_bgr', 'exps_50', 'exps_50_2', 'exps_50_bgr', 'exps_100', 'exps_100_2', 'exps_100_bgr', 'genes_200_exps_10_bgr', 'genes_400_exps_10_bgr', 'genes_600_exps_10_bgr', 'genes_700_exps_10_bgr', 'genes_1000_exps_10_bgr']
datalist = ['genes_200_exps_10_bgr', 'genes_200_exps_20_bgr', 'genes_200_exps_40_bgr', 'genes_400_exps_10_bgr', 'genes_400_exps_40_bgr', 'genes_400_exps_80_bgr', 'genes_500_exps_10_bgr', 'genes_500_exps_50_bgr', 'genes_500_exps_100_bgr']
algolist = ['aracne', 'mrnet', 'mrnetb']
saveresultsfile = "/home/user/Sirius/gene_network_sirius_2019/RankAggregation/res_arrgeg_on_petr_big_data_many_exps.txt"
tmpfile = "/home/user/Sirius/gene_network_sirius_2019/RankAggregation/data/tmp5.txt"
if __name__ == "__main__":
results = np.zeros(shape=(len(datalist)))
for i, dataname in enumerate(datalist):
true_df = pd.read_csv(truefilename.format(dataname, algolist[1]), index_col=0, sep='\t')
predicted_df = compute_aggregated_matrix(len(algolist), [predictedfilename.format(dataname, algo) for algo in algolist], tmpfile, savematricesfilename.format(dataname))
true_df.to_csv(savematricesdirname + "/{0}_true.txt".format(dataname), index=True, header=True, sep='\t')
# print(true_df)
true_array = true_df.values[np.triu_indices(true_df.values.shape[0], k=1)]
predicted_array = predicted_df.values[np.triu_indices(predicted_df.values.shape[0], k=1)]
roc_auc = 0
# try:
# fpr, tpr, thresholds = roc_curve(true_array, predicted_array)
# roc_auc = auc(fpr, tpr)
# except:
# print("error", dataname, algo)
fpr, tpr, thresholds = roc_curve(true_array, predicted_array)
roc_auc = auc(fpr, tpr)
results[i] = roc_auc
with open(savematricesdirname + "/{0}_auc.txt".format(dataname), 'w') as f:
f.write(str(roc_auc) + '\n')
print("done", dataname, results[i])
with open(saveresultsfile, "a") as f:
f.write("done " + dataname + str(results[i]))
# print("done", dataname, algo)
print(results)
|
matrices = [pd.read_csv(f, index_col=0, sep='\t') for f in matrixfiles]
genes = matrices[0].index
# print(genes)
# print(matrices)
sz = len(matrices[0])
for matrix in matrices:
assert len(matrix) == sz
for matrix in matrices:
for column in matrix:
temp = matrix[column].argsort()
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(matrix[column]))
matrix[column] = ranks
res = np.zeros(shape=(sz, sz))
for s in range(sz):
for i, matrix in enumerate(matrices):
res[s] += matrix.iloc[:, s].values * coeffs[i]
res[s] /= len(matrices)
for row in res:
row /= row.sum()
result_df = pd.DataFrame(res, columns=genes, index=genes)
result_df.to_csv(saveresultfile, index=True, header=True, sep='\t')
# print(result_df)
return result_df
|
price.controller.ts
|
import { Controller, HttpStatus } from '@nestjs/common';
import { PriceService } from '@services';
import { MessagePattern, RpcException } from '@nestjs/microservices';
@Controller()
export class PriceController {
constructor(private readonly priceService: PriceService) {}
@MessagePattern({ cmd: 'load-prices' })
loadPrices(payload: any) {
const response = this.priceService.loadPrices(payload);
return response.then(({ data: { prices } }) => {
return {
|
status: HttpStatus.OK,
prices,
};
})
.catch(err => {
throw new RpcException({
error: {
status: err.response.status,
message: err.response.data,
},
});
});
}
}
| |
mechanisms.py
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import ssl
import logging
from sleekxmpp.util import sasl
from sleekxmpp.util.stringprep_profiles import StringPrepError
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream import RestartStream, register_stanza_plugin
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.xmlstream.matcher import MatchXPath
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.features.feature_mechanisms import stanza
log = logging.getLogger(__name__)
class FeatureMechanisms(BasePlugin):
name = 'feature_mechanisms'
description = 'RFC 6120: Stream Feature: SASL'
dependencies = set()
stanza = stanza
default_config = {
'use_mech': None,
'use_mechs': None,
'min_mech': None,
'sasl_callback': None,
'security_callback': None,
'encrypted_plain': True,
'unencrypted_plain': False,
'unencrypted_digest': False,
'unencrypted_cram': False,
'unencrypted_scram': True,
'order': 100
}
def plugin_init(self):
if self.sasl_callback is None:
self.sasl_callback = self._default_credentials
if self.security_callback is None:
self.security_callback = self._default_security
|
self.mech = None
self.mech_list = set()
self.attempted_mechs = set()
register_stanza_plugin(StreamFeatures, stanza.Mechanisms)
self.xmpp.register_stanza(stanza.Success)
self.xmpp.register_stanza(stanza.Failure)
self.xmpp.register_stanza(stanza.Auth)
self.xmpp.register_stanza(stanza.Challenge)
self.xmpp.register_stanza(stanza.Response)
self.xmpp.register_stanza(stanza.Abort)
self.xmpp.register_handler(
Callback('SASL Success',
MatchXPath(stanza.Success.tag_name()),
self._handle_success,
instream=True))
self.xmpp.register_handler(
Callback('SASL Failure',
MatchXPath(stanza.Failure.tag_name()),
self._handle_fail,
instream=True))
self.xmpp.register_handler(
Callback('SASL Challenge',
MatchXPath(stanza.Challenge.tag_name()),
self._handle_challenge))
self.xmpp.register_feature('mechanisms',
self._handle_sasl_auth,
restart=True,
order=self.order)
def _default_credentials(self, required_values, optional_values):
creds = self.xmpp.credentials
result = {}
values = required_values.union(optional_values)
for value in values:
if value == 'username':
result[value] = creds.get('username', self.xmpp.requested_jid.user)
elif value == 'email':
jid = self.xmpp.requested_jid.bare
result[value] = creds.get('email', jid)
elif value == 'channel_binding':
if hasattr(self.xmpp.socket, 'get_channel_binding'):
result[value] = self.xmpp.socket.get_channel_binding()
else:
log.debug("Channel binding not supported.")
log.debug("Use Python 3.3+ for channel binding and " + \
"SCRAM-SHA-1-PLUS support")
result[value] = None
elif value == 'host':
result[value] = creds.get('host', self.xmpp.requested_jid.domain)
elif value == 'realm':
result[value] = creds.get('realm', self.xmpp.requested_jid.domain)
elif value == 'service-name':
result[value] = creds.get('service-name', self.xmpp._service_name)
elif value == 'service':
result[value] = creds.get('service', 'xmpp')
elif value in creds:
result[value] = creds[value]
return result
def _default_security(self, values):
result = {}
for value in values:
if value == 'encrypted':
if 'starttls' in self.xmpp.features:
result[value] = True
elif isinstance(self.xmpp.socket, ssl.SSLSocket):
result[value] = True
else:
result[value] = False
else:
result[value] = self.config.get(value, False)
return result
def _handle_sasl_auth(self, features):
"""
Handle authenticating using SASL.
Arguments:
features -- The stream features stanza.
"""
if 'mechanisms' in self.xmpp.features:
# SASL authentication has already succeeded, but the
# server has incorrectly offered it again.
return False
enforce_limit = False
limited_mechs = self.use_mechs
if limited_mechs is None:
limited_mechs = set()
elif limited_mechs and not isinstance(limited_mechs, set):
limited_mechs = set(limited_mechs)
enforce_limit = True
if self.use_mech:
limited_mechs.add(self.use_mech)
enforce_limit = True
if enforce_limit:
self.use_mechs = limited_mechs
self.mech_list = set(features['mechanisms'])
return self._send_auth()
def _send_auth(self):
mech_list = self.mech_list - self.attempted_mechs
try:
self.mech = sasl.choose(mech_list,
self.sasl_callback,
self.security_callback,
limit=self.use_mechs,
min_mech=self.min_mech)
except sasl.SASLNoAppropriateMechanism:
log.error("No appropriate login method.")
self.xmpp.event("no_auth", direct=True)
self.xmpp.event("failed_auth", direct=True)
self.attempted_mechs = set()
return self.xmpp.disconnect()
except StringPrepError:
log.exception("A credential value did not pass SASLprep.")
self.xmpp.disconnect()
resp = stanza.Auth(self.xmpp)
resp['mechanism'] = self.mech.name
try:
resp['value'] = self.mech.process()
except sasl.SASLCancelled:
self.attempted_mechs.add(self.mech.name)
self._send_auth()
except sasl.SASLFailed:
self.attempted_mechs.add(self.mech.name)
self._send_auth()
except sasl.SASLMutualAuthFailed:
log.error("Mutual authentication failed! " + \
"A security breach is possible.")
self.attempted_mechs.add(self.mech.name)
self.xmpp.disconnect()
else:
resp.send(now=True)
return True
def _handle_challenge(self, stanza):
"""SASL challenge received. Process and send response."""
resp = self.stanza.Response(self.xmpp)
try:
resp['value'] = self.mech.process(stanza['value'])
except sasl.SASLCancelled:
self.stanza.Abort(self.xmpp).send()
except sasl.SASLFailed:
self.stanza.Abort(self.xmpp).send()
except sasl.SASLMutualAuthFailed:
log.error("Mutual authentication failed! " + \
"A security breach is possible.")
self.attempted_mechs.add(self.mech.name)
self.xmpp.disconnect()
else:
resp.send(now=True)
def _handle_success(self, stanza):
"""SASL authentication succeeded. Restart the stream."""
try:
final = self.mech.process(stanza['value'])
except sasl.SASLMutualAuthFailed:
log.error("Mutual authentication failed! " + \
"A security breach is possible.")
self.attempted_mechs.add(self.mech.name)
self.xmpp.disconnect()
else:
self.attempted_mechs = set()
self.xmpp.authenticated = True
self.xmpp.features.add('mechanisms')
self.xmpp.event('auth_success', stanza, direct=True)
raise RestartStream()
def _handle_fail(self, stanza):
"""SASL authentication failed. Disconnect and shutdown."""
self.attempted_mechs.add(self.mech.name)
log.info("Authentication failed: %s", stanza['condition'])
self.xmpp.event("failed_auth", stanza, direct=True)
self._send_auth()
return True
|
creds = self.sasl_callback(set(['username']), set())
if not self.use_mech and not creds['username']:
self.use_mech = 'ANONYMOUS'
|
Tools.ts
|
//taken from: https://kentcdodds.com/blog/get-a-catch-block-error-message-with-typescript
type ErrorWithMessage = {
message: string
}
function isErrorWithMessage(error: unknown): error is ErrorWithMessage {
return (
typeof error === 'object' &&
error !== null &&
'message' in error &&
typeof (error as Record<string, unknown>).message === 'string'
)
}
function toErrorWithMessage(maybeError: unknown): ErrorWithMessage {
if (isErrorWithMessage(maybeError)) return maybeError
try {
return new Error(JSON.stringify(maybeError))
} catch {
// fallback in case there's an error stringifying the maybeError
// like with circular references for example.
return new Error(String(maybeError))
}
}
export function
|
(error: unknown) {
return toErrorWithMessage(error).message
}
|
getErrorMessage
|
entry.go
|
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package lambda
import (
"context"
"errors"
"log"
"os"
)
// Start takes a handler and talks to an internal Lambda endpoint to pass requests to the handler. If the
// handler does not match one of the supported types an appropriate error message will be returned to the caller.
// Start blocks, and does not return after being called.
//
// Rules:
//
// * handler must be a function
// * handler may take between 0 and two arguments.
// * if there are two arguments, the first argument must satisfy the "context.Context" interface.
// * handler may return between 0 and two arguments.
// * if there are two return values, the second argument must be an error.
// * if there is one return value it must be an error.
//
// Valid function signatures:
//
// func ()
// func () error
// func (TIn) error
// func () (TOut, error)
// func (TIn) (TOut, error)
// func (context.Context) error
// func (context.Context, TIn) error
// func (context.Context) (TOut, error)
// func (context.Context, TIn) (TOut, error)
//
// Where "TIn" and "TOut" are types compatible with the "encoding/json" standard library.
// See https://golang.org/pkg/encoding/json/#Unmarshal for how deserialization behaves
func Start(handler interface{}) {
StartWithOptions(handler)
}
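// Example (illustrative sketch, not part of the original source): a handler
// matching the "func (context.Context, TIn) (TOut, error)" signature listed
// above, where MyEvent and handleRequest are hypothetical caller-defined names.
//
//	type MyEvent struct {
//		Name string `json:"name"`
//	}
//
//	func handleRequest(ctx context.Context, event MyEvent) (string, error) {
//		return "Hello " + event.Name, nil
//	}
//
//	func main() {
//		lambda.Start(handleRequest)
//	}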
// StartWithContext is the same as Start except sets the base context for the function.
//
// Deprecated: use lambda.StartWithOptions(handler, lambda.WithContext(ctx)) instead
func StartWithContext(ctx context.Context, handler interface{}) {
StartWithOptions(handler, WithContext(ctx))
}
// StartHandler takes in a Handler wrapper interface which can be implemented either by a
// custom function or a struct.
//
// Handler implementation requires a single "Invoke()" function:
//
// func Invoke(context.Context, []byte) ([]byte, error)
//
// Deprecated: use lambda.Start(handler) instead
func StartHandler(handler Handler) {
StartWithOptions(handler)
}
// StartWithOptions is the same as Start after the application of any handler options specified
func StartWithOptions(handler interface{}, options ...Option)
|
type startFunction struct {
env string
f func(envValue string, handler Handler) error
}
var (
	// This allows users to save a little bit of coldstart time in the download, by skipping the dependencies brought in for RPC support.
// The tradeoff is dropping compatibility with the go1.x runtime, functions must be "Custom Runtime" instead.
// To drop the rpc dependencies, compile with `-tags lambda.norpc`
rpcStartFunction = &startFunction{
env: "_LAMBDA_SERVER_PORT",
f: func(_ string, _ Handler) error {
return errors.New("_LAMBDA_SERVER_PORT was present but the function was compiled without RPC support")
},
}
runtimeAPIStartFunction = &startFunction{
env: "AWS_LAMBDA_RUNTIME_API",
f: startRuntimeAPILoop,
}
startFunctions = []*startFunction{rpcStartFunction, runtimeAPIStartFunction}
	// This allows end-to-end testing of the Start functions: tests can overwrite this function to keep the program alive
logFatalf = log.Fatalf
)
// StartHandlerWithContext is the same as StartHandler except sets the base context for the function.
//
// Handler implementation requires a single "Invoke()" function:
//
// func Invoke(context.Context, []byte) ([]byte, error)
//
// Deprecated: use lambda.StartWithOptions(handler, lambda.WithContext(ctx)) instead
func StartHandlerWithContext(ctx context.Context, handler Handler) {
StartWithOptions(handler, WithContext(ctx))
}
func start(handler *handlerOptions) {
var keys []string
for _, start := range startFunctions {
config := os.Getenv(start.env)
if config != "" {
			// in normal operation, the start function never returns
			// if it does, exit; this triggers a restart of the lambda function
err := start.f(config, handler)
logFatalf("%v", err)
}
keys = append(keys, start.env)
}
logFatalf("expected AWS Lambda environment variables %s are not defined", keys)
}
|
{
start(newHandler(handler, options...))
}
|
Navigation.js
|
import React, { useState } from 'react';
import { useParams } from 'react-router-dom';
import logo from '../img/logo-earth.svg';
import {
Collapse,
Navbar,
NavbarToggler,
NavbarBrand,
Nav,
NavItem,
NavLink,
UncontrolledDropdown,
DropdownToggle,
DropdownMenu,
DropdownItem,
NavbarText
} from 'reactstrap';
const Navigation = (props) => {
const { id } = useParams();
const [isOpen, setIsOpen] = useState(false);
const toggle = () => setIsOpen(!isOpen);
const newId = localStorage.getItem('id')
console.log('this is the id from local storage', newId);
return (
<div>
<Navbar color="light" light expand="md">
<img className='logo-img' src={logo}/>
<NavbarBrand className='WMP-text' href="/">Water My Plants</NavbarBrand>
<NavbarToggler onClick={toggle} />
<Collapse isOpen={isOpen} navbar>
<Nav className="mr-auto" navbar>
<NavItem>
<NavLink href="/login">Login</NavLink>
</NavItem>
<NavItem>
<NavLink href="/register">Sign Up</NavLink>
</NavItem>
<UncontrolledDropdown nav inNavbar>
<DropdownToggle nav caret >
My Plants
|
List of My Plants
</DropdownItem>
<DropdownItem href={`/users/${newId}/plantform`}>
Add New Plant
</DropdownItem>
</DropdownMenu>
</UncontrolledDropdown>
</Nav>
<NavbarText className='WMP-text'>Always Reminding You To Water Your Plants!</NavbarText>
</Collapse>
</Navbar>
</div>
);
}
export default Navigation;
|
</DropdownToggle>
<DropdownMenu right>
<DropdownItem href={`/users/${newId}/plants`}>
|
gpio28_ctrl.rs
|
#[doc = "Reader of register GPIO28_CTRL"]
pub type R = crate::R<u32, super::GPIO28_CTRL>;
#[doc = "Writer for register GPIO28_CTRL"]
pub type W = crate::W<u32, super::GPIO28_CTRL>;
#[doc = "Register GPIO28_CTRL `reset()`'s with value 0x1f"]
impl crate::ResetValue for super::GPIO28_CTRL {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x1f
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum IRQOVER_A {
#[doc = "0: don't invert the interrupt"]
NORMAL = 0,
#[doc = "1: invert the interrupt"]
INVERT = 1,
#[doc = "2: drive interrupt low"]
LOW = 2,
#[doc = "3: drive interrupt high"]
HIGH = 3,
}
impl From<IRQOVER_A> for u8 {
#[inline(always)]
fn from(variant: IRQOVER_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `IRQOVER`"]
pub type IRQOVER_R = crate::R<u8, IRQOVER_A>;
impl IRQOVER_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> IRQOVER_A {
match self.bits {
0 => IRQOVER_A::NORMAL,
1 => IRQOVER_A::INVERT,
2 => IRQOVER_A::LOW,
3 => IRQOVER_A::HIGH,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline(always)]
pub fn is_normal(&self) -> bool {
*self == IRQOVER_A::NORMAL
}
#[doc = "Checks if the value of the field is `INVERT`"]
#[inline(always)]
pub fn is_invert(&self) -> bool {
*self == IRQOVER_A::INVERT
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == IRQOVER_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == IRQOVER_A::HIGH
}
}
#[doc = "Write proxy for field `IRQOVER`"]
pub struct IRQOVER_W<'a> {
w: &'a mut W,
}
impl<'a> IRQOVER_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: IRQOVER_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "don't invert the interrupt"]
#[inline(always)]
pub fn normal(self) -> &'a mut W {
self.variant(IRQOVER_A::NORMAL)
}
#[doc = "invert the interrupt"]
#[inline(always)]
pub fn invert(self) -> &'a mut W {
self.variant(IRQOVER_A::INVERT)
}
#[doc = "drive interrupt low"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(IRQOVER_A::LOW)
}
#[doc = "drive interrupt high"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(IRQOVER_A::HIGH)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 28)) | (((value as u32) & 0x03) << 28);
self.w
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum INOVER_A {
#[doc = "0: don't invert the peri input"]
NORMAL = 0,
#[doc = "1: invert the peri input"]
INVERT = 1,
#[doc = "2: drive peri input low"]
LOW = 2,
#[doc = "3: drive peri input high"]
HIGH = 3,
}
impl From<INOVER_A> for u8 {
#[inline(always)]
fn from(variant: INOVER_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `INOVER`"]
pub type INOVER_R = crate::R<u8, INOVER_A>;
impl INOVER_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> INOVER_A {
match self.bits {
0 => INOVER_A::NORMAL,
1 => INOVER_A::INVERT,
2 => INOVER_A::LOW,
3 => INOVER_A::HIGH,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline(always)]
pub fn is_normal(&self) -> bool {
*self == INOVER_A::NORMAL
}
#[doc = "Checks if the value of the field is `INVERT`"]
#[inline(always)]
pub fn is_invert(&self) -> bool {
*self == INOVER_A::INVERT
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == INOVER_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == INOVER_A::HIGH
}
}
#[doc = "Write proxy for field `INOVER`"]
pub struct INOVER_W<'a> {
w: &'a mut W,
}
impl<'a> INOVER_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: INOVER_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "don't invert the peri input"]
#[inline(always)]
pub fn normal(self) -> &'a mut W {
self.variant(INOVER_A::NORMAL)
}
#[doc = "invert the peri input"]
#[inline(always)]
pub fn invert(self) -> &'a mut W {
self.variant(INOVER_A::INVERT)
}
#[doc = "drive peri input low"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(INOVER_A::LOW)
}
#[doc = "drive peri input high"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(INOVER_A::HIGH)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 16)) | (((value as u32) & 0x03) << 16);
self.w
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum OEOVER_A {
#[doc = "0: drive output enable from peripheral signal selected by funcsel"]
NORMAL = 0,
#[doc = "1: drive output enable from inverse of peripheral signal selected by funcsel"]
INVERT = 1,
#[doc = "2: disable output"]
DISABLE = 2,
#[doc = "3: enable output"]
ENABLE = 3,
}
impl From<OEOVER_A> for u8 {
#[inline(always)]
fn from(variant: OEOVER_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `OEOVER`"]
pub type OEOVER_R = crate::R<u8, OEOVER_A>;
impl OEOVER_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> OEOVER_A {
match self.bits {
0 => OEOVER_A::NORMAL,
1 => OEOVER_A::INVERT,
2 => OEOVER_A::DISABLE,
3 => OEOVER_A::ENABLE,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline(always)]
pub fn is_normal(&self) -> bool {
*self == OEOVER_A::NORMAL
}
#[doc = "Checks if the value of the field is `INVERT`"]
#[inline(always)]
pub fn is_invert(&self) -> bool {
*self == OEOVER_A::INVERT
}
#[doc = "Checks if the value of the field is `DISABLE`"]
#[inline(always)]
pub fn is_disable(&self) -> bool {
*self == OEOVER_A::DISABLE
}
#[doc = "Checks if the value of the field is `ENABLE`"]
#[inline(always)]
pub fn is_enable(&self) -> bool {
*self == OEOVER_A::ENABLE
}
}
#[doc = "Write proxy for field `OEOVER`"]
pub struct OEOVER_W<'a> {
w: &'a mut W,
}
impl<'a> OEOVER_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: OEOVER_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "drive output enable from peripheral signal selected by funcsel"]
#[inline(always)]
pub fn normal(self) -> &'a mut W {
self.variant(OEOVER_A::NORMAL)
}
#[doc = "drive output enable from inverse of peripheral signal selected by funcsel"]
#[inline(always)]
pub fn invert(self) -> &'a mut W {
self.variant(OEOVER_A::INVERT)
}
#[doc = "disable output"]
#[inline(always)]
pub fn disable(self) -> &'a mut W {
self.variant(OEOVER_A::DISABLE)
}
#[doc = "enable output"]
#[inline(always)]
pub fn enable(self) -> &'a mut W {
self.variant(OEOVER_A::ENABLE)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 12)) | (((value as u32) & 0x03) << 12);
self.w
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum OUTOVER_A {
#[doc = "0: drive output from peripheral signal selected by funcsel"]
NORMAL = 0,
#[doc = "1: drive output from inverse of peripheral signal selected by funcsel"]
INVERT = 1,
#[doc = "2: drive output low"]
LOW = 2,
#[doc = "3: drive output high"]
HIGH = 3,
}
impl From<OUTOVER_A> for u8 {
#[inline(always)]
fn from(variant: OUTOVER_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `OUTOVER`"]
pub type OUTOVER_R = crate::R<u8, OUTOVER_A>;
impl OUTOVER_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> OUTOVER_A {
match self.bits {
0 => OUTOVER_A::NORMAL,
1 => OUTOVER_A::INVERT,
2 => OUTOVER_A::LOW,
3 => OUTOVER_A::HIGH,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline(always)]
pub fn is_normal(&self) -> bool {
*self == OUTOVER_A::NORMAL
}
#[doc = "Checks if the value of the field is `INVERT`"]
#[inline(always)]
pub fn is_invert(&self) -> bool {
*self == OUTOVER_A::INVERT
}
#[doc = "Checks if the value of the field is `LOW`"]
#[inline(always)]
pub fn is_low(&self) -> bool {
*self == OUTOVER_A::LOW
}
#[doc = "Checks if the value of the field is `HIGH`"]
#[inline(always)]
pub fn is_high(&self) -> bool {
*self == OUTOVER_A::HIGH
}
}
#[doc = "Write proxy for field `OUTOVER`"]
pub struct OUTOVER_W<'a> {
w: &'a mut W,
}
impl<'a> OUTOVER_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: OUTOVER_A) -> &'a mut W {
{
self.bits(variant.into())
}
}
#[doc = "drive output from peripheral signal selected by funcsel"]
#[inline(always)]
pub fn normal(self) -> &'a mut W {
self.variant(OUTOVER_A::NORMAL)
}
#[doc = "drive output from inverse of peripheral signal selected by funcsel"]
#[inline(always)]
pub fn invert(self) -> &'a mut W {
self.variant(OUTOVER_A::INVERT)
}
#[doc = "drive output low"]
#[inline(always)]
pub fn low(self) -> &'a mut W {
self.variant(OUTOVER_A::LOW)
}
#[doc = "drive output high"]
#[inline(always)]
pub fn high(self) -> &'a mut W {
self.variant(OUTOVER_A::HIGH)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 8)) | (((value as u32) & 0x03) << 8);
self.w
}
}
#[doc = "0-31 -> selects pin function according to the gpio table\\n 31 == NULL\n\nValue on reset: 31"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum FUNCSEL_A {
#[doc = "1: `1`"]
SPI1_RX = 1,
#[doc = "2: `10`"]
UART0_TX = 2,
#[doc = "3: `11`"]
I2C0_SDA = 3,
#[doc = "4: `100`"]
PWM_A_6 = 4,
#[doc = "5: `101`"]
SIO_28 = 5,
#[doc = "6: `110`"]
PIO0_28 = 6,
#[doc = "7: `111`"]
PIO1_28 = 7,
#[doc = "9: `1001`"]
USB_MUXING_VBUS_DETECT = 9,
#[doc = "31: `11111`"]
NULL = 31,
}
impl From<FUNCSEL_A> for u8 {
#[inline(always)]
fn from(variant: FUNCSEL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `FUNCSEL`"]
pub type FUNCSEL_R = crate::R<u8, FUNCSEL_A>;
impl FUNCSEL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, FUNCSEL_A> {
use crate::Variant::*;
match self.bits {
1 => Val(FUNCSEL_A::SPI1_RX),
2 => Val(FUNCSEL_A::UART0_TX),
3 => Val(FUNCSEL_A::I2C0_SDA),
4 => Val(FUNCSEL_A::PWM_A_6),
5 => Val(FUNCSEL_A::SIO_28),
6 => Val(FUNCSEL_A::PIO0_28),
7 => Val(FUNCSEL_A::PIO1_28),
9 => Val(FUNCSEL_A::USB_MUXING_VBUS_DETECT),
31 => Val(FUNCSEL_A::NULL),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `SPI1_RX`"]
#[inline(always)]
pub fn is_spi1_rx(&self) -> bool {
*self == FUNCSEL_A::SPI1_RX
}
#[doc = "Checks if the value of the field is `UART0_TX`"]
#[inline(always)]
pub fn is_uart0_tx(&self) -> bool {
*self == FUNCSEL_A::UART0_TX
}
#[doc = "Checks if the value of the field is `I2C0_SDA`"]
#[inline(always)]
pub fn is_i2c0_sda(&self) -> bool {
*self == FUNCSEL_A::I2C0_SDA
}
#[doc = "Checks if the value of the field is `PWM_A_6`"]
#[inline(always)]
pub fn is_pwm_a_6(&self) -> bool {
*self == FUNCSEL_A::PWM_A_6
}
#[doc = "Checks if the value of the field is `SIO_28`"]
#[inline(always)]
pub fn is_sio_28(&self) -> bool {
*self == FUNCSEL_A::SIO_28
}
#[doc = "Checks if the value of the field is `PIO0_28`"]
#[inline(always)]
pub fn is_pio0_28(&self) -> bool {
*self == FUNCSEL_A::PIO0_28
}
#[doc = "Checks if the value of the field is `PIO1_28`"]
#[inline(always)]
pub fn is_pio1_28(&self) -> bool {
*self == FUNCSEL_A::PIO1_28
}
#[doc = "Checks if the value of the field is `USB_MUXING_VBUS_DETECT`"]
#[inline(always)]
pub fn is_usb_muxing_vbus_detect(&self) -> bool {
*self == FUNCSEL_A::USB_MUXING_VBUS_DETECT
}
#[doc = "Checks if the value of the field is `NULL`"]
#[inline(always)]
pub fn is_null(&self) -> bool {
*self == FUNCSEL_A::NULL
}
}
#[doc = "Write proxy for field `FUNCSEL`"]
pub struct FUNCSEL_W<'a> {
w: &'a mut W,
}
impl<'a> FUNCSEL_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: FUNCSEL_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "`1`"]
#[inline(always)]
pub fn spi1_rx(self) -> &'a mut W {
self.variant(FUNCSEL_A::SPI1_RX)
}
#[doc = "`10`"]
#[inline(always)]
pub fn
|
(self) -> &'a mut W {
self.variant(FUNCSEL_A::UART0_TX)
}
#[doc = "`11`"]
#[inline(always)]
pub fn i2c0_sda(self) -> &'a mut W {
self.variant(FUNCSEL_A::I2C0_SDA)
}
#[doc = "`100`"]
#[inline(always)]
pub fn pwm_a_6(self) -> &'a mut W {
self.variant(FUNCSEL_A::PWM_A_6)
}
#[doc = "`101`"]
#[inline(always)]
pub fn sio_28(self) -> &'a mut W {
self.variant(FUNCSEL_A::SIO_28)
}
#[doc = "`110`"]
#[inline(always)]
pub fn pio0_28(self) -> &'a mut W {
self.variant(FUNCSEL_A::PIO0_28)
}
#[doc = "`111`"]
#[inline(always)]
pub fn pio1_28(self) -> &'a mut W {
self.variant(FUNCSEL_A::PIO1_28)
}
#[doc = "`1001`"]
#[inline(always)]
pub fn usb_muxing_vbus_detect(self) -> &'a mut W {
self.variant(FUNCSEL_A::USB_MUXING_VBUS_DETECT)
}
#[doc = "`11111`"]
#[inline(always)]
pub fn null(self) -> &'a mut W {
self.variant(FUNCSEL_A::NULL)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x1f) | ((value as u32) & 0x1f);
self.w
}
}
impl R {
#[doc = "Bits 28:29"]
#[inline(always)]
pub fn irqover(&self) -> IRQOVER_R {
IRQOVER_R::new(((self.bits >> 28) & 0x03) as u8)
}
#[doc = "Bits 16:17"]
#[inline(always)]
pub fn inover(&self) -> INOVER_R {
INOVER_R::new(((self.bits >> 16) & 0x03) as u8)
}
#[doc = "Bits 12:13"]
#[inline(always)]
pub fn oeover(&self) -> OEOVER_R {
OEOVER_R::new(((self.bits >> 12) & 0x03) as u8)
}
#[doc = "Bits 8:9"]
#[inline(always)]
pub fn outover(&self) -> OUTOVER_R {
OUTOVER_R::new(((self.bits >> 8) & 0x03) as u8)
}
#[doc = "Bits 0:4 - 0-31 -> selects pin function according to the gpio table\\n 31 == NULL"]
#[inline(always)]
pub fn funcsel(&self) -> FUNCSEL_R {
FUNCSEL_R::new((self.bits & 0x1f) as u8)
}
}
impl W {
#[doc = "Bits 28:29"]
#[inline(always)]
pub fn irqover(&mut self) -> IRQOVER_W {
IRQOVER_W { w: self }
}
#[doc = "Bits 16:17"]
#[inline(always)]
pub fn inover(&mut self) -> INOVER_W {
INOVER_W { w: self }
}
#[doc = "Bits 12:13"]
#[inline(always)]
pub fn oeover(&mut self) -> OEOVER_W {
OEOVER_W { w: self }
}
#[doc = "Bits 8:9"]
#[inline(always)]
pub fn outover(&mut self) -> OUTOVER_W {
OUTOVER_W { w: self }
}
#[doc = "Bits 0:4 - 0-31 -> selects pin function according to the gpio table\\n 31 == NULL"]
#[inline(always)]
pub fn funcsel(&mut self) -> FUNCSEL_W {
FUNCSEL_W { w: self }
}
}
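// Usage sketch (hypothetical handle; assumes the usual svd2rust closure API on whichever
// peripheral owns `GPIO28_CTRL`, e.g. an IO bank exposing a `gpio28_ctrl` register):
//     periph.gpio28_ctrl.modify(|_, w| w.funcsel().uart0_tx().outover().normal());
//     let routed_to_uart = periph.gpio28_ctrl.read().funcsel().is_uart0_tx();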
|
uart0_tx
|
parsers.rs
|
use crate::region::{Region, RegionError};
use nom::{
multi::{many0, separated_list1},
sequence::preceded,
combinator::{opt, eof, peek},
branch::alt,
bytes::complete::{tag, is_not},
character::complete::{char, digit1, alpha1, alphanumeric1},
error::{Error, ErrorKind},
IResult, Slice, Parser,
bytes::complete::{take_while, take_while_m_n, take_till1},
};
use crate::naming::FlagType;
use crate::naming::parsers::*;
use crate::naming::nointro::tokens::*;
use nom::sequence::pair;
use nom::combinator::recognize;
use nom::bytes::complete::take_until;
fn parse_region(input: &str) -> IResult<&str, (Vec<&str>, Vec<Region>)>
{
let regions = Region::try_from_nointro_region_with_strs(input)
.map_err(|e|
{
match e {
RegionError::BadRegionCode(_, _, idx)
=> nom::Err::Error(Error::new(input.slice(idx..),
ErrorKind::Tag)),
_ => nom::Err::Error(Error::new(input, ErrorKind::Tag))
}
})?;
// yes, this is not how nom parsers generally work...
Ok(("", regions))
}
fn parse_region_tag(input: &str) -> IResult<&str, NoIntroToken>
{
// Hack because we don't want nom to backtrack :|
let (input, region_inner) = in_parens(is_not(")"))(input)?;
let (_, (strs, regions)) = parse_region(region_inner)?;
Ok((input, NoIntroToken::Region(strs, regions)))
}
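// Sketch of expected behaviour (see `parse_region_tag_test` below): "(Japan, Europe)" should
// yield a `NoIntroToken::Region` for Japan and Europe with no remaining input, while any text
// after the closing paren (e.g. " (Rev 1)") would be left unconsumed for later parsers.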
macro_rules! nointro_brackets_flag_parser {
($fn_name:ident, $tag:literal) =>
{
fn $fn_name<'a>(input: &'a str) -> IResult<&'a str, NoIntroToken>
{
let (input, tag) = in_brackets(tag($tag))(input)?;
Ok((input, NoIntroToken::Flag(FlagType::Bracketed, tag)))
}
}
}
nointro_brackets_flag_parser!(parse_baddump_tag, "b");
nointro_brackets_flag_parser!(parse_bios_tag, "BIOS");
// should be handled by parse_additional_tag
// nointro_parens_flag_parser!(parse_prototype_tag, "Proto");
// nointro_parens_flag_parser!(parse_kiosk_tag, "Kiosk");
// nointro_parens_flag_parser!(parse_demo_tag, "Demo");
// nointro_parens_flag_parser!(parse_sample_tag, "Sample");
// nointro_parens_flag_parser!(parse_bonus_disc_tag, "Bonus Disc");
// nointro_parens_flag_parser!(parse_bonus_cd_tag, "Bonus CD");
// nointro_parens_flag_parser!(parse_disc_tag, "Disc");
// nointro_parens_flag_parser!(parse_update_tag, "Update");
// nointro_parens_flag_parser!(parse_dlc_tag, "DLC");
// nointro_parens_flag_parser!(parse_taikenban_tag, "Taikenban"); /* 体験版 == Demo */
// nointro_parens_flag_parser!(parse_tentoutaikenban_tag, "Tentou Taikenban"); /* 店頭体験版 == Kiosk */
// nointro_parens_flag_parser!(parse_unlicensed_tag, "Unl");
// nointro_parens_flag_parser!(parse_tool_tag, "Tool");
// nointro_parens_flag_parser!(parse_psp_the_best_tag, "PSP the Best");
// nointro_parens_flag_parser!(parse_psn_tag, "PSN");
// nointro_parens_flag_parser!(parse_eshop_tag, "eShop");
// nointro_parens_flag_parser!(parse_aftermarket_tag, "Aftermarket");
// todo: tag prefixes and suffixes ('Alt') and 'PS3 v...')
// 4 digit versions can only appear AFTER a v... tag.
make_parens_tag!(parse_version_tag, parse_version_string, NoIntroToken);
fn parse_version_string(input: &str) -> IResult<&str, NoIntroToken>
{
fn parse_revision_version(input: &str) -> IResult<&str, (&str, &str, Option<&str>,
Option<&str>, Option<Vec<&str>>)>
{
let (input, tag) = tag("Rev")(input)?;
let (input, _) = char(' ')(input)?;
let (input, major) = alphanumeric1(input)?;
let (input, _) = opt(char('.'))(input)?;
let (input, minor) = opt(alphanumeric1)(input)?;
Ok((input, (tag, major, minor, None, None)))
}
fn parse_single_prefixed_version(input: &str) -> IResult<&str, (&str, &str, Option<&str>,
Option<&str>, Option<Vec<&str>>)>
{
let (input, ver) = tag("v")(input)?;
let (input, major) = digit1(input)?;
let (input, minor) = opt(preceded(char('.'),
take_while(|c: char| c.is_alphanumeric()
|| c == '.' || c == '-')))(input)?;
let (input, suffix) =
opt(preceded(char(' '), tag("Alt")))(input)?;
Ok((input,(ver.trim(), major, minor, None, suffix.map(|x| vec![x]))))
}
fn parse_unprefixed_dot_version(input: &str) -> IResult<&str, (&str, &str, Option<&str>,
Option<&str>, Option<Vec<&str>>)>
{
let (input, major) = digit1(input)?;
let (input, _) = char('.')(input)?;
let (input, minor) = digit1(input)?;
Ok((input, ("", major, Some(minor), None, None)))
}
fn parse_single_prefixed_version_with_full_tag(input: &str) -> IResult<&str, (&str, &str, Option<&str>,
Option<&str>, Option<Vec<&str>>)>
{
// Redump BIOS versions include date
fn parse_date(input: &str) -> IResult<&str, &str>
{
fn parse_date_check(input: &str) -> IResult<&str, (&str, &str, &str)> {
let (input, month) = take_while_m_n(2, 2, |c: char| c.is_ascii_digit())(input)?;
let (input, _) = char('/')(input)?;
let (input, day) = take_while_m_n(2, 2, |c: char| c.is_ascii_digit())(input)?;
let (input, _) = char('/')(input)?;
let (input, year) = take_while_m_n(2, 2, |c: char| c.is_ascii_digit())(input)?;
Ok((input, (month, day, year)))
}
let (input, _) = peek(parse_date_check)(input)?;
let (input, datestr) = take_while_m_n(8, 8, |c: char| c.is_ascii_digit() || c == '/')(input)?;
Ok((input, datestr))
}
let (input, ver) = tag("Version")(input)?;
let (input, _) = char(' ')(input)?;
let (input, major) = digit1(input)?;
let (input, minor) = opt(preceded(char('.'),
take_while(|c: char| c.is_ascii_alphanumeric()
|| c == '.' || c == '-')))(input)?;
let mut suffixes = Vec::new();
let (input, datestr) = opt(preceded(char(' '),parse_date))(input)?;
let (input, suffix) = opt(
preceded(char(' '),
alt((
tag("Alt"),
take_while_m_n(1,1, |c: char| c.is_ascii_uppercase() && c.is_ascii_alphabetic()),
))
))
(input)?;
if datestr.is_none() && suffix.is_none() {
return Ok((input,(ver.trim(), major, minor, None, None)));
}
if let Some(datestr) = datestr {
suffixes.push(datestr);
}
if let Some(suffix) = suffix {
suffixes.push(suffix);
}
Ok((input,(ver.trim(), major, minor, None, Some(suffixes))))
}
fn parse_playstation_version(input: &str) -> IResult<&str, (&str, &str, Option<&str>,
Option<&str>, Option<Vec<&str>>)>
{
let (input, prefix) = alt((tag("PS3"), tag("PSP")))(input)?;
let (input, _) = char(' ')(input)?;
let (input, (ver, major, minor, _, _)) = parse_single_prefixed_version(input)?;
Ok((input, (ver, major, minor, Some(prefix), None)))
}
let (input, vers1) =
alt((
parse_playstation_version,
parse_single_prefixed_version,
parse_single_prefixed_version_with_full_tag,
parse_revision_version,
parse_unprefixed_dot_version))(input)?;
let vers1 = (vers1.0, vers1.1, vers1.2, vers1.3, vers1.4, None);
let (input, nextvers) =
many0(pair(
opt(alt((tag(", "), tag(","), tag(" ")))),
alt((
parse_playstation_version,
parse_single_prefixed_version,
parse_single_prefixed_version_with_full_tag,
parse_revision_version,
take_while_m_n(4, 4, |c: char| c.is_ascii_alphanumeric())
.map(|s| ("", s, None, None, None)
)))
))(input)?;
let mut nextvers: Vec<_> = nextvers
.into_iter()
.map(|(sep, (v, maj, min, pref, suff ))| {
(v, maj, min, pref, suff, sep)
}).collect();
nextvers.insert(0, vers1);
Ok((input, NoIntroToken::Version(nextvers)))
}
make_parens_tag!(parse_dev_status_tag, parse_dev_status, NoIntroToken);
fn parse_dev_status(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, status) =
alt((
tag("Demo"),
tag("Beta"),
tag("Sample"),
tag("Prototype"),
tag("Proto"),
))(input)?;
let (input, beta) = opt(preceded(char(' '),
take_while(|c: char| c.is_ascii_alphanumeric() || c == ' ')))(input)?;
Ok((input, NoIntroToken::Release(status, beta)))
}
make_parens_tag!(parse_disc_tag, parse_disc, NoIntroToken);
fn parse_disc(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, disc) = tag("Disc")(input)?;
let (input, _) = char(' ')(input)?;
let (input, number) = digit1(input)?;
Ok((input, NoIntroToken::Media(disc, number)))
}
fn parse_scene_number(input: &str) -> IResult<&str, NoIntroToken>
{
fn parse_regular_scene_number(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, scene) = take_while_m_n(4, 4,
|c: char| c.is_ascii_digit())(input)?;
Ok((input, NoIntroToken::Scene(scene, None)))
}
fn parse_z_or_x_scene_number(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, z) = alt((tag("z"), tag("x")))(input)?;
let (input, scene) = take_while_m_n(3, 3, |c: char| c.is_ascii_digit())(input)?;
Ok((input, NoIntroToken::Scene(scene, Some(z))))
}
fn parse_bios_scene_number(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, b) = tag("xB")(input)?;
let (input, scene) = take_while_m_n(2, 2, |c: char| c.is_ascii_digit())(input)?;
Ok((input, NoIntroToken::Scene(scene, Some(b))))
}
let (input, scene) = alt((
parse_regular_scene_number, // ####
parse_bios_scene_number, // xB##
parse_z_or_x_scene_number, // z|x###
))(input)?;
Ok((input, scene))
}
fn parse_scene_tag(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, scene) = parse_scene_number(input)?;
let (input, _) = tag(" - ")(input)?;
Ok((input, scene))
}
make_parens_tag!(parse_language_tag, parse_language, NoIntroToken);
fn parse_language(i
|
IResult<&str, NoIntroToken>
{
fn parse_language_code(input: &str) -> IResult<&str, &str>
{
let (input, code) = take_while_m_n(2, 2, |c: char| c.is_ascii_alphabetic())(input)?;
Ok((input, code))
}
fn parse_language_variant(input: &str) -> IResult<&str, (&str, Option<&str>)>
{
let (input, code) = parse_language_code(input)?;
let (input, _) = tag("-")(input)?;
let (input, variant) = alpha1(input)?;
Ok((input, (code, Some(variant))))
}
let (input, languages) = separated_list1(
char(','),
alt((
parse_language_variant,
parse_language_code
.map(|s| (s, None))
)),
)(input)?;
Ok((input, NoIntroToken::Languages(languages)))
}
fn parse_additional_tag(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, _) = tag("(")(input)?;
let (input, add_tag) = take_till1(|c: char| c == ')')(input)?;
let (input, _) = tag(")")(input)?;
Ok((input, NoIntroToken::Flag(FlagType::Parenthesized, add_tag)))
}
// No one ever told us inner parens were allowed!
fn parse_redump_multitap_flag(input: &str) -> IResult<&str, NoIntroToken>
{
// (Multi Tap (SCPH-10090) Doukonban)
fn parse_redump_multitap_flag_inner(input: &str) -> IResult<&str, ()>
{
let (input, _) = tag("Multi Tap (")(input)?;
let (input, _) = take_until(")")(input)?;
let (input, _) = char(')')(input)?;
let (input, _) = take_until(")")(input)?;
Ok((input, ()))
}
let (input, _) = char('(')(input)?;
let (input, flag) = recognize(parse_redump_multitap_flag_inner)(input)?;
let (input, _) = char(')')(input)?;
Ok((input, NoIntroToken::Flag(FlagType::Parenthesized, flag)))
}
fn parse_known_flags(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, tag) = alt((
parse_language_tag,
parse_version_tag,
parse_dev_status_tag,
parse_disc_tag,
parse_redump_multitap_flag,
parse_additional_tag
))(input)?;
Ok((input, tag))
}
pub(crate) fn do_parse(input: &str) -> IResult<&str, Vec<NoIntroToken>>
{
// We need this because of "FIFA 20 - Portuguese (Brazil) In-Game Commentary"
fn parse_region_tag_and_ensure_end(input: &str) -> IResult<&str, NoIntroToken>
{
let (input, code) = parse_region_tag(input)?;
let (input, _) = alt(
(eof,
peek(preceded(char(' '), alt((
parse_additional_tag,
parse_baddump_tag
)))).map(|_| "")))(input)?;
Ok((input, code))
}
let mut tokens = Vec::new();
let (input, scene) = opt(parse_scene_tag)(input)?;
let (input, bios) = opt(parse_bios_tag)(input)?;
if let Some(token) = scene {
tokens.push(token);
}
if let Some(token) = bios {
tokens.push(token);
}
// Trim left whitespace
let (input, _) = many0(char(' '))(input)?;
let (input, (title, region))
= take_up_to(parse_region_tag_and_ensure_end)(input)?;
tokens.push(NoIntroToken::Title(title.trim()));
tokens.push(region);
let (input, mut known_tags) = many0(
preceded(opt(char(' ')), parse_known_flags))(input)?;
tokens.append(&mut known_tags);
// end with [b]
let (input, bad_dump) = opt(preceded(opt(char(' ')),
parse_baddump_tag))(input)?;
if let Some(token) = bad_dump {
tokens.push(token);
}
// make sure we are EOF.
let (input, _) = eof(input)?;
match input {
"" => Ok((input, tokens)),
_ => Err(nom::Err::Error(Error::new(input, ErrorKind::NonEmpty)))
}
}
#[cfg(test)]
mod tests
{
use crate::naming::nointro::parsers::*;
use crate::region::Region;
use nom::error::{ErrorKind, Error};
use crate::naming::TokenizedName;
#[test]
fn parse_weird_beta()
{
println!("{:?}", do_parse("Isle of Minno (Europe) (0.01) (Beta)").unwrap());
}
#[test]
fn parse_scene_tags()
{
assert_eq!(Ok(("", NoIntroToken::Scene("1234", None))), parse_scene_number("1234"));
assert_eq!(Ok(("", NoIntroToken::Scene("234", Some("z")))), parse_scene_number("z234"));
assert_eq!(Ok(("", NoIntroToken::Scene("234", Some("x")))), parse_scene_number("x234"));
assert_eq!(Ok(("", NoIntroToken::Scene("34", Some("xB")))), parse_scene_number("xB34"));
}
#[test]
fn parse_language_test()
{
let langs = parse_language_tag("(En,Fr,Es,Zh-Hant)");
assert_eq!(Ok(("", NoIntroToken::Languages(vec![("En", None),
("Fr", None), ("Es", None), ("Zh", Some("Hant"))]))), langs);
}
#[test]
fn parse_odekake()
{
let (input, stuff) = do_parse("Odekake Lester - Lelele no Le (^^; (Japan) (Unl) (Rev 1)").unwrap();
assert_eq!("", input);
assert_eq!(Some(&NoIntroToken::Title("Odekake Lester - Lelele no Le (^^;")), stuff.first())
}
#[test]
fn parse_additional()
{
let stuff = parse_additional_tag("()");
assert_eq!(stuff, Err(nom::Err::Error(Error::new(")", ErrorKind::TakeTill1))));
}
#[test]
fn parse_no_region_fail()
{
let err = do_parse("void tRrLM(); Void Terrarium");
assert_eq!(Err(nom::Err::Error(Error::new("void tRrLM(); Void Terrarium", ErrorKind::ManyTill))), err);
}
#[test]
fn parse_void()
{
let (input, stuff) = do_parse("void tRrLM(); Void Terrarium (Japan)").unwrap();
assert_eq!("", input);
assert_eq!(Some(&NoIntroToken::Title("void tRrLM(); Void Terrarium")), stuff.first())
}
#[test]
fn parse_test_multitap()
{
assert_eq!(do_parse("Konjiki no Gashbell!! Go! Go! Mamono Fight!! (Japan) (Multi Tap (SCPH-10090) Doukonban)"),
Ok(("",
vec![
NoIntroToken::Title("Konjiki no Gashbell!! Go! Go! Mamono Fight!!"),
NoIntroToken::Region(vec!["Japan"], vec![Region::Japan]),
NoIntroToken::Flag(FlagType::Parenthesized, "Multi Tap (SCPH-10090) Doukonban")
])))
}
#[test]
fn parse_to_string()
{
for string in &[
"Cube CD 20, The (40) - Testing (Europe) (Rev 10)",
"void tRrLM(); Void Terrarium (Japan)",
"FIFA 20 - Portuguese (Brazil) In-Game Commentary (World) (Version 10.5.6-10, PS3 v10.0) (Pt-BR) (DLC) (eShop)",
"Isle of Minno (Europe) (0.01) (Beta)",
"Isle of Minno (Europe) (v0.01) (Beta)",
]
{
assert_eq!(string,
&NoIntroName::try_parse(string).unwrap().to_string())
}
}
#[test]
fn parse_disc_test()
{
assert_eq!(parse_disc_tag("(Disc 5)"),
Ok(("", NoIntroToken::Media("Disc", "5"))));
}
#[test]
fn parse_beta_test()
{
assert_eq!(parse_dev_status_tag("(Beta)"),
Ok(("", NoIntroToken::Release("Beta", None))));
assert_eq!(parse_dev_status_tag("(Beta 3)"),
Ok(("", NoIntroToken::Release("Beta", Some("3")))));
assert_eq!(parse_dev_status_tag("(Beta 55)"),
Ok(("", NoIntroToken::Release("Beta", Some("55")))));
assert_eq!(parse_dev_status_tag("(Beta Phase 2)"),
Ok(("", NoIntroToken::Release("Beta", Some("Phase 2")))));
}
#[test]
fn parse_redump_ver_test()
{
assert_eq!(parse_version_tag("(Version 5.0 04/15/10 E)"),
Ok(("", NoIntroToken::Version(vec![
("Version", "5", Some("0"), None, Some(
vec![
"04/15/10",
"E"
]
), None)
]))));
assert_eq!(parse_version_tag("(Version 4.5 05/25/00 A)"),
Ok(("", NoIntroToken::Version(vec![
("Version", "4", Some("5"), None, Some(
vec![
"05/25/00",
"A"
]
), None)
]))));
}
#[test]
fn parse_ver_test()
{
assert_eq!(parse_version_tag("(v10.XX)"),
Ok(("", NoIntroToken::Version(vec![("v", "10", Some("XX"), None, None, None)]))));
assert_eq!(parse_version_tag("(Version 10.5.6-10)"),
Ok(("", NoIntroToken::Version(vec![("Version", "10", Some("5.6-10"), None, None, None)]))));
assert_eq!(parse_version_tag("(Version 9)"),
Ok(("", NoIntroToken::Version(vec![("Version", "9", None, None, None, None)]))));
assert_eq!(parse_version_tag("(v1.0.0, v12342)"),
Ok(("", NoIntroToken::Version(vec![
("v", "1", Some("0.0"), None, None, None),
("v", "12342", None, None, None, Some(", "))
]))));
assert_eq!(parse_version_tag("(Rev 10)"),
Ok(("", NoIntroToken::Version(vec![("Rev", "10", None, None, None, None)]))));
assert_eq!(parse_version_tag("(Rev 10.08)"),
Ok(("", NoIntroToken::Version(vec![("Rev", "10", Some("08"), None, None, None)]))));
assert_eq!(parse_version_tag("(Rev 5C21)"),
Ok(("", NoIntroToken::Version(vec![("Rev", "5C21", None, None, None, None)]))));
assert_eq!(parse_version_tag("(0.01)"),
Ok(("", NoIntroToken::Version(vec![("", "0", Some("01"), None, None, None)]))));
assert_eq!(parse_version_tag("(v1.07 Rev 1)"),
Ok(("", NoIntroToken::Version(vec![
("v", "1", Some("07"), None, None, None),
("Rev", "1", None, None, None, Some(" "))
]))));
assert_eq!(parse_version_tag("(v1.07 1023)"),
Ok(("", NoIntroToken::Version(vec![
("v", "1", Some("07"), None, None, None),
("", "1023", None, None, None, Some(" "))
]))));
assert_eq!(parse_version_tag("(v1.07, 1023)"),
Ok(("", NoIntroToken::Version(vec![
("v", "1", Some("07"),None, None, None),
("", "1023", None, None, None, Some(", "))
]))));
assert_eq!(parse_version_tag("(v1.07, v1023)"),
Ok(("", NoIntroToken::Version(vec![
("v", "1", Some("07"),None, None, None),
("v", "1023", None, None, None, Some(", "))
]))));
assert_eq!(parse_version_tag("(v1.07b, v1023)"),
Ok(("", NoIntroToken::Version(vec![
("v", "1", Some("07b"),None, None, None),
("v", "1023", None, None, None, Some(", "))
]))));
assert_eq!(parse_version_tag("(1984)"),
Err(nom::Err::Error(Error::new(")", ErrorKind::Char))));
assert_eq!(parse_version_tag("(v1.07, v1023)"),
Ok(("", NoIntroToken::Version(vec![
("v", "1", Some("07"),None, None, None),
("v", "1023", None, None, None, Some(", "))
]))));
assert_eq!(parse_version_tag("(v1.07, v1023, PS3 v1.70, PSP v5.51, v60 Alt)"),
Ok(("", NoIntroToken::Version(vec![
("v", "1", Some("07"),None, None, None),
("v", "1023", None, None, None, Some(", ")),
("v", "1", Some("70"), Some("PS3"), None, Some(", ")),
("v", "5", Some("51"), Some("PSP"), None, Some(", ")),
("v", "60", None, None, Some(vec!["Alt"]), Some(", "))
]))));
assert_eq!(parse_version_tag("(Version 5.0 04/15/10 E)"),
Ok(("", NoIntroToken::Version(vec![
("Version", "5", Some("0"), None, Some(
vec![
"04/15/10",
"E"
]
), None)
]))));
assert_eq!(parse_version_tag("(Version 4.5 05/25/00 A)"),
Ok(("", NoIntroToken::Version(vec![
("Version", "4", Some("5"), None, Some(
vec![
"05/25/00",
"A"
]
), None)
]))));
//
// (v1.01, )
//v1.07 Rev 1
}
#[test]
fn parse_argentina()
{
assert_eq!(do_parse("Truco '96 (Argentina) (Unl)"),
Ok(("",
vec![NoIntroToken::Title("Truco '96"),
NoIntroToken::Region(vec!["Argentina"], vec![Region::Argentina]),
NoIntroToken::Flag(FlagType::Parenthesized, "Unl")]
)));
assert_eq!(Ok(String::from("Truco '96 (Argentina) (Unl)")), NoIntroName
::try_parse("Truco '96 (Argentina) (Unl)").map(|s| s.to_string()));
assert_eq!("AR", Region::Argentina.as_ref());
assert_eq!(Ok(vec![Region::Argentina]), Region::try_from_tosec_region("AR"));
}
#[test]
fn parse_region_test()
{
assert_eq!(parse_region("Japan, Europe, Australia, New Zealand"),
Ok(("",
(vec!["Japan", "Europe", "Australia", "New Zealand"],
vec![Region::Japan, Region::Europe, Region::Australia, Region::NewZealand]
))));
}
#[test]
fn parse_region_tag_test()
{
assert_eq!(parse_region_tag("(Japan, Europe, Australia, New Zealand)"),
Ok(("", NoIntroToken::Region(vec!["Japan", "Europe", "Australia", "New Zealand"],
vec![Region::Japan, Region::Europe, Region::Australia, Region::NewZealand]))));
}
#[test]
fn parse_region_test_fail()
{
assert_eq!(parse_region("Japan, Europe, Apustralia, New Zealand"),
Err(nom::Err::Error(Error::new("Apustralia, New Zealand", ErrorKind::Tag))))
}
#[test]
fn parse_brazil()
{
// FIFA 20 - Portuguese (Brazil) In-Game Commentary (World) (Pt-BR) (DLC) (eShop)
// bruh this is dumb.
let (input, stuff) =
do_parse("FIFA 20 - Portuguese (Brazil) In-Game Commentary (World) (Pt-BR) (DLC) (eShop)").unwrap();
assert_eq!("", input);
assert_eq!(Some(
&NoIntroToken::Title("FIFA 20 - Portuguese (Brazil) In-Game Commentary")), stuff.first())
}
#[test]
fn parse_unl()
{
assert_eq!(parse_additional_tag("(Unl)"), Ok(("", NoIntroToken::Flag(FlagType::Parenthesized, "Unl"))))
}
}
|
nput: &str) ->
|
query.js
|
var codeMirrorData ;
var codeMirrorQuery ;
function changeMode(element,syntax) {
var mode = "turtle";
switch (syntax.toUpperCase()) {
case "TURTLE": mode = "turtle" ;
break ;
case "N-TRIPLES": mode = "turtle" ;
break ;
case "RDF/XML": mode = "xml" ;
break ;
case "TRIX": mode = "xml" ;
break ;
case "SHEXJ" : mode = "javascript" ;
break ;
case "RDF/JSON" : mode = "javascript" ;
break ;
case "JSON-LD" : mode = "javascript" ;
break ;
case "SHEXC": mode = "shex" ;
break ;
}
element.setOption("mode",mode);
}
function changeTheme(theme) {
codeMirrorData.setOption("theme",theme);
codeMirrorQuery.setOption("theme",theme);
}
$(document).ready(function(){
function showNodeQualified(node, prefix) {
console.log("showQualify node)");
console.log(node);
if (node.type=="uri") {
var rawNode = node.value;
for (var key in prefix) {
if (rawNode.startsWith(prefix[key])) {
var longNode = "<" + rawNode + ">";
return "<abbr title=\"" + longNode + "\">" + key + ":" + rawNode.slice(prefix[key].length) + "</abbr>";
}
}
return "<" + rawNode + ">" ;
} else {
return node.value
}
}
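// Example (hypothetical values): showNodeQualified({type: "uri", value: "http://example.org/foo"},
// {ex: "http://example.org/"}) returns an <abbr> labelled "ex:foo" whose title holds the full IRI;
// URIs with no matching prefix come back as "<...>" and non-URI nodes as their plain value.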
function showResult(result,nodesPrefixMap) {
if (result) {
var tableHead = '<thead>';
var vars = result.head.vars;
console.log(vars);
$.each(vars, function(i) {
tableHead += "<th data-sortable=\"true\">" + vars[i] + "</th>";
});
tableHead += '</thead>';
var tableBody = '';
$.each(result.results.bindings, function(i) {
var row = result.results.bindings[i];
console.log("Binding: " + JSON.stringify(row) );
tableBody += "<tr>";
$.each(vars,function(i) {
tableBody += "<td><code>";
if (row[vars[i]]) tableBody += showNodeQualified(row[vars[i]], nodesPrefixMap);
tableBody += "</code></td>";
});
tableBody += "</tr>" ;
});
$("#resultDiv").append("<table data-toggle=\"table\" data-sort-order=\"desc\" data-sort-name=\"node\">" +
tableHead +
tableBody +
"</table>");
var pre = $("<pre/>").text(JSON.stringify(result,undefined,2));
var details = $("<details/>").append(pre);
$("#resultDiv").append(details);
}
else {
console.log("Result: ")
console.log(result);
}
}
var urlShaclex = getHost();
console.log("urlShaclex: " + urlShaclex);
// When loading document get result from data-result attribute and show it
var result = $("#resultDiv").data("result");
showResult(result);
var rdfData = document.getElementById("rdfData");
if (rdfData) {
codeMirrorData = CodeMirror.fromTextArea(rdfData, {
lineNumbers: true,
mode: "turtle",
viewportMargin: Infinity,
matchBrackets: true,
});
}
|
console.log("Formatting query")
var query = document.getElementById("queryData")
if (query) {
codeMirrorQuery = CodeMirror.fromTextArea(query, {
lineNumbers: true,
mode: "sparql",
viewportMargin: Infinity,
matchBrackets: true
});
}
/** The following lines associate events to the panel tabs: when a user clicks on a panel tab,
 * the corresponding xxxActiveTab input is updated.
 * dataPanel2, schemaPanel and shapeMapPanel are the classes of the panels;
 * dataPanel2 is used because the data panel appears in 2 places (validation requires 2).
 */
$('.dataPanel2 a[data-toggle="tab"]').on('shown.bs.tab', function (e) {
var name = $(e.target).attr("href");
console.log("New data tab: " + name);
$('#rdfDataActiveTab').val(name);
});
$('.queryPanel a[data-toggle="tab"]').on('shown.bs.tab', function (e) {
var name = $(e.target).attr("href");
console.log("New Query tab: " + name);
$('#activeQueryTab').val(name);
});
$("#permalink").click(function(e) {
e.preventDefault();
console.log("click on permalink...");
var data = codeMirrorData.getValue();
var query = codeMirrorQuery.getValue();
var dataFormat = $("#dataFormat").find(":selected").text();
var inference = $("#inference").find(":selected").text();
var dataActiveTab = $("#rdfDataActiveTab").attr("value");
var dataFormat = "";
var dataPart="";
switch (dataActiveTab) {
case "#dataTextArea":
dataFormat = $("#dataFormatTextArea").find(":selected").text();
dataPart = "data=" + encodeURIComponent(data) ;
break;
case "#dataFile":
dataFormat = $("#dataFormatFile").find(":selected").text();
dataPart = "data=" + encodeURIComponent(data) ;
break;
case "#dataUrl":
dataFormat = $("#dataFormatUrl").find(":selected").text();
var dataURL = $("#dataURL").val();
dataPart = "dataURL=" + encodeURIComponent(dataURL) ;
break;
case "#dataEndpoint":
var endpoint = $("#inputDataEndpoint").val();
dataPart = "endpoint=" + encodeURIComponent(endpoint) ;
break;
default:
console.log("Unknown value of dataActiveTab:" + dataActiveTab);
dataFormat = $("#dataFormatTextArea").find(":selected").text();
dataPart = "data=" + encodeURIComponent(data) ;
break;
}
var queryPart = "";
var activeQueryTab = $("#activeQueryTab").attr("value");
switch (activeQueryTab) {
case "#queryTextArea":
queryPart = "query=" + encodeURIComponent(query) ;
break;
case "#queryFile":
queryPart = "query=" + encodeURIComponent(query) ;
break;
case "#queryUrl":
var queryURL = $("#queryURL").val();
queryPart = "queryURL=" + encodeURIComponent(queryURL) ;
break;
default:
console.log("Unknown value of activeQueryTab:" + activeQueryTab);
queryPart = "query=" + encodeURIComponent(query) ;
break;
}
var location = "/query?" +
dataPart + "&" +
"dataFormat=" + encodeURIComponent(dataFormat) + "&" +
queryPart + "&" +
"&inference=" + encodeURIComponent(inference) + "&" +
"activeDataTab=" + encodeURIComponent(dataActiveTab) + "&" +
"activeQueryTab=" + encodeURIComponent(activeQueryTab)
;
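// Example (hypothetical values) of the resulting location:
// /query?data=...&dataFormat=TURTLE&query=...&inference=NONE&activeDataTab=%23dataTextArea&activeQueryTab=%23queryTextArea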
var href = urlShaclex + location
console.log("NewHRef: " + href)
window.location.assign(href) ;
});
});
| |
template_fragmenter.py
|
import logging
# Getting the name of the module for the log system
logger = logging.getLogger(__name__)
# Definition of regex patterns
HEADER_OPLS2005 = "* LIGAND DATABASE FILE (OPLS2005)\n*\n"
PATTERN_OPLS2005_RESX_HEADER = "{:5} {:6d} {:6d} {:7d} {:7d} {:8d} \n"
PATTERN_OPLS2005_RESX_LINE = "{:5d} {:5d} {:1} {:4} {:4} {:5d} {: >11.5f} {: >11.5f} {: >11.5f}\n"
PATTERN_OPLS2005_NBON = "{:5d} {: >8.4f} {: >8.4f} {: >10.6f} {: >8.4f} {: >8.4f} {: >13.9f} {: >13.9f}\n"
PATTERN_OPLS2005_BOND = "{:5d} {:5d} {:>9.3f} {:>6.3f}\n"
PATTERN_OPLS2005_THETA = "{:5d} {:5d} {:5d} {:>11.5f} {: >11.5f}\n"
PATTERN_OPLS2005_PHI = "{:5d} {:5d} {: 5d} {:5d} {:>9.5f} {: >4.1f} {: >3.1f}\n"
class Atom:
"""A class which contains all the information and properties of an Atom, and several methods to build templates from
this data (currently only in OPLS2005)."""
def __init__(self, atom_id, parent_id, location, atom_type, pdb_atom_name, unknown, x_zmatrix=0, y_zmatrix=0,
z_zmatrix=0, sigma=0, epsilon=0, charge=0, radnpSGB=0, radnpType=0, sgbnpGamma=0, sgbnpType=0,
is_linker=False, is_fragment=False):
"""
:param atom_id: ID of the atom in the template.
:type atom_id: int
:param parent_id: ID of the parent atom.
:type parent_id: int
:param location: Location of the atom. M for backbone, S for side chain; ligand links and non-ligand links use
this classification for some checks. However, protein links define their backbone atoms independently of this
flag; protein residue templates have M only for N, C and CA, under whatever name is actually used for those
atoms.
:type location: str
:param atom_type: Atom type. For example: CA (carbon aromatic), CT (carbon terminal), etc.
:type atom_type: str
:param pdb_atom_name: PDB atom name.
:type pdb_atom_name: str
:param unknown: Nobody knows what this is...
:type unknown: int
:param x_zmatrix: Coord X of the Z-matrix.
:type x_zmatrix: float
:param y_zmatrix: Coord Y of the Z-matrix.
:type y_zmatrix: float
:param z_zmatrix: Coord Z of the Z-matrix.
:type z_zmatrix: float
:param sigma: sigma value, used to compute Van der Waals terms. Units in angstroms.
:type sigma: float
:param epsilon: epsilon value, used to compute Van Der Waals terms. Units in kcal/mol.
:type epsilon: float
:param charge: charge value, used to compute electrostatic potentials. Units are elementary charge.
:type charge: float
:param radnpSGB: radii of non polar SGB. Atomic radii used to calculate the surface of the molecules when
obtaining SGB Born radii.
:type radnpSGB: float
:param radnpType: radii of non polar Type. Atomic radii used to calculate SASA in the non-polar term of the SGB
and VDGBNP models
:type radnpType: float
:param sgbnpGamma: SGB non polar Gamma. Gamma parameter of the nonpolar model.
:type sgbnpGamma: float
:param sgbnpType: SGB non polar Type. Alpha parameter for the nonpolar model.
:type sgbnpType: float
:param is_linker: Flag set when the atom is linking the fragment and the core.
:type is_linker: bool
:param is_fragment: Flag set when the atom is of the fragment.
:type is_fragment: bool
"""
self.atom_id = int(atom_id)
self.parent_id = int(parent_id)
self.location = str(location)
self.atom_type = str(atom_type)
self.pdb_atom_name = str(pdb_atom_name)
self.unknown = int(unknown)
self.x_zmatrix = float(x_zmatrix)
self.y_zmatrix = float(y_zmatrix)
self.z_zmatrix = float(z_zmatrix)
self.sigma = float(sigma)
self.epsilon = float(epsilon)
self.charge = float(charge)
self.radnpSGB = float(radnpSGB)
self.radnpType = float(radnpType)
self.sgbnpGamma = float(sgbnpGamma)
self.sgbnpType = float(sgbnpType)
self.bonds = []
self.thetas = []
self.phis = []
self.iphis = []
self.is_fragment = bool(is_fragment)
self.is_linker = bool(is_linker)
def write_resx(self):
return PATTERN_OPLS2005_RESX_LINE.format(self.atom_id, self.parent_id, self.location, self.atom_type.strip(),
self.pdb_atom_name, self.unknown, self.x_zmatrix,
self.y_zmatrix, self.z_zmatrix)
def write_nbon(self):
return PATTERN_OPLS2005_NBON.format(self.atom_id, self.sigma, self.epsilon, self.charge, self.radnpSGB,
self.radnpType, self.sgbnpGamma, self.sgbnpType)
class Bond:
def __init__(self, atom1, atom2, spring, eq_dist, is_fragment=False, is_linker=False):
self.atom1 = int(atom1)
self.atom2 = int(atom2)
self.spring = float(spring)
self.eq_dist = float(eq_dist)
self.is_fragment = bool(is_fragment)
self.is_linker = bool(is_linker)
def write_bond(self):
return PATTERN_OPLS2005_BOND.format(self.atom1, self.atom2, self.spring, self.eq_dist)
class Theta:
def __init__(self, atom1, atom2, atom3, spring, eq_angle, is_fragment=False):
self.atom1 = int(atom1)
self.atom2 = int(atom2)
self.atom3 = int(atom3)
self.spring = float(spring)
self.eq_angle = float(eq_angle)
self.is_fragment = bool(is_fragment)
def write_theta(self):
return PATTERN_OPLS2005_THETA.format(self.atom1, self.atom2, self.atom3, self.spring, self.eq_angle)
class Phi:
def __init__(self, atom1, atom2, atom3, atom4, constant, prefactor, nterm, improper, is_fragment=False):
self.atom1 = int(atom1)
self.atom2 = int(atom2)
self.atom3 = int(atom3)
self.atom4 = int(atom4)
self.constant = float(constant)
self.prefactor = float(prefactor)
self.nterm = float(nterm)
self.improper = bool(improper)
self.is_fragment = bool(is_fragment)
def write_phi(self):
if not self.improper:
return PATTERN_OPLS2005_PHI.format(self.atom1, self.atom2, self.atom3, self.atom4, self.constant,
self.prefactor, self.nterm)
def write_iphi(self):
if self.improper:
return PATTERN_OPLS2005_PHI.format(self.atom1, self.atom2, self.atom3, self.atom4, self.constant,
self.prefactor, self.nterm)
class TemplateOPLS2005:
def __init__(self, path_to_template):
self.path_to_template = path_to_template
self.template_name = ""
self.num_nbon_params = 0
self.num_bond_params = 0
self.num_angle_params = 0
self.num_dihedr_params = 0
self.num_nonnull = 0
self.list_of_atoms = {}
self.list_of_bonds = {}
self.list_of_thetas = {}
self.list_of_phis = []
self.list_of_iphis = []
self.unique_atoms = []
self.read_template()
def read_template(self):
template = file_to_list_of_lines(self.path_to_template)
for line in template[2:3]:
self.template_name = get_string_from_line(line=line, index_initial=0, index_final=5)
self.num_nbon_params = int(get_string_from_line(line=line, index_initial=6, index_final=11))
self.num_bond_params = int(get_string_from_line(line=line, index_initial=13, index_final=17))
self.num_angle_params = int(get_string_from_line(line=line, index_initial=18, index_final=24))
self.num_dihedr_params = int(get_string_from_line(line=line, index_initial=25, index_final=31))
self.num_nonnull = int(get_string_from_line(line=line, index_initial=32, index_final=39))
for line in template[3:]:
if line.startswith("NBON"):
index = template.index(line)
break
try:
atom_id = get_string_from_line(line=line, index_initial=0, index_final=6)
parent_id = get_string_from_line(line=line, index_initial=6, index_final=11)
location = get_string_from_line(line=line, index_initial=12, index_final=13)
atom_type = get_string_from_line(line=line, index_initial=15, index_final=20)
pdb_atom_name = get_string_from_line(line=line, index_initial=21, index_final=25)
unknown = get_string_from_line(line=line, index_initial=26, index_final=31)
x_zmatrix = get_string_from_line(line=line, index_initial=32, index_final=43)
y_zmatrix = get_string_from_line(line=line, index_initial=44, index_final=55)
z_zmatrix = get_string_from_line(line=line, index_initial=56, index_final=67)
atom = Atom(atom_id=atom_id, parent_id=parent_id, location=location, atom_type=atom_type,
pdb_atom_name=pdb_atom_name, unknown=unknown, x_zmatrix=x_zmatrix, y_zmatrix=y_zmatrix,
z_zmatrix=z_zmatrix)
self.list_of_atoms.setdefault(atom.atom_id, atom)
if pdb_atom_name not in self.unique_atoms:
self.unique_atoms.append(pdb_atom_name)
else:
raise ValueError("ERROR: PDB ATOM NAME {} ALREADY EXISTS in the template {}!".format(pdb_atom_name,
self.path_to_template))
except ValueError:
raise ValueError(
"Unexpected type in line {} of {}\n{}".format(template.index(line), self.path_to_template, line))
for line in template[index + 1:]:
if line.startswith("BOND"):
index = template.index(line)
break
try:
id = int(get_string_from_line(line=line, index_initial=0, index_final=6))
self.list_of_atoms[id].sigma = float(get_string_from_line(line=line, index_initial=7, index_final=14))
self.list_of_atoms[id].epsilon = float(get_string_from_line(line=line, index_initial=15, index_final=23))
self.list_of_atoms[id].charge = float(get_string_from_line(line=line, index_initial=24, index_final=34))
self.list_of_atoms[id].radnpSGB = float(get_string_from_line(line=line, index_initial=35, index_final=43))
self.list_of_atoms[id].radnpType = float(get_string_from_line(line=line, index_initial=44, index_final=52))
self.list_of_atoms[id].sgbnpGamma = float(get_string_from_line(line=line, index_initial=53,
index_final=66))
self.list_of_atoms[id].sgbnpType = float(get_string_from_line(line=line, index_initial=67, index_final=80))
except ValueError:
raise ValueError(
"Unexpected type in line {} of {}\n{}".format(template.index(line), self.path_to_template, line))
for line in template[index + 1:]:
if line.startswith("THET"):
index = template.index(line)
break
try:
id_atom1 = int(get_string_from_line(line=line, index_initial=0, index_final=6))
id_atom2 = int(get_string_from_line(line=line, index_initial=6, index_final=12))
spring = get_string_from_line(line=line, index_initial=13, index_final=21)
eq_dist = get_string_from_line(line=line, index_initial=23, index_final=28)
# Create bond instance
bond = Bond(atom1=id_atom1, atom2=id_atom2, spring=spring, eq_dist=eq_dist)
self.list_of_bonds.setdefault((id_atom1, id_atom2), bond)
# Set which atom is bonded with
self.list_of_atoms[id_atom1].bonds.append(bond)
except ValueError:
raise ValueError(
"Unexpected type in line {} of {}\n{}".format(template.index(line), self.path_to_template, line))
for line in template[index + 1:]:
if line.startswith("PHI"):
index = template.index(line)
break
try:
id_atom1 = int(get_string_from_line(line=line, index_initial=0, index_final=6))
id_atom2 = int(get_string_from_line(line=line, index_initial=6, index_final=12))
id_atom3 = int(get_string_from_line(line=line, index_initial=13, index_final=18))
spring = get_string_from_line(line=line, index_initial=19, index_final=29)
eq_angle = get_string_from_line(line=line, index_initial=31, index_final=40)
# Create bond instance
theta = Theta(atom1=id_atom1, atom2=id_atom2, atom3=id_atom3, spring=spring, eq_angle=eq_angle)
self.list_of_thetas.setdefault((id_atom1, id_atom2, id_atom3), theta)
self.list_of_atoms[id_atom1].thetas.append(theta)
except ValueError:
raise ValueError(
"Unexpected type in line {} of {}\n{}".format(template.index(line), self.path_to_template,
line))
for line in template[index + 1:]:
if line.startswith("IPHI"):
index = template.index(line)
break
try:
id_atom1 = int(get_string_from_line(line=line, index_initial=0, index_final=5))
id_atom2 = int(get_string_from_line(line=line, index_initial=6, index_final=11))
id_atom3 = int(get_string_from_line(line=line, index_initial=12, index_final=17))
id_atom4 = int(get_string_from_line(line=line, index_initial=18, index_final=23))
constant = get_string_from_line(line=line, index_initial=26, index_final=32)
prefactor = get_string_from_line(line=line, index_initial=33, index_final=38)
nterm = get_string_from_line(line=line, index_initial=39, index_final=42)
# Create bond instance
phi = Phi(atom1=id_atom1, atom2=id_atom2, atom3=id_atom3, atom4=id_atom4, constant=constant,
prefactor=prefactor, nterm=nterm, improper=False)
self.list_of_phis.append(phi)
self.list_of_atoms[id_atom1].phis.append(phi)
except ValueError:
raise ValueError(
"Unexpected type in line {} of {}\n{}".format(template.index(line), self.path_to_template,
line))
for line in template[index + 1:]:
if line.startswith("END"):
break
try:
id_atom1 = int(get_string_from_line(line=line, index_initial=0, index_final=6))
id_atom2 = int(get_string_from_line(line=line, index_initial=7, index_final=12))
id_atom3 = int(get_string_from_line(line=line, index_initial=13, index_final=18))
id_atom4 = int(get_string_from_line(line=line, index_initial=19, index_final=24))
constant = get_string_from_line(line=line, index_initial=26, index_final=34)
prefactor = get_string_from_line(line=line, index_initial=34, index_final=39)
nterm = get_string_from_line(line=line, index_initial=40, index_final=43)
# Create bond instance
phi = Phi(atom1=id_atom1, atom2=id_atom2, atom3=id_atom3, atom4=id_atom4, constant=constant,
prefactor=prefactor, nterm=nterm, improper=True)
self.list_of_iphis.append(phi)
except ValueError:
raise ValueError(
"Unexpected type in line {} of {}\n{}".format(template.index(line), self.path_to_template,
line))
def write_header(self):
return HEADER_OPLS2005+PATTERN_OPLS2005_RESX_HEADER.format(self.template_name, self.num_nbon_params,
self.num_bond_params, self.num_angle_params,
self.num_dihedr_params, self.num_nonnull)
def write_xres(self):
content = []
for n in range(1, len(self.list_of_atoms)+1):
line = self.list_of_atoms[n].write_resx()
content.append(line)
return "".join(content)
def write_nbon(self):
content = []
for n in range(1, len(self.list_of_atoms)+1):
line = self.list_of_atoms[n].write_nbon()
content.append(line)
return "".join(content)
def write_bond(self):
|
def write_theta(self):
content = []
for key in self.list_of_thetas.keys():
line = self.list_of_thetas[key].write_theta()
content.append(line)
return "".join(content)
def write_phis(self):
content = []
for phi in self.list_of_phis:
line = phi.write_phi()
content.append(line)
return "".join(content)
def write_iphis(self):
content = []
for phi in self.list_of_iphis:
line = phi.write_iphi()
content.append(line)
return "".join(content)
def write_template(self):
header = self.write_header()
xres = self.write_xres()
nbon_header = "NBON\n"
nbon = self.write_nbon()
bond_header = "BOND\n"
bond = self.write_bond()
theta_header = "THET\n"
theta = self.write_theta()
phis_header = "PHI\n"
phis = self.write_phis()
iphis_header = "IPHI\n"
iphis = self.write_iphis()
ending = "END"
return header+xres+nbon_header+nbon+bond_header+bond+theta_header+theta+phis_header+phis+iphis_header+iphis+ending
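# The assembled string follows the fixed OPLS2005 section order: header, atom (resx) lines,
# "NBON" + non-bonded lines, "BOND" + bonds, "THET" + angles, "PHI" + proper dihedrals,
# "IPHI" + improper dihedrals, and a closing "END".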
def write_template_to_file(self, template_new_name=None):
if not template_new_name:
name = self.template_name.lower()+"z"
else:
name = template_new_name
with open(name, "w") as template:
template.write(self.write_template())
def get_list_of_fragment_atoms(self):
atoms = []
for key, atom in self.list_of_atoms.items():
if atom.is_fragment:
atoms.append((key, atom))
return atoms
def get_list_of_fragment_bonds(self):
bonds = []
for key, bond in self.list_of_bonds.items():
if bond.is_fragment:
bonds.append((key, bond))
return bonds
def get_list_of_fragment_thetas(self):
thetas = []
for key, theta in self.list_of_thetas.items():
if theta.is_fragment:
thetas.append((key, theta))
return thetas
def get_list_of_fragment_phis(self):
phis = []
for phi in self.list_of_phis:
if phi.is_fragment:
phis.append(phi)
return phis
def get_list_of_fragment_iphis(self):
iphis = []
for iphi in self.list_of_iphis:
if iphi.is_fragment:
iphis.append(iphi)
return iphis
class ReduceProperty:
def __init__(self, template, lambda_to_reduce):
self.template = template
self.lambda_to_reduce = lambda_to_reduce
def reduce_epsilons(self, function):
atoms = self.template.get_list_of_fragment_atoms()
for key, atom in atoms:
epsilon = atom.epsilon
result = function(epsilon)
self.template.list_of_atoms[key].epsilon = result
def reduce_sigmas(self, function):
atoms = self.template.get_list_of_fragment_atoms()
for key, atom in atoms:
sigma = atom.sigma
result = function(sigma)
self.template.list_of_atoms[key].sigma = result
def reduce_charges(self, function):
atoms = self.template.get_list_of_fragment_atoms()
for key, atom in atoms:
charge = atom.charge
result = function(charge)
self.template.list_of_atoms[key].charge = result
def reduce_sgbnpGamma(self, function):
atoms = self.template.get_list_of_fragment_atoms()
for key, atom in atoms:
sgbnpGamma = atom.sgbnpGamma
result = function(sgbnpGamma)
self.template.list_of_atoms[key].sgbnpGamma = result
def reduce_sgbnpType(self, function):
atoms = self.template.get_list_of_fragment_atoms()
for key, atom in atoms:
sgbnpType = atom.sgbnpType
result = function(sgbnpType)
self.template.list_of_atoms[key].sgbnpType = result
def reduce_radnpSGB(self, function):
atoms = self.template.get_list_of_fragment_atoms()
for key, atom in atoms:
radnpSGB = atom.radnpSGB
result = function(radnpSGB)
self.template.list_of_atoms[key].radnpSGB = result
def reduce_radnpType(self, function):
atoms = self.template.get_list_of_fragment_atoms()
for key, atom in atoms:
radnpType = atom.radnpType
result = function(radnpType)
self.template.list_of_atoms[key].radnpType = result
def reduce_bond_eq_dist(self, function):
bonds = self.template.get_list_of_fragment_bonds()
for key, bond in bonds:
eq_dist = bond.eq_dist
result = function(eq_dist)
self.template.list_of_bonds[key].eq_dist = result
class ReduceLinearly(ReduceProperty):
def __init__(self, template, lambda_to_reduce):
ReduceProperty.__init__(self, template, lambda_to_reduce)
def reduce_value(self, value):
result = value * self.lambda_to_reduce
return result
class ReduceExponentially(ReduceProperty):
def __init__(self, template, lambda_to_reduce):
ReduceProperty.__init__(self, template, lambda_to_reduce)
def reduce_value(self, value):
result = value * self.lambda_to_reduce ** 2
return result
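# Usage sketch (mirrors `main` below): a reducer wraps the grown template plus a lambda, and each
# reduce_* method maps `reduce_value` over the fragment's atoms or bonds, e.g.
#     reductor = ReduceLinearly(template, 0.5)
#     reductor.reduce_charges(reductor.reduce_value)  # halves every fragment charge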
def file_to_list_of_lines(file_path):
with open(file_path, "r") as template:
content = template.readlines()
return content
def get_string_from_line(line, index_initial, index_final):
string = line[index_initial:index_final]
return string.strip()
def find_equal_pdb_atom_names(template1, template2):
pdb_atom_names_tmpl_1 = [template1.list_of_atoms[n].pdb_atom_name for n in range(1, len(template1.list_of_atoms)+1)]
pdb_atom_names_tmpl_2 = [template2.list_of_atoms[n].pdb_atom_name for n in range(1, len(template2.list_of_atoms)+1)]
return list(set(pdb_atom_names_tmpl_1).intersection(pdb_atom_names_tmpl_2))
def detect_fragment_atoms(template_initial, template_grown, hydrogen_to_replace):
fragment_atoms = []
core_atoms = find_equal_pdb_atom_names(template_initial, template_grown)
for key, atom in template_grown.list_of_atoms.items():
pdb_atom_name = atom.pdb_atom_name
if pdb_atom_name not in core_atoms:
fragment_atoms.append(atom)
elif hydrogen_to_replace in pdb_atom_name:
fragment_atoms.append(atom)
return fragment_atoms
def set_fragment_atoms(list_of_fragment_atoms):
for atom in list_of_fragment_atoms:
atom.is_fragment = True
def detect_fragment_bonds(list_of_fragment_atoms, template_grown):
fragment_bonds = []
fragment_indexes = []
for atom in list_of_fragment_atoms:
fragment_indexes.append(atom.atom_id)
fragment_indexes = list(set(fragment_indexes))
for key, bond in template_grown.list_of_bonds.items():
if key[0] in fragment_indexes and key[1] in fragment_indexes:
fragment_bonds.append(bond)
return fragment_bonds
def set_fragment_bonds(list_of_fragment_bonds):
for bond in list_of_fragment_bonds:
bond.is_fragment = True
def set_connecting_atom(template_grown, pdb_atom_name):
for key, atom in template_grown.list_of_atoms.items():
if pdb_atom_name in atom.pdb_atom_name:
atom.is_linker = True
def main(template_initial_path, template_grown_path, step, total_steps, hydrogen_to_replace, core_atom_linker,
tmpl_out_path):
"""
Module to modify templates, currently working with OPLS2005. This main function compares two templates,
an initial one and a grown one, and extracts the atoms of the fragment (the part that has been grown). It then
uses this data to linearly scale several attributes of the fragment's atoms and bonds: sigmas, epsilons, charges,
bond equilibrium distances, and the SGB non-polar radii, types and gamma parameters. The scaling factor is a
lambda parameter computed as step / (total_steps + 1). Finally, the modified template is written to an
output file.
:param template_initial_path: Path to an OPLS2005 template of the core ligand.
:type template_initial_path: str
:param template_grown_path: Path to an OPLS2005 template of the ligand with the fragment added to the core.
:type template_grown_path: str
:param step: Current step of the total steps.
:type step: int
:param total_steps: Total number of steps.
:type total_steps: int
:param hydrogen_to_replace: PDB atom name of the hydrogen that will be replaced for the linking atom of the fragment.
:type hydrogen_to_replace: str
:param core_atom_linker: PDB atom name of the core that is linking the fragment.
:type core_atom_linker: str
    :param tmpl_out_path: Output path for the modified template.
:type tmpl_out_path: str
:return: None
"""
lambda_to_reduce = float(step/(total_steps+1))
templ_ini = TemplateOPLS2005(template_initial_path)
templ_grw = TemplateOPLS2005(template_grown_path)
fragment_atoms = detect_fragment_atoms(template_initial=templ_ini, template_grown=templ_grw,
hydrogen_to_replace=hydrogen_to_replace)
set_fragment_atoms(list_of_fragment_atoms=fragment_atoms)
set_connecting_atom(template_grown=templ_grw, pdb_atom_name=hydrogen_to_replace)
set_connecting_atom(template_grown=templ_grw, pdb_atom_name=core_atom_linker)
fragment_bonds = detect_fragment_bonds(list_of_fragment_atoms=fragment_atoms, template_grown=templ_grw)
set_fragment_bonds(list_of_fragment_bonds=fragment_bonds)
reductor = ReduceLinearly(templ_grw, lambda_to_reduce)
reductor.reduce_sigmas(reductor.reduce_value)
reductor.reduce_epsilons(reductor.reduce_value)
reductor.reduce_charges(reductor.reduce_value)
reductor.reduce_bond_eq_dist(reductor.reduce_value)
reductor.reduce_radnpSGB(reductor.reduce_value)
reductor.reduce_radnpType(reductor.reduce_value)
reductor.reduce_sgbnpGamma(reductor.reduce_value)
reductor.reduce_sgbnpType(reductor.reduce_value)
templ_grw.write_template_to_file(template_new_name=tmpl_out_path)
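# Illustrative usage sketch (not part of the original module); the template paths and atom
# names below are hypothetical placeholders. With step=5 and total_steps=10 the lambda used
# above is 5 / (10 + 1) ~= 0.4545, i.e. the fragment parameters are scaled to roughly 45% of
# their fully grown values at that step.
#
# main(template_initial_path="core.tmpl", template_grown_path="grown.tmpl",
#      step=5, total_steps=10, hydrogen_to_replace="H8", core_atom_linker="C6",
#      tmpl_out_path="grown_step5.tmpl")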
|
content = []
for key in self.list_of_bonds.keys():
line = self.list_of_bonds[key].write_bond()
content.append(line)
return "".join(content)
|
base.py
|
import os
import logging
import datetime
import numpy as np
import tensorflow as tf
from gym.spaces import Box, Discrete
from gym.utils import colorize
from control.utils.misc import Config
from control.utils.misc import REPO_ROOT, RESOURCE_ROOT
from abc import ABC, abstractmethod
class TrainConfigBase(Config):
lr = 0.001
n_steps = 10000
warmup_steps = 5000
batch_size = 64
log_every_step = 1000
# give an extra bonus if done; only needed for certain tasks.
done_reward = None
class Policy(ABC):
def __init__(self, env, name, training=True, deterministic=False):
self.env = env
self.training = training
self.name = self.__class__.__name__ + '--' + name
if deterministic:
np.random.seed(1)
# Logger
self.logger = logging.getLogger(name)
logging.basicConfig()
self.logger.setLevel(os.getenv('LOG_LEVEL', 'INFO'))
# self.logger.info('Instantiated class ' + self.__class__.__name__)
@property
def act_size(self):
# number of options of an action; this only makes sense for discrete actions.
if isinstance(self.env.action_space, Discrete):
return self.env.action_space.n
else:
return None
@property
def act_dim(self):
# dimension of an action; this only makes sense for continuous actions.
if isinstance(self.env.action_space, Box):
return list(self.env.action_space.shape)
else:
return []
@property
def state_dim(self):
# dimension of a state.
return list(self.env.observation_space.shape)
@staticmethod
    def obs_to_inputs(ob):
return ob.flatten()
@abstractmethod
def get_action(self, state, **kwargs):
pass
@abstractmethod
def build(self):
pass
@abstractmethod
def train(self, *args, **kwargs):
pass
def evaluate(self, n_episodes):
        # TODO: evaluate uses the default setting of the environment, e.g., a random start;
        # this should be done in parallel
        # and it should depend on the starting state!
reward_history = []
for i in range(n_episodes):
ob = self.env.reset()
done = False
reward = 0.
while not done:
a, q = self.get_action(ob, epsilon=0.0)
new_ob, r, done, _ = self.env.step(a)
# self.env.render()
reward += r
ob = new_ob
reward_history.append(reward)
#print("Avg. reward over {} episodes: {:.4f}".format(n_episodes, np.mean(reward_history)))
self.logger.info("Avg. reward over {} episodes: {:.4f}".format(n_episodes, np.mean(reward_history)))
return reward_history
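# Minimal sketch (not part of the original code) of a concrete Policy subclass, showing the
# abstract members a policy has to provide. The uniform random behaviour is a placeholder,
# not a learning algorithm; get_action returns an (action, value) pair because evaluate()
# above unpacks two values.
class RandomPolicy(Policy):
    def build(self):
        # nothing to build for a random baseline
        pass

    def get_action(self, state, **kwargs):
        # sample uniformly from the action space; the second element stands in for a value/Q estimate
        return self.env.action_space.sample(), None

    def train(self, *args, **kwargs):
        # a random baseline has nothing to train
        pass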
class BaseModelMixin(ABC):
def __init__(self, model_name, experiment_name=None):
self._saver = None
self._writer = None
self._experiment_name = experiment_name
self.model_name = model_name
self.current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
def _get_dir(self, dir_name):
if self._experiment_name is not None:
path = os.path.join(RESOURCE_ROOT, dir_name, self._experiment_name, self.model_name, self.current_time)
else:
|
os.makedirs(path, exist_ok=True)
return path
@property
def log_dir(self):
return self._get_dir('training_logs')
@property
def checkpoint_dir(self):
return self._get_dir('checkpoints')
@property
def model_dir(self):
return self._get_dir('models')
@property
def tb_dir(self):
# tensorboard
return self._get_dir('tb_logs')
@property
def writer(self):
if self._writer is None:
self._writer = tf.summary.create_file_writer(self.tb_dir)
return self._writer
|
path = os.path.join(RESOURCE_ROOT, dir_name, self.model_name, self.current_time)
|
show_routing.py
|
'''
show_route.py
'''
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional
# ====================================================
# schema for show route ipv4
# ====================================================
class ShowRouteIpv4Schema(MetaParser):
"""Schema for show route ipv4"""
schema = {
'vrf': {
Any(): {
Optional('address_family'): {
Any(): {
Optional('routes'): {
Any(): {
Optional('route'): str,
Optional('active'): bool,
Optional('route_preference'): int,
Optional('metric'): int,
Optional('source_protocol'): str,
Optional('source_protocol_codes'): str,
Optional('next_hop'): {
Optional('outgoing_interface'): {
Any(): { # interface if there is no next_hop
Optional('outgoing_interface'): str,
Optional('updated'): str,
},
},
Optional('next_hop_list'): {
Any(): { # index
Optional('index'): int,
Optional('next_hop'): str,
Optional('outgoing_interface'): str,
Optional('updated'): str,
},
},
},
},
},
},
},
},
},
}
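    # Shape illustration (not part of the original code): a directly connected route such as
    # "S 10.4.1.1/32 is directly connected, 01:51:13, GigabitEthernet0/0/0/0" ends up under
    # vrf -> 'default' -> address_family -> 'ipv4' -> routes -> '10.4.1.1/32', with the
    # interface recorded under next_hop -> outgoing_interface -> 'GigabitEthernet0/0/0/0'.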
"""
Codes: C - connected, S - static, R - RIP, B - BGP, (>) - Diversion path
D - EIGRP, EX - EIGRP external, O - OSPF, IA - OSPF inter area
N1 - OSPF NSSA external type 1, N2 - OSPF NSSA external type 2
E1 - OSPF external type 1, E2 - OSPF external type 2, E - EGP
i - ISIS, L1 - IS-IS level-1, L2 - IS-IS level-2
ia - IS-IS inter area, su - IS-IS summary null, * - candidate default
U - per-user static route, o - ODR, L - local, G - DAGR, l - LISP
A - access/subscriber, a - Application route
M - mobile route, r - RPL, t - Traffic Engineering, (!) - FRR Backup path
"""
source_protocol_dict = {
'ospf' : ['O','IA','N1','N2','E1','E2'],
'odr' : ['o'],
'isis' : ['i','su','L1','L2','ia'],
'eigrp' : ['D','EX'],
'static' : ['S'],
'egp' : ['E'],
'dagr' : ['G'],
'rpl' : ['r'],
'mobile router' : ['M'],
'lisp' : ['I', 'l'],
'nhrp' : ['H'],
'local' : ['L'],
'connected' : ['C'],
'bgp' : ['B'],
'rip' : ['R'],
'per-user static route' : ['U'],
'access/subscriber' : ['A'],
'traffic engineering' : ['t'],
}
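    # Example of how the mapping above is applied (illustration, not part of the original
    # code): for a line flagged "i L1", code1 "i" belongs to the 'isis' entry, so
    # source_protocol becomes 'isis' and code2 "L1" is appended to source_protocol_codes.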
# ====================================================
# parser for show ip route
# ====================================================
class ShowRouteIpv4(ShowRouteIpv4Schema):
"""Parser for :
show route ipv4
show route vrf <vrf> ipv4"""
cli_command = ['show route vrf {vrf} ipv4','show route ipv4']
def cli(self, vrf="",output=None):
if output is None:
if vrf:
cmd = self.cli_command[0].format(vrf=vrf)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
if not vrf:
vrf = 'default'
af = 'ipv4'
route = ""
result_dict = {}
for line in out.splitlines():
line = line.strip()
next_hop = interface = updated = metrics = route_preference = ""
# VRF: VRF501
p1 = re.compile(r'^\s*VRF: +(?P<vrf>[\w]+)$')
|
# S 10.4.1.1/32 is directly connected, 01:51:13, GigabitEthernet0/0/0/0
# S 10.36.3.3/32 [1/0] via 10.2.3.3, 01:51:13, GigabitEthernet0/0/0/1
# B 10.19.31.31/32 [200/0] via 10.229.11.11, 00:55:14
# i L1 10.76.23.23/32 [115/11] via 10.2.3.3, 00:52:41, GigabitEthernet0/0/0/1
# S* 192.168.4.4/10 [111/10] via 172.16.84.11, 1w0d
# L ::ffff:192.168.13.12/19
# O E1 2001:db8::/39
# R 10.145.110.10/4 [10/10] via 192.168.10.12, 12:03:42, GigabitEthernet0/0/1/1.1
p3 = re.compile(r'^\s*(?P<code1>[\w\*\(\>\)\!]+) +(?P<code2>[\w\*\(\>\)\!]+)? +(?P<network>[\w\/\:\.]+)'
'( +is +directly +connected,)?( +\[(?P<route_preference>[\d\/]+)\]?'
'( +via )?(?P<next_hop>[\w\/\:\.]+)?,)?( +(?P<date>[0-9][\w\:]+))?,?( +(?P<interface>[\S]+))?$')
m = p3.match(line)
if m:
group = m.groupdict()
if line == cmd:
continue
active = True
updated = ""
if group['code1']:
source_protocol_codes = group['code1'].strip()
for key,val in super().source_protocol_dict.items():
                        source_protocol_replaced = re.split(r'\*|\(\!\)|\(\>\)', source_protocol_codes)[0].strip()
if source_protocol_replaced in val:
source_protocol = key
if group['code2']:
source_protocol_codes = '{} {}'.format(source_protocol_codes, m.groupdict()['code2'])
if group['network']:
route = m.groupdict()['network']
if group['route_preference']:
routepreference = m.groupdict()['route_preference']
if '/' in routepreference:
route_preference = int(routepreference.split('/')[0])
metrics = routepreference.split('/')[1]
index = 1
if group['next_hop']:
next_hop = group['next_hop']
if group['interface']:
interface = group['interface']
if group['date']:
updated = group['date']
if vrf:
if 'vrf' not in result_dict:
result_dict['vrf'] = {}
if vrf not in result_dict['vrf']:
result_dict['vrf'][vrf] = {}
if 'address_family' not in result_dict['vrf'][vrf]:
result_dict['vrf'][vrf]['address_family'] = {}
if af and af not in result_dict['vrf'][vrf]['address_family']:
result_dict['vrf'][vrf]['address_family'][af] = {}
if 'routes' not in result_dict['vrf'][vrf]['address_family'][af]:
result_dict['vrf'][vrf]['address_family'][af]['routes'] = {}
if route not in result_dict['vrf'][vrf]['address_family'][af]['routes']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['route'] = route
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['active'] = active
if metrics:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['metric'] = int(metrics)
if route_preference:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['route_preference'] = route_preference
if source_protocol_codes:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['source_protocol_codes'] = source_protocol_codes
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['source_protocol'] = source_protocol
if 'next_hop' not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] = {}
if not next_hop:
if 'outgoing_interface' not in result_dict['vrf'][vrf]['address_family'][af] \
['routes'][route]['next_hop']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]\
['next_hop']['outgoing_interface'] = {}
if m.groupdict()['interface'] and interface not in \
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]\
['next_hop']['outgoing_interface']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]\
['next_hop']['outgoing_interface'][interface] = {}
if interface:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['outgoing_interface'] = interface
if updated:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['updated'] = updated
else:
if 'next_hop_list' not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'][
'next_hop_list'] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['index'] = index
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['next_hop'] = next_hop
if updated:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['updated'] = updated
if interface:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['outgoing_interface'] = interface
continue
# [110/2] via 10.1.2.1, 01:50:49, GigabitEthernet0/0/0/3
p4 = re.compile(r'^\s*\[(?P<route_preference>[\d\/]+)\]'
' +via +(?P<next_hop>[\d\.]+)?,?( +(?P<date>[0-9][\w\:]+),)?( +(?P<interface>[\S]+))?$')
m = p4.match(line)
if m:
updated = ""
routepreference = m.groupdict()['route_preference']
if '/' in routepreference:
route_preference = int(routepreference.split('/')[0])
metrics = routepreference.split('/')[1]
next_hop = m.groupdict()['next_hop']
index += 1
if m.groupdict()['interface']:
interface = m.groupdict()['interface']
if m.groupdict()['date']:
updated = m.groupdict()['date']
if 'routes' not in result_dict['vrf'][vrf]['address_family'][af]:
result_dict['vrf'][vrf]['address_family'][af]['routes'] = {}
if route not in result_dict['vrf'][vrf]['address_family'][af]['routes']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['route'] = route
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['active'] = active
if metrics:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['metric'] = int(metrics)
if route_preference:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['route_preference'] = route_preference
if source_protocol_codes:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['source_protocol_codes'] = source_protocol_codes
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['source_protocol'] = source_protocol
if 'next_hop' not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] = {}
if not next_hop:
if 'outgoing_interface' not in result_dict['vrf'][vrf]['address_family'][af] \
['routes'][route]['next_hop']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'] = {}
if m.groupdict()['interface'] and interface not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['outgoing_interface'] = interface
if updated:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['updated'] = updated
else:
if 'next_hop_list' not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'][
'next_hop_list'] = {}
if index not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['index'] = index
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['next_hop'] = next_hop
if updated:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['updated'] = updated
if interface:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['outgoing_interface'] = interface
continue
# is directly connected, 01:51:13, GigabitEthernet0/0/0/3
p4 = re.compile(r'^\s*is +directly +connected,'
'( +(?P<date>[0-9][\w\:]+),)?( +(?P<interface>[\S]+))?$')
m = p4.match(line)
if m:
updated = ""
if m.groupdict()['interface']:
interface = m.groupdict()['interface']
if m.groupdict()['date']:
updated = m.groupdict()['date']
if 'outgoing_interface' not in result_dict['vrf'][vrf]['address_family'][af] \
['routes'][route]['next_hop']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'] = {}
if m.groupdict()['interface'] and interface not in \
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['outgoing_interface'] = interface
if updated:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['updated'] = updated
continue
return result_dict
# ====================================================
# parser for show route ipv6
# ====================================================
class ShowRouteIpv6(ShowRouteIpv4Schema):
"""Parser for :
show route ipv6
show route vrf <vrf> ipv6"""
cli_command = ['show route vrf {vrf} ipv6', 'show route ipv6']
def cli(self, vrf="", output=None):
if output is None:
if vrf:
cmd = self.cli_command[0].format(vrf=vrf)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
if not vrf:
vrf = 'default'
af = 'ipv6'
route = ""
result_dict = {}
for line in out.splitlines():
line = line.strip()
# VRF: VRF501
p1 = re.compile(r'^\s*VRF: +(?P<vrf>[\w]+)$')
m = p1.match(line)
if m:
vrf = m.groupdict()['vrf']
continue
# S 2001:1:1:1::1/128
# L 2001:2:2:2::2/128 is directly connected,
#i L1 2001:23:23:23::23/128
# R* ::/128
# L ::ffff:192.168.1.1/10
p2 = re.compile(r'^(?P<code1>[\w\*\(\>\)\!]+)( +'
'(?P<code2>[\w\*\(\>\)\!]+))? +(?P<route>[\w\/\:\.]+)'
'( +is +directly +connected,)?$')
m = p2.match(line)
if m:
group = m.groupdict()
active = True
if line == cmd:
continue
if group['code1']:
source_protocol_codes = group['code1'].strip()
for key, val in super().source_protocol_dict.items():
                        source_protocol_replaced = re.split(r'\*|\(\!\)|\(\>\)', source_protocol_codes)[0].strip()
if source_protocol_replaced in val:
source_protocol = key
if group['code2']:
source_protocol_codes = '{} {}'.format(source_protocol_codes, group['code2'])
if group['route']:
route = group['route']
index = 1
if vrf:
if 'vrf' not in result_dict:
result_dict['vrf'] = {}
if vrf not in result_dict['vrf']:
result_dict['vrf'][vrf] = {}
if 'address_family' not in result_dict['vrf'][vrf]:
result_dict['vrf'][vrf]['address_family'] = {}
if af and af not in result_dict['vrf'][vrf]['address_family']:
result_dict['vrf'][vrf]['address_family'][af] = {}
if 'routes' not in result_dict['vrf'][vrf]['address_family'][af]:
result_dict['vrf'][vrf]['address_family'][af]['routes'] = {}
if route not in result_dict['vrf'][vrf]['address_family'][af]['routes']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['route'] = route
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['active'] = active
if source_protocol_codes:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['source_protocol_codes'] = source_protocol_codes
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['source_protocol'] = source_protocol
continue
# [1/0] via 2001:20:1:2::1, 01:52:23, GigabitEthernet0/0/0/0
# [200/0] via ::ffff:10.229.11.11 (nexthop in vrf default), 00:55:12
p3 = re.compile(r'^\s*\[(?P<route_preference>[\d\/]+)\]'
' +via +(?P<next_hop>[\w\:\.)]+)?( \(nexthop in '
'vrf default\))?,? +(?P<date>[0-9][\w\:]+)?,?( +'
'(?P<interface>[\S]+))?$')
m = p3.match(line)
if m:
updated = interface = ""
routepreference = m.groupdict()['route_preference']
if '/' in routepreference:
route_preference = int(routepreference.split('/')[0])
metrics = routepreference.split('/')[1]
if m.groupdict()['next_hop']:
next_hop = m.groupdict()['next_hop']
if m.groupdict()['interface']:
interface = m.groupdict()['interface']
if m.groupdict()['date']:
updated = m.groupdict()['date']
if 'vrf' not in result_dict:
result_dict['vrf'] = {}
if vrf not in result_dict['vrf']:
result_dict['vrf'][vrf] = {}
if 'address_family' not in result_dict['vrf'][vrf]:
result_dict['vrf'][vrf]['address_family'] = {}
if af and af not in result_dict['vrf'][vrf]['address_family']:
result_dict['vrf'][vrf]['address_family'][af] = {}
if 'routes' not in result_dict['vrf'][vrf]['address_family'][af]:
result_dict['vrf'][vrf]['address_family'][af]['routes'] = {}
if route not in result_dict['vrf'][vrf]['address_family'][af]['routes']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['route'] = route
if metrics:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['metric'] = int(metrics)
if route_preference:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['route_preference'] = route_preference
if 'next_hop' not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] = {}
if not next_hop:
if 'outgoing_interface' not in result_dict['vrf'][vrf]['address_family'][af] \
['routes'][route]['next_hop']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'] = {}
if m.groupdict()['interface'] and interface not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface] = {}
if interface:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['outgoing_interface'] = interface
if updated:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['updated'] = updated
else:
if 'next_hop_list' not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'][
'next_hop_list'] = {}
if index not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['index'] = index
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['next_hop'] = next_hop
if interface:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['outgoing_interface'] = interface
if updated:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] \
['next_hop_list'][index]['updated'] = updated
index += 1
continue
# 01:52:24, Loopback0
p4 = re.compile(r'^\s*(?P<date>[\w\:]+),'
' +(?P<interface>[\S]+)$')
m = p4.match(line)
if m:
interface = updated = ""
if m.groupdict()['interface']:
interface = m.groupdict()['interface']
if m.groupdict()['date']:
updated = m.groupdict()['date']
if 'vrf' not in result_dict:
result_dict['vrf'] = {}
if vrf not in result_dict['vrf']:
result_dict['vrf'][vrf] = {}
if 'address_family' not in result_dict['vrf'][vrf]:
result_dict['vrf'][vrf]['address_family'] = {}
if af and af not in result_dict['vrf'][vrf]['address_family']:
result_dict['vrf'][vrf]['address_family'][af] = {}
if 'routes' not in result_dict['vrf'][vrf]['address_family'][af]:
result_dict['vrf'][vrf]['address_family'][af]['routes'] = {}
if route not in result_dict['vrf'][vrf]['address_family'][af]['routes']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] = {}
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['route'] = route
if 'next_hop' not in result_dict['vrf'][vrf]['address_family'][af]['routes'][route]:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route]['next_hop'] = {}
if 'outgoing_interface' not in result_dict['vrf'][vrf]['address_family'][af] \
['routes'][route]['next_hop']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'] = {}
if m.groupdict()['interface'] and interface not in \
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface']:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface] = {}
if interface:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['outgoing_interface'] = interface
if updated:
result_dict['vrf'][vrf]['address_family'][af]['routes'][route] \
['next_hop']['outgoing_interface'][interface]['updated'] = updated
continue
return result_dict
|
m = p1.match(line)
if m:
vrf = m.groupdict()['vrf']
continue
|
liquid.py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class liquid(Exchange):
def describe(self):
return self.deep_extend(super(liquid, self).describe(), {
'id': 'liquid',
'name': 'Liquid',
'countries': ['JP', 'CN', 'TW'],
'version': '2',
'rateLimit': 1000,
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/45798859-1a872600-bcb4-11e8-8746-69291ce87b04.jpg',
'api': 'https://api.liquid.com',
'www': 'https://www.liquid.com',
'doc': [
'https://developers.liquid.com',
],
'fees': 'https://help.liquid.com/getting-started-with-liquid/the-platform/fee-structure',
'referral': 'https://www.liquid.com/sign-up/?affiliate=SbzC62lt30976',
},
'api': {
'public': {
'get': [
'currencies',
'products',
'products/{id}',
'products/{id}/price_levels',
'executions',
'ir_ladders/{currency}',
'fees', # add fetchFees, fetchTradingFees, fetchFundingFees
],
},
'private': {
'get': [
'accounts', # undocumented https://github.com/ccxt/ccxt/pull/7493
'accounts/balance',
'accounts/main_asset',
'accounts/{id}',
'accounts/{currency}/reserved_balance_details',
'crypto_accounts', # add fetchAccounts
'crypto_withdrawals', # add fetchWithdrawals
'executions/me',
'fiat_accounts', # add fetchAccounts
'fund_infos', # add fetchDeposits
'loan_bids',
'loans',
'orders',
'orders/{id}',
'orders/{id}/trades', # add fetchOrderTrades
'trades',
'trades/{id}/loans',
'trading_accounts',
'trading_accounts/{id}',
'transactions',
'withdrawals', # add fetchWithdrawals
],
'post': [
'crypto_withdrawals',
'fund_infos',
'fiat_accounts',
'loan_bids',
'orders',
'withdrawals',
],
'put': [
'crypto_withdrawal/{id}/cancel',
'loan_bids/{id}/close',
'loans/{id}',
'orders/{id}', # add editOrder
'orders/{id}/cancel',
'trades/{id}',
'trades/{id}/adjust_margin',
'trades/{id}/close',
'trades/close_all',
'trading_accounts/{id}',
'withdrawals/{id}/cancel',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.0030,
'maker': 0.0000,
'tiers': {
'perpetual': {
'maker': [
[0, 0.0000],
[25000, 0.0000],
[50000, -0.00025],
[100000, -0.00025],
[1000000, -0.00025],
[10000000, -0.00025],
[25000000, -0.00025],
[50000000, -0.00025],
[75000000, -0.00025],
[100000000, -0.00025],
[200000000, -0.00025],
[300000000, -0.00025],
],
'taker': [
[0, 0.00120],
[25000, 0.00115],
[50000, 0.00110],
[100000, 0.00105],
[1000000, 0.00100],
[10000000, 0.00095],
[25000000, 0.00090],
[50000000, 0.00085],
[75000000, 0.00080],
[100000000, 0.00075],
[200000000, 0.00070],
[300000000, 0.00065],
],
},
'spot': {
'taker': [
[0, 0.003],
[10000, 0.0029],
[20000, 0.0028],
[50000, 0.0026],
[100000, 0.0020],
[1000000, 0.0016],
[5000000, 0.0012],
[10000000, 0.0010],
[25000000, 0.0009],
[50000000, 0.0008],
[100000000, 0.0007],
[200000000, 0.0006],
[500000000, 0.0004],
[1000000000, 0.0003],
],
'maker': [
[0, 0.0000],
[10000, 0.0020],
[20000, 0.0019],
[50000, 0.0018],
[100000, 0.0016],
[1000000, 0.0008],
[5000000, 0.0007],
[10000000, 0.0005],
[25000000, 0.0000],
[50000000, 0.0000],
[100000000, 0.0000],
[200000000, 0.0000],
[500000000, 0.0000],
[1000000000, 0.0000],
],
},
},
},
},
'precisionMode': TICK_SIZE,
'exceptions': {
'API rate limit exceeded. Please retry after 300s': DDoSProtection,
'API Authentication failed': AuthenticationError,
'Nonce is too small': InvalidNonce,
'Order not found': OrderNotFound,
'Can not update partially filled order': InvalidOrder,
'Can not update non-live order': OrderNotFound,
'not_enough_free_balance': InsufficientFunds,
'must_be_positive': InvalidOrder,
'less_than_order_size': InvalidOrder,
'price_too_high': InvalidOrder,
'price_too_small': InvalidOrder, # {"errors":{"order":["price_too_small"]}}
'product_disabled': BadSymbol, # {"errors":{"order":["product_disabled"]}}
},
'commonCurrencies': {
'WIN': 'WCOIN',
'HOT': 'HOT Token',
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
'options': {
'cancelOrderException': True,
},
})
def fetch_currencies(self, params={}):
response = self.publicGetCurrencies(params)
#
# [
# {
# currency_type: 'fiat',
# currency: 'USD',
# symbol: '$',
# assets_precision: 2,
# quoting_precision: 5,
# minimum_withdrawal: '15.0',
# withdrawal_fee: 5,
# minimum_fee: null,
# minimum_order_quantity: null,
# display_precision: 2,
# depositable: True,
# withdrawable: True,
# discount_fee: 0.5,
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
active = currency['depositable'] and currency['withdrawable']
amountPrecision = self.safe_integer(currency, 'assets_precision')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': code,
'active': active,
'fee': self.safe_number(currency, 'withdrawal_fee'),
'precision': amountPrecision,
'limits': {
'amount': {
'min': math.pow(10, -amountPrecision),
'max': math.pow(10, amountPrecision),
},
'withdraw': {
'min': self.safe_number(currency, 'minimum_withdrawal'),
'max': None,
},
},
}
return result
def fetch_markets(self, params={}):
spot = self.publicGetProducts(params)
#
# [
# {
# "id":"637",
# "product_type":"CurrencyPair",
# "code":"CASH",
# "name":null,
# "market_ask":"0.00000797",
# "market_bid":"0.00000727",
# "indicator":null,
# "currency":"BTC",
# "currency_pair_code":"TFTBTC",
# "symbol":null,
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_tftbtc_637",
# "taker_fee":"0.0",
# "maker_fee":"0.0",
# "low_market_bid":"0.00000685",
# "high_market_ask":"0.00000885",
# "volume_24h":"3696.0755956",
# "last_price_24h":"0.00000716",
# "last_traded_price":"0.00000766",
# "last_traded_quantity":"1748.0377978",
# "average_price":null,
# "quoted_currency":"BTC",
# "base_currency":"TFT",
# "tick_size":"0.00000001",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":false,
# "last_event_timestamp":"1596962820.000797146",
# "timestamp":"1596962820.000797146",
# "multiplier_up":"9.0",
# "multiplier_down":"0.1",
# "average_time_interval":null
# },
# ]
#
perpetual = self.publicGetProducts({'perpetual': '1'})
#
# [
# {
# "id":"604",
# "product_type":"Perpetual",
# "code":"CASH",
# "name":null,
# "market_ask":"11721.5",
# "market_bid":"11719.0",
# "indicator":null,
# "currency":"USD",
# "currency_pair_code":"P-BTCUSD",
# "symbol":"$",
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_p-btcusd_604",
# "taker_fee":"0.0012",
# "maker_fee":"0.0",
# "low_market_bid":"11624.5",
# "high_market_ask":"11859.0",
# "volume_24h":"0.271",
# "last_price_24h":"11621.5",
# "last_traded_price":"11771.5",
# "last_traded_quantity":"0.09",
# "average_price":"11771.5",
# "quoted_currency":"USD",
# "base_currency":"P-BTC",
# "tick_size":"0.5",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":true,
# "last_event_timestamp":"1596963309.418853092",
# "timestamp":"1596963309.418853092",
# "multiplier_up":null,
# "multiplier_down":"0.1",
# "average_time_interval":300,
# "index_price":"11682.8124",
# "mark_price":"11719.96781",
# "funding_rate":"0.00273",
# "fair_price":"11720.2745"
# },
# ]
#
currencies = self.fetch_currencies()
currenciesByCode = self.index_by(currencies, 'code')
result = []
markets = self.array_concat(spot, perpetual)
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quoted_currency')
productType = self.safe_string(market, 'product_type')
type = 'spot'
spot = True
swap = False
if productType == 'Perpetual':
spot = False
swap = True
type = 'swap'
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = None
if swap:
symbol = self.safe_string(market, 'currency_pair_code')
else:
symbol = base + '/' + quote
maker = self.fees['trading']['maker']
taker = self.fees['trading']['taker']
if type == 'swap':
maker = self.safe_number(market, 'maker_fee', self.fees['trading']['maker'])
taker = self.safe_number(market, 'taker_fee', self.fees['trading']['taker'])
disabled = self.safe_value(market, 'disabled', False)
active = not disabled
baseCurrency = self.safe_value(currenciesByCode, base)
precision = {
'amount': 0.00000001,
'price': self.safe_number(market, 'tick_size'),
}
minAmount = None
if baseCurrency is not None:
minAmount = self.safe_number(baseCurrency['info'], 'minimum_order_quantity')
limits = {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'type': type,
'spot': spot,
'swap': swap,
'maker': maker,
'taker': taker,
'limits': limits,
'precision': precision,
'active': active,
'info': market,
})
return result
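    # Note on the symbol naming above (illustration, not part of the original code): a spot
    # product such as the TFT/BTC example in the first response becomes base + '/' + quote
    # ("TFT/BTC"), while a perpetual product keeps its currency_pair_code, e.g. "P-BTCUSD".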
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetAccounts(params)
#
# {
# crypto_accounts: [
# {
# id: 2221179,
# currency: 'USDT',
# balance: '0.0',
# reserved_balance: '0.0',
# pusher_channel: 'user_xxxxx_account_usdt',
# lowest_offer_interest_rate: null,
# highest_offer_interest_rate: null,
# address: '0',
# currency_symbol: 'USDT',
# minimum_withdraw: null,
# currency_type: 'crypto'
# },
# ],
# fiat_accounts: [
# {
# id: 1112734,
# currency: 'USD',
# balance: '0.0',
# reserved_balance: '0.0',
# pusher_channel: 'user_xxxxx_account_usd',
# lowest_offer_interest_rate: null,
# highest_offer_interest_rate: null,
# currency_symbol: '$',
# send_to_btc_address: null,
# exchange_rate: '1.0',
# currency_type: 'fiat'
# }
# ]
# }
#
result = {'info': response}
crypto = self.safe_value(response, 'crypto_accounts', [])
fiat = self.safe_value(response, 'fiat_accounts', [])
for i in range(0, len(crypto)):
balance = crypto[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'reserved_balance')
result[code] = account
for i in range(0, len(fiat)):
balance = fiat[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'reserved_balance')
result[code] = account
return self.parse_balance(result, False)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'id': self.market_id(symbol),
}
response = self.publicGetProductsIdPriceLevels(self.extend(request, params))
return self.parse_order_book(response, None, 'buy_price_levels', 'sell_price_levels')
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
last = None
if 'last_traded_price' in ticker:
if ticker['last_traded_price']:
length = len(ticker['last_traded_price'])
if length > 0:
last = self.safe_number(ticker, 'last_traded_price')
symbol = None
if market is None:
marketId = self.safe_string(ticker, 'id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId = self.safe_string(ticker, 'base_currency')
quoteId = self.safe_string(ticker, 'quoted_currency')
if symbol in self.markets:
market = self.markets[symbol]
else:
symbol = self.safe_currency_code(baseId) + '/' + self.safe_currency_code(quoteId)
if market is not None:
symbol = market['symbol']
change = None
percentage = None
average = None
open = self.safe_number(ticker, 'last_price_24h')
if open is not None and last is not None:
change = last - open
average = self.sum(last, open) / 2
if open > 0:
percentage = change / open * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high_market_ask'),
'low': self.safe_number(ticker, 'low_market_bid'),
'bid': self.safe_number(ticker, 'market_bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'market_ask'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': self.safe_number(ticker, 'volume_24h'),
'quoteVolume': None,
'info': ticker,
}
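    # Worked illustration (not part of the original code) of the 24h change math in
    # parse_ticker above: with open (last_price_24h) = 100.0 and last (last_traded_price)
    # = 110.0, change = 10.0, average = 105.0 and percentage = 10.0.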
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetProducts(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
response = self.publicGetProductsId(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
# { id: 12345,
# quantity: "6.789",
# price: "98765.4321",
# taker_side: "sell",
# created_at: 1512345678,
# my_side: "buy" }
timestamp = self.safe_timestamp(trade, 'created_at')
orderId = self.safe_string(trade, 'order_id')
# 'taker_side' gets filled for both fetchTrades and fetchMyTrades
takerSide = self.safe_string(trade, 'taker_side')
# 'my_side' gets filled for fetchMyTrades only and may differ from 'taker_side'
mySide = self.safe_string(trade, 'my_side')
side = mySide if (mySide is not None) else takerSide
takerOrMaker = None
if mySide is not None:
takerOrMaker = 'taker' if (takerSide == mySide) else 'maker'
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'quantity')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
id = self.safe_string(trade, 'id')
symbol = None
if market is not None:
symbol = market['symbol']
return {
'info': trade,
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['id'],
}
if limit is not None:
request['limit'] = limit
if since is not None:
# timestamp should be in seconds, whereas we use milliseconds in since and everywhere
request['timestamp'] = int(since / 1000)
response = self.publicGetExecutions(self.extend(request, params))
result = response if (since is not None) else response['models']
return self.parse_trades(result, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
# the `with_details` param is undocumented - it adds the order_id to the results
request = {
'product_id': market['id'],
'with_details': True,
}
if limit is not None:
request['limit'] = limit
response = self.privateGetExecutionsMe(self.extend(request, params))
return self.parse_trades(response['models'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
params = self.omit(params, ['clientOrderId', 'client_order_id'])
request = {
'order_type': type,
'product_id': self.market_id(symbol),
'side': side,
'quantity': self.amount_to_precision(symbol, amount),
}
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
if (type == 'limit') or (type == 'limit_post_only') or (type == 'market_with_range') or (type == 'stop'):
request['price'] = self.price_to_precision(symbol, price)
response = self.privatePostOrders(self.extend(request, params))
#
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "client_order_id": null,
# }
#
return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privatePutOrdersIdCancel(self.extend(request, params))
order = self.parse_order(response)
if order['status'] == 'closed':
if self.options['cancelOrderException']:
raise OrderNotFound(self.id + ' order closed already: ' + self.json(response))
return order
def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if price is None:
raise ArgumentsRequired(self.id + ' editOrder() requires the price argument')
request = {
'order': {
'quantity': self.amount_to_precision(symbol, amount),
'price': self.price_to_precision(symbol, price),
},
'id': id,
}
response = self.privatePutOrdersId(self.extend(request, params))
return self.parse_order(response)
def parse_order_status(self, status):
statuses = {
'live': 'open',
'filled': 'closed',
'cancelled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0"
# "client_order_id": null,
# }
#
# fetchOrder, fetchOrders, fetchOpenOrders, fetchClosedOrders
#
# {
# "id": 2157479,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.01",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "filled",
# "leverage_level": 2,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "executions": [
# {
# "id": 4566133,
# "quantity": "0.01",
# "price": "500.0",
# "taker_side": "buy",
# "my_side": "sell",
# "created_at": 1465396785
# }
# ]
# }
#
orderId = self.safe_string(order, 'id')
timestamp = self.safe_timestamp(order, 'created_at')
marketId = self.safe_string(order, 'product_id')
market = self.safe_value(self.markets_by_id, marketId)
status = self.parse_order_status(self.safe_string(order, 'status'))
amount = self.safe_number(order, 'quantity')
filled = self.safe_number(order, 'filled_quantity')
price = self.safe_number(order, 'price')
symbol = None
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
type = self.safe_string(order, 'order_type')
tradeCost = 0
tradeFilled = 0
average = self.safe_number(order, 'average_price')
trades = self.parse_trades(self.safe_value(order, 'executions', []), market, None, None, {
'order': orderId,
'type': type,
})
numTrades = len(trades)
for i in range(0, numTrades):
            # PHP copies values upon assignment rather than keeping references to them
            # todo: rewrite this (shortly)
trade = trades[i]
trade['order'] = orderId
trade['type'] = type
tradeFilled = self.sum(tradeFilled, trade['amount'])
tradeCost = self.sum(tradeCost, trade['cost'])
cost = None
lastTradeTimestamp = None
if numTrades > 0:
lastTradeTimestamp = trades[numTrades - 1]['timestamp']
if not average and (tradeFilled > 0):
average = tradeCost / tradeFilled
if cost is None:
cost = tradeCost
if filled is None:
filled = tradeFilled
remaining = None
if amount is not None and filled is not None:
remaining = amount - filled
side = self.safe_string(order, 'side')
clientOrderId = self.safe_string(order, 'client_order_id')
return {
'id': orderId,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'type': type,
'timeInForce': None,
'postOnly': None,
'status': status,
'symbol': symbol,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'filled': filled,
'cost': cost,
'remaining': remaining,
'average': average,
'trades': trades,
'fee': {
'currency': feeCurrency,
'cost': self.safe_number(order, 'order_fee'),
},
'info': order,
}
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrdersId(self.extend(request, params))
return self.parse_order(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
# 'funding_currency': market['quoteId'], # filter orders based on "funding" currency(quote currency)
# 'product_id': market['id'],
# 'status': 'live', # 'filled', 'cancelled'
# 'trading_type': 'spot', # 'margin', 'cfd'
'with_details': 1, # return full order details including executions
}
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
if limit is not None:
request['limit'] = limit
response = self.privateGetOrders(self.extend(request, params))
#
# {
# "models": [
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "executions": [], # optional
# }
# ],
# "current_page": 1,
# "total_pages": 1
# }
#
orders = self.safe_value(response, 'models', [])
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {'status': 'live'}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {'status': 'filled'}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
# 'auth_code': '', # optional 2fa code
'currency': currency['id'],
'address': address,
'amount': self.currency_to_precision(code, amount),
# 'payment_id': tag, # for XRP only
# 'memo_type': 'text', # 'text', 'id' or 'hash', for XLM only
# 'memo_value': tag, # for XLM only
}
if tag is not None:
if code == 'XRP':
request['payment_id'] = tag
elif code == 'XLM':
request['memo_type'] = 'text' # overrideable via params
request['memo_value'] = tag
else:
raise NotSupported(self.id + ' withdraw() only supports a tag along the address for XRP or XLM')
response = self.privatePostCryptoWithdrawals(self.extend(request, params))
#
# {
# "id": 1353,
# "address": "1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2",
# "amount": 1.0,
# "state": "pending",
# "currency": "BTC",
# "withdrawal_fee": 0.0,
# "created_at": 1568016450,
# "updated_at": 1568016450,
# "payment_id": null
# }
#
return self.parse_transaction(response, currency)
def parse_transaction_status(self, status):
statuses = {
'pending': 'pending',
'cancelled': 'canceled',
'approved': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "id": 1353,
# "address": "1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2",
# "amount": 1.0,
# "state": "pending",
# "currency": "BTC",
# "withdrawal_fee": 0.0,
# "created_at": 1568016450,
# "updated_at": 1568016450,
# "payment_id": null
# }
#
# fetchDeposits, fetchWithdrawals
#
# ...
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
tag = self.safe_string_2(transaction, 'payment_id', 'memo_value')
txid = None
currencyId = self.safe_string(transaction, 'asset')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.safe_timestamp(transaction, 'created_at')
updated = self.safe_timestamp(transaction, 'updated_at')
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
amount = self.safe_number(transaction, 'amount')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'tag': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': None,
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
headers = {
'X-Quoine-API-Version': self.version,
'Content-Type': 'application/json',
}
if api == 'private':
self.check_required_credentials()
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif query:
body = self.json(query)
nonce = self.nonce()
request = {
'path': url,
'token_id': self.apiKey,
'iat': int(math.floor(nonce / 1000)), # issued at
}
if not ('client_order_id' in query):
request['nonce'] = nonce
headers['X-Quoine-Auth'] = self.jwt(request, self.encode(self.secret))
else:
if query:
url += '?' + self.urlencode(query)
url = self.urls['api'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code >= 200 and code < 300:
return
if code == 401:
# expected non-json response
self.throw_exactly_matched_exception(self.exceptions, body, body)
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if response is None:
return
feedback = self.id + ' ' + body
message = self.safe_string(response, 'message')
|
# {"message": "Order not found"}
#
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
elif errors is not None:
#
# {"errors": {"user": ["not_enough_free_balance"]}}
# {"errors": {"quantity": ["less_than_order_size"]}}
# {"errors": {"order": ["Can not update partially filled order"]}}
#
types = list(errors.keys())
for i in range(0, len(types)):
type = types[i]
errorMessages = errors[type]
for j in range(0, len(errorMessages)):
message = errorMessages[j]
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
else:
raise ExchangeError(feedback)
|
errors = self.safe_value(response, 'errors')
if message is not None:
#
|
log.go
|
// Copyright (c) 2020 Target Brands, Inc. All rights reserved.
//
// Use of this source code is governed by the LICENSE file in this repository.
package library
import "fmt"
// Log is the library representation of a log for a step in a build.
//
// swagger:model Log
type Log struct {
ID *int64 `json:"id,omitempty"`
BuildID *int64 `json:"build_id,omitempty"`
RepoID *int64 `json:"repo_id,omitempty"`
ServiceID *int64 `json:"service_id,omitempty"`
StepID *int64 `json:"step_id,omitempty"`
Data *[]byte `json:"data,omitempty"`
}
// AppendData adds the provided data to the end of
// the Data field for the Log type. If the Data
// field is empty, then the function overwrites
// the entire Data field.
func (l *Log) AppendData(data []byte) {
// check if Data field is empty
if len(l.GetData()) == 0 {
// overwrite the Data field
l.SetData(data)
return
}
// add the data to the Data field
l.SetData(append(l.GetData(), data...))
}
// GetID returns the ID field.
//
// When the provided Log type is nil, or the field within
// the type is nil, it returns the zero value for the field.
func (l *Log) GetID() int64 {
// return zero value if Log type or ID field is nil
if l == nil || l.ID == nil {
return 0
}
return *l.ID
}
// GetBuildID returns the BuildID field.
//
// When the provided Log type is nil, or the field within
// the type is nil, it returns the zero value for the field.
func (l *Log) GetBuildID() int64 {
// return zero value if Log type or BuildID field is nil
if l == nil || l.BuildID == nil {
return 0
}
return *l.BuildID
}
// GetRepoID returns the RepoID field.
//
// When the provided Log type is nil, or the field within
// the type is nil, it returns the zero value for the field.
func (l *Log) GetRepoID() int64 {
// return zero value if Log type or RepoID field is nil
if l == nil || l.RepoID == nil {
return 0
}
return *l.RepoID
}
// GetServiceID returns the ServiceID field.
//
// When the provided Log type is nil, or the field within
// the type is nil, it returns the zero value for the field.
func (l *Log) GetServiceID() int64 {
// return zero value if Log type or ServiceID field is nil
if l == nil || l.ServiceID == nil {
return 0
}
return *l.ServiceID
}
// GetStepID returns the StepID field.
//
// When the provided Log type is nil, or the field within
// the type is nil, it returns the zero value for the field.
func (l *Log) GetStepID() int64 {
// return zero value if Log type or StepID field is nil
if l == nil || l.StepID == nil {
return 0
}
return *l.StepID
}
// GetData returns the Data field.
//
// When the provided Log type is nil, or the field within
// the type is nil, it returns the zero value for the field.
func (l *Log) GetData() []byte {
// return zero value if Log type or Data field is nil
if l == nil || l.Data == nil {
return []byte{}
}
return *l.Data
}
// SetID sets the ID field.
//
// When the provided Log type is nil, it
// will set nothing and immediately return.
func (l *Log) SetID(v int64) {
// return if Log type is nil
if l == nil {
return
}
l.ID = &v
}
// SetBuildID sets the BuildID field.
//
// When the provided Log type is nil, it
// will set nothing and immediately return.
func (l *Log) SetBuildID(v int64) {
// return if Log type is nil
if l == nil {
return
}
l.BuildID = &v
}
// SetRepoID sets the RepoID field.
//
// When the provided Log type is nil, it
// will set nothing and immediately return.
func (l *Log) SetRepoID(v int64) {
// return if Log type is nil
if l == nil
|
l.RepoID = &v
}
// SetServiceID sets the ServiceID field.
//
// When the provided Log type is nil, it
// will set nothing and immediately return.
func (l *Log) SetServiceID(v int64) {
// return if Log type is nil
if l == nil {
return
}
l.ServiceID = &v
}
// SetStepID sets the StepID field.
//
// When the provided Log type is nil, it
// will set nothing and immediately return.
func (l *Log) SetStepID(v int64) {
// return if Log type is nil
if l == nil {
return
}
l.StepID = &v
}
// SetData sets the Data field.
//
// When the provided Log type is nil, it
// will set nothing and immediately return.
func (l *Log) SetData(v []byte) {
// return if Log type is nil
if l == nil {
return
}
l.Data = &v
}
// String implements the Stringer interface for the Log type.
func (l *Log) String() string {
return fmt.Sprintf(`{
BuildID: %d,
Data: %s,
ID: %d,
RepoID: %d,
ServiceID: %d,
StepID: %d,
}`,
l.GetBuildID(),
l.GetData(),
l.GetID(),
l.GetRepoID(),
l.GetServiceID(),
l.GetStepID(),
)
}
|
{
return
}
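The accessors above follow a nil-safe, pointer-field pattern that is common in generated Go API types: getters return a zero value on a nil receiver or nil field, and setters silently no-op on a nil receiver. Below is a minimal, self-contained sketch of that pattern; the Log struct here is a simplified stand-in (ID only), not the full type defined above.

package main

import "fmt"

// Simplified stand-in for the Log type above; the field is a pointer so
// that "unset" can be distinguished from the zero value.
type Log struct {
	ID *int64
}

// GetID mirrors the nil-safe getter pattern above.
func (l *Log) GetID() int64 {
	if l == nil || l.ID == nil {
		return 0
	}
	return *l.ID
}

// SetID mirrors the nil-safe setter pattern above.
func (l *Log) SetID(v int64) {
	if l == nil {
		return
	}
	l.ID = &v
}

func main() {
	var l *Log             // a nil receiver is safe with this pattern
	fmt.Println(l.GetID()) // prints 0
	l.SetID(7)             // no-op on a nil receiver

	l = new(Log)
	l.SetID(7)
	fmt.Println(l.GetID()) // prints 7
}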
|
register.go
|
package services
import (
"github.com/MehmetHanGulter/RubyRuby/models"
"github.com/MehmetHanGulter/RubyRuby/repositories"
"golang.org/x/crypto/bcrypt"
"strconv"
)
const YEAR = 365.0
type RegisterService struct {
RegisterRepository *repositories.RegisterRepository
}
func NewRegisterService(p *repositories.RegisterRepository) RegisterService
|
func (p *RegisterService) Create(register *models.Register) (*models.Register, error) {
	// hash the password before persisting; propagate any bcrypt failure
	password, err := bcrypt.GenerateFromPassword([]byte(register.Password), 14)
	if err != nil {
		return nil, err
	}
	register.Password = string(password)
	return p.RegisterRepository.Create(register)
}
func (p *RegisterService) Save(user *models.Register) (*models.Register, error) {
return p.RegisterRepository.Save(user)
}
func (p *RegisterService) Find(id string) (*models.Register, error) {
	iid, err := strconv.Atoi(id)
	if err != nil {
		return nil, err
	}
	return p.RegisterRepository.Find(iid)
}
func (p *RegisterService) All() ([]*models.Register, error) {
return p.RegisterRepository.All()
}
|
{
return RegisterService{RegisterRepository: p}
}
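Create above hashes the password with bcrypt at cost 14. A standalone sketch of the same hash-and-verify round trip with the error paths checked; the plaintext value is a placeholder, not anything from the service.

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash with the same cost (14) used by RegisterService.Create above.
	hash, err := bcrypt.GenerateFromPassword([]byte("placeholder-password"), 14)
	if err != nil {
		log.Fatal(err)
	}

	// Verify later, e.g. at login time.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("placeholder-password")); err != nil {
		log.Fatal("password mismatch")
	}
	fmt.Println("password ok")
}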
|
object.go
|
package codegen
import (
"fmt"
"go/types"
"strconv"
"strings"
"unicode"
"github.com/99designs/gqlgen/codegen/config"
"github.com/vektah/gqlparser/v2/ast"
)
type GoFieldType int
const (
GoFieldUndefined GoFieldType = iota
GoFieldMethod
GoFieldVariable
GoFieldMap
)
type Object struct {
*ast.Definition
Type types.Type
ResolverInterface types.Type
Root bool
Fields []*Field
Implements []*ast.Definition
DisableConcurrency bool
Stream bool
Directives []*Directive
}
func (b *builder) buildObject(typ *ast.Definition) (*Object, error) {
dirs, err := b.getDirectives(typ.Directives)
if err != nil {
return nil, fmt.Errorf("%s: %w", typ.Name, err)
}
obj := &Object{
Definition: typ,
Root: b.Schema.Query == typ || b.Schema.Mutation == typ || b.Schema.Subscription == typ,
DisableConcurrency: typ == b.Schema.Mutation,
Stream: typ == b.Schema.Subscription,
Directives: dirs,
ResolverInterface: types.NewNamed(
types.NewTypeName(0, b.Config.Exec.Pkg(), typ.Name+"Resolver", nil),
nil,
nil,
),
}
if !obj.Root {
goObject, err := b.Binder.DefaultUserObject(typ.Name)
if err != nil {
return nil, err
}
obj.Type = goObject
}
for _, intf := range b.Schema.GetImplements(typ) {
obj.Implements = append(obj.Implements, b.Schema.Types[intf.Name])
}
for _, field := range typ.Fields {
if strings.HasPrefix(field.Name, "__") {
continue
}
var f *Field
f, err = b.buildField(obj, field)
if err != nil {
return nil, err
}
obj.Fields = append(obj.Fields, f)
}
return obj, nil
}
func (o *Object) Reference() types.Type {
if config.IsNilable(o.Type) {
return o.Type
}
return types.NewPointer(o.Type)
}
type Objects []*Object
func (o *Object) Implementors() string {
satisfiedBy := strconv.Quote(o.Name)
for _, s := range o.Implements {
satisfiedBy += ", " + strconv.Quote(s.Name)
}
return "[]string{" + satisfiedBy + "}"
}
func (o *Object) HasResolvers() bool {
for _, f := range o.Fields {
if f.IsResolver {
return true
}
}
return false
}
|
for i := 0; i < o.Type.(*types.Named).NumMethods(); i++ {
if o.Type.(*types.Named).Method(i).Name() == "UnmarshalGQL" {
return true
}
}
return false
}
func (o *Object) HasDirectives() bool {
if len(o.Directives) > 0 {
return true
}
for _, f := range o.Fields {
if f.HasDirectives() {
return true
}
}
return false
}
func (o *Object) IsConcurrent() bool {
for _, f := range o.Fields {
if f.IsConcurrent() {
return true
}
}
return false
}
func (o *Object) IsReserved() bool {
return strings.HasPrefix(o.Definition.Name, "__")
}
func (o *Object) Description() string {
return o.Definition.Description
}
func (os Objects) ByName(name string) *Object {
for i, o := range os {
if strings.EqualFold(o.Definition.Name, name) {
return os[i]
}
}
return nil
}
func ucFirst(s string) string {
if s == "" {
return ""
}
r := []rune(s)
r[0] = unicode.ToUpper(r[0])
return string(r)
}
|
func (o *Object) HasUnmarshal() bool {
if o.Type == config.MapType {
return true
}
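HasUnmarshal above answers whether the bound Go type declares an UnmarshalGQL method by walking the method list of its *types.Named. The self-contained sketch below performs the same walk with the standard go/types checker; the sample source and the MyScalar type are made up for illustration and are not part of gqlgen.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package sample

type MyScalar string

func (m *MyScalar) UnmarshalGQL(v interface{}) error { return nil }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "sample.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("sample", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}

	named := pkg.Scope().Lookup("MyScalar").Type().(*types.Named)

	// Same loop shape as Object.HasUnmarshal above: scan the declared
	// methods of the named type for one called UnmarshalGQL.
	has := false
	for i := 0; i < named.NumMethods(); i++ {
		if named.Method(i).Name() == "UnmarshalGQL" {
			has = true
			break
		}
	}
	fmt.Println("has UnmarshalGQL:", has)
}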
|
package_with_version.rs
|
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::VersionBinding;
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PackageWithVersion {
pub name: String,
pub version: String,
pub binding: VersionBinding,
}
impl PackageWithVersion {
pub fn from_str(contents: &str) -> Self {
let split: Vec<&str> = contents.split(')').collect::<Vec<&str>>()[0]
.split(" (")
.collect();
if split.len() == 2 {
let name = split[0].to_string();
let mut version = split[1].to_string();
let mut version_binding = String::new();
loop {
let first = version.chars().next().unwrap();
if !(first == '=' || first == '>' || first == '<' || first == ' ') {
break;
} else {
version_binding.push(first);
version.remove(0);
}
}
PackageWithVersion {
name,
version,
|
binding: VersionBinding::from_str(&version_binding),
}
} else {
let name = split[0].to_string();
PackageWithVersion {
name,
version: String::new(),
binding: VersionBinding::Any,
}
}
}
}
| |
otpQrcode.go
|
package users
import (
"encoding/json"
"github.com/1uLang/zhiannet-api/common/server/edge_logins_server"
"github.com/TeaOSLab/EdgeAdmin/internal/configloaders"
"github.com/TeaOSLab/EdgeAdmin/internal/web/actions/actionutils"
"github.com/TeaOSLab/EdgeCommon/pkg/rpc/pb"
"github.com/iwind/TeaGo/maps"
"github.com/skip2/go-qrcode"
"github.com/xlzd/gotp"
)
type OtpQrcodeAction struct {
actionutils.ParentAction
}
func (this *OtpQrcodeAction) Init() {
this.Nav("", "", "")
}
func (this *OtpQrcodeAction) RunGet(params struct {
UserId uint64
}) {
otpInfo, err := edge_logins_server.GetInfoByUid(params.UserId)
if err != nil {
this.ErrorPage(err)
return
}
if otpInfo == nil || otpInfo.IsOn == 0 {
this.NotFound("userLogin", int64(params.UserId))
return
}
loginParams := maps.Map{}
err = json.Unmarshal([]byte(otpInfo.Params), &loginParams)
if err != nil {
this.ErrorPage(err)
return
}
secret := loginParams.GetString("secret")
	// current user info
userResp, err := this.RPC().UserRPC().FindEnabledUser(this.AdminContext(), &pb.FindEnabledUserRequest{UserId: int64(params.UserId)})
if err != nil {
this.ErrorPage(err)
return
}
admin := userResp.User
if admin == nil {
this.Not
|
err := configloaders.LoadAdminUIConfig()
if err != nil {
this.ErrorPage(err)
return
}
url := gotp.NewDefaultTOTP(secret).ProvisioningUri(admin.Username, uiConfig.AdminSystemName)
data, err := qrcode.Encode(url, qrcode.Medium, 256)
if err != nil {
this.ErrorPage(err)
return
}
this.AddHeader("Content-Type", "image/png")
this.Write(data)
}
|
Found("admin", int64(params.UserId))
return
}
uiConfig,
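The tail of RunGet above turns the stored OTP secret into an otpauth provisioning URI and renders it as a PNG QR code. A minimal standalone sketch of that flow using the same two libraries; the secret, account name, and issuer below are placeholders rather than values read from the login params.

package main

import (
	"log"
	"os"

	"github.com/skip2/go-qrcode"
	"github.com/xlzd/gotp"
)

func main() {
	// Placeholder base32 secret; the handler above reads it from the
	// stored OTP login params instead.
	secret := "JBSWY3DPEHPK3PXP"

	// Build the provisioning URI and encode it as a 256x256 PNG,
	// mirroring the last steps of RunGet.
	uri := gotp.NewDefaultTOTP(secret).ProvisioningUri("alice", "Example System")
	png, err := qrcode.Encode(uri, qrcode.Medium, 256)
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("otp.png", png, 0o644); err != nil {
		log.Fatal(err)
	}
}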
|
WindowMockup.tsx
|
import React, { forwardRef } from 'react'
import clsx from 'clsx'
import { twMerge } from 'tailwind-merge'
import { IComponentBaseProps } from '../types'
import { bgColors, brandColors, componentStatuses } from '../constants'
export const windowMockupColors = [
...bgColors,
...brandColors,
...componentStatuses,
] as const
type WindowMockupColors = typeof windowMockupColors[number]
export type WindowMockupProps = React.HTMLAttributes<HTMLDivElement> &
IComponentBaseProps & {
frameColor?: WindowMockupColors
backgroundColor?: WindowMockupColors
border?: boolean
borderColor?: WindowMockupColors
}
const WindowMockup = forwardRef<HTMLDivElement, WindowMockupProps>(
(
{
border,
borderColor,
backgroundColor,
frameColor,
dataTheme,
className,
children,
...props
},
ref
): JSX.Element => {
    // Set border color to frameColor, or fall back to 'base-300', when borderColor is not provided
const borderColorValue = borderColor
? borderColor
: frameColor || 'base-300'
const classes = twMerge(
'mockup-window',
border && `border border-${borderColorValue}`,
clsx({
[`border-${borderColorValue}`]: borderColorValue,
[`bg-${frameColor}`]: frameColor,
}),
className
)
// If border is true, then we need to add the border-t and padding classes to the children
// if more than one child is passed in, or the single child is not a valid element, then we need to wrap the child/children in a div
const numChildren = React.Children.count(children)
const firstChild = numChildren > 0 && React.Children.toArray(children)[0]
// List of classes that child element will have
const innerClasses = clsx(
backgroundColor && `bg-${backgroundColor}`,
border && `border-t border-${borderColorValue}`,
'p-4'
)
// Add the innerClasses to the child element, merging classNames if existing, or wrapping with div and adding innerClasses
const innerEl =
firstChild && React.isValidElement(firstChild) ? (
|
})
) : (
<div className={innerClasses}>{children}</div>
)
return (
<div
aria-label="Window mockup"
{...props}
data-theme={dataTheme}
className={classes}
ref={ref}
>
{innerEl}
</div>
)
}
)
WindowMockup.displayName = 'WindowMockup'
export default WindowMockup
|
React.cloneElement(firstChild, {
className: twMerge(innerClasses, firstChild.props.className),
|
rights_manager.py
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and functions that manage rights for various user actions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import copy
import logging
from constants import constants
from core.domain import activity_services
from core.domain import change_domain
from core.domain import role_services
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import feconf
import python_utils
import utils
current_user_services = models.Registry.import_current_user_services()
(collection_models, exp_models,) = models.Registry.import_models([
models.NAMES.collection, models.NAMES.exploration
])
# IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve
# backward-compatibility with previous exploration snapshots in the datastore.
# Do not modify the definitions of CMD keys that already exist.
CMD_CREATE_NEW = 'create_new'
CMD_CHANGE_ROLE = 'change_role'
CMD_CHANGE_EXPLORATION_STATUS = 'change_exploration_status'
CMD_CHANGE_COLLECTION_STATUS = 'change_collection_status'
CMD_CHANGE_PRIVATE_VIEWABILITY = 'change_private_viewability'
CMD_RELEASE_OWNERSHIP = 'release_ownership'
CMD_UPDATE_FIRST_PUBLISHED_MSEC = 'update_first_published_msec'
ACTIVITY_STATUS_PRIVATE = constants.ACTIVITY_STATUS_PRIVATE
ACTIVITY_STATUS_PUBLIC = constants.ACTIVITY_STATUS_PUBLIC
ROLE_OWNER = 'owner'
ROLE_EDITOR = 'editor'
ROLE_VOICE_ARTIST = 'voice artist'
ROLE_VIEWER = 'viewer'
ROLE_NONE = 'none'
ROLE_ADMIN = 'admin'
ROLE_MODERATOR = 'moderator'
# The allowed list of roles which can be used in the change_role command.
ALLOWED_ROLES = [ROLE_OWNER, ROLE_EDITOR, ROLE_VOICE_ARTIST, ROLE_VIEWER]
# The allowed list of statuses which can be used in the
# change_exploration_status and change_collection_status commands.
ALLOWED_STATUS = [ACTIVITY_STATUS_PRIVATE, ACTIVITY_STATUS_PUBLIC]
COMMON_ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_CHANGE_ROLE,
'required_attribute_names': ['assignee_id', 'old_role', 'new_role'],
'optional_attribute_names': [],
'allowed_values': {'new_role': ALLOWED_ROLES, 'old_role': ALLOWED_ROLES}
}, {
'name': CMD_CHANGE_PRIVATE_VIEWABILITY,
'required_attribute_names': [
'old_viewable_if_private', 'new_viewable_if_private'],
'optional_attribute_names': []
}, {
'name': CMD_RELEASE_OWNERSHIP,
'required_attribute_names': [],
'optional_attribute_names': [],
}, {
'name': CMD_UPDATE_FIRST_PUBLISHED_MSEC,
'required_attribute_names': [
'old_first_published_msec', 'new_first_published_msec'],
'optional_attribute_names': [],
}]
class ActivityRights(python_utils.OBJECT):
"""Domain object for the rights/publication status of an activity (an
exploration or a collection).
"""
def __init__(
self, exploration_id, owner_ids, editor_ids, voice_artist_ids,
viewer_ids, community_owned=False, cloned_from=None,
status=ACTIVITY_STATUS_PRIVATE, viewable_if_private=False,
first_published_msec=None):
self.id = exploration_id
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.voice_artist_ids = voice_artist_ids
self.viewer_ids = viewer_ids
self.community_owned = community_owned
self.cloned_from = cloned_from
self.status = status
self.viewable_if_private = viewable_if_private
self.first_published_msec = first_published_msec
def validate(self):
"""Validates an ActivityRights object.
Raises:
utils.ValidationError: if any of the owners, editors, voice artists
and viewers lists overlap, or if a community-owned exploration
has owners, editors, voice artists or viewers specified.
"""
if self.community_owned:
if (self.owner_ids or self.editor_ids or self.voice_artist_ids or
self.viewer_ids):
raise utils.ValidationError(
'Community-owned explorations should have no owners, '
'editors, voice artists or viewers specified.')
if self.community_owned and self.status == ACTIVITY_STATUS_PRIVATE:
raise utils.ValidationError(
'Community-owned explorations cannot be private.')
if self.status != ACTIVITY_STATUS_PRIVATE and self.viewer_ids:
raise utils.ValidationError(
'Public explorations should have no viewers specified.')
owner_editor = set(self.owner_ids) & set(self.editor_ids)
owner_voice_artist = set(self.owner_ids) & set(self.voice_artist_ids)
owner_viewer = set(self.owner_ids) & set(self.viewer_ids)
editor_voice_artist = set(self.editor_ids) & set(self.voice_artist_ids)
editor_viewer = set(self.editor_ids) & set(self.viewer_ids)
voice_artist_viewer = set(self.voice_artist_ids) & set(self.viewer_ids)
if owner_editor:
raise utils.ValidationError(
'A user cannot be both an owner and an editor: %s' %
owner_editor)
if owner_voice_artist:
raise utils.ValidationError(
'A user cannot be both an owner and a voice artist: %s' %
owner_voice_artist)
if owner_viewer:
raise utils.ValidationError(
'A user cannot be both an owner and a viewer: %s' %
owner_viewer)
if editor_voice_artist:
raise utils.ValidationError(
'A user cannot be both an editor and a voice artist: %s' %
editor_voice_artist)
if editor_viewer:
raise utils.ValidationError(
'A user cannot be both an editor and a viewer: %s' %
editor_viewer)
if voice_artist_viewer:
raise utils.ValidationError(
'A user cannot be both a voice artist and a viewer: %s' %
voice_artist_viewer)
def to_dict(self):
"""Returns a dict suitable for use by the frontend.
Returns:
dict. A dict version of ActivityRights suitable for use by the
frontend.
"""
if self.community_owned:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': True,
'owner_names': [],
'editor_names': [],
'voice_artist_names': [],
'viewer_names': [],
'viewable_if_private': self.viewable_if_private,
}
else:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': False,
'owner_names': user_services.get_human_readable_user_ids(
self.owner_ids),
'editor_names': user_services.get_human_readable_user_ids(
self.editor_ids),
'voice_artist_names': user_services.get_human_readable_user_ids(
self.voice_artist_ids),
'viewer_names': user_services.get_human_readable_user_ids(
self.viewer_ids),
'viewable_if_private': self.viewable_if_private,
}
def is_owner(self, user_id):
"""Checks whether given user is owner of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity owner.
"""
return bool(user_id in self.owner_ids)
def is_editor(self, user_id):
"""Checks whether given user is editor of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity editor.
"""
return bool(user_id in self.editor_ids)
def is_voice_artist(self, user_id):
"""Checks whether given user is voice artist of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity voice artist.
"""
return bool(user_id in self.voice_artist_ids)
def is_viewer(self, user_id):
"""Checks whether given user is viewer of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity viewer.
"""
return bool(user_id in self.viewer_ids)
def is_published(self):
"""Checks whether activity is published.
Returns:
bool. Whether activity is published.
"""
return bool(self.status == ACTIVITY_STATUS_PUBLIC)
def is_private(self):
"""Checks whether activity is private.
Returns:
bool. Whether activity is private.
"""
return bool(self.status == ACTIVITY_STATUS_PRIVATE)
class ActivityRightsChange(change_domain.BaseChange):
"""Domain object class for an activity rights change.
The allowed commands, together with the attributes:
- 'create_new'
- 'change_role' (with assignee_id, old_role, new_role)
- 'change_exploration_status' (with old_status, new_status)
- 'change_collection_status' (with old_status, new_status)
- 'change_private_viewability' (with
old_viewable_if_private, new_viewable_if_private)
- 'release_ownership'
- 'update_first_published_msec' (with
old_first_published_msec, new_first_published_msec)
A role must be one of the ALLOWED_ROLES.
A status must be one of the ALLOWED_STATUS.
"""
ALLOWED_COMMANDS = COMMON_ALLOWED_COMMANDS
class ExplorationRightsChange(ActivityRightsChange):
"""Domain object class for an exploration rights change."""
ALLOWED_COMMANDS = copy.deepcopy(COMMON_ALLOWED_COMMANDS)
ALLOWED_COMMANDS.append({
'name': CMD_CHANGE_EXPLORATION_STATUS,
'required_attribute_names': ['old_status', 'new_status'],
'optional_attribute_names': [],
'allowed_values': {
'old_status': ALLOWED_STATUS, 'new_status': ALLOWED_STATUS}
})
class CollectionRightsChange(ActivityRightsChange):
"""Domain object class for an collection rights change."""
ALLOWED_COMMANDS = copy.deepcopy(COMMON_ALLOWED_COMMANDS)
ALLOWED_COMMANDS.append({
'name': CMD_CHANGE_COLLECTION_STATUS,
'required_attribute_names': ['old_status', 'new_status'],
'optional_attribute_names': [],
'allowed_values': {
'old_status': ALLOWED_STATUS, 'new_status': ALLOWED_STATUS}
})
def get_activity_rights_from_model(activity_rights_model, activity_type):
"""Constructs an ActivityRights object from the given activity rights model.
Args:
activity_rights_model: ActivityRightsModel. Activity rights from the
datastore.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Returns:
ActivityRights. The rights object created from the model.
"""
return ActivityRights(
activity_rights_model.id,
activity_rights_model.owner_ids,
activity_rights_model.editor_ids,
activity_rights_model.voice_artist_ids,
activity_rights_model.viewer_ids,
community_owned=activity_rights_model.community_owned,
cloned_from=(
activity_rights_model.cloned_from
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION else None),
status=activity_rights_model.status,
viewable_if_private=activity_rights_model.viewable_if_private,
first_published_msec=activity_rights_model.first_published_msec
)
def _save_activity_rights(
committer_id, activity_rights, activity_type, commit_message,
commit_cmds):
"""Saves an ExplorationRights or CollectionRights domain object to the
datastore.
Args:
committer_id: str. ID of the committer.
activity_rights: ActivityRights. The rights object for the given
activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
commit_message: str. Descriptive message for the commit.
commit_cmds: list(dict). A list of commands describing what kind of
commit was done.
"""
activity_rights.validate()
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
model_cls = exp_models.ExplorationRightsModel
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
model_cls = collection_models.CollectionRightsModel
model = model_cls.get(activity_rights.id, strict=False)
model.owner_ids = activity_rights.owner_ids
model.editor_ids = activity_rights.editor_ids
model.viewer_ids = activity_rights.viewer_ids
model.voice_artist_ids = activity_rights.voice_artist_ids
model.community_owned = activity_rights.community_owned
model.status = activity_rights.status
model.viewable_if_private = activity_rights.viewable_if_private
model.first_published_msec = activity_rights.first_published_msec
model.commit(committer_id, commit_message, commit_cmds)
def _update_exploration_summary(activity_rights):
"""Updates the exploration summary for the activity associated with the
given rights object.
    The ID of the rights object is the same as the ID of the associated
    activity.
Args:
activity_rights: ActivityRights. The rights object for the given
activity.
"""
# TODO(msl): Get rid of inline imports by refactoring code.
from core.domain import exp_services
exp_services.update_exploration_summary(
activity_rights.id, None)
def _update_collection_summary(activity_rights):
"""Updates the collection summary for the given activity associated with
the given rights object.
The ID of rights object is the same as the ID of associated activity.
Args:
activity_rights: ActivityRights. The rights object for the given
activity.
"""
from core.domain import collection_services
collection_services.update_collection_summary(
activity_rights.id, None)
def _update_activity_summary(activity_type, activity_rights):
"""Updates the activity summary for the given activity associated with
the given rights object.
The ID of rights object is the same as the ID of associated activity.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_rights: ActivityRights. The rights object for the given
activity.
"""
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
_update_exploration_summary(activity_rights)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
_update_collection_summary(activity_rights)
def update_activity_first_published_msec(
activity_type, activity_id, first_published_msec):
"""Updates the first_published_msec field for the given activity.
The caller is responsible for ensuring that this value is not already
set before updating it.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_id: str. ID of the activity.
first_published_msec: float. First publication time in milliseconds
since the Epoch.
"""
activity_rights = _get_activity_rights(activity_type, activity_id)
commit_cmds = [{
'cmd': CMD_UPDATE_FIRST_PUBLISHED_MSEC,
'old_first_published_msec': activity_rights.first_published_msec,
'new_first_published_msec': first_published_msec
}]
activity_rights.first_published_msec = first_published_msec
_save_activity_rights(
feconf.SYSTEM_COMMITTER_ID, activity_rights, activity_type,
'set first published time in msec', commit_cmds)
def create_new_exploration_rights(exploration_id, committer_id):
"""Creates a new exploration rights object and saves it to the datastore.
Subscribes the committer to the new exploration.
Args:
exploration_id: str. ID of the exploration.
committer_id: str. ID of the committer.
"""
exploration_rights = ActivityRights(
exploration_id, [committer_id], [], [], [])
commit_cmds = [{'cmd': CMD_CREATE_NEW}]
exp_models.ExplorationRightsModel(
id=exploration_rights.id,
owner_ids=exploration_rights.owner_ids,
editor_ids=exploration_rights.editor_ids,
voice_artist_ids=exploration_rights.voice_artist_ids,
viewer_ids=exploration_rights.viewer_ids,
community_owned=exploration_rights.community_owned,
status=exploration_rights.status,
viewable_if_private=exploration_rights.viewable_if_private,
first_published_msec=exploration_rights.first_published_msec,
).commit(committer_id, 'Created new exploration', commit_cmds)
subscription_services.subscribe_to_exploration(
committer_id, exploration_id)
def get_exploration_rights(exploration_id, strict=True):
"""Retrieves the rights for this exploration from the datastore.
Args:
exploration_id: str. ID of the exploration.
strict: bool. Whether to raise an error if there is no exploration
matching the given ID.
Returns:
ActivityRights. The rights object for the given exploration.
Raises:
EntityNotFoundError. The exploration with ID exploration_id was not
found in the datastore.
"""
model = exp_models.ExplorationRightsModel.get(
exploration_id, strict=strict)
if model is None:
return None
return get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_EXPLORATION)
def get_multiple_exploration_rights_by_ids(exp_ids):
"""Returns a list of ActivityRights objects for given exploration ids.
Args:
exp_ids: list(str). List of exploration ids.
Returns:
        list(ActivityRights or None). List of ActivityRights objects for the
        existing explorations, with None for any exploration that does not
        exist.
"""
exp_rights_models = exp_models.ExplorationRightsModel.get_multi(
exp_ids)
exp_models_list = []
for model in exp_rights_models:
if model is None:
exp_models_list.append(None)
else:
exp_models_list.append(
get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_EXPLORATION))
return exp_models_list
def is_exploration_private(exploration_id):
"""Returns whether exploration is private.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is private or not.
"""
exploration_rights = get_exploration_rights(exploration_id)
return exploration_rights.status == ACTIVITY_STATUS_PRIVATE
def is_exploration_public(exploration_id):
"""Returns whether exploration is public.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is public.
"""
exploration_rights = get_exploration_rights(exploration_id)
return exploration_rights.status == ACTIVITY_STATUS_PUBLIC
def is_exploration_cloned(exploration_id):
"""Returns whether the exploration is a clone of another exploration.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is a clone of another exploration.
"""
exploration_rights = get_exploration_rights(exploration_id)
return bool(exploration_rights.cloned_from)
def create_new_collection_rights(collection_id, committer_id):
"""Creates a new collection rights object and saves it to the datastore.
Subscribes the committer to the new collection.
Args:
collection_id: str. ID of the collection.
committer_id: str. ID of the committer.
"""
collection_rights = ActivityRights(
collection_id, [committer_id], [], [], [])
commit_cmds = [{'cmd': CMD_CREATE_NEW}]
collection_models.CollectionRightsModel(
id=collection_rights.id,
owner_ids=collection_rights.owner_ids,
editor_ids=collection_rights.editor_ids,
voice_artist_ids=collection_rights.voice_artist_ids,
viewer_ids=collection_rights.viewer_ids,
community_owned=collection_rights.community_owned,
status=collection_rights.status,
viewable_if_private=collection_rights.viewable_if_private,
first_published_msec=collection_rights.first_published_msec
).commit(committer_id, 'Created new collection', commit_cmds)
subscription_services.subscribe_to_collection(committer_id, collection_id)
def get_collection_rights(collection_id, strict=True):
"""Retrieves the rights for this collection from the datastore.
Args:
collection_id: str. ID of the collection.
strict: bool. Whether to raise an error if ID is not found.
Returns:
ActivityRights. The rights object for the collection.
Raises:
EntityNotFoundError. The collection with ID collection_id is not found
in the datastore.
"""
model = collection_models.CollectionRightsModel.get(
collection_id, strict=strict)
if model is None:
return None
return get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_COLLECTION)
def get_collection_owner_names(collection_id):
"""Retrieves the owners for this collection from the datastore.
Args:
collection_id: str. ID of the collection.
Returns:
list(str). Human-readable usernames (or truncated email addresses) of
owners for this collection.
"""
collection_rights = get_collection_rights(collection_id)
return user_services.get_human_readable_user_ids(
collection_rights.owner_ids)
def is_collection_private(collection_id):
"""Returns whether the collection is private.
Args:
collection_id: str. ID of the collection.
Returns:
bool. Whether the collection is private.
"""
collection_rights = get_collection_rights(collection_id)
return collection_rights.status == ACTIVITY_STATUS_PRIVATE
def is_collection_public(collection_id):
"""Returns whether the collection is public.
Args:
collection_id: str. ID of the collection.
Returns:
bool. Whether the collection is public.
"""
collection_rights = get_collection_rights(collection_id)
return collection_rights.status == ACTIVITY_STATUS_PUBLIC
def _get_activity_rights(activity_type, activity_id):
"""Retrieves the rights object for the given activity
based on its type.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_id: str. ID of the activity.
Returns:
ActivityRights. The rights object associated with the given activity.
Raises:
Exception. activity_type provided is unknown.
"""
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
return get_exploration_rights(activity_id, strict=False)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
return get_collection_rights(activity_id, strict=False)
else:
raise Exception(
'Cannot get activity rights for unknown activity type: %s' % (
activity_type))
def check_can_access_activity(user, activity_rights):
"""Checks whether the user can access given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
        activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given activity can be accessed by the given user.
"""
if activity_rights is None:
return False
elif activity_rights.is_published():
return bool(
role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY in user.actions)
elif activity_rights.is_private():
return bool(
(role_services.ACTION_PLAY_ANY_PRIVATE_ACTIVITY in user.actions) or
activity_rights.is_viewer(user.user_id) or
activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id) or
activity_rights.is_voice_artist(user.user_id) or
activity_rights.viewable_if_private)
def check_can_edit_activity(user, activity_rights):
"""Checks whether the user can edit given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given user can edit this activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions:
return False
if (activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id)):
return True
if (activity_rights.community_owned or
(role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)):
return True
if (activity_rights.is_published() and
(role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in
user.actions)):
return True
return False
def check_can_voiceover_activity(user, activity_rights):
"""Checks whether the user can voiceover given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given user can voiceover this activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions:
return False
if (activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id) or
activity_rights.is_voice_artist(user.user_id)):
return True
if (activity_rights.community_owned or
(role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)):
return True
if (activity_rights.is_published() and
(role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in
user.actions)):
return True
return False
def check_can_save_activity(user, activity_rights):
"""Checks whether the user can save given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can save given activity.
"""
return (check_can_edit_activity(user, activity_rights) or (
check_can_voiceover_activity(user, activity_rights)))
def check_can_delete_activity(user, activity_rights):
"""Checks whether the user can delete given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can delete given activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_DELETE_ANY_ACTIVITY in user.actions:
return True
elif (activity_rights.is_private() and
(role_services.ACTION_DELETE_OWNED_PRIVATE_ACTIVITY in user.actions)
and activity_rights.is_owner(user.user_id)):
return True
elif (activity_rights.is_published() and
(role_services.ACTION_DELETE_ANY_PUBLIC_ACTIVITY in user.actions)):
return True
return False
def check_can_modify_activity_roles(user, activity_rights):
"""Checks whether the user can modify roles for given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can modify roles for given activity.
"""
if activity_rights is None:
return False
if (activity_rights.community_owned or
activity_rights.cloned_from):
return False
if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY in
user.actions):
return True
if (role_services.ACTION_MODIFY_ROLES_FOR_OWNED_ACTIVITY in
user.actions):
if activity_rights.is_owner(user.user_id):
return True
return False
def check_can_release_ownership(user, activity_rights):
"""Checks whether the user can release ownership for given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can release ownership for given activity.
"""
if activity_rights is None:
return False
if activity_rights.is_private():
return False
return check_can_modify_activity_roles(
user, activity_rights)
def check_can_publish_activity(user, activity_rights):
"""Checks whether the user can publish given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can publish given activity.
"""
if activity_rights is None:
return False
if activity_rights.cloned_from:
return False
if activity_rights.is_published():
return False
if role_services.ACTION_PUBLISH_ANY_ACTIVITY in user.actions:
return True
if role_services.ACTION_PUBLISH_OWNED_ACTIVITY in user.actions:
if activity_rights.is_owner(user.user_id):
return True
return False
def check_can_unpublish_activity(user, activity_rights):
"""Checks whether the user can unpublish given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can unpublish given activity.
"""
if activity_rights is None:
return False
if activity_rights.community_owned:
return False
if activity_rights.is_published():
if role_services.ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY in user.actions:
return True
return False
def _assign_role(
committer, assignee_id, new_role, activity_id, activity_type):
"""Assigns a new role to the user.
Args:
committer: UserActionsInfo. UserActionInfo object for the user
who is performing the action.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
ROLE_VOICE_ARTIST
ROLE_VIEWER
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to modify a role.
Exception. The user already owns the activity.
Exception. The user can already edit the activity.
Exception. The user can already voiceover the activity.
Exception. The activity is already publicly editable.
Exception. The activity is already publicly translatable.
Exception. The user can already view the activity.
Exception. The activity is already publicly viewable.
Exception. The role is invalid.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_modify_activity_roles(committer, activity_rights):
logging.error(
'User %s tried to allow user %s to be a(n) %s of activity %s '
'but was refused permission.' % (
committer_id, assignee_id, new_role, activity_id))
raise Exception(
'UnauthorizedUserException: Could not assign new role.')
assignee_username = user_services.get_username(assignee_id)
old_role = ROLE_NONE
if new_role == ROLE_OWNER:
if activity_rights.is_owner(assignee_id):
raise Exception('This user already owns this %s.' % activity_type)
activity_rights.owner_ids.append(assignee_id)
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
if assignee_id in activity_rights.editor_ids:
activity_rights.editor_ids.remove(assignee_id)
old_role = ROLE_EDITOR
if assignee_id in activity_rights.voice_artist_ids:
activity_rights.voice_artist_ids.remove(assignee_id)
old_role = ROLE_VOICE_ARTIST
elif new_role == ROLE_EDITOR:
if (activity_rights.is_editor(assignee_id) or
activity_rights.is_owner(assignee_id)):
raise Exception(
'This user already can edit this %s.' % activity_type)
activity_rights.editor_ids.append(assignee_id)
if assignee_id in activity_rights.voice_artist_ids:
activity_rights.voice_artist_ids.remove(assignee_id)
old_role = ROLE_VOICE_ARTIST
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
elif new_role == ROLE_VOICE_ARTIST:
if (activity_rights.is_editor(assignee_id) or
activity_rights.is_voice_artist(assignee_id) or
activity_rights.is_owner(assignee_id)):
raise Exception(
'This user already can voiceover this %s.' % activity_type)
activity_rights.voice_artist_ids.append(assignee_id)
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
elif new_role == ROLE_VIEWER:
if (activity_rights.is_owner(assignee_id) or
activity_rights.is_editor(assignee_id) or
activity_rights.is_viewer(assignee_id)):
raise Exception(
'This user already can view this %s.' % activity_type)
if activity_rights.status != ACTIVITY_STATUS_PRIVATE:
raise Exception(
'Public %ss can be viewed by anyone.' % activity_type)
activity_rights.viewer_ids.append(assignee_id)
else:
raise Exception('Invalid role: %s' % new_role)
commit_message = 'Changed role of %s from %s to %s' % (
assignee_username, old_role, new_role)
commit_cmds = [{
'cmd': CMD_CHANGE_ROLE,
'assignee_id': assignee_id,
'old_role': old_role,
'new_role': new_role
}]
_save_activity_rights(
committer_id, activity_rights, activity_type,
commit_message, commit_cmds)
_update_activity_summary(activity_type, activity_rights)
def _release_ownership_of_activity(committer, activity_id, activity_type):
"""Releases ownership of the given activity to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the user who
is performing the action.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
    Raises:
Exception. The committer does not have release rights.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_release_ownership(committer, activity_rights):
logging.error(
'User %s tried to release ownership of %s %s but was '
'refused permission.' % (committer_id, activity_type, activity_id))
raise Exception(
'The ownership of this %s cannot be released.' % activity_type)
activity_rights.community_owned = True
activity_rights.owner_ids = []
activity_rights.editor_ids = []
activity_rights.viewer_ids = []
commit_cmds = [{
'cmd': CMD_RELEASE_OWNERSHIP,
}]
_save_activity_rights(
committer_id, activity_rights, activity_type,
'%s ownership released to the community.' % activity_type, commit_cmds)
_update_activity_summary(activity_type, activity_rights)
def _change_activity_status(
committer_id, activity_id, activity_type, new_status, commit_message):
"""Changes the status of the given activity.
Args:
committer_id: str. ID of the user who is performing the update action.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
new_status: str. The new status of the activity.
commit_message: str. The human-written commit message for this change.
"""
activity_rights = _get_activity_rights(activity_type, activity_id)
old_status = activity_rights.status
activity_rights.status = new_status
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
cmd_type = CMD_CHANGE_EXPLORATION_STATUS
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
cmd_type = CMD_CHANGE_COLLECTION_STATUS
commit_cmds = [{
'cmd': cmd_type,
'old_status': old_status,
'new_status': new_status
}]
if new_status != ACTIVITY_STATUS_PRIVATE:
activity_rights.viewer_ids = []
if activity_rights.first_published_msec is None:
activity_rights.first_published_msec = (
utils.get_current_time_in_millisecs())
_save_activity_rights(
committer_id, activity_rights, activity_type, commit_message,
commit_cmds)
_update_activity_summary(activity_type, activity_rights)
def _publish_activity(committer, activity_id, activity_type):
"""Publishes the given activity.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to publish the
activity.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_publish_activity(committer, activity_rights):
logging.error(
'User %s tried to publish %s %s but was refused '
'permission.' % (committer_id, activity_type, activity_id))
raise Exception('This %s cannot be published.' % activity_type)
_change_activity_status(
committer_id, activity_id, activity_type, ACTIVITY_STATUS_PUBLIC,
'%s published.' % activity_type)
def _unpublish_activity(committer, activity_id, activity_type):
"""Unpublishes the given activity.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to unpublish the
activity.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_unpublish_activity(committer, activity_rights):
logging.error(
'User %s tried to unpublish %s %s but was refused '
'permission.' % (committer_id, activity_type, activity_id))
raise Exception('This %s cannot be unpublished.' % activity_type)
_change_activity_status(
committer_id, activity_id, activity_type, ACTIVITY_STATUS_PRIVATE,
'%s unpublished.' % activity_type)
activity_services.remove_featured_activity(activity_type, activity_id)
# Rights functions for activities.
def assign_role_for_exploration(
committer, exploration_id, assignee_id, new_role):
"""Assigns a user to the given role and subscribes the assignee to future
exploration updates.
The caller should ensure that assignee_id corresponds to a valid user in
the system.
Args:
committer: UserActionsInfo. The UserActionsInfo object for the
committer.
exploration_id: str. ID of the exploration.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
ROLE_VOICE_ARTIST
Raises:
Exception. This could potentially throw an exception from
_assign_role.
"""
_assign_role(
committer, assignee_id, new_role, exploration_id,
constants.ACTIVITY_TYPE_EXPLORATION)
if new_role in [ROLE_OWNER, ROLE_EDITOR, ROLE_VOICE_ARTIST]:
subscription_services.subscribe_to_exploration(
assignee_id, exploration_id)
def release_ownership_of_exploration(committer, exploration_id):
"""Releases ownership of the given exploration to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_release_ownership_of_activity.
"""
_release_ownership_of_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
def set_private_viewability_of_exploration(
committer, exploration_id, viewable_if_private):
"""Sets the viewable_if_private attribute for the given exploration's rights
object.
If viewable_if_private is True, this allows a private exploration
to be viewed by anyone with the link.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
viewable_if_private: bool. Whether the exploration should be made
viewable (by anyone with the link).
Raises:
        Exception. The committer does not have permission to perform the
            change action.
        Exception. The viewable_if_private property is already set to the
            desired value.
"""
committer_id = committer.user_id
exploration_rights = get_exploration_rights(exploration_id)
# The user who can publish activity can change its private viewability.
if not check_can_publish_activity(committer, exploration_rights):
logging.error(
'User %s tried to change private viewability of exploration %s '
'but was refused permission.' % (committer_id, exploration_id))
raise Exception(
'The viewability status of this exploration cannot be changed.')
old_viewable_if_private = exploration_rights.viewable_if_private
if old_viewable_if_private == viewable_if_private:
raise Exception(
'Trying to change viewability status of this exploration to %s, '
'but that is already the current value.' % viewable_if_private)
exploration_rights.viewable_if_private = viewable_if_private
commit_cmds = [{
'cmd': CMD_CHANGE_PRIVATE_VIEWABILITY,
'old_viewable_if_private': old_viewable_if_private,
'new_viewable_if_private': viewable_if_private,
}]
commit_message = (
'Made exploration viewable to anyone with the link.'
if viewable_if_private else
'Made exploration viewable only to invited playtesters.')
_save_activity_rights(
committer_id, exploration_rights, constants.ACTIVITY_TYPE_EXPLORATION,
commit_message, commit_cmds)
_update_exploration_summary(exploration_rights)
def publish_exploration(committer, exploration_id):
"""Publishes the given exploration.
It is the responsibility of the caller to check that the exploration is
valid prior to publication.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_publish_activity.
"""
_publish_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
def unpublish_exploration(committer, exploration_id):
"""Unpublishes the given exploration.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_unpublish_activity.
"""
_unpublish_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
# Rights functions for collections.
def assign_role_for_collection(
committer, collection_id, assignee_id, new_role):
"""Assign the given user to the given role and subscribes the assignee
to future collection updates.
The caller should ensure that assignee_id corresponds to a valid user in
the system.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
|
_assign_role(
committer, assignee_id, new_role, collection_id,
constants.ACTIVITY_TYPE_COLLECTION)
if new_role in [ROLE_OWNER, ROLE_EDITOR]:
subscription_services.subscribe_to_collection(
assignee_id, collection_id)
def release_ownership_of_collection(committer, collection_id):
"""Releases ownership of the given collection to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_release_ownership_of_activity.
"""
_release_ownership_of_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
def publish_collection(committer, collection_id):
"""Publishes the given collection.
It is the responsibility of the caller to check that the collection is
valid prior to publication.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_publish_activity.
"""
_publish_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
def unpublish_collection(committer, collection_id):
"""Unpublishes the given collection.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_unpublish_activity.
"""
_unpublish_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
|
Raises:
Exception. This could potentially throw an exception from
_assign_role.
"""
|
result.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"fmt"
"reflect"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/watch"
)
// ErrMatchFunc can be used to filter errors that may not be true failures.
type ErrMatchFunc func(error) bool
// Result contains helper methods for dealing with the outcome of a Builder.
type Result struct {
err error
visitor Visitor
sources []Visitor
singular bool
ignoreErrors []utilerrors.Matcher
// populated by a call to Infos
info []*Info
}
// IgnoreErrors will filter errors that occur when visiting the result
// (but not errors that occur by creating the result in the first place),
// eliminating any that match fns. This is best used in combination with
// Builder.ContinueOnError(), where the visitors accumulate errors and return
// them after visiting as a slice of errors. If no errors remain after
// filtering, the various visitor methods on Result will return nil for
// err.
func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result {
for _, fn := range fns {
r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn))
}
return r
}
// Err returns one or more errors (via a util.ErrorList) that occurred prior
// to visiting the elements in the visitor. To see all errors including those
// that occur during visitation, invoke Infos().
func (r *Result) Err() error {
return r.err
}
// Visit implements the Visitor interface on the items described in the Builder.
// Note that some visitor sources are not traversable more than once, or may
// return different results. If you wish to operate on the same set of resources
// multiple times, use the Infos() method.
func (r *Result) Visit(fn VisitorFunc) error {
if r.err != nil {
return r.err
}
err := r.visitor.Visit(fn)
return utilerrors.FilterOut(err, r.ignoreErrors...)
}
// IntoSingular sets the provided boolean pointer to true if the Builder input
// reflected a single item, and to false if it reflected multiple items.
func (r *Result) IntoSingular(b *bool) *Result {
*b = r.singular
return r
}
// Infos returns an array of all of the resource infos retrieved via traversal.
// Will attempt to traverse the entire set of visitors only once, and will return
// a cached list on subsequent calls.
func (r *Result) Infos() ([]*Info, error) {
if r.err != nil {
return nil, r.err
}
if r.info != nil {
return r.info, nil
}
infos := []*Info{}
err := r.visitor.Visit(func(info *Info, err error) error {
if err != nil {
return err
}
infos = append(infos, info)
return nil
})
err = utilerrors.FilterOut(err, r.ignoreErrors...)
r.info, r.err = infos, err
return infos, err
}
// Object returns a single object representing the output of a single visit to all
// found resources. If the Builder was a singular context (expected to return a
// single resource by user input) and only a single resource was found, the resource
// will be returned as is. Otherwise, the returned resources will be part of an
// api.List. The ResourceVersion of the api.List will be set only if it is identical
// across all infos returned.
func (r *Result) Object() (runtime.Object, error) {
infos, err := r.Infos()
if err != nil {
return nil, err
}
versions := sets.String{}
objects := []runtime.Object{}
for _, info := range infos {
if info.Object != nil {
objects = append(objects, info.Object)
versions.Insert(info.ResourceVersion)
}
}
if len(objects) == 1 {
if r.singular {
return objects[0], nil
}
// if the item is a list already, don't create another list
if meta.IsListType(objects[0]) {
return objects[0], nil
}
}
version := ""
if len(versions) == 1 {
version = versions.List()[0]
}
return &api.List{
ListMeta: metav1.ListMeta{
ResourceVersion: version,
},
Items: objects,
}, err
}
// ResourceMapping returns a single meta.RESTMapping representing the
// resources located by the builder, or an error if more than one
// mapping was found.
func (r *Result) ResourceMapping() (*meta.RESTMapping, error) {
if r.err != nil {
return nil, r.err
}
mappings := map[string]*meta.RESTMapping{}
for i := range r.sources {
m, ok := r.sources[i].(ResourceMapping)
if !ok {
return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i]))
}
mapping := m.ResourceMapping()
mappings[mapping.Resource] = mapping
}
if len(mappings) != 1 {
return nil, fmt.Errorf("expected only a single resource type")
}
for _, mapping := range mappings {
return mapping, nil
}
return nil, nil
}
// Watch retrieves changes that occur on the server to the specified resource.
// It currently supports watching a single source - if the resource source
// (selectors or pure types) can be watched, they will be, otherwise the list
// will be visited (equivalent to the Infos() call) and if there is a single
// resource present, it will be watched, otherwise an error will be returned.
func (r *Result) Watch(resourceVersion string) (watch.Interface, error) {
if r.err != nil {
return nil, r.err
}
if len(r.sources) != 1 {
return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time")
}
w, ok := r.sources[0].(Watchable)
if !ok {
info, err := r.Infos()
if err != nil {
return nil, err
}
if len(info) != 1 {
return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info))
}
return info[0].Watch(resourceVersion)
}
return w.Watch(resourceVersion)
}
// AsVersionedObject converts a list of infos into a single object - either a List containing
// the objects as children, or if only a single Object is present, as that object. The provided
// version will be preferred as the conversion target, but the Object's mapping version will be
// used if that version is not present.
func AsVersionedObject(infos []*Info, forceList bool, version schema.GroupVersion, encoder runtime.Encoder) (runtime.Object, error) {
objects, err := AsVersionedObjects(infos, version, encoder)
if err != nil {
return nil, err
}
var object runtime.Object
if len(objects) == 1 && !forceList {
object = objects[0]
} else {
object = &api.List{Items: objects}
converted, err := TryConvert(api.Scheme, object, version, registered.GroupOrDie(api.GroupName).GroupVersion)
if err != nil {
return nil, err
}
object = converted
}
return object, nil
}
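// Minimal usage sketch (illustrative only): assumes infos came from Result.Infos()
// and encoder is a runtime.Encoder chosen by the caller.
//
//	obj, err := AsVersionedObject(infos, false, schema.GroupVersion{Version: "v1"}, encoder)
//	if err != nil {
//		return err
//	}
//	// obj is the lone object when exactly one info was supplied and forceList
//	// is false; otherwise it is an api.List converted to the requested version.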
// AsVersionedObjects converts a list of infos into versioned objects. The provided
// version will be preferred as the conversion target, but the Object's mapping version will be
// used if that version is not present.
func AsVersionedObjects(infos []*Info, version schema.GroupVersion, encoder runtime.Encoder) ([]runtime.Object, error) {
objects := []runtime.Object{}
for _, info := range infos {
if info.Object == nil {
continue
}
// TODO: use info.VersionedObject as the value?
switch obj := info.Object.(type) {
case *extensions.ThirdPartyResourceData:
objects = append(objects, &runtime.Unknown{Raw: obj.Data})
continue
}
// objects that are not part of api.Scheme must be converted to JSON
// TODO: convert to map[string]interface{}, attach to runtime.Unknown?
if !version.Empty() {
if _, _, err := api.Scheme.ObjectKinds(info.Object); runtime.IsNotRegisteredError(err) {
// TODO: ideally this would encode to version, but we don't expose multiple codecs here.
data, err := runtime.Encode(encoder, info.Object)
if err != nil {
return nil, err
}
// TODO: Set ContentEncoding and ContentType.
objects = append(objects, &runtime.Unknown{Raw: data})
continue
}
}
converted, err := TryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion())
if err != nil {
return nil, err
}
objects = append(objects, converted)
}
return objects, nil
}
// TryConvert attempts to convert the given object to the provided versions in order. This function assumes
// the object is in internal version.
func
|
(converter runtime.ObjectConvertor, object runtime.Object, versions ...schema.GroupVersion) (runtime.Object, error) {
var last error
for _, version := range versions {
if version.Empty() {
return object, nil
}
obj, err := converter.ConvertToVersion(object, version)
if err != nil {
last = err
continue
}
return obj, nil
}
return nil, last
}
|
TryConvert
|
collect.py
|
import unittest
from core import collect
class TestCollect(unittest.TestCase):
def test_if_we_get_viz_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
|
def test_if_we_get_yen_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_yen()
assert response[0].keys() == mock_data.keys()
def test_if_we_get_sevenseas_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_seven_seas()
assert response[0].keys() == mock_data.keys()
def test_if_we_get_darkhorse_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_dark_horse()
assert response[0].keys() == mock_data.keys()
def test_if_we_get_kodansha_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_kodansha()
assert response[0].keys() == mock_data.keys()
|
response = collect.get_viz()
assert response[0].keys() == mock_data.keys()
|
jobs_controller_test.go
|
package web_test
import (
"bytes"
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"testing"
"time"
"github.com/stretchr/testify/mock"
"github.com/smartcontractkit/chainlink/core/services/eth"
"github.com/pelletier/go-toml"
"github.com/smartcontractkit/chainlink/core/internal/cltest"
"github.com/smartcontractkit/chainlink/core/services/job"
"github.com/smartcontractkit/chainlink/core/store/models"
"github.com/smartcontractkit/chainlink/core/web"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/guregu/null.v4"
)
func TestJobsController_Create_ValidationFailure(t *testing.T) {
var (
contractAddress = cltest.NewEIP55Address()
)
var tt = []struct {
name string
pid models.PeerID
kb models.Sha256Hash
taExists bool
expectedErr error
}{
{
name: "invalid keybundle",
pid: models.PeerID(cltest.DefaultP2PPeerID),
kb: models.Sha256Hash(cltest.Random32Byte()),
taExists: true,
expectedErr: job.ErrNoSuchKeyBundle,
},
{
name: "invalid peerID",
pid: models.PeerID(cltest.NonExistentP2PPeerID),
kb: cltest.DefaultOCRKeyBundleIDSha256,
taExists: true,
expectedErr: job.ErrNoSuchPeerID,
},
{
name: "invalid transmitter address",
pid: models.PeerID(cltest.DefaultP2PPeerID),
kb: cltest.DefaultOCRKeyBundleIDSha256,
taExists: false,
expectedErr: job.ErrNoSuchTransmitterAddress,
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
ta, client, cleanup := setupJobsControllerTests(t)
defer cleanup()
var address models.EIP55Address
if tc.taExists {
key := cltest.MustInsertRandomKey(t, ta.Store.DB)
address = key.Address
} else {
address = cltest.NewEIP55Address()
}
sp := cltest.MinimalOCRNonBootstrapSpec(contractAddress, address, tc.pid, tc.kb)
body, _ := json.Marshal(models.CreateJobSpecRequest{
TOML: sp,
})
resp, cleanup := client.Post("/v2/jobs", bytes.NewReader(body))
defer cleanup()
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
b, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
assert.Contains(t, string(b), tc.expectedErr.Error())
})
}
}
func
|
(t *testing.T) {
app, client, cleanup := setupJobsControllerTests(t)
defer cleanup()
toml := string(cltest.MustReadFile(t, "testdata/oracle-spec.toml"))
toml = strings.Replace(toml, "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4", app.Key.Address.Hex(), 1)
body, _ := json.Marshal(models.CreateJobSpecRequest{
TOML: toml,
})
response, cleanup := client.Post("/v2/jobs", bytes.NewReader(body))
defer cleanup()
require.Equal(t, http.StatusOK, response.StatusCode)
jb := job.SpecDB{}
require.NoError(t, app.Store.DB.Preload("OffchainreportingOracleSpec").First(&jb).Error)
ocrJobSpec := job.SpecDB{}
err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &ocrJobSpec)
assert.NoError(t, err)
assert.Equal(t, "web oracle spec", jb.Name.ValueOrZero())
assert.Equal(t, jb.OffchainreportingOracleSpec.P2PPeerID, ocrJobSpec.OffchainreportingOracleSpec.P2PPeerID)
assert.Equal(t, jb.OffchainreportingOracleSpec.P2PBootstrapPeers, ocrJobSpec.OffchainreportingOracleSpec.P2PBootstrapPeers)
assert.Equal(t, jb.OffchainreportingOracleSpec.IsBootstrapPeer, ocrJobSpec.OffchainreportingOracleSpec.IsBootstrapPeer)
assert.Equal(t, jb.OffchainreportingOracleSpec.EncryptedOCRKeyBundleID, ocrJobSpec.OffchainreportingOracleSpec.EncryptedOCRKeyBundleID)
assert.Equal(t, jb.OffchainreportingOracleSpec.TransmitterAddress, ocrJobSpec.OffchainreportingOracleSpec.TransmitterAddress)
assert.Equal(t, jb.OffchainreportingOracleSpec.ObservationTimeout, ocrJobSpec.OffchainreportingOracleSpec.ObservationTimeout)
assert.Equal(t, jb.OffchainreportingOracleSpec.BlockchainTimeout, ocrJobSpec.OffchainreportingOracleSpec.BlockchainTimeout)
assert.Equal(t, jb.OffchainreportingOracleSpec.ContractConfigTrackerSubscribeInterval, ocrJobSpec.OffchainreportingOracleSpec.ContractConfigTrackerSubscribeInterval)
assert.Equal(t, jb.OffchainreportingOracleSpec.ContractConfigConfirmations, ocrJobSpec.OffchainreportingOracleSpec.ContractConfigConfirmations)
assert.NotNil(t, ocrJobSpec.PipelineSpec.DotDagSource)
// Sanity check to make sure it inserted correctly
require.Equal(t, models.EIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C"), jb.OffchainreportingOracleSpec.ContractAddress)
}
func TestJobsController_Create_HappyPath_DirectRequestSpec(t *testing.T) {
rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t)
defer assertMocksCalled()
app, cleanup := cltest.NewApplicationWithKey(t,
eth.NewClientWith(rpcClient, gethClient),
)
defer cleanup()
require.NoError(t, app.Start())
gethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(cltest.EmptyMockSubscription(), nil)
client := app.NewHTTPClient()
tomlBytes := cltest.MustReadFile(t, "testdata/direct-request-spec.toml")
body, _ := json.Marshal(models.CreateJobSpecRequest{
TOML: string(tomlBytes),
})
response, cleanup := client.Post("/v2/jobs", bytes.NewReader(body))
defer cleanup()
require.Equal(t, http.StatusOK, response.StatusCode)
jb := job.SpecDB{}
require.NoError(t, app.Store.DB.Preload("DirectRequestSpec").First(&jb).Error)
jobSpec := job.SpecDB{}
err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &jobSpec)
assert.NoError(t, err)
assert.Equal(t, "example eth request event spec", jb.Name.ValueOrZero())
assert.NotNil(t, jobSpec.PipelineSpec.DotDagSource)
// Sanity check to make sure it inserted correctly
require.Equal(t, models.EIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C"), jb.DirectRequestSpec.ContractAddress)
sha := sha256.Sum256(tomlBytes)
require.Equal(t, sha[:], jb.DirectRequestSpec.OnChainJobSpecID[:])
}
func TestJobsController_Create_HappyPath_FluxMonitorSpec(t *testing.T) {
rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t)
defer assertMocksCalled()
app, cleanup := cltest.NewApplicationWithKey(t,
eth.NewClientWith(rpcClient, gethClient),
)
defer cleanup()
require.NoError(t, app.Start())
gethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(cltest.EmptyMockSubscription(), nil)
client := app.NewHTTPClient()
tomlBytes := cltest.MustReadFile(t, "testdata/flux-monitor-spec.toml")
body, _ := json.Marshal(models.CreateJobSpecRequest{
TOML: string(tomlBytes),
})
response, cleanup := client.Post("/v2/jobs", bytes.NewReader(body))
defer cleanup()
require.Equal(t, http.StatusOK, response.StatusCode)
jb := job.SpecDB{}
require.NoError(t, app.Store.DB.Preload("FluxMonitorSpec").First(&jb).Error)
jobSpec := job.SpecDB{}
err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &jobSpec)
assert.NoError(t, err)
t.Log()
assert.Equal(t, "example flux monitor spec", jb.Name.ValueOrZero())
assert.NotNil(t, jobSpec.PipelineSpec.DotDagSource)
assert.Equal(t, models.EIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42"), jb.FluxMonitorSpec.ContractAddress)
assert.Equal(t, time.Second, jb.FluxMonitorSpec.IdleTimerPeriod)
assert.Equal(t, false, jb.FluxMonitorSpec.IdleTimerDisabled)
assert.Equal(t, int32(2), jb.FluxMonitorSpec.Precision)
assert.Equal(t, float32(0.5), jb.FluxMonitorSpec.Threshold)
}
func TestJobsController_Index_HappyPath(t *testing.T) {
client, cleanup, ocrJobSpecFromFile, _, ereJobSpecFromFile, _ := setupJobSpecsControllerTestsWithJobs(t)
defer cleanup()
response, cleanup := client.Get("/v2/jobs")
defer cleanup()
cltest.AssertServerResponse(t, response, http.StatusOK)
jobSpecs := []job.SpecDB{}
err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &jobSpecs)
assert.NoError(t, err)
require.Len(t, jobSpecs, 2)
runOCRJobSpecAssertions(t, ocrJobSpecFromFile, jobSpecs[0])
runDirectRequestJobSpecAssertions(t, ereJobSpecFromFile, jobSpecs[1])
}
func TestJobsController_Show_HappyPath(t *testing.T) {
client, cleanup, ocrJobSpecFromFile, jobID, ereJobSpecFromFile, jobID2 := setupJobSpecsControllerTestsWithJobs(t)
defer cleanup()
response, cleanup := client.Get("/v2/jobs/" + fmt.Sprintf("%v", jobID))
defer cleanup()
cltest.AssertServerResponse(t, response, http.StatusOK)
ocrJobSpec := job.SpecDB{}
err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &ocrJobSpec)
assert.NoError(t, err)
runOCRJobSpecAssertions(t, ocrJobSpecFromFile, ocrJobSpec)
response, cleanup = client.Get("/v2/jobs/" + fmt.Sprintf("%v", jobID2))
defer cleanup()
cltest.AssertServerResponse(t, response, http.StatusOK)
ereJobSpec := job.SpecDB{}
err = web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &ereJobSpec)
assert.NoError(t, err)
runDirectRequestJobSpecAssertions(t, ereJobSpecFromFile, ereJobSpec)
}
func TestJobsController_Show_InvalidID(t *testing.T) {
client, cleanup, _, _, _, _ := setupJobSpecsControllerTestsWithJobs(t)
defer cleanup()
response, cleanup := client.Get("/v2/jobs/uuidLikeString")
defer cleanup()
cltest.AssertServerResponse(t, response, http.StatusUnprocessableEntity)
}
func TestJobsController_Show_NonExistentID(t *testing.T) {
client, cleanup, _, _, _, _ := setupJobSpecsControllerTestsWithJobs(t)
defer cleanup()
response, cleanup := client.Get("/v2/jobs/999999999")
defer cleanup()
cltest.AssertServerResponse(t, response, http.StatusNotFound)
}
func runOCRJobSpecAssertions(t *testing.T, ocrJobSpecFromFileDB job.SpecDB, ocrJobSpecFromServer job.SpecDB) {
ocrJobSpecFromFile := ocrJobSpecFromFileDB.OffchainreportingOracleSpec
assert.Equal(t, ocrJobSpecFromFile.ContractAddress, ocrJobSpecFromServer.OffchainreportingOracleSpec.ContractAddress)
assert.Equal(t, ocrJobSpecFromFile.P2PPeerID, ocrJobSpecFromServer.OffchainreportingOracleSpec.P2PPeerID)
assert.Equal(t, ocrJobSpecFromFile.P2PBootstrapPeers, ocrJobSpecFromServer.OffchainreportingOracleSpec.P2PBootstrapPeers)
assert.Equal(t, ocrJobSpecFromFile.IsBootstrapPeer, ocrJobSpecFromServer.OffchainreportingOracleSpec.IsBootstrapPeer)
assert.Equal(t, ocrJobSpecFromFile.EncryptedOCRKeyBundleID, ocrJobSpecFromServer.OffchainreportingOracleSpec.EncryptedOCRKeyBundleID)
assert.Equal(t, ocrJobSpecFromFile.TransmitterAddress, ocrJobSpecFromServer.OffchainreportingOracleSpec.TransmitterAddress)
assert.Equal(t, ocrJobSpecFromFile.ObservationTimeout, ocrJobSpecFromServer.OffchainreportingOracleSpec.ObservationTimeout)
assert.Equal(t, ocrJobSpecFromFile.BlockchainTimeout, ocrJobSpecFromServer.OffchainreportingOracleSpec.BlockchainTimeout)
assert.Equal(t, ocrJobSpecFromFile.ContractConfigTrackerSubscribeInterval, ocrJobSpecFromServer.OffchainreportingOracleSpec.ContractConfigTrackerSubscribeInterval)
assert.Equal(t, ocrJobSpecFromFile.ContractConfigConfirmations, ocrJobSpecFromServer.OffchainreportingOracleSpec.ContractConfigConfirmations)
assert.Equal(t, ocrJobSpecFromFileDB.Pipeline.DOTSource, ocrJobSpecFromServer.PipelineSpec.DotDagSource)
// Check that create and update dates are non-empty values.
// Empty date value is "0001-01-01 00:00:00 +0000 UTC" so we are checking for the
// millennium and century characters to be present
assert.Contains(t, ocrJobSpecFromServer.OffchainreportingOracleSpec.CreatedAt.String(), "20")
assert.Contains(t, ocrJobSpecFromServer.OffchainreportingOracleSpec.UpdatedAt.String(), "20")
}
func runDirectRequestJobSpecAssertions(t *testing.T, ereJobSpecFromFile job.SpecDB, ereJobSpecFromServer job.SpecDB) {
assert.Equal(t, ereJobSpecFromFile.DirectRequestSpec.ContractAddress, ereJobSpecFromServer.DirectRequestSpec.ContractAddress)
assert.Equal(t, ereJobSpecFromFile.Pipeline.DOTSource, ereJobSpecFromServer.PipelineSpec.DotDagSource)
// Check that create and update dates are non-empty values.
// Empty date value is "0001-01-01 00:00:00 +0000 UTC" so we are checking for the
// millennium and century characters to be present
assert.Contains(t, ereJobSpecFromServer.DirectRequestSpec.CreatedAt.String(), "20")
assert.Contains(t, ereJobSpecFromServer.DirectRequestSpec.UpdatedAt.String(), "20")
}
func setupJobsControllerTests(t *testing.T) (*cltest.TestApplication, cltest.HTTPClientCleaner, func()) {
t.Parallel()
rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t)
defer assertMocksCalled()
app, cleanup := cltest.NewApplicationWithKey(t,
eth.NewClientWith(rpcClient, gethClient),
)
require.NoError(t, app.Start())
_, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah")
require.NoError(t, app.Store.DB.Create(bridge).Error)
_, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah")
require.NoError(t, app.Store.DB.Create(bridge2).Error)
client := app.NewHTTPClient()
return app, client, cleanup
}
func setupJobSpecsControllerTestsWithJobs(t *testing.T) (cltest.HTTPClientCleaner, func(), job.SpecDB, int32, job.SpecDB, int32) {
t.Parallel()
rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t)
defer assertMocksCalled()
app, cleanup := cltest.NewApplicationWithKey(t,
eth.NewClientWith(rpcClient, gethClient),
)
require.NoError(t, app.Start())
_, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah")
require.NoError(t, app.Store.DB.Create(bridge).Error)
_, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah")
require.NoError(t, app.Store.DB.Create(bridge2).Error)
client := app.NewHTTPClient()
var ocrJobSpecFromFileDB job.SpecDB
tree, err := toml.LoadFile("testdata/oracle-spec.toml")
require.NoError(t, err)
err = tree.Unmarshal(&ocrJobSpecFromFileDB)
require.NoError(t, err)
var ocrSpec job.OffchainReportingOracleSpec
err = tree.Unmarshal(&ocrSpec)
require.NoError(t, err)
ocrJobSpecFromFileDB.OffchainreportingOracleSpec = &ocrSpec
ocrJobSpecFromFileDB.OffchainreportingOracleSpec.TransmitterAddress = &app.Key.Address
jobID, _ := app.AddJobV2(context.Background(), ocrJobSpecFromFileDB, null.String{})
var ereJobSpecFromFileDB job.SpecDB
tree, err = toml.LoadFile("testdata/direct-request-spec.toml")
require.NoError(t, err)
err = tree.Unmarshal(&ereJobSpecFromFileDB)
require.NoError(t, err)
var drSpec job.DirectRequestSpec
err = tree.Unmarshal(&drSpec)
require.NoError(t, err)
ereJobSpecFromFileDB.DirectRequestSpec = &drSpec
jobID2, _ := app.AddJobV2(context.Background(), ereJobSpecFromFileDB, null.String{})
return client, cleanup, ocrJobSpecFromFileDB, jobID, ereJobSpecFromFileDB, jobID2
}
|
TestJobsController_Create_HappyPath_OffchainReportingSpec
|
stubber_cli_test.py
|
from typing import List
import pytest
from pytest_mock import MockerFixture
from mock import MagicMock
from pathlib import Path
from click.testing import CliRunner
# module under test :
import stubber.stubber as stubber
def test_stubber_help():
# check basic commandline sanity check
runner = CliRunner()
result = runner.invoke(stubber.stubber_cli, ["--help"])
assert result.exit_code == 0
assert "Usage:" in result.output
assert "Commands:" in result.output
##########################################################################################
# clone
##########################################################################################
def test_stubber_clone(mocker: MockerFixture, tmp_path: Path):
runner = CliRunner()
mock_clone: MagicMock = mocker.patch("stubber.stubber.git.clone", autospec=True, return_value=0)
mock_fetch: MagicMock = mocker.patch("stubber.stubber.git.fetch", autospec=True, return_value=0)
result = runner.invoke(stubber.stubber_cli, ["clone"])
assert result.exit_code == 0
# either clone or fetch
assert mock_clone.call_count + mock_fetch.call_count == 2
if mock_clone.call_count > 0:
mock_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython.git", path=Path("repos/micropython"))
mock_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython-lib.git", path=Path("repos/micropython-lib"))
else:
mock_fetch.assert_any_call(Path("repos/micropython"))
mock_fetch.assert_any_call(Path("repos/micropython-lib"))
def test_stubber_clone_path(mocker: MockerFixture, tmp_path: Path):
runner = CliRunner()
mock_clone: MagicMock = mocker.MagicMock(return_value=0)
mocker.patch("stubber.stubber.git.clone", mock_clone)
m_tag = mocker.patch("stubber.stubber.git.get_tag", autospec=True)
m_dir = mocker.patch("stubber.stubber.os.mkdir", autospec=True)
# now test with path specified
result = runner.invoke(stubber.stubber_cli, ["clone", "--path", "foobar"])
assert result.exit_code == 0
assert mock_clone.call_count >= 2
mock_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython.git", path=Path("foobar/micropython"))
mock_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython-lib.git", path=Path("foobar/micropython-lib"))
assert m_tag.call_count >= 2
##########################################################################################
# switch
##########################################################################################
@pytest.mark.parametrize(
"params",
[
["switch", "--version", "latest", "--path", "foobar"],
["switch", "--version", "v1.10", "--path", "foobar"],
],
)
def test_stubber_switch(mocker: MockerFixture, params: List[str]):
runner = CliRunner()
# mock_clone: MagicMock = mocker.patch("stubber.stubber.git.clone", autospec=True, return_value=0)
# Mock Path.exists
m_fetch: MagicMock = mocker.patch("stubber.stubber.git.fetch", autospec=True, return_value=0)
m_switch: MagicMock = mocker.patch("stubber.stubber.git.switch_branch", autospec=True, return_value=0)
m_checkout: MagicMock = mocker.patch("stubber.stubber.git.checkout_tag", autospec=True, return_value=0)
m_get_tag: MagicMock = mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
m_match = mocker.patch("stubber.stubber.get_mpy.match_lib_with_mpy", autospec=True)
m_exists = mocker.patch("stubber.stubber.Path.exists", return_value=True)
result = runner.invoke(stubber.stubber_cli, params)
assert result.exit_code == 0
# fetch latest
assert m_fetch.call_count == 2
# "foobar" from params is used as the path
m_fetch.assert_any_call(Path("foobar/micropython"))
m_fetch.assert_any_call(Path("foobar/micropython-lib"))
# core
m_match.assert_called_once()
if "latest" in params:
m_switch.assert_called_once()
m_checkout.assert_not_called()
else:
m_switch.assert_not_called()
m_checkout.assert_called_once()
##########################################################################################
# minify
##########################################################################################
def test_stubber_minify(mocker: MockerFixture):
# check basic commandline sanity check
runner = CliRunner()
mock_minify: MagicMock = mocker.MagicMock(return_value=0)
mocker.patch("stubber.stubber.minify", mock_minify)
result = runner.invoke(stubber.stubber_cli, ["minify"])
assert result.exit_code == 0
mock_minify.assert_called_once_with("board/createstubs.py", "./minified", True, False, False)
def test_stubber_minify_all(mocker: MockerFixture):
# check basic commandline sanity check
runner = CliRunner()
mock_minify: MagicMock = mocker.MagicMock(return_value=0)
mocker.patch("stubber.stubber.minify", mock_minify)
result = runner.invoke(stubber.stubber_cli, ["minify", "--all"])
assert result.exit_code == 0
assert mock_minify.call_count == 3
mock_minify.assert_any_call("board/createstubs.py", "./minified", True, False, False)
mock_minify.assert_any_call("board/createstubs_db.py", "./minified", True, False, False)
mock_minify.assert_any_call("board/createstubs_mem.py", "./minified", True, False, False)
##########################################################################################
# stub
##########################################################################################
def test_stubber_stub(mocker: MockerFixture):
# check basic commandline sanity check
runner = CliRunner()
# mock: MagicMock = mocker.MagicMock(return_value=True)
mock: MagicMock = mocker.patch("stubber.stubber.utils.generate_pyi_files", autospec=True, return_value=True)
# fake run on current folder
result = runner.invoke(stubber.stubber_cli, ["stub", "--source", "."])
mock.assert_called_once_with(Path("."))
assert result.exit_code == 0
##########################################################################################
# get-frozen
##########################################################################################
def test_stubber_get_frozen(mocker: MockerFixture, tmp_path: Path):
# check basic commandline sanity check
runner = CliRunner()
mock_version: MagicMock = mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
mock: MagicMock = mocker.patch("stubber.stubber.get_mpy.get_frozen", autospec=True)
mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
# fake run - need to ensure that there is a destination folder
result = runner.invoke(stubber.stubber_cli, ["get-frozen", "--stub-folder", tmp_path.as_posix()])
assert result.exit_code == 0
# FIXME : test fails in CI
mock.assert_called_once()
mock_version.assert_called_once()
mock_post.assert_called_once_with([tmp_path / "micropython-v1_42-frozen"], True, True)
##########################################################################################
# get-lobo
##########################################################################################
def test_stubber_get_lobo(mocker: MockerFixture, tmp_path: Path):
# check basic commandline sanity check
runner = CliRunner()
mock: MagicMock = mocker.patch("stubber.stubber.get_lobo.get_frozen", autospec=True)
mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
# fake run
result = runner.invoke(stubber.stubber_cli, ["get-lobo", "--stub-folder", tmp_path.as_posix()])
mock.assert_called_once()
mock_post.assert_called_once()
mock_post.assert_called_once_with([tmp_path / "loboris-v3_2_24-frozen"], True, True)
assert result.exit_code == 0
##########################################################################################
# get-core
##########################################################################################
def test_stubber_get_core(mocker: MockerFixture, tmp_path: Path):
# check basic commandline sanity check
runner = CliRunner()
mock: MagicMock = mocker.patch("stubber.stubber.get_cpython.get_core", autospec=True)
mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
# fake run
result = runner.invoke(stubber.stubber_cli, ["get-core", "--stub-folder", tmp_path.as_posix()])
assert result.exit_code == 0
# process is called twice
assert mock.call_count == 2
# post is called once
mock_post.assert_called_with([tmp_path / "cpython_core-pycopy", tmp_path / "cpython_core-micropython"], True, True)
##########################################################################################
# get-docstubs
##########################################################################################
def test_stubber_get_docstubs(mocker: MockerFixture, tmp_path: Path):
# check basic commandline sanity check
runner = CliRunner()
mock_version: MagicMock = mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
mock: MagicMock = mocker.patch("stubber.stubber.generate_from_rst", autospec=True)
mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
# fake run
result = runner.invoke(stubber.stubber_cli, ["get-docstubs", "--stub-folder", tmp_path.as_posix()])
assert result.exit_code == 0
# generate_from_rst is called once
assert mock.call_count == 1
mock.assert_called_once()
|
# post is called once
mock_post.assert_called_with([tmp_path / "micropython-v1_42-docstubs"], False, True)
##########################################################################################
# get-lobo
##########################################################################################
def test_stubber_fallback(mocker: MockerFixture, tmp_path: Path):
# check basic commandline sanity check
runner = CliRunner()
mock: MagicMock = mocker.patch("stubber.stubber.update_fallback", autospec=True)
# mock2: MagicMock = mocker.patch("stubber.update_fallback.update_fallback", autospec=True)
# from .update_fallback import update_fallback,
# fake run
result = runner.invoke(stubber.stubber_cli, ["update-fallback", "--stub-folder", tmp_path.as_posix()])
mock.assert_called_once()
assert result.exit_code == 0
|
assert mock_version.call_count >= 1
|
modnotes.py
|
"""
Utilities for managing moderator notes about users.
"""
import re
from io import BytesIO
import discord
from redbot.core import checks
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import inline, box, pagify
from tsutils import CogSettings
class ModNotes(commands.Cog):
def __init__(self, bot: Red, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bot = bot
self.settings = ModNotesSettings("modnotes")
@commands.group(aliases=["usernote"])
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def usernotes(self, ctx):
|
async def red_get_data_for_user(self, *, user_id):
"""Get a user's personal data."""
data = "No data is stored for user with ID {}.\n".format(user_id)
return {"user_data.txt": BytesIO(data.encode())}
async def red_delete_data_for_user(self, *, requester, user_id):
"""Delete a user's personal data.
No personal data is stored in this cog.
"""
return
@usernotes.command()
@checks.mod_or_permissions(manage_guild=True)
async def get(self, ctx, user: discord.User):
"""Sends the notes for a user."""
notes = self.settings.getNotesForUser(ctx.guild.id, user.id)
if not notes:
await ctx.send(box('No notes for {}'.format(user.name)))
return
for idx, note in enumerate(notes):
await ctx.send(inline('Note {} of {}:'.format(idx + 1, len(notes))))
await ctx.send(box(note))
@usernotes.command()
@checks.mod_or_permissions(manage_guild=True)
async def add(self, ctx, user: discord.User, *, note_text: str):
"""Add a note to a user."""
timestamp = str(ctx.message.created_at)[:-7]
msg = 'Added by {} ({}): {}'.format(ctx.author.name, timestamp, note_text)
server_id = ctx.guild.id
notes = self.settings.addNoteForUser(server_id, user.id, msg)
await ctx.send(inline('Done. User {} now has {} notes'.format(user.name, len(notes))))
@usernotes.command()
@checks.mod_or_permissions(manage_guild=True)
async def delete(self, ctx, user: discord.User, note_num: int):
"""Delete a specific note for a user."""
notes = self.settings.getNotesForUser(ctx.guild.id, user.id)
if len(notes) < note_num:
await ctx.send(box('Note not found for {}'.format(user.name)))
return
note = notes[note_num - 1]
notes.remove(note)
self.settings.setNotesForUser(ctx.guild.id, user.id, notes)
await ctx.send(inline('Removed note {}. User has {} remaining.'.format(note_num, len(notes))))
await ctx.send(box(note))
@usernotes.command()
@checks.mod_or_permissions(manage_guild=True)
async def list(self, ctx):
"""Lists all users and note counts for the server."""
user_notes = self.settings.getUserNotes(ctx.guild.id)
msg = 'Notes for {} users'.format(len(user_notes))
for user_id, notes in user_notes.items():
user = ctx.guild.get_member(user_id)
user_text = '{} ({})'.format(user.name, user.id) if user else user_id
msg += '\n\t{} : {}'.format(len(notes), user_text)
for page in pagify(msg):
await ctx.send(box(page))
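# Illustrative command usage (hypothetical; "[p]" stands for the bot's configured
# prefix, and the subcommands map to the handlers defined above):
#   [p]usernotes add @SomeUser was warned for spamming
#   [p]usernotes get @SomeUser
#   [p]usernotes delete @SomeUser 1
#   [p]usernotes list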
class ModNotesSettings(CogSettings):
def make_default_settings(self):
config = {
'servers': {}
}
return config
def servers(self):
return self.bot_settings['servers']
def getServer(self, server_id):
servers = self.servers()
if server_id not in servers:
servers[server_id] = {}
return servers[server_id]
def getUserNotes(self, server_id):
server = self.getServer(server_id)
key = 'user_notes'
if key not in server:
server[key] = {}
return server[key]
def getNotesForUser(self, server_id, user_id):
user_notes = self.getUserNotes(server_id)
return user_notes.get(user_id, [])
def setNotesForUser(self, server_id, user_id, notes):
user_notes = self.getUserNotes(server_id)
if notes:
user_notes[user_id] = notes
else:
user_notes.pop(user_id, None)
self.save_settings()
return notes
def addNoteForUser(self, server_id, user_id, note: str):
notes = self.getNotesForUser(server_id, user_id)
notes.append(note)
self.setNotesForUser(server_id, user_id, notes)
return notes
|
"""Moderator notes for users.
This module allows you to create notes to share between moderators.
"""
|
postgres_storage.rs
|
extern crate owning_ref;
extern crate sodiumoxide;
extern crate r2d2;
extern crate r2d2_postgres;
use postgres;
use self::r2d2_postgres::{TlsMode, PostgresConnectionManager};
use serde_json;
use self::owning_ref::OwningHandle;
use std::rc::Rc;
use std::time::Duration;
use errors::wallet::WalletStorageError;
use errors::common::CommonError;
use wql::language;
use wql::query;
use wql::transaction;
use wql::storage::{StorageIterator, WalletStorage, StorageRecord, EncryptedValue, Tag, TagName};
fn default_true() -> bool { true }
fn default_false() -> bool { false }
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RecordOptions {
#[serde(default = "default_false")]
retrieve_type: bool,
#[serde(default = "default_true")]
retrieve_value: bool,
#[serde(default = "default_false")]
retrieve_tags: bool
}
impl RecordOptions {
pub fn id() -> String {
let options = RecordOptions {
retrieve_type: false,
retrieve_value: false,
retrieve_tags: false
};
serde_json::to_string(&options).unwrap()
}
pub fn id_value() -> String {
let options = RecordOptions {
retrieve_type: false,
retrieve_value: true,
retrieve_tags: false
};
serde_json::to_string(&options).unwrap()
}
}
impl Default for RecordOptions {
fn default() -> RecordOptions {
RecordOptions {
retrieve_type: false,
retrieve_value: true,
retrieve_tags: false,
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct SearchOptions {
#[serde(default = "default_true")]
retrieve_records: bool,
#[serde(default = "default_false")]
retrieve_total_count: bool,
#[serde(default = "default_false")]
retrieve_type: bool,
#[serde(default = "default_true")]
retrieve_value: bool,
#[serde(default = "default_false")]
retrieve_tags: bool
}
impl SearchOptions {
pub fn id_value() -> String {
let options = SearchOptions {
retrieve_records: true,
retrieve_total_count: true,
retrieve_type: true,
retrieve_value: true,
retrieve_tags: false
};
serde_json::to_string(&options).unwrap()
}
}
impl Default for SearchOptions {
fn default() -> SearchOptions {
SearchOptions {
retrieve_records: true,
retrieve_total_count: false,
retrieve_type: false,
retrieve_value: true,
retrieve_tags: false,
}
}
}
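// Illustrative sketch of the JSON these option structs deserialize from. Field
// names are camelCase because of the serde rename attribute above; the values
// shown are only an example, not a required configuration:
//
//   RecordOptions: {"retrieveType": false, "retrieveValue": true, "retrieveTags": false}
//   SearchOptions: {"retrieveRecords": true, "retrieveTotalCount": false,
//                   "retrieveType": false, "retrieveValue": true, "retrieveTags": false}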
const _POSTGRES_DB: &str = "postgres";
const _WALLETS_DB: &str = "wallets";
const _PLAIN_TAGS_QUERY: &str = "SELECT name, value from tags_plaintext where item_id = $1";
const _ENCRYPTED_TAGS_QUERY: &str = "SELECT name, value from tags_encrypted where item_id = $1";
const _PLAIN_TAGS_QUERY_MULTI: &str = "SELECT name, value from tags_plaintext where item_id = $1 and wallet_id = $2";
const _ENCRYPTED_TAGS_QUERY_MULTI: &str = "SELECT name, value from tags_encrypted where item_id = $1 and wallet_id = $2";
const _CREATE_WALLET_DATABASE: &str = "CREATE DATABASE \"$1\"";
const _CREATE_WALLETS_DATABASE: &str = "CREATE DATABASE wallets";
// Note: the wallet id was previously limited to 64 characters by the Postgres database name length; the same restriction is kept here
const _CREATE_SCHEMA: [&str; 12] = [
"CREATE TABLE IF NOT EXISTS metadata (
id BIGSERIAL PRIMARY KEY,
value BYTEA NOT NULL
)",
"CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_values ON metadata(value)",
"CREATE TABLE IF NOT EXISTS items(
id BIGSERIAL PRIMARY KEY,
type BYTEA NOT NULL,
name BYTEA NOT NULL,
value BYTEA NOT NULL,
key BYTEA NOT NULL
)",
"CREATE UNIQUE INDEX IF NOT EXISTS ux_items_type_name ON items(type, name)",
"CREATE TABLE IF NOT EXISTS tags_encrypted(
name BYTEA NOT NULL,
value BYTEA NOT NULL,
item_id BIGINT NOT NULL,
PRIMARY KEY(name, item_id),
FOREIGN KEY(item_id)
REFERENCES items(id)
ON DELETE CASCADE
ON UPDATE CASCADE
)",
"CREATE INDEX IF NOT EXISTS ix_tags_encrypted_name ON tags_encrypted(name)",
"CREATE INDEX IF NOT EXISTS ix_tags_encrypted_value ON tags_encrypted(value)",
"CREATE INDEX IF NOT EXISTS ix_tags_encrypted_item_id ON tags_encrypted(item_id)",
"CREATE TABLE IF NOT EXISTS tags_plaintext(
name BYTEA NOT NULL,
value TEXT NOT NULL,
item_id BIGINT NOT NULL,
PRIMARY KEY(name, item_id),
FOREIGN KEY(item_id)
REFERENCES items(id)
ON DELETE CASCADE
ON UPDATE CASCADE
)",
"CREATE INDEX IF NOT EXISTS ix_tags_plaintext_name ON tags_plaintext(name)",
"CREATE INDEX IF NOT EXISTS ix_tags_plaintext_value ON tags_plaintext(value)",
"CREATE INDEX IF NOT EXISTS ix_tags_plaintext_item_id ON tags_plaintext(item_id)"
];
const _CREATE_SCHEMA_MULTI: [&str; 14] = [
"CREATE TABLE IF NOT EXISTS metadata (
wallet_id VARCHAR(64) NOT NULL,
value BYTEA NOT NULL,
PRIMARY KEY(wallet_id)
)",
"CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_wallet_id_id ON metadata(wallet_id)",
"CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_values ON metadata(wallet_id, value)",
"CREATE TABLE IF NOT EXISTS items(
wallet_id VARCHAR(64) NOT NULL,
id BIGSERIAL NOT NULL,
type BYTEA NOT NULL,
name BYTEA NOT NULL,
value BYTEA NOT NULL,
key BYTEA NOT NULL,
PRIMARY KEY(wallet_id, id)
)",
"CREATE UNIQUE INDEX IF NOT EXISTS ux_items_wallet_id_id ON items(wallet_id, id)",
"CREATE UNIQUE INDEX IF NOT EXISTS ux_items_type_name ON items(wallet_id, type, name)",
"CREATE TABLE IF NOT EXISTS tags_encrypted(
wallet_id VARCHAR(64) NOT NULL,
name BYTEA NOT NULL,
value BYTEA NOT NULL,
item_id BIGINT NOT NULL,
PRIMARY KEY(wallet_id, name, item_id),
FOREIGN KEY(wallet_id, item_id)
REFERENCES items(wallet_id, id)
ON DELETE CASCADE
ON UPDATE CASCADE
)",
"CREATE INDEX IF NOT EXISTS ix_tags_encrypted_name ON tags_encrypted(wallet_id, name)",
"CREATE INDEX IF NOT EXISTS ix_tags_encrypted_value ON tags_encrypted(wallet_id, value)",
"CREATE INDEX IF NOT EXISTS ix_tags_encrypted_wallet_id_item_id ON tags_encrypted(wallet_id, item_id)",
"CREATE TABLE IF NOT EXISTS tags_plaintext(
wallet_id VARCHAR(64) NOT NULL,
name BYTEA NOT NULL,
value TEXT NOT NULL,
item_id BIGINT NOT NULL,
PRIMARY KEY(wallet_id, name, item_id),
FOREIGN KEY(wallet_id, item_id)
REFERENCES items(wallet_id, id)
ON DELETE CASCADE
ON UPDATE CASCADE
)",
"CREATE INDEX IF NOT EXISTS ix_tags_plaintext_name ON tags_plaintext(wallet_id, name)",
"CREATE INDEX IF NOT EXISTS ix_tags_plaintext_value ON tags_plaintext(wallet_id, value)",
"CREATE INDEX IF NOT EXISTS ix_tags_plaintext_wallet_id_item_id ON tags_plaintext(wallet_id, item_id)"
];
const _DROP_WALLET_DATABASE: &str = "DROP DATABASE \"$1\"";
const _DROP_SCHEMA: [&str; 4] = [
"DROP TABLE tags_plaintext",
"DROP TABLE tags_encrypted",
"DROP TABLE items",
"DROP TABLE metadata"
];
const _DELETE_WALLET_MULTI: [&str; 4] = [
"DELETE FROM tags_plaintext WHERE wallet_id = $1",
"DELETE FROM tags_encrypted WHERE wallet_id = $1",
"DELETE FROM items WHERE wallet_id = $1",
"DELETE FROM metadata WHERE wallet_id = $1"
];
#[derive(Debug)]
struct TagRetriever<'a> {
plain_tags_stmt: postgres::stmt::Statement<'a>,
encrypted_tags_stmt: postgres::stmt::Statement<'a>,
wallet_id: Option<String>,
}
type TagRetrieverOwned = OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>, Box<TagRetriever<'static>>>;
impl<'a> TagRetriever<'a> {
fn new_owned(conn: Rc<r2d2::PooledConnection<PostgresConnectionManager>>, wallet_id: Option<String>) -> Result<TagRetrieverOwned, WalletStorageError> {
OwningHandle::try_new(conn.clone(), |conn| -> Result<_, postgres::Error> {
let (plain_tags_stmt, encrypted_tags_stmt) = unsafe {
match wallet_id {
Some(_) => ((*conn).prepare(_PLAIN_TAGS_QUERY_MULTI)?,
(*conn).prepare(_ENCRYPTED_TAGS_QUERY_MULTI)?),
None => ((*conn).prepare(_PLAIN_TAGS_QUERY)?,
(*conn).prepare(_ENCRYPTED_TAGS_QUERY)?)
}
};
let tr = TagRetriever {
plain_tags_stmt,
encrypted_tags_stmt,
wallet_id
};
Ok(Box::new(tr))
}).map_err(WalletStorageError::from)
}
fn retrieve(&mut self, id: i64) -> Result<Vec<Tag>, WalletStorageError> {
let mut tags = Vec::new();
let plain_results = match self.wallet_id {
Some(ref w_id) => self.plain_tags_stmt.query(&[&id, &w_id])?,
None => self.plain_tags_stmt.query(&[&id])?
};
let mut iter_plain = plain_results.iter();
while let Some(res) = iter_plain.next() {
let row = res;
tags.push(Tag::PlainText(row.get(0), row.get(1)));
}
let encrypted_results = match self.wallet_id {
Some(ref w_id) => self.encrypted_tags_stmt.query(&[&id, &w_id])?,
None => self.encrypted_tags_stmt.query(&[&id])?
};
let mut iter_encrypted = encrypted_results.iter();
while let Some(res) = iter_encrypted.next() {
let row = res;
tags.push(Tag::Encrypted(row.get(0), row.get(1)));
}
Ok(tags)
}
}
struct PostgresStorageIterator {
rows: Option<
OwningHandle<
OwningHandle<
Rc<r2d2::PooledConnection<PostgresConnectionManager>>,
Box<postgres::stmt::Statement<'static>>>,
Box<postgres::rows::Rows<>>>>,
tag_retriever: Option<TagRetrieverOwned>,
options: RecordOptions,
total_count: Option<usize>,
iter_count: usize,
}
impl PostgresStorageIterator {
fn new(stmt: Option<OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>, Box<postgres::stmt::Statement<'static>>>>,
args: &[&dyn postgres::types::ToSql],
options: RecordOptions,
tag_retriever: Option<TagRetrieverOwned>,
total_count: Option<usize>) -> Result<PostgresStorageIterator, WalletStorageError> {
let mut iter = PostgresStorageIterator {
rows: None,
tag_retriever,
options,
total_count,
iter_count: 0
};
if let Some(stmt) = stmt {
iter.rows = Some(OwningHandle::try_new(
stmt, |stmt|
unsafe {
(*(stmt as *mut postgres::stmt::Statement)).query(args).map(Box::new)
},
)?);
}
Ok(iter)
}
}
impl StorageIterator for PostgresStorageIterator {
fn next(&mut self) -> Result<Option<StorageRecord>, WalletStorageError> {
// if records are not requested.
if self.rows.is_none() {
return Ok(None);
}
// TODO not sure if iter().nth() is the most efficient way to iterate through the result set
// TODO investigate if the Iter object can be cached between calls to next()
match self.rows.as_mut().unwrap().iter().nth(self.iter_count) {
Some(row) => {
self.iter_count += 1;
let name = row.get(1);
let value = if self.options.retrieve_value {
Some(EncryptedValue::new(row.get(2), row.get(3)))
} else {
None
};
let tags = if self.options.retrieve_tags {
match self.tag_retriever {
Some(ref mut tag_retriever) => Some(tag_retriever.retrieve(row.get(0))?),
None => return Err(WalletStorageError::CommonError(
CommonError::InvalidState("Fetch tags option set and tag retriever is None".to_string())
))
}
} else {
None
};
let type_ = if self.options.retrieve_type {
Some(row.get(4))
} else {
None
};
Ok(Some(StorageRecord::new(name, value, type_, tags)))
}
//Some(Err(err)) => Err(WalletStorageError::from(err)),
None => Ok(None)
}
}
fn get_total_count(&self) -> Result<Option<usize>, WalletStorageError> {
Ok(self.total_count)
}
}
#[derive(Deserialize, Debug)]
pub struct PostgresConfig {
url: String,
tls: Option<String>, // default off
max_connections: Option<u32>, // default 5
min_idle_time: Option<u32>, // default 0
connection_timeout: Option<u64>, // default 5
wallet_scheme: Option<WalletScheme>, // default DatabasePerWallet
}
impl PostgresConfig {
fn tls(&self) -> postgres::TlsMode {
match &self.tls {
Some(tls) => match tls.as_ref() {
"None" => postgres::TlsMode::None,
// TODO add tls support for connecting to postgres db
//"Prefer" => postgres::TlsMode::Prefer(&postgres::Connection),
//"Require" => postgres::TlsMode::Require(&postgres::Connection),
_ => postgres::TlsMode::None
},
None => postgres::TlsMode::None
}
}
fn r2d2_tls(&self) -> TlsMode {
match &self.tls {
Some(tls) => match tls.as_ref() {
"None" => TlsMode::None,
// TODO add tls support for connecting to postgres db
//"Prefer" => TlsMode::Prefer(&postgres::Connection),
//"Require" => TlsMode::Require(&postgres::Connection),
_ => TlsMode::None
},
None => TlsMode::None
}
}
fn max_connections(&self) -> u32 {
match &self.max_connections {
Some(conn) => *conn,
None => 5
}
}
fn min_idle_time(&self) -> u32 {
match &self.min_idle_time {
Some(idle) => *idle,
None => 0
}
}
fn connection_timeout(&self) -> u64 {
match &self.connection_timeout {
Some(timeout) => *timeout,
None => 5
}
}
}
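// Illustrative sketch of a config JSON this struct accepts (placeholder values,
// not recommendations; omitted fields fall back to the defaults implemented above):
//
//   {
//     "url": "localhost:5432",
//     "tls": "None",
//     "max_connections": 5,
//     "min_idle_time": 0,
//     "connection_timeout": 5,
//     "wallet_scheme": "MultiWalletSingleTable"
//   }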
#[derive(Deserialize, Debug)]
pub struct PostgresCredentials {
account: String,
password: String,
admin_account: Option<String>,
admin_password: Option<String>,
}
#[derive(Debug)]
pub struct PostgresStorage {
pool: r2d2::Pool<PostgresConnectionManager>,
wallet_id: String
}
pub trait WalletStorageType {
fn init_storage(&self, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError>;
fn create_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>, metadata: &[u8]) -> Result<(), WalletStorageError>;
fn open_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<Box<PostgresStorage>, WalletStorageError>;
fn delete_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError>;
}
#[derive(Deserialize, Debug)]
#[derive(Copy, Clone)]
enum WalletScheme {
DatabasePerWallet,
MultiWalletSingleTable,
MultiWalletMultiTable
}
trait WalletStrategy {
// initialize storage based on wallet storage strategy
fn init_storage(&self, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError>;
// initialize a single wallet based on wallet storage strategy
fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError>;
// open a wallet based on wallet storage strategy
fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError>;
// delete a single wallet based on wallet storage strategy
fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError>;
// determine physical table name based on wallet strategy
fn table_name(&self, id: &str, base_name: &str) -> String;
// determine additional query parameters based on wallet strategy
fn query_qualifier(&self) -> Option<String>;
}
pub struct PostgresStorageType {}
struct DatabasePerWalletStrategy {}
struct MultiWalletSingleTableStrategy {}
struct MultiWalletMultiTableStrategy {}
impl WalletStrategy for DatabasePerWalletStrategy {
// initialize storage based on wallet storage strategy
fn init_storage(&self, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
// no-op
Ok(())
}
// initialize a single wallet based on wallet storage strategy
fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError> {
// create database for wallet
// if admin user and password aren't provided then bail
if credentials.admin_account.is_none() || credentials.admin_password.is_none() {
return Ok(())
}
let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials);
let url = PostgresStorageType::_postgres_url(id, &config, &credentials);
let conn = postgres::Connection::connect(&url_base[..], config.tls())?;
let create_db_sql = str::replace(_CREATE_WALLET_DATABASE, "$1", id);
let mut schema_result = match conn.execute(&create_db_sql, &[]) {
Ok(_) => Ok(()),
Err(_error) => {
Err(WalletStorageError::AlreadyExists)
}
};
conn.finish()?;
let conn = match postgres::Connection::connect(&url[..], config.tls()) {
Ok(conn) => conn,
Err(error) => {
return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
}
};
for sql in &_CREATE_SCHEMA {
match schema_result {
Ok(_) => schema_result = match conn.execute(sql, &[]) {
Ok(_) => Ok(()),
Err(error) => {
Err(WalletStorageError::IOError(format!("Error occurred while creating wallet schema: {}", error)))
}
},
_ => ()
}
};
let ret = match schema_result {
Ok(_) => {
match conn.execute("INSERT INTO metadata(value) VALUES($1)
ON CONFLICT (value) DO UPDATE SET value = excluded.value",
&[&metadata]) {
Ok(_) => Ok(()),
Err(error) => {
//std::fs::remove_file(db_path)?;
Err(WalletStorageError::IOError(format!("Error occurred while inserting the keys: {}", error)))
}
}
},
Err(error) => Err(error)
};
conn.finish()?;
ret
}
// open a wallet based on wallet storage strategy
fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> {
let url = PostgresStorageType::_postgres_url(id, &config, &credentials);
// don't need a connection, but connect just to verify we can
let _conn = match postgres::Connection::connect(&url[..], config.tls()) {
Ok(conn) => conn,
Err(_) => return Err(WalletStorageError::NotFound)
};
// TODO close _conn
let manager = match PostgresConnectionManager::new(&url[..], config.r2d2_tls()) {
Ok(manager) => manager,
Err(_) => return Err(WalletStorageError::NotFound)
};
let pool = match r2d2::Pool::builder().min_idle(Some(config.min_idle_time())).max_size(config.max_connections()).idle_timeout(Some(Duration::new(config.connection_timeout(), 0))).build(manager) {
Ok(pool) => pool,
Err(_) => return Err(WalletStorageError::NotFound)
};
Ok(Box::new(PostgresStorage {
pool: pool,
wallet_id: id.to_string()
}))
}
// delete a single wallet based on wallet storage strategy
fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
// if admin user and password aren't provided then bail
if credentials.admin_account.is_none() || credentials.admin_password.is_none() {
return Ok(())
}
let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials);
let url = PostgresStorageType::_postgres_url(id, &config, &credentials);
match postgres::Connection::connect(&url[..], config.tls()) {
Ok(conn) => {
for sql in &_DROP_SCHEMA {
match conn.execute(sql, &[]) {
Ok(_) => (),
Err(_) => ()
};
}
let _ret = conn.finish();
()
},
Err(_) => return Err(WalletStorageError::NotFound)
};
let conn = postgres::Connection::connect(url_base, config.tls())?;
let drop_db_sql = str::replace(_DROP_WALLET_DATABASE, "$1", id);
let ret = match conn.execute(&drop_db_sql, &[]) {
Ok(_) => Ok(()),
Err(_) => Ok(())
};
conn.finish()?;
ret
}
// determine physical table name based on wallet strategy
fn table_name(&self, _id: &str, base_name: &str) -> String {
// TODO
base_name.to_owned()
}
// determine additional query parameters based on wallet strategy
fn query_qualifier(&self) -> Option<String> {
// TODO
None
}
}
impl WalletStrategy for MultiWalletSingleTableStrategy {
// initialize storage based on wallet storage strategy
fn init_storage(&self, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
// create database and tables for storage
// if admin user and password aren't provided then bail
if credentials.admin_account.is_none() || credentials.admin_password.is_none() {
return Ok(())
}
let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials);
let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials);
let conn = postgres::Connection::connect(&url_base[..], postgres::TlsMode::None)?;
if let Err(error) = conn.execute(&_CREATE_WALLETS_DATABASE, &[]) {
if error.code() != Some(&postgres::error::DUPLICATE_DATABASE) {
conn.finish()?;
return Err(WalletStorageError::IOError(format!("Error occurred while creating the database: {}", error)))
} else {
// if database already exists, assume tables are created already and return
conn.finish()?;
return Ok(());
}
}
conn.finish()?;
let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) {
Ok(conn) => conn,
Err(error) => {
return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
}
};
for sql in &_CREATE_SCHEMA_MULTI {
if let Err(error) = conn.execute(sql, &[]) {
conn.finish()?;
return Err(WalletStorageError::IOError(format!("Error occurred while creating wallet schema: {}", error)));
}
}
conn.finish()?;
Ok(())
}
// initialize a single wallet based on wallet storage strategy
fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError> {
// insert metadata
let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials);
let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) {
Ok(conn) => conn,
Err(error) => {
return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
}
};
// We allow error on conflict since this indicates AlreadyExists error
let ret = match conn.execute("INSERT INTO metadata(wallet_id, value) VALUES($1, $2)", &[&id, &metadata]) {
Ok(_) => Ok(()),
Err(error) => {
if error.code() == Some(&postgres::error::UNIQUE_VIOLATION) {
Err(WalletStorageError::AlreadyExists)
} else {
Err(WalletStorageError::IOError(format!("Error occurred while inserting into metadata: {}", error)))
}
}
};
conn.finish()?;
ret
}
// open a wallet based on wallet storage strategy
fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> {
let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials);
// don't need a connection, but connect just to verify we can
let conn = match postgres::Connection::connect(&url[..], config.tls()) {
Ok(conn) => conn,
Err(_) => return Err(WalletStorageError::NotFound)
};
// select metadata for this wallet to ensure it exists
let res: Result<Vec<u8>, WalletStorageError> = {
let mut rows = conn.query(
"SELECT value FROM metadata WHERE wallet_id = $1",
&[&id]);
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
}
};
match res {
Ok(_entity) => (),
Err(_) => return Err(WalletStorageError::NotFound)
};
// TODO close conn
let manager = match PostgresConnectionManager::new(&url[..], config.r2d2_tls()) {
Ok(manager) => manager,
Err(_) => return Err(WalletStorageError::NotFound)
};
let pool = match r2d2::Pool::builder().min_idle(Some(config.min_idle_time())).max_size(config.max_connections()).idle_timeout(Some(Duration::new(config.connection_timeout(), 0))).build(manager) {
Ok(pool) => pool,
Err(_) => return Err(WalletStorageError::NotFound)
};
Ok(Box::new(PostgresStorage {
pool: pool,
wallet_id: id.to_string()
}))
}
// delete a single wallet based on wallet storage strategy
fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
let url = PostgresStorageType::_postgres_url(&_WALLETS_DB, &config, &credentials);
let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) {
Ok(conn) => conn,
Err(error) => {
return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
}
};
let mut ret = Ok(());
for sql in &_DELETE_WALLET_MULTI {
ret = match conn.execute(sql, &[&id]) {
Ok(row_count) => {
if row_count == 0 {
Err(WalletStorageError::NotFound)
} else {
Ok(())
}
},
Err(error) => {
Err(WalletStorageError::IOError(format!("Error occurred while deleting wallet: {}", error)))
}
}
};
conn.finish()?;
return ret
}
// determine physical table name based on wallet strategy
fn table_name(&self, _id: &str, base_name: &str) -> String {
// TODO
base_name.to_owned()
}
// determine additional query parameters based on wallet strategy
fn query_qualifier(&self) -> Option<String> {
// TODO
Some("AND wallet_id = $$".to_owned())
}
}
impl WalletStrategy for MultiWalletMultiTableStrategy {
// initialize storage based on wallet storage strategy
fn init_storage(&self, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
// create database for storage
// TODO
Ok(())
}
// initialize a single wallet based on wallet storage strategy
fn create_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials, _metadata: &[u8]) -> Result<(), WalletStorageError> {
// create tables for wallet storage
// TODO
Ok(())
}
// open a wallet based on wallet storage strategy
fn open_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> {
// TODO
Err(WalletStorageError::NotFound)
}
// delete a single wallet based on wallet storage strategy
fn delete_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
// TODO
Ok(())
}
// determine physical table name based on wallet strategy
fn table_name(&self, _id: &str, base_name: &str) -> String {
// TODO
base_name.to_owned()
}
// determine additional query parameters based on wallet strategy
fn query_qualifier(&self) -> Option<String> {
// TODO
None
}
}
static mut SELECTED_STRATEGY: &dyn WalletStrategy = &DatabasePerWalletStrategy{};
impl PostgresStorageType {
pub fn new() -> PostgresStorageType {
PostgresStorageType {}
}
fn _admin_postgres_url(config: &PostgresConfig, credentials: &PostgresCredentials) -> String {
let mut url_base = "postgresql://".to_owned();
match credentials.admin_account {
Some(ref account) => url_base.push_str(&account[..]),
None => ()
}
url_base.push_str(":");
match credentials.admin_password {
Some(ref password) => url_base.push_str(&password[..]),
None => ()
}
url_base.push_str("@");
url_base.push_str(&config.url[..]);
url_base
}
fn _base_postgres_url(config: &PostgresConfig, credentials: &PostgresCredentials) -> String {
let mut url_base = "postgresql://".to_owned();
url_base.push_str(&credentials.account[..]);
url_base.push_str(":");
url_base.push_str(&credentials.password[..]);
url_base.push_str("@");
url_base.push_str(&config.url[..]);
url_base
}
fn _postgres_url(id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> String {
let mut url_base = PostgresStorageType::_base_postgres_url(config, credentials);
url_base.push_str("/");
url_base.push_str(id);
url_base
}
}
impl WalletStorage for PostgresStorage {
///
/// Tries to fetch values and/or tags from the storage.
/// Returns Result with a StorageRecord object which holds the requested data in case of success or
/// Result with WalletStorageError in case of failure.
///
///
/// # Arguments
///
/// * `type_` - type_ of the item in storage
/// * `id` - id of the item in storage
/// * `options` - JSON containing what needs to be fetched.
/// Example: {"retrieveValue": true, "retrieveTags": true}
///
/// # Returns
///
/// Result that can be either:
///
/// * `StorageRecord` - Contains name, optional value and optional tags
/// * `WalletStorageError`
///
/// # Errors
///
/// Any of the following `WalletStorageError` types of errors can be thrown by this method:
///
/// * `WalletStorageError::Closed` - Storage is closed
/// * `WalletStorageError::ItemNotFound` - Item is not found in database
/// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query
///
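/// # Example (sketch)
///
/// A minimal, hypothetical usage sketch; `storage`, the type/id byte strings and the error
/// propagation are illustrative assumptions, not part of the original documentation:
///
/// ```ignore
/// let record = storage.get(b"cred_type", b"cred_id",
/// r#"{"retrieveValue": true, "retrieveTags": true}"#)?;
/// assert!(record.value.is_some()); // present because retrieveValue was true
/// ```
///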
fn get(&self, type_: &[u8], id: &[u8], options: &str) -> Result<StorageRecord, WalletStorageError> {
let options: RecordOptions = if options == "{}" { // FIXME:
RecordOptions::default()
} else {
serde_json::from_str(options)?
};
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let res: Result<(i64, Vec<u8>, Vec<u8>), WalletStorageError> = {
let mut rows = match query_qualifier {
Some(_) => conn.query(
"SELECT id, value, key FROM items where type = $1 AND name = $2 AND wallet_id = $3",
&[&type_.to_vec(), &id.to_vec(), &self.wallet_id]),
None => conn.query(
"SELECT id, value, key FROM items where type = $1 AND name = $2",
&[&type_.to_vec(), &id.to_vec()])
};
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok((row.get(0), row.get(1), row.get(2))),
None => Err(WalletStorageError::ItemNotFound)
}
};
let item = match res {
Ok(entity) => entity,
Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
Err(err) => return Err(WalletStorageError::from(err))
};
let value = if options.retrieve_value
{ Some(EncryptedValue::new(item.1, item.2)) } else { None };
let type_ = if options.retrieve_type { Some(type_.clone()) } else { None };
let tags = if options.retrieve_tags {
let mut tags = Vec::new();
// get all encrypted.
let rows = match query_qualifier {
Some(_) => {
let stmt = conn.prepare_cached("SELECT name, value FROM tags_encrypted WHERE item_id = $1 AND wallet_id = $2")?;
stmt.query(&[&item.0, &self.wallet_id])?
},
None => {
let stmt = conn.prepare_cached("SELECT name, value FROM tags_encrypted WHERE item_id = $1")?;
stmt.query(&[&item.0])?
}
};
let mut iter = rows.iter();
while let Some(res) = iter.next() {
let row = res;
//let tag_name: Vec<u8> = row.get(0);
//let tag_value: Vec<u8> = row.get(1);
tags.push(Tag::Encrypted(row.get(0), row.get(1)));
}
// get all plain
let rows = match query_qualifier {
Some(_) => {
let stmt = conn.prepare_cached("SELECT name, value FROM tags_plaintext WHERE item_id = $1 AND wallet_id = $2")?;
stmt.query(&[&item.0, &self.wallet_id])?
},
None => {
let stmt = conn.prepare_cached("SELECT name, value FROM tags_plaintext WHERE item_id = $1")?;
stmt.query(&[&item.0])?
}
};
let mut iter = rows.iter();
while let Some(res) = iter.next() {
let row = res;
//let tag_name: Vec<u8> = row.get(0);
//let tag_value: String = row.get(1);
tags.push(Tag::PlainText(row.get(0), row.get(1)));
}
Some(tags)
} else { None };
Ok(StorageRecord::new(id.to_vec(), value, type_.map(|val| val.to_vec()), tags))
}
///
/// Inserts a value and its tags into the storage.
/// Returns Result with () on success or
/// Result with WalletStorageError in case of failure.
///
///
/// # Arguments
///
/// * `type_` - type of the item in storage
/// * `id` - id of the item in storage
/// * `value` - value of the item in storage
/// * `value_key` - key used to encrypt the value
/// * `tags` - tags assigned to the value
///
/// # Returns
///
/// Result that can be either:
///
/// * `()`
/// * `WalletStorageError`
///
/// # Errors
///
/// Any of the following `WalletStorageError` classes of errors can be thrown by this method:
///
/// * `WalletStorageError::Closed` - Storage is closed
/// * `WalletStorageError::ItemAlreadyExists` - Item is already present in database
/// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query
///
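/// # Example (sketch)
///
/// A minimal, hypothetical usage sketch; `storage` and the literal values are illustrative
/// assumptions only:
///
/// ```ignore
/// let value = EncryptedValue { data: vec![1, 2, 3], key: vec![4, 5, 6] };
/// storage.add(b"cred_type", b"cred_id", &value, &[])?;
/// ```
///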
fn add(&self, type_: &[u8], id: &[u8], value: &EncryptedValue, tags: &[Tag]) -> Result<(), WalletStorageError> {
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let tx: transaction::Transaction = transaction::Transaction::new(&conn)?;
let res = match query_qualifier {
Some(_) => tx.prepare_cached("INSERT INTO items (type, name, value, key, wallet_id) VALUES ($1, $2, $3, $4, $5) RETURNING id")?
.query(&[&type_.to_vec(), &id.to_vec(), &value.data, &value.key, &self.wallet_id]),
None => tx.prepare_cached("INSERT INTO items (type, name, value, key) VALUES ($1, $2, $3, $4) RETURNING id")?
.query(&[&type_.to_vec(), &id.to_vec(), &value.data, &value.key])
};
let item_id = match res {
Ok(rows) => {
let res = match rows.iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
};
let item_id: i64 = match res {
Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
Err(err) => return Err(WalletStorageError::from(err)),
Ok(id) => id
};
item_id
},
Err(err) => {
if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) ||
err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
return Err(WalletStorageError::ItemAlreadyExists);
} else {
return Err(WalletStorageError::from(err));
}
}
};
let item_id = item_id as i64;
if !tags.is_empty() {
let stmt_e = match query_qualifier {
Some(_) => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?,
None => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value) VALUES ($1, $2, $3)")?
};
let stmt_p = match query_qualifier {
Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?,
None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3)")?
};
for tag in tags {
match tag {
&Tag::Encrypted(ref tag_name, ref tag_data) => {
let res = match query_qualifier {
Some(_) => stmt_e.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]),
None => stmt_e.execute(&[&item_id, tag_name, tag_data])
};
match res {
Ok(_) => (),
Err(err) => {
if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) ||
err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
return Err(WalletStorageError::ItemAlreadyExists);
} else {
return Err(WalletStorageError::from(err));
}
}
}
},
&Tag::PlainText(ref tag_name, ref tag_data) => {
let res = match query_qualifier {
Some(_) => stmt_p.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]),
None => stmt_p.execute(&[&item_id, tag_name, tag_data])
};
match res {
Ok(_) => (),
Err(err) => {
if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) ||
err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
return Err(WalletStorageError::ItemAlreadyExists);
} else {
return Err(WalletStorageError::from(err));
}
}
}
}
};
}
}
tx.commit()?;
Ok(())
}
fn update(&self, type_: &[u8], id: &[u8], value: &EncryptedValue) -> Result<(), WalletStorageError> {
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let res = match query_qualifier {
Some(_) => conn.prepare_cached("UPDATE items SET value = $1, key = $2 WHERE type = $3 AND name = $4 AND wallet_id = $5")?
.execute(&[&value.data, &value.key, &type_.to_vec(), &id.to_vec(), &self.wallet_id]),
None => conn.prepare_cached("UPDATE items SET value = $1, key = $2 WHERE type = $3 AND name = $4")?
.execute(&[&value.data, &value.key, &type_.to_vec(), &id.to_vec()])
};
match res {
Ok(1) => Ok(()),
Ok(0) => Err(WalletStorageError::ItemNotFound),
Ok(count) => Err(WalletStorageError::CommonError(CommonError::InvalidState(format!("Postgres returned update row count: {}", count)))),
Err(err) => Err(WalletStorageError::from(err)),
}
}
fn add_tags(&self, type_: &[u8], id: &[u8], tags: &[Tag]) -> Result<(), WalletStorageError> {
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let tx: transaction::Transaction = transaction::Transaction::new(&conn)?;
let res = match query_qualifier {
Some(_) => {
let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2")?
.query(&[&type_.to_vec(), &id.to_vec()]);
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
}
},
None => {
let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2")?
.query(&[&type_.to_vec(), &id.to_vec()]);
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
}
}
};
let item_id: i64 = match res {
Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
Err(err) => return Err(WalletStorageError::from(err)),
Ok(id) => id
};
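// The tag statements below use ON CONFLICT ... DO UPDATE, so re-adding a tag name that
// already exists for this item updates its value instead of raising a unique violation.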
if !tags.is_empty() {
let enc_tag_insert_stmt = match query_qualifier {
Some(_) => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)
ON CONFLICT (name, item_id, wallet_id) DO UPDATE SET value = excluded.value")?,
None => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value) VALUES ($1, $2, $3)
ON CONFLICT (name, item_id) DO UPDATE SET value = excluded.value")?
};
let plain_tag_insert_stmt = match query_qualifier {
Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)
ON CONFLICT (name, item_id, wallet_id) DO UPDATE SET value = excluded.value")?,
None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3)
ON CONFLICT (name, item_id) DO UPDATE SET value = excluded.value")?
};
for tag in tags {
match tag {
&Tag::Encrypted(ref tag_name, ref tag_data) => {
let res = match query_qualifier {
Some(_) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]),
None => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])
};
match res {
Ok(_) => (),
Err(err) => {
if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) ||
err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
return Err(WalletStorageError::ItemAlreadyExists);
} else {
return Err(WalletStorageError::from(err));
}
}
}
},
&Tag::PlainText(ref tag_name, ref tag_data) => {
let res = match query_qualifier {
Some(_) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]),
None => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])
};
match res {
Ok(_) => (),
Err(err) => {
if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) ||
err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
return Err(WalletStorageError::ItemAlreadyExists);
} else {
return Err(WalletStorageError::from(err));
}
}
}
}
};
}
}
tx.commit()?;
Ok(())
}
fn update_tags(&self, type_: &[u8], id: &[u8], tags: &[Tag]) -> Result<(), WalletStorageError> {
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let tx: transaction::Transaction = transaction::Transaction::new(&conn)?;
let res = match query_qualifier {
Some(_) => {
let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2 AND wallet_id = $3")?
.query(&[&type_.to_vec(), &id.to_vec(), &self.wallet_id]);
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
}
},
None => {
let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2")?
.query(&[&type_.to_vec(), &id.to_vec()]);
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
}
}
};
let item_id: i64 = match res {
Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
Err(err) => return Err(WalletStorageError::from(err)),
Ok(id) => id
};
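// update_tags replaces the whole tag set: all existing encrypted and plaintext tags for the
// item are deleted first, then the supplied tags are inserted, all within one transaction.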
match query_qualifier {
Some(_) => {
tx.execute("DELETE FROM tags_encrypted WHERE item_id = $1 AND wallet_id = $2", &[&item_id, &self.wallet_id])?;
tx.execute("DELETE FROM tags_plaintext WHERE item_id = $1 AND wallet_id = $2", &[&item_id, &self.wallet_id])?;
},
None => {
tx.execute("DELETE FROM tags_encrypted WHERE item_id = $1", &[&item_id])?;
tx.execute("DELETE FROM tags_plaintext WHERE item_id = $1", &[&item_id])?;
}
};
if !tags.is_empty() {
let enc_tag_insert_stmt = match query_qualifier {
Some(_) => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?,
None => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value) VALUES ($1, $2, $3)")?
};
let plain_tag_insert_stmt = match query_qualifier {
Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?,
None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3)")?
};
for tag in tags {
match query_qualifier {
Some(_) => {
match tag {
&Tag::Encrypted(ref tag_name, ref tag_data) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id])?,
&Tag::PlainText(ref tag_name, ref tag_data) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id])?
}
},
None => {
match tag {
&Tag::Encrypted(ref tag_name, ref tag_data) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])?,
&Tag::PlainText(ref tag_name, ref tag_data) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])?
}
}
};
}
}
tx.commit()?;
Ok(())
}
fn delete_tags(&self, type_: &[u8], id: &[u8], tag_names: &[TagName]) -> Result<(), WalletStorageError> {
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let res = match query_qualifier {
Some(_) => {
let mut rows = conn.prepare_cached("SELECT id FROM items WHERE type =$1 AND name = $2 AND wallet_id = $3")?
.query(&[&type_.to_vec(), &id.to_vec(), &self.wallet_id]);
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
}
},
None => {
let mut rows = conn.prepare_cached("SELECT id FROM items WHERE type =$1 AND name = $2")?
.query(&[&type_.to_vec(), &id.to_vec()]);
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
}
}
};
let item_id: i64 = match res {
Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
Err(err) => return Err(WalletStorageError::from(err)),
Ok(id) => id
};
let tx: transaction::Transaction = transaction::Transaction::new(&conn)?;
{
let enc_tag_delete_stmt = match query_qualifier {
Some(_) => tx.prepare_cached("DELETE FROM tags_encrypted WHERE item_id = $1 AND name = $2 AND wallet_id = $3")?,
None => tx.prepare_cached("DELETE FROM tags_encrypted WHERE item_id = $1 AND name = $2")?
};
let plain_tag_delete_stmt = match query_qualifier {
Some(_) => tx.prepare_cached("DELETE FROM tags_plaintext WHERE item_id = $1 AND name = $2 AND wallet_id = $3")?,
None => tx.prepare_cached("DELETE FROM tags_plaintext WHERE item_id = $1 AND name = $2")?
};
for tag_name in tag_names {
match query_qualifier {
Some(_) =>
match tag_name {
&TagName::OfEncrypted(ref tag_name) => enc_tag_delete_stmt.execute(&[&item_id, tag_name, &self.wallet_id])?,
&TagName::OfPlain(ref tag_name) => plain_tag_delete_stmt.execute(&[&item_id, tag_name, &self.wallet_id])?,
},
None =>
match tag_name {
&TagName::OfEncrypted(ref tag_name) => enc_tag_delete_stmt.execute(&[&item_id, tag_name])?,
&TagName::OfPlain(ref tag_name) => plain_tag_delete_stmt.execute(&[&item_id, tag_name])?,
}
};
}
}
tx.commit()?;
Ok(())
}
///
/// Deletes a value and its tags from the storage.
/// Returns Result with () on success or
/// Result with WalletStorageError in case of failure.
///
///
/// # Arguments
///
/// * `type_` - type of the item in storage
/// * `id` - id of the item in storage
///
/// # Returns
///
/// Result that can be either:
///
/// * `()`
/// * `WalletStorageError`
///
/// # Errors
///
/// Any of the following `WalletStorageError` types of errors can be thrown by this method:
///
/// * `WalletStorageError::Closed` - Storage is closed
/// * `WalletStorageError::ItemNotFound` - Item is not found in database
/// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query
///
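/// # Example (sketch)
///
/// A minimal, hypothetical usage sketch; `storage` and the byte strings are illustrative
/// assumptions only:
///
/// ```ignore
/// storage.delete(b"cred_type", b"cred_id")?;
/// ```
///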
fn delete(&self, type_: &[u8], id: &[u8]) -> Result<(), WalletStorageError> {
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let row_count = match query_qualifier {
Some(_) => conn.execute(
"DELETE FROM items where type = $1 AND name = $2 AND wallet_id = $3",
&[&type_.to_vec(), &id.to_vec(), &self.wallet_id]
)?,
None => conn.execute(
"DELETE FROM items where type = $1 AND name = $2",
&[&type_.to_vec(), &id.to_vec()]
)?
};
if row_count == 1 {
Ok(())
} else {
Err(WalletStorageError::ItemNotFound)
}
}
fn get_storage_metadata(&self) -> Result<Vec<u8>, WalletStorageError> {
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let res: Result<Vec<u8>, WalletStorageError> = {
let mut rows = match query_qualifier {
Some(_) => conn.query(
"SELECT value FROM metadata WHERE wallet_id = $1",
&[&self.wallet_id]),
None => conn.query(
"SELECT value FROM metadata",
&[])
};
match rows.as_mut().unwrap().iter().next() {
Some(row) => Ok(row.get(0)),
None => Err(WalletStorageError::ItemNotFound)
}
};
match res {
Ok(entity) => Ok(entity),
Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
Err(err) => return Err(WalletStorageError::from(err))
}
}
fn
|
(&self, metadata: &[u8]) -> Result<(), WalletStorageError> {
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let res = match query_qualifier {
Some(_) => conn.execute("UPDATE metadata SET value = $1 WHERE wallet_id = $2", &[&metadata.to_vec(), &self.wallet_id]),
None => conn.execute("UPDATE metadata SET value = $1", &[&metadata.to_vec()])
};
match res {
Ok(_) => Ok(()),
Err(error) => {
Err(WalletStorageError::IOError(format!("Error occurred while inserting the keys: {}", error)))
}
}
}
fn get_all(&self) -> Result<Box<dyn StorageIterator>, WalletStorageError> {
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let statement = match query_qualifier {
Some(_) => self._prepare_statement("SELECT id, name, value, key, type FROM items WHERE wallet_id = $1")?,
None => self._prepare_statement("SELECT id, name, value, key, type FROM items")?
};
let fetch_options = RecordOptions {
retrieve_type: true,
retrieve_value: true,
retrieve_tags: true,
};
let pool = self.pool.clone();
let tag_retriever = match query_qualifier {
Some(_) => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), Some(self.wallet_id.clone()))?),
None => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), None)?)
};
let storage_iterator = match query_qualifier {
Some(_) => PostgresStorageIterator::new(Some(statement), &[&self.wallet_id], fetch_options, tag_retriever, None)?,
None => PostgresStorageIterator::new(Some(statement), &[], fetch_options, tag_retriever, None)?
};
Ok(Box::new(storage_iterator))
}
fn search(&self, type_: &[u8], query: &language::Operator, options: Option<&str>) -> Result<Box<dyn StorageIterator>, WalletStorageError> {
let type_ = type_.to_vec(); // FIXME
let search_options = match options {
None => SearchOptions::default(),
Some(option_str) => serde_json::from_str(option_str)?
};
let pool = self.pool.clone();
let conn = pool.get().unwrap();
let query_qualifier = unsafe {
SELECTED_STRATEGY.query_qualifier()
};
let wallet_id_arg = self.wallet_id.to_owned();
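// For the multi-wallet (shared table) strategy the generated WQL SQL is rewritten: an
// "AND i.wallet_id = $n" filter is appended, and any references to the tag tables are scoped
// per wallet through CTEs, yielding roughly:
// WITH tags_plaintext AS (SELECT * FROM tags_plaintext WHERE wallet_id = $n), ...
// <original query> AND i.wallet_id = $m
// The same rewrite is applied to the count query here and to the record query further below.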
let total_count: Option<usize> = if search_options.retrieve_total_count {
let (query_string, query_arguments) = match query_qualifier {
Some(_) => {
let (mut query_string, mut query_arguments) = query::wql_to_sql_count(&type_, query)?;
query_arguments.push(&wallet_id_arg);
let arg_str = format!(" AND i.wallet_id = ${}", query_arguments.len());
query_string.push_str(&arg_str);
let mut with_clause = false;
if query_string.contains("tags_plaintext") {
query_arguments.push(&wallet_id_arg);
query_string = format!("tags_plaintext as (select * from tags_plaintext where wallet_id = ${}) {}", query_arguments.len(), query_string);
with_clause = true;
}
if query_string.contains("tags_encrypted") {
if with_clause {
query_string = format!(", {}", query_string);
}
query_arguments.push(&wallet_id_arg);
query_string = format!("tags_encrypted as (select * from tags_encrypted where wallet_id = ${}) {}", query_arguments.len(), query_string);
with_clause = true;
}
if with_clause {
query_string = format!("WITH {}", query_string);
}
(query_string, query_arguments)
},
None => query::wql_to_sql_count(&type_, query)?
};
let mut rows = conn.query(
&query_string,
&query_arguments[..]);
match rows.as_mut().unwrap().iter().next() {
Some(row) => {
let x: i64 = row.get(0);
Some(x as usize)
},
None => None
}
} else { None };
if search_options.retrieve_records {
let fetch_options = RecordOptions {
retrieve_value: search_options.retrieve_value,
retrieve_tags: search_options.retrieve_tags,
retrieve_type: search_options.retrieve_type,
};
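// Same per-wallet rewrite as for the count query above, this time applied to the record query.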
let (query_string, query_arguments) = match query_qualifier {
Some(_) => {
let (mut query_string, mut query_arguments) = query::wql_to_sql(&type_, query, options)?;
query_arguments.push(&wallet_id_arg);
let arg_str = format!(" AND i.wallet_id = ${}", query_arguments.len());
query_string.push_str(&arg_str);
let mut with_clause = false;
if query_string.contains("tags_plaintext") {
query_arguments.push(&wallet_id_arg);
query_string = format!("tags_plaintext as (select * from tags_plaintext where wallet_id = ${}) {}", query_arguments.len(), query_string);
with_clause = true;
}
if query_string.contains("tags_encrypted") {
if with_clause {
query_string = format!(", {}", query_string);
}
query_arguments.push(&wallet_id_arg);
query_string = format!("tags_encrypted as (select * from tags_encrypted where wallet_id = ${}) {}", query_arguments.len(), query_string);
with_clause = true;
}
if with_clause {
query_string = format!("WITH {}", query_string);
}
(query_string, query_arguments)
},
None => query::wql_to_sql(&type_, query, options)?
};
let statement = self._prepare_statement(&query_string)?;
let tag_retriever = if fetch_options.retrieve_tags {
let pool = self.pool.clone();
match query_qualifier {
Some(_) => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), Some(self.wallet_id.clone()))?),
None => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), None)?)
}
} else {
None
};
let storage_iterator = PostgresStorageIterator::new(Some(statement), &query_arguments[..], fetch_options, tag_retriever, total_count)?;
Ok(Box::new(storage_iterator))
} else {
let storage_iterator = PostgresStorageIterator::new(None, &[], RecordOptions::default(), None, total_count)?;
Ok(Box::new(storage_iterator))
}
}
fn close(&mut self) -> Result<(), WalletStorageError> {
// TODO throws a borrow error if we try to close the connection here; temporary workaround is to rely on idle connection timeout
Ok(())
}
}
impl PostgresStorage {
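// Prepares a statement whose lifetime is tied to its pooled connection: OwningHandle keeps the
// Rc<PooledConnection> alive for as long as the boxed Statement borrowed from it is in use.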
fn _prepare_statement(&self, sql: &str) -> Result<
OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>, Box<postgres::stmt::Statement<'static>>>,
WalletStorageError> {
let pool = self.pool.clone();
OwningHandle::try_new(Rc::new(pool.get().unwrap()).clone(), |conn| {
unsafe { (*conn).prepare(sql) }.map(Box::new).map_err(WalletStorageError::from)
})
}
}
impl WalletStorageType for PostgresStorageType {
///
/// Initializes the wallets database and creates the necessary tables for all wallets
/// This needs to be called once at the very beginning; there is currently no mechanism to enforce it.
///
/// # Arguments
///
/// * `storage_config` - config containing the location of Postgres DB files
/// * `storage_credentials` - DB credentials
///
/// # Returns
///
/// Result that can be either:
///
/// * `()`
/// * `WalletStorageError`
///
/// # Errors
///
/// Any of the following `WalletStorageError` types of errors can be thrown by this method:
///
/// * `WalletStorageError::NotFound` - File with the provided id not found
/// * `IOError(..)` - Deletion of the file from the file system failed
///
fn init_storage(&self, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError> {
let config = config
.map(serde_json::from_str::<PostgresConfig>)
.map_or(Ok(None), |v| v.map(Some))
.map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?;
let credentials = credentials
.map(serde_json::from_str::<PostgresCredentials>)
.map_or(Ok(None), |v| v.map(Some))
.map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?;
let config = match config {
Some(config) => config,
None => return Err(WalletStorageError::ConfigError)
};
let credentials = match credentials {
Some(credentials) => credentials,
None => return Err(WalletStorageError::ConfigError)
};
unsafe {
match config.wallet_scheme {
Some(scheme) => match scheme {
WalletScheme::DatabasePerWallet => SELECTED_STRATEGY = &DatabasePerWalletStrategy{},
WalletScheme::MultiWalletSingleTable => SELECTED_STRATEGY = &MultiWalletSingleTableStrategy{},
WalletScheme::MultiWalletMultiTable => SELECTED_STRATEGY = &MultiWalletMultiTableStrategy{}
},
None => ()
};
}
// initialize using the global SELECTED_STRATEGY object
unsafe {
return SELECTED_STRATEGY.init_storage(&config, &credentials);
}
}
///
/// Deletes the Postgres wallet with the provided id, using the connection details specified
/// in the config.
///
/// # Arguments
///
/// * `id` - the wallet id
/// * `storage_config` - Postgres DB connection config
/// * `storage_credentials` - DB credentials
///
/// # Returns
///
/// Result that can be either:
///
/// * `()`
/// * `WalletStorageError`
///
/// # Errors
///
/// Any of the following `WalletStorageError` types of errors can be thrown by this method:
///
/// * `WalletStorageError::NotFound` - Wallet with the provided id not found
/// * `IOError(..)` - Deletion of the wallet from the database failed
///
fn delete_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError> {
let config = config
.map(serde_json::from_str::<PostgresConfig>)
.map_or(Ok(None), |v| v.map(Some))
.map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?;
let credentials = credentials
.map(serde_json::from_str::<PostgresCredentials>)
.map_or(Ok(None), |v| v.map(Some))
.map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?;
let config = match config {
Some(config) => config,
None => return Err(WalletStorageError::ConfigError)
};
let credentials = match credentials {
Some(credentials) => credentials,
None => return Err(WalletStorageError::ConfigError)
};
unsafe {
return SELECTED_STRATEGY.delete_wallet(id, &config, &credentials);
}
}
///
/// Creates the Postgres DB schema with the provided id, using the connection specified in the
/// config, and initializes the encryption keys needed for encryption and decryption of data.
///
/// # Arguments
///
/// * `id` - name of the Postgres DB schema
/// * `config` - config containing the location of postgres db
/// * `credentials` - DB credentials
/// * `metadata` - encryption keys that need to be stored in the newly created DB
///
/// # Returns
///
/// Result that can be either:
///
/// * `()`
/// * `WalletStorageError`
///
/// # Errors
///
/// Any of the following `WalletStorageError` types of errors can be thrown by this method:
///
/// * `AlreadyExists` - Schema with a given name already exists in the database
/// * `IOError("IO error during storage operation:...")` - Connection to the DB failed
/// * `IOError("Error occurred while creating wallet file:..)"` - Creation of schema failed
/// * `IOError("Error occurred while inserting the keys...")` - Insertion of keys failed
///
fn create_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>, metadata: &[u8]) -> Result<(), WalletStorageError> {
let config = config
.map(serde_json::from_str::<PostgresConfig>)
.map_or(Ok(None), |v| v.map(Some))
.map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?;
let credentials = credentials
.map(serde_json::from_str::<PostgresCredentials>)
.map_or(Ok(None), |v| v.map(Some))
.map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?;
let config = match config {
Some(config) => config,
None => return Err(WalletStorageError::ConfigError)
};
let credentials = match credentials {
Some(credentials) => credentials,
None => return Err(WalletStorageError::ConfigError)
};
// initialize using the global selected_strategy object
unsafe {
return SELECTED_STRATEGY.create_wallet(id, &config, &credentials, metadata);
}
}
///
/// Establishes a connection to the Postgres DB with the provided id, using the connection
/// details specified in the config. In case of a successful connection returns a Storage object
/// embedding the connection and the encryption keys that will be used for encryption and
/// decryption operations.
///
///
/// # Arguments
///
/// * `id` - id of the wallet
/// * `config` - config containing the Postgres connection details
/// * `credentials` - DB credentials
///
/// # Returns
///
/// Result that can be either:
///
/// * `Box<PostgresStorage>` - Storage object embedding the open connection pool
/// * `WalletStorageError`
///
/// # Errors
///
/// Any of the following `WalletStorageError` types of errors can be thrown by this method:
///
/// * `WalletStorageError::NotFound` - Wallet with the provided id not found
/// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query
///
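/// # Example (sketch)
///
/// Hypothetical config and credentials JSON, mirroring the shape used in the tests below;
/// the concrete values are illustrative assumptions only:
///
/// ```ignore
/// let config = r#"{"url": "localhost:5432"}"#;
/// let credentials = r#"{"account": "postgres", "password": "mysecretpassword"}"#;
/// let storage = PostgresStorageType::new()
/// .open_storage("wallet1", Some(config), Some(credentials))?;
/// ```
///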
fn open_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<Box<PostgresStorage>, WalletStorageError> {
let config = config
.map(serde_json::from_str::<PostgresConfig>)
.map_or(Ok(None), |v| v.map(Some))
.map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?;
let credentials = credentials
.map(serde_json::from_str::<PostgresCredentials>)
.map_or(Ok(None), |v| v.map(Some))
.map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?;
let config = match config {
Some(config) => config,
None => return Err(WalletStorageError::ConfigError)
};
let credentials = match credentials {
Some(credentials) => credentials,
None => return Err(WalletStorageError::ConfigError)
};
// initialize using the global selected_strategy object
unsafe {
return SELECTED_STRATEGY.open_wallet(id, &config, &credentials);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use utils::test;
#[test]
fn postgres_storage_type_create_works() {
_cleanup();
let storage_type = PostgresStorageType::new();
storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
}
#[test]
fn postgres_storage_type_create_works_for_twice() {
_cleanup();
let storage_type = PostgresStorageType::new();
storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
let res = storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata());
assert_match!(Err(WalletStorageError::AlreadyExists), res);
}
#[test]
fn postgres_storage_get_storage_metadata_works() {
_cleanup();
let storage = _storage();
let metadata = storage.get_storage_metadata().unwrap();
assert_eq!(metadata, _metadata());
}
#[test]
fn postgres_storage_type_delete_works() {
_cleanup();
let storage_type = PostgresStorageType::new();
storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap();
}
#[test]
fn postgres_storage_type_delete_works_for_non_existing() {
_cleanup();
let storage_type = PostgresStorageType::new();
storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
let res = storage_type.delete_storage("unknown", Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]));
assert_match!(Err(WalletStorageError::NotFound), res);
storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap();
}
#[test]
fn postgres_storage_type_open_works() {
_cleanup();
_storage();
}
#[test]
fn postgres_storage_type_open_works_for_not_created() {
_cleanup();
let storage_type = PostgresStorageType::new();
let res = storage_type.open_storage("unknown", Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]));
assert_match!(Err(WalletStorageError::NotFound), res);
}
#[test]
fn postgres_storage_add_works_with_config() {
_cleanup();
let storage = _storage_db_pool();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
}
#[test]
fn postgres_storage_add_works_for_is_802() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let res = storage.add(&_type1(), &_id1(), &_value1(), &_tags());
assert_match!(Err(WalletStorageError::ItemAlreadyExists), res);
let res = storage.add(&_type1(), &_id1(), &_value1(), &_tags());
assert_match!(Err(WalletStorageError::ItemAlreadyExists), res);
}
#[test]
fn postgres_storage_set_get_works() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
}
#[test]
fn postgres_storage_set_get_works_for_twice() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let res = storage.add(&_type1(), &_id1(), &_value2(), &_tags());
assert_match!(Err(WalletStorageError::ItemAlreadyExists), res);
}
#[test]
fn postgres_storage_set_get_works_for_reopen() {
_cleanup();
{
_storage().add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
}
let storage_type = PostgresStorageType::new();
let storage = storage_type.open_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
}
#[test]
fn postgres_storage_get_works_for_wrong_key() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let res = storage.get(&_type1(), &_id2(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##);
assert_match!(Err(WalletStorageError::ItemNotFound), res)
}
#[test]
fn postgres_storage_delete_works() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
storage.delete(&_type1(), &_id1()).unwrap();
let res = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##);
assert_match!(Err(WalletStorageError::ItemNotFound), res);
}
#[test]
fn postgres_storage_delete_works_for_non_existing() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let res = storage.delete(&_type1(), &_id2());
assert_match!(Err(WalletStorageError::ItemNotFound), res);
}
#[test]
fn postgres_storage_create_and_find_multiple_works() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let record1 = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record1.value.unwrap(), _value1());
assert_eq!(_sort(record1.tags.unwrap()), _sort(_tags()));
storage.add(&_type2(), &_id2(), &_value2(), &_tags()).unwrap();
let record2 = storage.get(&_type2(), &_id2(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record2.value.unwrap(), _value2());
assert_eq!(_sort(record2.tags.unwrap()), _sort(_tags()));
}
#[test]
fn postgres_storage_get_all_works() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
storage.add(&_type2(), &_id2(), &_value2(), &_tags()).unwrap();
let mut storage_iterator = storage.get_all().unwrap();
let record = storage_iterator.next().unwrap().unwrap();
assert_eq!(record.type_.unwrap(), _type1());
assert_eq!(record.value.unwrap(), _value1());
assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
let record = storage_iterator.next().unwrap().unwrap();
assert_eq!(record.type_.unwrap(), _type2());
assert_eq!(record.value.unwrap(), _value2());
assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
let record = storage_iterator.next().unwrap();
assert!(record.is_none());
}
#[test]
fn postgres_storage_get_all_works_for_empty() {
_cleanup();
let storage = _storage();
let mut storage_iterator = storage.get_all().unwrap();
let record = storage_iterator.next().unwrap();
assert!(record.is_none());
}
#[test]
fn postgres_storage_update_works() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
storage.update(&_type1(), &_id1(), &_value2()).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value2());
}
#[test]
fn postgres_storage_update_works_for_non_existing_id() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
let res = storage.update(&_type1(), &_id2(), &_value2());
assert_match!(Err(WalletStorageError::ItemNotFound), res)
}
#[test]
fn postgres_storage_update_works_for_non_existing_type() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
let res = storage.update(&_type2(), &_id1(), &_value2());
assert_match!(Err(WalletStorageError::ItemNotFound), res)
}
#[test]
fn postgres_storage_add_tags_works() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
storage.add_tags(&_type1(), &_id1(), &_new_tags()).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
let expected_tags = {
let mut tags = _tags();
tags.extend(_new_tags());
_sort(tags)
};
assert_eq!(_sort(record.tags.unwrap()), expected_tags);
}
#[test]
fn postgres_storage_add_tags_works_for_non_existing_id() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let res = storage.add_tags(&_type1(), &_id2(), &_new_tags());
assert_match!(Err(WalletStorageError::ItemNotFound), res)
}
#[test]
fn postgres_storage_add_tags_works_for_non_existing_type() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let res = storage.add_tags(&_type2(), &_id1(), &_new_tags());
assert_match!(Err(WalletStorageError::ItemNotFound), res)
}
#[test]
fn postgres_storage_add_tags_works_for_already_existing() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let tags_with_existing = {
let mut tags = _tags();
tags.extend(_new_tags());
tags
};
storage.add_tags(&_type1(), &_id1(), &tags_with_existing).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
let expected_tags = {
let mut tags = _tags();
tags.extend(_new_tags());
_sort(tags)
};
assert_eq!(_sort(record.tags.unwrap()), expected_tags);
}
#[test]
fn postgres_storage_update_tags_works() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
storage.update_tags(&_type1(), &_id1(), &_new_tags()).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
assert_eq!(_sort(record.tags.unwrap()), _sort(_new_tags()));
}
#[test]
fn postgres_storage_update_tags_works_for_non_existing_id() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let res = storage.update_tags(&_type1(), &_id2(), &_new_tags());
assert_match!(Err(WalletStorageError::ItemNotFound), res);
}
#[test]
fn postgres_storage_update_tags_works_for_non_existing_type() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let res = storage.update_tags(&_type1(), &_id2(), &_new_tags());
assert_match!(Err(WalletStorageError::ItemNotFound), res);
}
#[test]
fn postgres_storage_update_tags_works_for_already_existing() {
_cleanup();
let storage = _storage();
storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
let tags_with_existing = {
let mut tags = _tags();
tags.extend(_new_tags());
tags
};
storage.update_tags(&_type1(), &_id1(), &tags_with_existing).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.value.unwrap(), _value1());
let expected_tags = {
let mut tags = _tags();
tags.extend(_new_tags());
_sort(tags)
};
assert_eq!(_sort(record.tags.unwrap()), expected_tags);
}
#[test]
fn postgres_storage_delete_tags_works() {
_cleanup();
let storage = _storage();
let tag_name1 = vec![0, 0, 0];
let tag_name2 = vec![1, 1, 1];
let tag_name3 = vec![2, 2, 2];
let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]);
let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string());
let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]);
let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()];
storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap();
let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()), TagName::OfPlain(tag_name2.clone())];
storage.delete_tags(&_type1(), &_id1(), &tag_names).unwrap();
let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
assert_eq!(record.tags.unwrap(), vec![tag3]);
}
#[test]
fn postgres_storage_delete_tags_works_for_non_existing_type() {
_cleanup();
let storage = _storage();
let tag_name1 = vec![0, 0, 0];
let tag_name2 = vec![1, 1, 1];
let tag_name3 = vec![2, 2, 2];
let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]);
let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string());
let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]);
let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()];
storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap();
let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()), TagName::OfPlain(tag_name2.clone())];
let res = storage.delete_tags(&_type2(), &_id1(), &tag_names);
assert_match!(Err(WalletStorageError::ItemNotFound), res);
}
#[test]
fn postgres_storage_delete_tags_works_for_non_existing_id() {
_cleanup();
let storage = _storage();
let tag_name1 = vec![0, 0, 0];
let tag_name2 = vec![1, 1, 1];
let tag_name3 = vec![2, 2, 2];
let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]);
let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string());
let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]);
let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()];
storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap();
let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()), TagName::OfPlain(tag_name2.clone())];
let res = storage.delete_tags(&_type1(), &_id2(), &tag_names);
assert_match!(Err(WalletStorageError::ItemNotFound), res);
}
fn _cleanup() {
let storage_type = PostgresStorageType::new();
let _res = storage_type.init_storage(Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap();
let _ret = storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]));
let res = test::cleanup_storage();
res
}
fn _wallet_id() -> &'static str {
"walle1"
}
fn _storage() -> Box<WalletStorage> {
let storage_type = PostgresStorageType::new();
storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
let res = storage_type.open_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap();
res
}
fn _storage_db_pool() -> Box<WalletStorage> {
let storage_type = PostgresStorageType::new();
storage_type.create_storage(_wallet_id(), Some(&_wallet_config_db_pool()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
let res = storage_type.open_storage(_wallet_id(), Some(&_wallet_config_db_pool()[..]), Some(&_wallet_credentials()[..])).unwrap();
res
}
fn _wallet_config() -> String {
let wallet_scheme = env::var("WALLET_SCHEME");
match wallet_scheme {
Ok(scheme) => {
if scheme == "MultiWalletSingleTable" {
return _wallet_config_multi();
}
},
Err(_) => ()
};
let config = json!({
"url": "localhost:5432".to_owned()
}).to_string();
config
}
fn _wallet_config_multi() -> String {
let config = json!({
"url": "localhost:5432".to_owned(),
"wallet_scheme": "MultiWalletSingleTable".to_owned()
}).to_string();
config
}
fn _wallet_config_db_pool() -> String {
let config = json!({
"url": "localhost:5432".to_owned(),
"tls": "None",
"max_connections": 4,
"min_idle_time": 0,
"connection_timeout": 10
}).to_string();
config
}
fn _wallet_credentials() -> String {
let creds = json!({
"account": "postgres".to_owned(),
"password": "mysecretpassword".to_owned(),
"admin_account": Some("postgres".to_owned()),
"admin_password": Some("mysecretpassword".to_owned())
}).to_string();
creds
}
fn _metadata() -> Vec<u8> {
return vec![
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8
];
}
fn _type(i: u8) -> Vec<u8> {
vec![i, 1 + i, 2 + i]
}
fn _type1() -> Vec<u8> {
_type(1)
}
fn _type2() -> Vec<u8> {
_type(2)
}
fn _id(i: u8) -> Vec<u8> {
vec![3 + i, 4 + i, 5 + i]
}
fn _id1() -> Vec<u8> {
_id(1)
}
fn _id2() -> Vec<u8> {
_id(2)
}
fn _value(i: u8) -> EncryptedValue {
EncryptedValue { data: vec![6 + i, 7 + i, 8 + i], key: vec![9 + i, 10 + i, 11 + i] }
}
fn _value1() -> EncryptedValue {
_value(1)
}
fn _value2() -> EncryptedValue {
_value(2)
}
fn _tags() -> Vec<Tag> {
let mut tags: Vec<Tag> = Vec::new();
tags.push(Tag::Encrypted(vec![1, 5, 8], vec![3, 5, 6]));
tags.push(Tag::PlainText(vec![1, 5, 8, 1], "Plain value 1".to_string()));
tags.push(Tag::Encrypted(vec![2, 5, 8], vec![3, 5, 7]));
tags.push(Tag::PlainText(vec![2, 5, 8, 1], "Plain value 2".to_string()));
tags
}
fn _new_tags() -> Vec<Tag> {
vec![
Tag::Encrypted(vec![1, 1, 1], vec![2, 2, 2]),
Tag::PlainText(vec![1, 1, 1], String::from("tag_value_3"))
]
}
fn _sort(mut v: Vec<Tag>) -> Vec<Tag> {
v.sort();
v
}
}
|
set_storage_metadata
|
styles.tsx
|
import styled from 'styled-components';
export default {
Wrapper: styled.div`
margin-top: 24px;
`,
Item: styled.span`
display: block;
`,
}
| ||
view.rs
|
use serde::Serialize;
use actix_web::{HttpRequest, HttpResponse, Responder};
use serde_json;
#[derive(Serialize)]
pub struct ArticleView {
title: String,
description: String,
content: String
}
impl Responder for ArticleView {
fn
|
(self, _req: &HttpRequest) -> HttpResponse{
let body = serde_json::to_string(&self).unwrap();
// Create response and set content type
HttpResponse::Ok().content_type("application/json").body(body)
}
}
|
respond_to
|
clientset.gen.go
|
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
authenticationv1alpha1 "istio.io/client-go/pkg/clientset/versioned/typed/authentication/v1alpha1"
configv1alpha2 "istio.io/client-go/pkg/clientset/versioned/typed/config/v1alpha2"
networkingv1alpha3 "istio.io/client-go/pkg/clientset/versioned/typed/networking/v1alpha3"
rbacv1alpha1 "istio.io/client-go/pkg/clientset/versioned/typed/rbac/v1alpha1"
securityv1beta1 "istio.io/client-go/pkg/clientset/versioned/typed/security/v1beta1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
type Interface interface {
Discovery() discovery.DiscoveryInterface
AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface
// Deprecated: please explicitly pick a version if possible.
Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface
ConfigV1alpha2() configv1alpha2.ConfigV1alpha2Interface
// Deprecated: please explicitly pick a version if possible.
Config() configv1alpha2.ConfigV1alpha2Interface
NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface
// Deprecated: please explicitly pick a version if possible.
Networking() networkingv1alpha3.NetworkingV1alpha3Interface
RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface
// Deprecated: please explicitly pick a version if possible.
Rbac() rbacv1alpha1.RbacV1alpha1Interface
SecurityV1beta1() securityv1beta1.SecurityV1beta1Interface
// Deprecated: please explicitly pick a version if possible.
Security() securityv1beta1.SecurityV1beta1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
authenticationV1alpha1 *authenticationv1alpha1.AuthenticationV1alpha1Client
configV1alpha2 *configv1alpha2.ConfigV1alpha2Client
networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client
rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client
securityV1beta1 *securityv1beta1.SecurityV1beta1Client
}
// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client
func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface {
return c.authenticationV1alpha1
}
// Deprecated: Authentication retrieves the default version of AuthenticationClient.
// Please explicitly pick a version.
func (c *Clientset) Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface {
return c.authenticationV1alpha1
}
// ConfigV1alpha2 retrieves the ConfigV1alpha2Client
func (c *Clientset) ConfigV1alpha2() configv1alpha2.ConfigV1alpha2Interface {
return c.configV1alpha2
}
// Deprecated: Config retrieves the default version of ConfigClient.
// Please explicitly pick a version.
func (c *Clientset) Config() configv1alpha2.ConfigV1alpha2Interface {
return c.configV1alpha2
}
// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
return c.networkingV1alpha3
}
// Deprecated: Networking retrieves the default version of NetworkingClient.
// Please explicitly pick a version.
func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface {
return c.networkingV1alpha3
}
// RbacV1alpha1 retrieves the RbacV1alpha1Client
func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface {
return c.rbacV1alpha1
}
// Deprecated: Rbac retrieves the default version of RbacClient.
// Please explicitly pick a version.
func (c *Clientset) Rbac() rbacv1alpha1.RbacV1alpha1Interface {
return c.rbacV1alpha1
}
// SecurityV1beta1 retrieves the SecurityV1beta1Client
func (c *Clientset) SecurityV1beta1() securityv1beta1.SecurityV1beta1Interface {
return c.securityV1beta1
}
// Deprecated: Security retrieves the default version of SecurityClient.
// Please explicitly pick a version.
func (c *Clientset) Security() securityv1beta1.SecurityV1beta1Interface {
return c.securityV1beta1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
return nil
}
return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
func
|
(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
cs.authenticationV1alpha1, err = authenticationv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.configV1alpha2, err = configv1alpha2.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.networkingV1alpha3, err = networkingv1alpha3.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.securityV1beta1, err = securityv1beta1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.authenticationV1alpha1 = authenticationv1alpha1.NewForConfigOrDie(c)
cs.configV1alpha2 = configv1alpha2.NewForConfigOrDie(c)
cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c)
cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c)
cs.securityV1beta1 = securityv1beta1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.authenticationV1alpha1 = authenticationv1alpha1.New(c)
cs.configV1alpha2 = configv1alpha2.New(c)
cs.networkingV1alpha3 = networkingv1alpha3.New(c)
cs.rbacV1alpha1 = rbacv1alpha1.New(c)
cs.securityV1beta1 = securityv1beta1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
}
|
NewForConfig
|
libcloudfuncs.py
|
# -*- coding: utf-8 -*-
'''
The generic libcloud template used to create the connections and deploy the
cloud virtual machines
'''
from __future__ import absolute_import
# Import python libs
import os
import logging
from salt.ext.six import string_types
import salt.ext.six as six
from salt.ext.six.moves import zip
# pylint: disable=W0611
# Import libcloud
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import (
|
ScriptDeployment
)
HAS_LIBCLOUD = True
LIBCLOUD_VERSION_INFO = tuple([
int(part) for part in libcloud.__version__.replace('-', '.').split('.')[:3]
])
except ImportError:
HAS_LIBCLOUD = False
LIBCLOUD_VERSION_INFO = (1000,)
# pylint: enable=W0611
# Import salt libs
import salt.utils.event
import salt.client
# Import salt cloud libs
import salt.utils
import salt.utils.cloud
import salt.config as config
from salt.exceptions import SaltCloudNotFound, SaltCloudSystemExit
# Get logging started
log = logging.getLogger(__name__)
LIBCLOUD_MINIMAL_VERSION = (0, 14, 0)
def node_state(id_):
'''
Libcloud supported node states
'''
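    # These integer keys follow libcloud's NodeState ordering (RUNNING == 0,
    # REBOOTING == 1, ... PAUSED == 8).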
states = {0: 'RUNNING',
1: 'REBOOTING',
2: 'TERMINATED',
3: 'PENDING',
4: 'UNKNOWN',
5: 'STOPPED',
6: 'SUSPENDED',
7: 'ERROR',
8: 'PAUSED'}
return states[id_]
def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
'''
Compare different libcloud versions
'''
if not HAS_LIBCLOUD:
return False
if not isinstance(reqver, (list, tuple)):
raise RuntimeError(
            '\'reqver\' needs to be passed as a tuple or list, i.e., (0, 14, 0)'
)
try:
import libcloud # pylint: disable=redefined-outer-name
except ImportError:
raise ImportError(
'salt-cloud requires >= libcloud {0} which is not installed'.format(
'.'.join([str(num) for num in reqver])
)
)
if LIBCLOUD_VERSION_INFO >= reqver:
return libcloud.__version__
errormsg = 'Your version of libcloud is {0}. '.format(libcloud.__version__)
errormsg += 'salt-cloud requires >= libcloud {0}'.format(
'.'.join([str(num) for num in reqver])
)
if why:
errormsg += ' for {0}'.format(why)
errormsg += '. Please upgrade.'
raise ImportError(errormsg)
def get_node(conn, name):
'''
Return a libcloud node for the named VM
'''
nodes = conn.list_nodes()
for node in nodes:
if node.name == name:
salt.utils.cloud.cache_node(salt.utils.cloud.simple_types_filter(node.__dict__), __active_provider_name__, __opts__)
return node
def avail_locations(conn=None, call=None):
'''
Return a dict of all available VM locations on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
locations = conn.list_locations()
ret = {}
for img in locations:
if isinstance(img.name, string_types):
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name)
ret[img_name] = {}
for attr in dir(img):
if attr.startswith('_'):
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, string_types):
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[img_name][attr] = attr_value
return ret
def avail_images(conn=None, call=None):
'''
Return a dict of all available VM images on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
images = conn.list_images()
ret = {}
for img in images:
if isinstance(img.name, string_types):
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name)
ret[img_name] = {}
for attr in dir(img):
if attr.startswith('_'):
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, string_types):
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[img_name][attr] = attr_value
return ret
def avail_sizes(conn=None, call=None):
'''
    Return a dict of all available VM sizes on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
sizes = conn.list_sizes()
ret = {}
for size in sizes:
if isinstance(size.name, string_types):
size_name = size.name.encode('ascii', 'salt-cloud-force-ascii')
else:
size_name = str(size.name)
ret[size_name] = {}
for attr in dir(size):
if attr.startswith('_'):
continue
            try:
                attr_value = getattr(size, attr)
            except Exception:
                # Skip attributes that cannot be read instead of reusing a stale value.
                continue
if isinstance(attr_value, string_types):
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[size_name][attr] = attr_value
return ret
def get_location(conn, vm_):
'''
Return the location object to use
'''
locations = conn.list_locations()
vm_location = config.get_cloud_config_value('location', vm_, __opts__).encode(
'ascii', 'salt-cloud-force-ascii'
)
for img in locations:
if isinstance(img.id, string_types):
img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
else:
img_id = str(img.id)
if isinstance(img.name, string_types):
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name)
if vm_location and vm_location in (img_id, img_name):
return img
raise SaltCloudNotFound(
'The specified location, {0!r}, could not be found.'.format(
vm_location
)
)
def get_image(conn, vm_):
'''
Return the image object to use
'''
images = conn.list_images()
vm_image = config.get_cloud_config_value('image', vm_, __opts__).encode(
'ascii', 'salt-cloud-force-ascii'
)
for img in images:
if isinstance(img.id, string_types):
img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
else:
img_id = str(img.id)
if isinstance(img.name, string_types):
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name)
if vm_image and vm_image in (img_id, img_name):
return img
raise SaltCloudNotFound(
'The specified image, {0!r}, could not be found.'.format(vm_image)
)
def get_size(conn, vm_):
'''
Return the VM's size object
'''
sizes = conn.list_sizes()
vm_size = config.get_cloud_config_value('size', vm_, __opts__)
if not vm_size:
return sizes[0]
for size in sizes:
if vm_size and str(vm_size) in (str(size.id), str(size.name)):
return size
raise SaltCloudNotFound(
'The specified size, {0!r}, could not be found.'.format(vm_size)
)
def script(vm_):
'''
Return the script deployment object
'''
return ScriptDeployment(
salt.utils.cloud.os_script(
config.get_cloud_config_value('os', vm_, __opts__),
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
)
def destroy(name, conn=None, call=None):
'''
Delete a single VM
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
salt.utils.cloud.fire_event(
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
{'name': name},
transport=__opts__['transport']
)
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
profiles = get_configured_provider()['profiles'] # pylint: disable=E0602
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        return False
profile = None
if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
profile = node.extra['metadata']['profile']
flush_mine_on_destroy = False
if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if flush_mine_on_destroy:
log.info('Clearing Salt Mine: {0}'.format(name))
mopts_ = salt.config.DEFAULT_MINION_OPTS
conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.minion_config(os.path.join(conf_path, 'minion'))
)
client = salt.client.get_local_client(mopts_)
minions = client.cmd(name, 'mine.flush')
log.info('Clearing Salt Mine: {0}, {1}'.format(name, flush_mine_on_destroy))
log.info('Destroying VM: {0}'.format(name))
ret = conn.destroy_node(node)
if ret:
log.info('Destroyed VM: {0}'.format(name))
# Fire destroy action
salt.utils.cloud.fire_event(
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
{'name': name},
transport=__opts__['transport']
)
if __opts__['delete_sshkeys'] is True:
public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips'))
if public_ips:
salt.utils.cloud.remove_sshkey(public_ips[0])
private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips'))
if private_ips:
salt.utils.cloud.remove_sshkey(private_ips[0])
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
return True
log.error('Failed to Destroy VM: {0}'.format(name))
return False
def reboot(name, conn=None):
'''
Reboot a single VM
'''
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        return False
log.info('Rebooting VM: {0}'.format(name))
ret = conn.reboot_node(node)
if ret:
log.info('Rebooted VM: {0}'.format(name))
# Fire reboot action
salt.utils.cloud.fire_event(
'event',
            '{0} has been rebooted'.format(name),
            'salt/cloud/{0}/rebooting'.format(name),
{'name': name},
transport=__opts__['transport']
)
return True
log.error('Failed to reboot VM: {0}'.format(name))
return False
def list_nodes(conn=None, call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
nodes = conn.list_nodes()
ret = {}
for node in nodes:
ret[node.name] = {
'id': node.id,
'image': node.image,
'name': node.name,
'private_ips': node.private_ips,
'public_ips': node.public_ips,
'size': node.size,
'state': node_state(node.state)
}
return ret
def list_nodes_full(conn=None, call=None):
'''
Return a list of the VMs that are on the provider, with all fields
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
nodes = conn.list_nodes()
ret = {}
for node in nodes:
pairs = {}
for key, value in zip(node.__dict__, six.itervalues(node.__dict__)):
pairs[key] = value
ret[node.name] = pairs
del ret[node.name]['driver']
salt.utils.cloud.cache_node_list(ret, __active_provider_name__.split(':')[0], __opts__)
return ret
def list_nodes_select(conn=None, call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
if not conn:
conn = get_conn() # pylint: disable=E0602
return salt.utils.cloud.list_nodes_select(
list_nodes_full(conn, 'function'), __opts__['query.selection'], call,
)
def show_instance(name, call=None):
'''
Show the details from the provider concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
nodes = list_nodes_full()
salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
return nodes[name]
def conn_has_method(conn, method_name):
'''
Find if the provided connection object has a specific method
'''
if method_name in dir(conn):
return True
log.error(
'Method {0!r} not yet supported!'.format(
method_name
)
)
return False
|
MultiStepDeployment,
|
iscsi_received_stats_info.py
|
from netapp.netapp_object import NetAppObject
class IscsiReceivedStatsInfo(NetAppObject):
"""
Counts for PDUs received.
"""
_data_out = None
@property
def data_out(self):
"""
Count of data out requests.
"""
return self._data_out
@data_out.setter
def data_out(self, val):
if val != None:
self.validate('data_out', val)
self._data_out = val
_scsi_task_mgt_cmd = None
@property
def scsi_task_mgt_cmd(self):
"""
Count of SCSI task management commands.
"""
return self._scsi_task_mgt_cmd
@scsi_task_mgt_cmd.setter
def scsi_task_mgt_cmd(self, val):
if val != None:
self.validate('scsi_task_mgt_cmd', val)
self._scsi_task_mgt_cmd = val
_login_req = None
@property
def login_req(self):
"""
Count of login requests.
"""
return self._login_req
@login_req.setter
def login_req(self, val):
if val != None:
self.validate('login_req', val)
self._login_req = val
_unknown = None
@property
def unknown(self):
"""
Count of unknown PDUs.
"""
return self._unknown
@unknown.setter
def unknown(self, val):
if val != None:
self.validate('unknown', val)
self._unknown = val
_nop_out = None
@property
def nop_out(self):
"""
Count of NOP Out.
"""
return self._nop_out
@nop_out.setter
def nop_out(self, val):
if val != None:
self.validate('nop_out', val)
self._nop_out = val
_scsi_cmd = None
@property
def scsi_cmd(self):
"""
Count of SCSI commands.
"""
return self._scsi_cmd
@scsi_cmd.setter
def scsi_cmd(self, val):
if val != None:
self.validate('scsi_cmd', val)
self._scsi_cmd = val
_snack = None
@property
def snack(self):
"""
Count of SNACK requests.
"""
return self._snack
@snack.setter
def snack(self, val):
if val != None:
self.validate('snack', val)
self._snack = val
_text_req = None
@property
def text_req(self):
"""
Count of text requests.
"""
return self._text_req
@text_req.setter
def text_req(self, val):
if val != None:
self.validate('text_req', val)
|
_total = None
@property
def total(self):
"""
Total PDUs received.
"""
return self._total
@total.setter
def total(self, val):
if val != None:
self.validate('total', val)
self._total = val
_logout_req = None
@property
def logout_req(self):
"""
Count of logout requests.
"""
return self._logout_req
@logout_req.setter
def logout_req(self, val):
if val != None:
self.validate('logout_req', val)
self._logout_req = val
@staticmethod
def get_api_name():
return "iscsi-received-stats-info"
@staticmethod
def get_desired_attrs():
return [
'data-out',
'scsi-task-mgt-cmd',
'login-req',
'unknown',
'nop-out',
'scsi-cmd',
'snack',
'text-req',
'total',
'logout-req',
]
def describe_properties(self):
return {
'data_out': { 'class': int, 'is_list': False, 'required': 'required' },
'scsi_task_mgt_cmd': { 'class': int, 'is_list': False, 'required': 'required' },
'login_req': { 'class': int, 'is_list': False, 'required': 'required' },
'unknown': { 'class': int, 'is_list': False, 'required': 'required' },
'nop_out': { 'class': int, 'is_list': False, 'required': 'required' },
'scsi_cmd': { 'class': int, 'is_list': False, 'required': 'required' },
'snack': { 'class': int, 'is_list': False, 'required': 'required' },
'text_req': { 'class': int, 'is_list': False, 'required': 'required' },
'total': { 'class': int, 'is_list': False, 'required': 'required' },
'logout_req': { 'class': int, 'is_list': False, 'required': 'required' },
}
|
self._text_req = val
|
table_pattern.go
|
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
|
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Raphael 'kena' Poss ([email protected])
package parser
import (
"bytes"
"fmt"
)
// Table patterns are used by e.g. GRANT statements, to designate
// zero, one or more table names. For example:
// GRANT ... ON foo ...
// GRANT ... ON * ...
// GRANT ... ON db.* ...
//
// The other syntax nodes hold a TablePattern reference. This is
// initially populated during parsing with an UnresolvedName, which
// can be transformed to either a TableName (single name) or
// AllTablesSelector instance (all tables of a given database) using
// NormalizeTablePattern().
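//
// For illustration (hypothetical values): `db.*` parses to an UnresolvedName
// holding Name("db") and UnqualifiedStar, which normalizes to
// &AllTablesSelector{Database: "db"}, while `db.t` normalizes to
// &TableName{DatabaseName: "db", TableName: "t"}.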
// TablePattern is the common interface to UnresolvedName, TableName
// and AllTablesSelector.
type TablePattern interface {
fmt.Stringer
NodeFormatter
NormalizeTablePattern() (TablePattern, error)
}
var _ TablePattern = UnresolvedName{}
var _ TablePattern = &TableName{}
var _ TablePattern = &AllTablesSelector{}
// NormalizeTablePattern resolves an UnresolvedName to either a
// TableName or AllTablesSelector.
func (n UnresolvedName) NormalizeTablePattern() (TablePattern, error) {
if len(n) == 0 || len(n) > 2 {
return nil, fmt.Errorf("invalid table name: %q", n)
}
var db Name
if len(n) > 1 {
dbName, ok := n[0].(Name)
if !ok {
return nil, fmt.Errorf("invalid database name: %q", n[0])
}
db = dbName
}
switch t := n[len(n)-1].(type) {
case UnqualifiedStar:
return &AllTablesSelector{Database: db}, nil
case Name:
if len(t) == 0 {
return nil, fmt.Errorf("empty table name: %q", n)
}
return &TableName{DatabaseName: db, TableName: t}, nil
default:
return nil, fmt.Errorf("invalid table pattern: %q", n)
}
}
// NormalizeTablePattern implements the TablePattern interface.
func (t *TableName) NormalizeTablePattern() (TablePattern, error) { return t, nil }
// AllTablesSelector corresponds to a selection of all
// tables in a database, e.g. when used with GRANT.
type AllTablesSelector struct {
Database Name
}
// Format implements the NodeFormatter interface.
func (at *AllTablesSelector) Format(buf *bytes.Buffer, f FmtFlags) {
if at.Database != "" {
FormatNode(buf, f, at.Database)
buf.WriteByte('.')
}
buf.WriteByte('*')
}
func (at *AllTablesSelector) String() string { return AsString(at) }
// NormalizeTablePattern implements the TablePattern interface.
func (at *AllTablesSelector) NormalizeTablePattern() (TablePattern, error) { return at, nil }
// QualifyWithDatabase adds an indirection for the database, if it's missing.
// It transforms: * -> database.*
func (at *AllTablesSelector) QualifyWithDatabase(database string) error {
if at.Database != "" {
return nil
}
if database == "" {
return fmt.Errorf("no database specified: %q", at)
}
at.Database = Name(database)
return nil
}
// TablePatterns implement a comma-separated list of table patterns.
// Used by e.g. the GRANT statement.
type TablePatterns []TablePattern
// Format implements the NodeFormatter interface.
func (tt TablePatterns) Format(buf *bytes.Buffer, f FmtFlags) {
for i, t := range tt {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(t.String())
}
}
| |
combine_logs.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Combine logs from multiple techcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import pathlib
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "techcoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'testdir', nargs='?', default='',
help=('temporary test directory to combine logs from. '
'Defaults to the most recent'))
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args = parser.parse_args()
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
testdir = args.testdir or find_latest_test_dir()
if not testdir:
print("No test directories found")
sys.exit(1)
if not args.testdir:
print("Opening latest test directory: {}".format(testdir), file=sys.stderr)
colors = defaultdict(lambda: '')
if args.color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
log_events = read_logs(testdir)
if args.html:
print_logs_html(log_events)
else:
print_logs_plain(log_events, colors)
print_node_warnings(testdir, colors)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
# Find out what the folder is called that holds the debug.log file
glob = pathlib.Path(tmp_dir).glob('node0/**/debug.log')
path = next(glob, None)
if path:
assert next(glob, None) is None # more than one debug.log, should never happen
chain = re.search(r'node0/(.+?)/debug\.log$', path.as_posix()).group(1) # extract the chain name
else:
chain = 'regtest' # fallback to regtest (should only happen when none exists)
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/{}/debug.log".format(tmp_dir, i, chain)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
|
def print_node_warnings(tmp_dir, colors):
"""Print nodes' errors and warnings"""
warnings = []
for stream in ['stdout', 'stderr']:
for i in itertools.count():
folder = "{}/node{}/{}".format(tmp_dir, i, stream)
if not os.path.isdir(folder):
break
for (_, _, fns) in os.walk(folder):
for fn in fns:
warning = pathlib.Path('{}/{}'.format(folder, fn)).read_text().strip()
if warning:
warnings.append(("node{} {}".format(i, stream), warning))
print()
for w in warnings:
print("{} {} {} {}".format(colors[w[0].split()[0]], w[0], w[1], colors["reset"]))
def find_latest_test_dir():
"""Returns the latest tmpfile test directory prefix."""
tmpdir = tempfile.gettempdir()
def join_tmp(basename):
return os.path.join(tmpdir, basename)
def is_valid_test_tmpdir(basename):
fullpath = join_tmp(basename)
return (
os.path.isdir(fullpath)
and basename.startswith(TMPDIR_PREFIX)
and os.access(fullpath, os.R_OK)
)
testdir_paths = [
join_tmp(name) for name in os.listdir(tmpdir) if is_valid_test_tmpdir(name)
]
return max(testdir_paths, key=os.path.getmtime) if testdir_paths else None
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
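    # Illustrative example: the continuation lines of a multi-line traceback in
    # debug.log carry no timestamp, so they are appended to the event that began
    # at the previous timestamped line.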
try:
with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
timestamp = time_match.group()
if time_match.group(1) is None:
# timestamp does not have microseconds. Add zeroes.
timestamp_micro = timestamp.replace("Z", ".000000Z")
line = line.replace(timestamp, timestamp_micro)
timestamp = timestamp_micro
event = line
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
# Add the line. Prefix with space equivalent to the source + timestamp so log lines are aligned
event += " " + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs_plain(log_events, colors):
"""Renders the iterator of log events into text."""
for event in log_events:
lines = event.event.splitlines()
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, lines[0], colors["reset"]))
if len(lines) > 1:
for line in lines[1:]:
print("{0}{1}{2}".format(colors[event.source.rstrip()], line, colors["reset"]))
def print_logs_html(log_events):
"""Renders the iterator of log events into html."""
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
|
return heapq.merge(*[get_log_events(source, f) for source, f in files])
|
dlangt.go
|
// Copyright ©2020 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testlapack
import (
"fmt"
"math"
"testing"
"math/rand"
"github.com/gopherd/gonum/floats"
"github.com/gopherd/gonum/lapack"
)
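// Dlangter wraps Dlangt, which computes the requested norm of an n×n tridiagonal
// matrix given as its sub-diagonal dl, main diagonal d and super-diagonal du
// (the dense reconstruction in dlangtTest below uses the same layout).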
type Dlangter interface {
Dlangt(norm lapack.MatrixNorm, n int, dl, d, du []float64) float64
}
func DlangtTest(t *testing.T, impl Dlangter) {
rnd := rand.New(rand.NewSource(1))
for _, norm := range []lapack.MatrixNorm{lapack.MaxAbs, lapack.MaxRowSum, lapack.MaxColumnSum, lapack.Frobenius} {
t.Run(normToString(norm), func(t *testing.T) {
for _, n := range []int{0, 1, 2, 3, 4, 5, 10} {
for iter := 0; iter < 10; iter++ {
dlangtTest(t, impl, rnd, norm, n)
}
}
})
}
}
func d
|
t *testing.T, impl Dlangter, rnd *rand.Rand, norm lapack.MatrixNorm, n int) {
const (
tol = 1e-14
extra = 10
)
name := fmt.Sprintf("n=%v", n)
// Generate three random diagonals.
dl := randomSlice(n+extra, rnd)
dlCopy := make([]float64, len(dl))
copy(dlCopy, dl)
d := randomSlice(n+1+extra, rnd)
// Sometimes put a NaN into the matrix.
if n > 0 && rnd.Float64() < 0.5 {
d[rnd.Intn(n)] = math.NaN()
}
dCopy := make([]float64, len(d))
copy(dCopy, d)
du := randomSlice(n+extra, rnd)
duCopy := make([]float64, len(du))
copy(duCopy, du)
// Deal with zero-sized matrices early.
if n == 0 {
got := impl.Dlangt(norm, n, nil, nil, nil)
if got != 0 {
t.Errorf("%v: unexpected result for zero-sized matrix with nil input", name)
}
got = impl.Dlangt(norm, n, dl, d, du)
if !floats.Same(dl, dlCopy) {
t.Errorf("%v: unexpected modification in dl", name)
}
if !floats.Same(d, dCopy) {
t.Errorf("%v: unexpected modification in d", name)
}
if !floats.Same(du, duCopy) {
t.Errorf("%v: unexpected modification in du", name)
}
if got != 0 {
t.Errorf("%v: unexpected result for zero-sized matrix with non-nil input", name)
}
return
}
// Generate a dense representation of the matrix and compute the wanted result.
a := zeros(n, n, n)
for i := 0; i < n-1; i++ {
a.Data[i*a.Stride+i] = d[i]
a.Data[i*a.Stride+i+1] = du[i]
a.Data[(i+1)*a.Stride+i] = dl[i]
}
a.Data[(n-1)*a.Stride+n-1] = d[n-1]
got := impl.Dlangt(norm, n, dl, d, du)
if !floats.Same(dl, dlCopy) {
t.Errorf("%v: unexpected modification in dl", name)
}
if !floats.Same(d, dCopy) {
t.Errorf("%v: unexpected modification in d", name)
}
if !floats.Same(du, duCopy) {
t.Errorf("%v: unexpected modification in du", name)
}
want := dlange(norm, n, n, a.Data, a.Stride)
if math.IsNaN(want) {
if !math.IsNaN(got) {
t.Errorf("%v: unexpected result with NaN element; got %v, want %v", name, got, want)
}
return
}
if norm == lapack.MaxAbs {
if got != want {
t.Errorf("%v: unexpected result; got %v, want %v", name, got, want)
}
return
}
diff := math.Abs(got - want)
if diff > tol {
t.Errorf("%v: unexpected result; got %v, want %v, diff=%v", name, got, want, diff)
}
}
|
langtTest(
|
inline_response_200_1.rs
|
/*
* NetBox API
*
* API to access NetBox
*
* The version of the OpenAPI document: 3.0
*
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InlineResponse2001 {
#[serde(rename = "count")]
pub count: i32,
#[serde(rename = "next", skip_serializing_if = "Option::is_none")]
pub next: Option<String>,
#[serde(rename = "previous", skip_serializing_if = "Option::is_none")]
pub previous: Option<String>,
#[serde(rename = "results")]
pub results: Vec<crate::models::CircuitType>,
}
impl InlineResponse2001 {
pub fn new(count: i32, results: Vec<crate::models::CircuitType>) -> InlineResponse2001
|
}
|
{
InlineResponse2001 {
count,
next: None,
previous: None,
results,
}
}
|
test_artificial_1024_inv_constant_30__20.py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
|
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 30, transform = "inv", sigma = 0.0, exog_count = 20, ar_order = 0);
|
|
apis.go
|
package apis
import (
"fmt"
"github.com/gofiber/fiber/v2"
)
func UploadAPI(c *fiber.Ctx) error
|
{
file, err := c.FormFile("file")
if err != nil {
return err
}
	if err := c.SaveFile(file, fmt.Sprintf("media/%s", file.Filename)); err != nil {
		return err
	}
return c.SendString("Upload success 👋!")
}
|
|
test_translate.py
|
from unittest import TestCase, main
from cogent3 import DNA, make_aligned_seqs, make_unaligned_seqs
from cogent3.app.composable import NotCompleted
from cogent3.app.translate import (
best_frame,
get_code,
get_fourfold_degenerate_sets,
select_translatable,
translate_frames,
translate_seqs,
)
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Alpha"
class TestTranslatable(TestCase):
"""testing translation functions"""
def test_best_frame(self):
"""correctly identify best frame with/without allowing rc"""
make_seq = DNA.make_seq
seq = make_seq("ATGCTAACATAAA", name="fake1")
f = best_frame(seq)
self.assertEqual(f, 1)
f = best_frame(seq, require_stop=True)
self.assertEqual(f, 1)
# a challenging seq, translatable in 1 and 3 frames, ending on stop in
# frame 1. Should return frame 1 irrespective of require_stop
seq = make_seq("ATGTTACGGACGATGCTGAAGTCGAAGATCCACCGCGCCACGGTGACCTGCTGA")
f = best_frame(seq)
self.assertEqual(f, 1)
# a rc seq
f = best_frame(seq)
seq = make_seq(
"AATATAAATGCCAGCTCATTACAGCATGAGAACAGCAGTTTATTACTTCATAAAGTCATA", name="fake2"
)
f = best_frame(seq, allow_rc=True)
self.assertEqual(f, 1)
with self.assertRaises(ValueError):
f = best_frame(seq, allow_rc=True, require_stop=True)
rc = seq.rc()
f = best_frame(rc, allow_rc=True)
self.assertEqual(f, -1)
def test_select_translatable(self):
"""correctly get translatable seqs"""
data = {
"a": "AATATAAATGCCAGCTCATTACAGCATGAGAACA" "GCAGTTTATTACTTCATAAAGTCATA",
|
"rc": "TATGACTTTATGAAGTAATAAACTGCTGTTCTCA" "TGCTGTAATGAGCTGGCATTTATATT",
}
seqs = make_unaligned_seqs(data=data, moltype=DNA)
trans = select_translatable(allow_rc=False)
tr = trans(seqs)
ex = data.copy()
ex.pop("rc")
self.assertEqual(tr.to_dict(), ex)
trans = select_translatable(allow_rc=True)
tr = trans(seqs)
ex = data.copy()
ex["rc"] = data["a"]
self.assertEqual(tr.to_dict(), ex)
# if seqs not translatable returns NotCompletedResult
data = dict(a="TAATTGATTAA", b="GCAGTTTATTA")
seqs = make_unaligned_seqs(data=data, moltype=DNA)
        got = select_translatable(allow_rc=False)(seqs)
        self.assertIsInstance(got, NotCompleted)
def test_translate_frames(self):
"""returns translated sequences"""
seq = DNA.make_seq("ATGCTGACATAAA", name="fake1")
tr = translate_frames(seq)
self.assertEqual(tr, ["MLT*", "C*HK", "ADI"])
# with the bacterial nuclear and plant plastid code
tr = translate_frames(seq, gc="Euplotid Nuclear")
self.assertEqual(tr, ["MLT*", "CCHK", "ADI"])
class TestTranslate(TestCase):
def test_translate_seqcoll(self):
"""correctly translate a sequence collection"""
seqs = dict(a="ATGAGG", b="ATGTAA")
seqs = make_unaligned_seqs(seqs)
# trim terminal stops
translater = translate_seqs()
aa = translater(seqs)
self.assertEqual(aa.to_dict(), dict(a="MR", b="M"))
self.assertEqual(aa.moltype.label, "protein")
# don't trim terminal stops, returns NotCompleted
translater = translate_seqs(trim_terminal_stop=False)
aa = translater(seqs)
self.assertIsInstance(aa, NotCompleted)
def test_translate_aln(self):
"""correctly translates alignments"""
data = dict(a="ATGAGGCCC", b="ATGTTT---")
# an array alignment
aln = make_aligned_seqs(data)
translater = translate_seqs()
aa = translater(aln)
self.assertEqual(aa.to_dict(), dict(a="MRP", b="MF-"))
self.assertEqual(aa.moltype.label, "protein")
self.assertIsInstance(aa, type(aln))
# Alignment
aln = aln.to_type(array_align=True)
aa = translater(aln)
self.assertEqual(aa.to_dict(), dict(a="MRP", b="MF-"))
self.assertEqual(aa.moltype.label, "protein")
self.assertIsInstance(aa, type(aln))
class TestFourFoldDegen(TestCase):
def test_get_fourfold_degenerate_sets(self):
"""correctly identify 4-fold degenerate codons"""
# using straight characters
expect = set()
for di in "GC", "GG", "CT", "CC", "TC", "CG", "AC", "GT":
expect.update([frozenset([di + n for n in "ACGT"])])
for i in range(1, 3):
got = get_fourfold_degenerate_sets(get_code(i), as_indices=False)
self.assertEqual(got, expect)
with self.assertRaises(AssertionError):
# as_indices requires an alphabet
get_fourfold_degenerate_sets(get_code(1), as_indices=True)
expect = set()
for di in "GC", "GG", "CT", "CC", "TC", "CG", "AC", "GT":
codons = list(
map(
lambda x: tuple(DNA.alphabet.to_indices(x)),
[di + n for n in "ACGT"],
)
)
expect.update([frozenset(codons)])
for i in range(1, 3):
got = get_fourfold_degenerate_sets(
get_code(i), alphabet=DNA.alphabet, as_indices=True
)
self.assertEqual(got, expect)
if __name__ == "__main__":
main()
| |
mod.rs
|
// TODO: Remove allow
#[allow(dead_code)]
mod envelope;
// TODO: Remove allow
#[allow(dead_code)]
mod parse;
// TODO: Remove allow
#[allow(dead_code)]
mod sign;
// TODO: Remove allow
#[allow(dead_code)]
mod verify;
// TODO: Remove allow
#[allow(unused_imports)]
pub(crate) use envelope::{Algorithm, CompactHeader, Header, ProtectedHeader, Signature, JWS};
// TODO: Remove allow
#[allow(unused_imports)]
pub(crate) use sign::{sign, sign_compact};
// TODO: Remove allow
#[allow(unused_imports)]
pub(crate) use parse::{parse, parse_compact, ParsedCompactJWS, ParsedJWS};
#[cfg(test)]
mod tests {
use askar_crypto::{alg::ed25519::Ed25519KeyPair, jwk::FromJwk};
use crate::jws::{self, Algorithm};
#[test]
fn demo_works()
|
}
|
{
// Identifier of Alice key
let alice_kid = "did:example:alice#key-1";
// Alice private key
let alice_key = Ed25519KeyPair::from_jwk(
r#"
{
"kty":"OKP",
"d":"pFRUKkyzx4kHdJtFSnlPA9WzqkDT1HWV0xZ5OYZd2SY",
"crv":"Ed25519",
"x":"G-boxFB6vOZBu-wXkm-9Lh79I8nf9Z50cILaOgKKGww"
}
"#,
)
.expect("Unable from_jwk");
// Alice public key
let alice_pkey = Ed25519KeyPair::from_jwk(
r#"
{
"kty":"OKP",
"crv":"Ed25519",
"x":"G-boxFB6vOZBu-wXkm-9Lh79I8nf9Z50cILaOgKKGww"
}
"#,
)
.expect("Unable from_jwk");
// Message payload
let payload = "Hello World!";
// Produce signed message
let msg = jws::sign(
payload.as_bytes(),
(alice_kid, &alice_key),
Algorithm::EdDSA,
)
.expect("unable sign");
// Parse message
let mut buf = vec![];
let msg = jws::parse(&msg, &mut buf).expect("Unable parse");
// Verify signature
let valid = msg
.verify((alice_kid, &alice_pkey))
.expect("Unable verify.");
assert!(valid);
}
|
test_worker_aio.py
|
# -*- coding: utf-8 -*-
import aiounittest
from datetime import datetime
from .fixtures_aio import fixture_data, Worker, Workers, Account
from graphenecommon import exceptions
class Testcases(aiounittest.AsyncTestCase):
def setUp(self):
fixture_data()
async def test_worker(self):
w = await Worker("1.14.139")
self.assertIsInstance(w["work_end_date"], datetime)
self.assertIsInstance(w["work_begin_date"], datetime)
self.assertIsInstance(w["work_begin_date"], datetime)
self.assertIsInstance(w["daily_pay"], int)
account = await w.account
self.assertIsInstance(account, Account)
self.assertEqual(account["id"], "1.2.100")
await Worker(w)
async def test_nonexist(self):
with self.assertRaises(exceptions.WorkerDoesNotExistsException):
await Worker("foobar")
async def test_workers(self):
|
ws = await Workers()
self.assertEqual(len(ws), 2)
|
|
model_transformer.py
|
"""
Based on the implementation of https://github.com/jadore801120/attention-is-all-you-need-pytorch
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from maskrcnn_benchmark.modeling.utils import cat
from .utils_motifs import obj_edge_vectors, to_onehot, nms_overlaps, encode_box_info
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
"""
Args:
q (bsz, len_q, dim_q)
k (bsz, len_k, dim_k)
v (bsz, len_v, dim_v)
Note: len_k==len_v, and dim_q==dim_k
Returns:
output (bsz, len_q, dim_v)
attn (bsz, len_q, len_k)
"""
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k)
self.w_ks = nn.Linear(d_model, n_head * d_k)
self.w_vs = nn.Linear(d_model, n_head * d_v)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
"""
Args:
q (bsz, len_q, dim_q)
k (bsz, len_k, dim_k)
v (bsz, len_v, dim_v)
Note: len_k==len_v, and dim_q==dim_k
Returns:
output (bsz, len_q, d_model)
attn (bsz, len_q, len_k)
"""
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size() # len_k==len_v
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output, attn
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise
self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
"""
Merge adjacent information. Equal to linear layer if kernel size is 1
Args:
x (bsz, len, dim)
Returns:
output (bsz, len, dim)
"""
residual = x
output = x.transpose(1, 2)
output = self.w_2(F.relu(self.w_1(output)))
output = output.transpose(1, 2)
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
class EncoderLayer(nn.Module):
''' Compose with two layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask)
enc_output *= non_pad_mask.float()
enc_output = self.pos_ffn(enc_output)
enc_output *= non_pad_mask.float()
return enc_output, enc_slf_attn
class TransformerEncoder(nn.Module):
"""
    An encoder model with a self-attention mechanism.
"""
def __init__(self, n_layers, n_head, d_k, d_v, d_model, d_inner, dropout=0.1):
super().__init__()
self.layer_stack = nn.ModuleList([
EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)])
def forward(self, input_feats, num_objs):
"""
Args:
input_feats [Tensor] (#total_box, d_model) : bounding box features of a batch
num_objs [list of int] (bsz, ) : number of bounding box of each image
Returns:
enc_output [Tensor] (#total_box, d_model)
"""
original_input_feats = input_feats
input_feats = input_feats.split(num_objs, dim=0)
input_feats = nn.utils.rnn.pad_sequence(input_feats, batch_first=True)
# -- Prepare masks
bsz = len(num_objs)
device = input_feats.device
pad_len = max(num_objs)
num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len)
slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len)
non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) # (bsz, pad_len, 1)
# -- Forward
enc_output = input_feats
for enc_layer in self.layer_stack:
enc_output, enc_slf_attn = enc_layer(
enc_output,
non_pad_mask=non_pad_mask,
slf_attn_mask=slf_attn_mask)
enc_output = enc_output[non_pad_mask.squeeze(-1)]
return enc_output
class TransformerContext(nn.Module):
def __init__(self, config, obj_classes, rel_classes, in_channels):
super().__init__()
self.cfg = config
# setting parameters
if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX:
self.mode = 'predcls' if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL else 'sgcls'
else:
self.mode = 'sgdet'
self.obj_classes = obj_classes
self.rel_classes = rel_classes
self.num_obj_cls = len(obj_classes)
self.num_rel_cls = len(rel_classes)
self.in_channels = in_channels
self.obj_dim = in_channels
self.embed_dim = self.cfg.MODEL.ROI_RELATION_HEAD.EMBED_DIM
self.hidden_dim = self.cfg.MODEL.ROI_RELATION_HEAD.CONTEXT_HIDDEN_DIM
self.nms_thresh = self.cfg.TEST.RELATION.LATER_NMS_PREDICTION_THRES
self.dropout_rate = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.DROPOUT_RATE
self.obj_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.OBJ_LAYER
self.edge_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.REL_LAYER
self.num_head = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.NUM_HEAD
self.inner_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.INNER_DIM
self.k_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.KEY_DIM
self.v_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.VAL_DIM
        # the following word embedding layers should be initialized with glove.6B before use
embed_vecs = obj_edge_vectors(self.obj_classes, wv_dir=self.cfg.GLOVE_DIR, wv_dim=self.embed_dim)
self.obj_embed1 = nn.Embedding(self.num_obj_cls, self.embed_dim)
self.obj_embed2 = nn.Embedding(self.num_obj_cls, self.embed_dim)
with torch.no_grad():
self.obj_embed1.weight.copy_(embed_vecs, non_blocking=True)
self.obj_embed2.weight.copy_(embed_vecs, non_blocking=True)
# position embedding
self.bbox_embed = nn.Sequential(*[
nn.Linear(9, 32), nn.ReLU(inplace=True), nn.Dropout(0.1),
nn.Linear(32, 128), nn.ReLU(inplace=True), nn.Dropout(0.1),
])
self.lin_obj = nn.Linear(self.in_channels + self.embed_dim + 128, self.hidden_dim)
self.lin_edge = nn.Linear(self.embed_dim + self.hidden_dim + self.in_channels, self.hidden_dim)
self.out_obj = nn.Linear(self.hidden_dim, self.num_obj_cls)
self.context_obj = TransformerEncoder(self.obj_layer, self.num_head, self.k_dim,
self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate)
self.context_edge = TransformerEncoder(self.edge_layer, self.num_head, self.k_dim,
self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate)
def forward(self, roi_features, proposals, logger=None):
# labels will be used in DecoderRNN during training
use_gt_label = self.training or self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL
obj_labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0) if use_gt_label else None
# label/logits embedding will be used as input
if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL:
obj_embed = self.obj_embed1(obj_labels)
else:
obj_logits = cat([proposal.get_field("predict_logits") for proposal in proposals], dim=0).detach()
obj_embed = F.softmax(obj_logits, dim=1) @ self.obj_embed1.weight
# bbox embedding will be used as input
assert proposals[0].mode == 'xyxy'
pos_embed = self.bbox_embed(encode_box_info(proposals))
# encode objects with transformer
obj_pre_rep = cat((roi_features, obj_embed, pos_embed), -1)
num_objs = [len(p) for p in proposals]
obj_pre_rep = self.lin_obj(obj_pre_rep)
obj_feats = self.context_obj(obj_pre_rep, num_objs)
# predict obj_dists and obj_preds
if self.mode == 'predcls':
obj_preds = obj_labels
obj_dists = to_onehot(obj_preds, self.num_obj_cls)
edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_labels)), dim=-1)
else:
obj_dists = self.out_obj(obj_feats)
use_decoder_nms = self.mode == 'sgdet' and not self.training
if use_decoder_nms:
boxes_per_cls = [proposal.get_field('boxes_per_cls') for proposal in proposals]
obj_preds = self.nms_per_cls(obj_dists, boxes_per_cls, num_objs)
else:
obj_preds = obj_dists[:, 1:].max(1)[1] + 1
edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_preds)), dim=-1)
# edge context
edge_pre_rep = self.lin_edge(edge_pre_rep)
edge_ctx = self.context_edge(edge_pre_rep, num_objs)
return obj_dists, obj_preds, edge_ctx
def
|
(self, obj_dists, boxes_per_cls, num_objs):
obj_dists = obj_dists.split(num_objs, dim=0)
obj_preds = []
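        # Greedy per-class NMS: repeatedly take the highest-scoring (box, class)
        # pair, assign that class to the box, then suppress that class for every
        # box overlapping it above the NMS threshold.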
for i in range(len(num_objs)):
is_overlap = nms_overlaps(boxes_per_cls[i]).cpu().numpy() >= self.nms_thresh # (#box, #box, #class)
out_dists_sampled = F.softmax(obj_dists[i], -1).cpu().numpy()
out_dists_sampled[:, 0] = -1
out_label = obj_dists[i].new(num_objs[i]).fill_(0)
            for _ in range(num_objs[i]):
box_ind, cls_ind = np.unravel_index(out_dists_sampled.argmax(), out_dists_sampled.shape)
out_label[int(box_ind)] = int(cls_ind)
out_dists_sampled[is_overlap[box_ind,:,cls_ind], cls_ind] = 0.0
out_dists_sampled[box_ind] = -1.0 # This way we won't re-sample
obj_preds.append(out_label.long())
obj_preds = torch.cat(obj_preds, dim=0)
return obj_preds
|
nms_per_cls
|
filter_raster_features_by_area.rs
|
/*
This tool is part of the WhiteboxTools geospatial analysis library.
Authors: Dr. John Lindsay
Created: 22/06/2017
Last Modified: 18/10/2019
License: MIT
*/
use whitebox_raster::*;
use crate::tools::*;
use rayon::prelude::*;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::path;
/// This tool takes an input raster (`--input`) containing integer-labelled features, such as the output of the `Clump` tool,
/// and removes all features that are smaller than a user-specified size (`--threshold`), measured in grid cells. The
/// user must specify the replacement value for removed features using the `--background` parameter, which can be either
/// `zero` or `nodata`.
///
/// # See Also
/// `Clump`
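///
/// For example (illustrative values), running the tool with `--threshold=25` and
/// `--background=nodata` replaces every labelled feature covering fewer than 25
/// grid cells with NoData while leaving larger features unchanged.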
pub struct FilterRasterFeaturesByArea {
name: String,
description: String,
toolbox: String,
parameters: Vec<ToolParameter>,
example_usage: String,
}
impl FilterRasterFeaturesByArea {
pub fn new() -> FilterRasterFeaturesByArea {
// public constructor
let name = "FilterRasterFeaturesByArea".to_string();
let toolbox = "GIS Analysis".to_string();
let description = "Removes small-area features from a raster.".to_string();
let mut parameters = vec![];
parameters.push(ToolParameter {
name: "Input File".to_owned(),
flags: vec!["-i".to_owned(), "--input".to_owned()],
description: "Input raster file.".to_owned(),
parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster),
default_value: None,
optional: false,
});
parameters.push(ToolParameter {
name: "Output File".to_owned(),
flags: vec!["-o".to_owned(), "--output".to_owned()],
description: "Output raster file.".to_owned(),
parameter_type: ParameterType::NewFile(ParameterFileType::Raster),
default_value: None,
optional: false,
});
parameters.push(ToolParameter {
name: "Area Threshold (grid cells)".to_owned(),
flags: vec!["--threshold".to_owned()],
description: "Remove features with fewer grid cells than this threshold value."
.to_owned(),
parameter_type: ParameterType::Integer,
default_value: None,
optional: false,
});
parameters.push(ToolParameter {
name: "Background Value".to_owned(),
flags: vec!["--background".to_owned()],
description: "Background value.".to_owned(),
parameter_type: ParameterType::OptionList(vec!["zero".to_owned(), "nodata".to_owned()]),
default_value: Some("zero".to_owned()),
optional: true,
});
let sep: String = path::MAIN_SEPARATOR.to_string();
let p = format!("{}", env::current_dir().unwrap().display());
let e = format!("{}", env::current_exe().unwrap().display());
let mut short_exe = e
.replace(&p, "")
.replace(".exe", "")
.replace(".", "")
.replace(&sep, "");
if e.contains(".exe") {
short_exe += ".exe";
}
let usage = format!(
">>.*{} -r={} -v --wd=\"*path*to*data*\" -i=input.tif -o=output.tif --background=zero",
short_exe, name
)
.replace("*", &sep);
FilterRasterFeaturesByArea {
name: name,
description: description,
toolbox: toolbox,
parameters: parameters,
example_usage: usage,
}
}
}
impl WhiteboxTool for FilterRasterFeaturesByArea {
fn get_source_file(&self) -> String {
String::from(file!())
}
fn get_tool_name(&self) -> String {
self.name.clone()
}
fn get_tool_description(&self) -> String {
self.description.clone()
}
fn get_tool_parameters(&self) -> String {
match serde_json::to_string(&self.parameters) {
Ok(json_str) => return format!("{{\"parameters\":{}}}", json_str),
Err(err) => return format!("{:?}", err),
}
}
fn
|
(&self) -> String {
self.example_usage.clone()
}
fn get_toolbox(&self) -> String {
self.toolbox.clone()
}
fn run<'a>(
&self,
args: Vec<String>,
working_directory: &'a str,
verbose: bool,
) -> Result<(), Error> {
let mut input_file = String::new();
let mut output_file = String::new();
let mut threshold = 0usize;
let mut background_value = String::new();
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-threshold" {
threshold = if keyval {
vec[1]
.to_string()
.parse::<usize>()
.expect(&format!("Error parsing {}", flag_val))
} else {
args[i + 1]
.to_string()
.parse::<usize>()
.expect(&format!("Error parsing {}", flag_val))
};
} else if flag_val == "-background" {
let background_str = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
background_value = if background_str.contains("z") {
"zero".to_string()
} else {
"nodata".to_string()
};
}
}
if verbose {
let tool_name = self.get_tool_name();
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if verbose {
println!("Reading data...")
};
let input = Raster::new(&input_file, "r")?;
let start = Instant::now();
let nodata = input.configs.nodata;
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let back_val = if background_value == "zero" {
0f64
} else {
nodata
};
// Calculate the area of features in the input image,
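        // Features are integer-labelled (e.g. `Clump` output), so a histogram of
        // cell counts per label value gives each feature's area in grid cells.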
let min_val = input.configs.minimum;
let max_val = input.configs.maximum;
let mut histo = vec![0usize; (max_val - min_val) as usize + 1];
let mut value: f64;
let mut bin: usize;
for row in 0..rows {
for col in 0..columns {
value = input.get_value(row, col);
if value != nodata {
bin = (value - min_val) as usize;
histo[bin] += 1;
}
}
if verbose {
progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Progress: {}%", progress);
old_progress = progress;
}
}
}
let mut output = Raster::initialize_using_file(&output_file, &input);
for row in 0..rows {
let values = input.get_row_data(row);
let new_vals = values
.par_iter()
.map(|value| {
let mut ret_value = nodata;
if *value != nodata {
let bin = (value - min_val) as usize;
if histo[bin] >= threshold {
ret_value = *value;
} else {
ret_value = back_val;
}
}
ret_value
})
.collect();
output.set_row_data(row, new_vals);
if verbose {
progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Progress: {}%", progress);
old_progress = progress;
}
}
}
let elapsed_time = get_formatted_elapsed_time(start);
output.configs.palette = "qual.plt".to_string();
output.add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
self.get_tool_name()
));
output.add_metadata_entry(format!("Input file: {}", input_file));
output.add_metadata_entry(format!("Threshold: {}", threshold));
output.add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
if verbose {
println!("Saving data...")
};
let _ = match output.write() {
Ok(_) => {
if verbose {
println!("Output file written")
}
}
Err(e) => return Err(e),
};
if verbose {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
Ok(())
}
}
|
get_example_usage
|
bench.py
|
# Author: Lisandro Dalcin
# Contact: [email protected]
"""Run MPI benchmarks and tests."""
import sys as _sys
def helloworld(comm, args=None, verbose=True):
"""Hello, World! using MPI."""
# pylint: disable=import-outside-toplevel
from argparse import ArgumentParser
parser = ArgumentParser(prog=__name__ + " helloworld")
parser.add_argument("-q", "--quiet", action="store_false",
dest="verbose", default=verbose)
options = parser.parse_args(args)
from . import MPI
size = comm.Get_size()
rank = comm.Get_rank()
name = MPI.Get_processor_name()
message = ("Hello, World! I am process %*d of %d on %s.\n"
% (len(str(size - 1)), rank, size, name))
comm.Barrier()
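    # The zero-byte Recv/Send chain below makes each rank wait for its
    # predecessor before printing, so greetings appear in rank order.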
if rank > 0:
comm.Recv([None, 'B'], rank - 1)
if options.verbose:
_sys.stdout.write(message)
_sys.stdout.flush()
if rank < size - 1:
comm.Send([None, 'B'], rank + 1)
comm.Barrier()
return message
def ringtest(comm, args=None, verbose=True):
"""Time a message going around the ring of processes."""
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
# pylint: disable=import-outside-toplevel
from argparse import ArgumentParser
parser = ArgumentParser(prog=__name__ + " ringtest")
parser.add_argument("-q", "--quiet", action="store_false",
dest="verbose", default=verbose)
parser.add_argument("-n", "--size", type=int, default=1, dest="size",
help="message size")
parser.add_argument("-s", "--skip", type=int, default=0, dest="skip",
help="number of warm-up iterations")
parser.add_argument("-l", "--loop", type=int, default=1, dest="loop",
help="number of iterations")
options = parser.parse_args(args)
def ring(comm, n=1, loop=1, skip=0):
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
from array import array
from . import MPI
iterations = list(range((loop + skip)))
size = comm.Get_size()
rank = comm.Get_rank()
source = (rank - 1) % size
dest = (rank + 1) % size
Sendrecv = comm.Sendrecv
Send = comm.Send
Recv = comm.Recv
Wtime = MPI.Wtime
sendmsg = array('B', [+42]) * n
recvmsg = array('B', [0x0]) * n
if size == 1:
for i in iterations:
if i == skip:
tic = Wtime()
Sendrecv(sendmsg, dest, 0,
recvmsg, source, 0)
else:
if rank == 0:
for i in iterations:
if i == skip:
tic = Wtime()
Send(sendmsg, dest, 0)
Recv(recvmsg, source, 0)
else:
sendmsg = recvmsg
for i in iterations:
if i == skip:
tic = Wtime()
Recv(recvmsg, source, 0)
Send(sendmsg, dest, 0)
toc = Wtime()
if comm.rank == 0 and sendmsg != recvmsg: # pragma: no cover
import warnings
import traceback
try:
warnings.warn("received message does not match!")
except UserWarning:
traceback.print_exc()
comm.Abort(2)
return toc - tic
size = getattr(options, 'size', 1)
loop = getattr(options, 'loop', 1)
skip = getattr(options, 'skip', 0)
comm.Barrier()
elapsed = ring(comm, size, loop, skip)
if options.verbose and comm.rank == 0:
message = ("time for %d loops = %g seconds (%d processes, %d bytes)\n"
% (loop, elapsed, comm.size, size))
_sys.stdout.write(message)
_sys.stdout.flush()
return elapsed
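# Hedged usage sketch (flag names come from the parser defined above; the
# launcher mpiexec is an assumption about the local MPI installation):
#
#   mpiexec -n 4 python -m mpi4py.bench ringtest -n 1024 -s 5 -l 100
#
# sends a 1024-byte message around a 4-process ring for 100 timed loops
# after 5 warm-up iterations, and rank 0 reports the elapsed time.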
def
|
(args=None):
"""Entry-point for ``python -m mpi4py.bench``."""
# pylint: disable=import-outside-toplevel
from argparse import ArgumentParser, REMAINDER
parser = ArgumentParser(prog=__name__,
usage="%(prog)s [options] <command> [args]")
parser.add_argument("--threads",
action="store_true", dest="threads", default=None,
help="initialize MPI with thread support")
parser.add_argument("--no-threads",
action="store_false", dest="threads", default=None,
help="initialize MPI without thread support")
parser.add_argument("--thread-level",
dest="thread_level", default=None,
action="store", metavar="LEVEL",
choices="single funneled serialized multiple".split(),
help="initialize MPI with required thread level")
parser.add_argument("--mpe",
action="store_true", dest="mpe", default=False,
help="use MPE for MPI profiling")
parser.add_argument("--vt",
action="store_true", dest="vt", default=False,
help="use VampirTrace for MPI profiling")
parser.add_argument("command",
action="store", metavar="<command>",
help="benchmark command to run")
parser.add_argument("args",
nargs=REMAINDER, metavar="[args]",
help="arguments for benchmark command")
options = parser.parse_args(args)
from . import rc, profile
if options.threads is not None:
rc.threads = options.threads
if options.thread_level is not None:
rc.thread_level = options.thread_level
if options.mpe:
profile('mpe', logfile='mpi4py')
if options.vt:
profile('vt', logfile='mpi4py')
from . import MPI
comm = MPI.COMM_WORLD
if options.command not in main.commands:
if comm.rank == 0:
parser.error("unknown command '%s'" % options.command)
parser.exit(2)
command = main.commands[options.command]
command(comm, options.args)
parser.exit()
main.commands = { # type: ignore[attr-defined]
'helloworld': helloworld,
'ringtest': ringtest,
}
if __name__ == '__main__':
main()
|
main
|
visitors.py
|
# sql/visitors.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for the purposes of applying
transformations to expressions.
Examples of how the visit system is used can be seen in the source code of,
for example, the ``sqlalchemy.sql.util`` and ``sqlalchemy.sql.compiler``
modules. Some background on clause adaptation is also at
https://techspot.zzzeek.org/2008/01/23/expression-transformations/ .
"""
from collections import deque
import itertools
import operator
from .. import exc
from .. import util
from ..util import langhelpers
from ..util import symbol
__all__ = [
"iterate",
"traverse_using",
"traverse",
"cloned_traverse",
"replacement_traverse",
"Traversible",
"TraversibleType",
"ExternalTraversal",
"InternalTraversal",
]
def _generate_compiler_dispatch(cls):
"""Generate a _compiler_dispatch() external traversal on classes with a
__visit_name__ attribute.
"""
visit_name = cls.__visit_name__
if "_compiler_dispatch" in cls.__dict__:
# class has a fixed _compiler_dispatch() method.
# copy it to "original" so that we can get it back if
# sqlalchemy.ext.compiles overrides it.
cls._original_compiler_dispatch = cls._compiler_dispatch
return
if not isinstance(visit_name, util.compat.string_types):
raise exc.InvalidRequestError(
"__visit_name__ on class %s must be a string at the class level"
% cls.__name__
)
name = "visit_%s" % visit_name
getter = operator.attrgetter(name)
def _compiler_dispatch(self, visitor, **kw):
"""Look for an attribute named "visit_<visit_name>" on the
visitor, and call it with the same kw params.
"""
try:
meth = getter(visitor)
except AttributeError as err:
return visitor.visit_unsupported_compilation(self, err, **kw)
else:
return meth(self, **kw)
cls._compiler_dispatch = (
cls._original_compiler_dispatch
) = _compiler_dispatch
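# Illustration with hypothetical names (not part of the library): given the
# dispatch generated above, a visitable class and a visitor interact as
#
#   class Widget(Traversible):
#       __visit_name__ = "widget"
#
#   class Printer(object):
#       def visit_widget(self, element, **kw):
#           return "widget!"
#
#   Widget()._compiler_dispatch(Printer())   # -> "widget!"
#
# If the visitor has no ``visit_widget`` attribute, dispatch falls back to
# ``visitor.visit_unsupported_compilation(element, err, **kw)``.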
class TraversibleType(type):
"""Metaclass which assigns dispatch attributes to various kinds of
"visitable" classes.
Attributes include:
* The ``_compiler_dispatch`` method, corresponding to ``__visit_name__``.
This is called "external traversal" because the caller of each visit()
method is responsible for sub-traversing the inner elements of each
object. This is appropriate for string compilers and other traversals
that need to call upon the inner elements in a specific pattern.
* internal traversal collections ``_children_traversal``,
``_cache_key_traversal``, ``_copy_internals_traversal``, generated from
an optional ``_traverse_internals`` collection of symbols which comes
from the :class:`.InternalTraversal` list of symbols. This is called
"internal traversal" MARKMARK
"""
def __init__(cls, clsname, bases, clsdict):
if clsname != "Traversible":
if "__visit_name__" in clsdict:
_generate_compiler_dispatch(cls)
super(TraversibleType, cls).__init__(clsname, bases, clsdict)
class Traversible(util.with_metaclass(TraversibleType)):
"""Base class for visitable objects, applies the
:class:`.visitors.TraversibleType` metaclass.
"""
def __class_getitem__(cls, key):
# allow generic classes in py3.9+
return cls
@util.preload_module("sqlalchemy.sql.traversals")
def get_children(self, omit_attrs=(), **kw):
r"""Return immediate child :class:`.visitors.Traversible`
elements of this :class:`.visitors.Traversible`.
This is used for visit traversal.
\**kw may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
traversals = util.preloaded.sql_traversals
try:
traverse_internals = self._traverse_internals
except AttributeError:
# user-defined classes may not have a _traverse_internals
return []
dispatch = traversals._get_children.run_generated_dispatch
return itertools.chain.from_iterable(
meth(obj, **kw)
for attrname, obj, meth in dispatch(
self, traverse_internals, "_generated_get_children_traversal"
)
if attrname not in omit_attrs and obj is not None
)
class _InternalTraversalType(type):
def __init__(cls, clsname, bases, clsdict):
if cls.__name__ in ("InternalTraversal", "ExtendedInternalTraversal"):
lookup = {}
for key, sym in clsdict.items():
if key.startswith("dp_"):
visit_key = key.replace("dp_", "visit_")
sym_name = sym.name
assert sym_name not in lookup, sym_name
lookup[sym] = lookup[sym_name] = visit_key
if hasattr(cls, "_dispatch_lookup"):
lookup.update(cls._dispatch_lookup)
cls._dispatch_lookup = lookup
super(_InternalTraversalType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatcher(visitor, internal_dispatch, method_name):
names = []
for attrname, visit_sym in internal_dispatch:
meth = visitor.dispatch(visit_sym)
if meth:
visit_name = ExtendedInternalTraversal._dispatch_lookup[visit_sym]
names.append((attrname, visit_name))
code = (
(" return [\n")
+ (
", \n".join(
" (%r, self.%s, visitor.%s)"
% (attrname, attrname, visit_name)
for attrname, visit_name in names
)
)
+ ("\n ]\n")
)
meth_text = ("def %s(self, visitor):\n" % method_name) + code + "\n"
# print(meth_text)
return langhelpers._exec_code_in_env(meth_text, {}, method_name)
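# Illustration only (attribute names are hypothetical): for a class whose
# ``_traverse_internals`` maps two attributes to ``visit_clauseelement`` and
# ``visit_string``, the source text assembled above looks roughly like
#
#   def _generated_get_children_traversal(self, visitor):
#       return [
#           ('left', self.left, visitor.visit_clauseelement),
#           ('name', self.name, visitor.visit_string)
#       ]
#
# which is then compiled with ``langhelpers._exec_code_in_env`` and attached
# to the target class by ``generate_dispatch()`` below.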
class InternalTraversal(util.with_metaclass(_InternalTraversalType, object)):
r"""Defines visitor symbols used for internal traversal.
The :class:`.InternalTraversal` class is used in two ways. One is that
it can serve as the superclass for an object that implements the
various visit methods of the class. The other is that the symbols
themselves of :class:`.InternalTraversal` are used within
    the ``_traverse_internals`` collection. For example, the :class:`.Case`
    object defines ``_traverse_internals`` as ::
_traverse_internals = [
("value", InternalTraversal.dp_clauseelement),
("whens", InternalTraversal.dp_clauseelement_tuples),
("else_", InternalTraversal.dp_clauseelement),
]
Above, the :class:`.Case` class indicates its internal state as the
attributes named ``value``, ``whens``, and ``else_``. They each
    link to an :class:`.InternalTraversal` method which indicates the type
    of data structure being referred to.
Using the ``_traverse_internals`` structure, objects of type
    :class:`.Traversible` will have the following methods automatically
implemented:
* :meth:`.Traversible.get_children`
* :meth:`.Traversible._copy_internals`
* :meth:`.Traversible._gen_cache_key`
Subclasses can also implement these methods directly, particularly for the
:meth:`.Traversible._copy_internals` method, when special steps
are needed.
.. versionadded:: 1.4
"""
def dispatch(self, visit_symbol):
"""Given a method from :class:`.InternalTraversal`, return the
corresponding method on a subclass.
"""
name = self._dispatch_lookup[visit_symbol]
return getattr(self, name, None)
def run_generated_dispatch(
self, target, internal_dispatch, generate_dispatcher_name
):
try:
dispatcher = target.__class__.__dict__[generate_dispatcher_name]
except KeyError:
# most of the dispatchers are generated up front
# in sqlalchemy/sql/__init__.py ->
# traversals.py-> _preconfigure_traversals().
# this block will generate any remaining dispatchers.
dispatcher = self.generate_dispatch(
target.__class__, internal_dispatch, generate_dispatcher_name
)
return dispatcher(target, self)
def generate_dispatch(
self, target_cls, internal_dispatch, generate_dispatcher_name
):
dispatcher = _generate_dispatcher(
self, internal_dispatch, generate_dispatcher_name
)
# assert isinstance(target_cls, type)
setattr(target_cls, generate_dispatcher_name, dispatcher)
return dispatcher
dp_has_cache_key = symbol("HC")
"""Visit a :class:`.HasCacheKey` object."""
dp_has_cache_key_list = symbol("HL")
"""Visit a list of :class:`.HasCacheKey` objects."""
dp_clauseelement = symbol("CE")
"""Visit a :class:`_expression.ClauseElement` object."""
dp_fromclause_canonical_column_collection = symbol("FC")
"""Visit a :class:`_expression.FromClause` object in the context of the
``columns`` attribute.
The column collection is "canonical", meaning it is the originally
defined location of the :class:`.ColumnClause` objects. Right now
this means that the object being visited is a
:class:`_expression.TableClause`
or :class:`_schema.Table` object only.
"""
dp_clauseelement_tuples = symbol("CTS")
"""Visit a list of tuples which contain :class:`_expression.ClauseElement`
objects.
"""
dp_clauseelement_list = symbol("CL")
"""Visit a list of :class:`_expression.ClauseElement` objects.
"""
dp_clauseelement_tuple = symbol("CT")
"""Visit a tuple of :class:`_expression.ClauseElement` objects.
"""
dp_executable_options = symbol("EO")
dp_with_context_options = symbol("WC")
dp_fromclause_ordered_set = symbol("CO")
"""Visit an ordered set of :class:`_expression.FromClause` objects. """
dp_string = symbol("S")
"""Visit a plain string value.
Examples include table and column names, bound parameter keys, special
keywords such as "UNION", "UNION ALL".
The string value is considered to be significant for cache key
generation.
"""
dp_string_list = symbol("SL")
"""Visit a list of strings."""
dp_anon_name = symbol("AN")
"""Visit a potentially "anonymized" string value.
The string value is considered to be significant for cache key
generation.
"""
dp_boolean = symbol("B")
"""Visit a boolean value.
The boolean value is considered to be significant for cache key
generation.
"""
dp_operator = symbol("O")
"""Visit an operator.
The operator is a function from the :mod:`sqlalchemy.sql.operators`
module.
The operator value is considered to be significant for cache key
generation.
"""
dp_type = symbol("T")
"""Visit a :class:`.TypeEngine` object
The type object is considered to be significant for cache key
generation.
"""
dp_plain_dict = symbol("PD")
"""Visit a dictionary with string keys.
The keys of the dictionary should be strings, the values should
be immutable and hashable. The dictionary is considered to be
significant for cache key generation.
"""
dp_dialect_options = symbol("DO")
"""Visit a dialect options structure."""
dp_string_clauseelement_dict = symbol("CD")
"""Visit a dictionary of string keys to :class:`_expression.ClauseElement`
objects.
"""
dp_string_multi_dict = symbol("MD")
"""Visit a dictionary of string keys to values which may either be
plain immutable/hashable or :class:`.HasCacheKey` objects.
"""
dp_annotations_key = symbol("AK")
"""Visit the _annotations_cache_key element.
This is a dictionary of additional information about a ClauseElement
that modifies its role. It should be included when comparing or caching
objects, however generating this key is relatively expensive. Visitors
should check the "_annotations" dict for non-None first before creating
this key.
"""
dp_plain_obj = symbol("PO")
"""Visit a plain python object.
The value should be immutable and hashable, such as an integer.
The value is considered to be significant for cache key generation.
"""
dp_named_ddl_element = symbol("DD")
"""Visit a simple named DDL element.
The current object used by this method is the :class:`.Sequence`.
The object is only considered to be important for cache key generation
as far as its name, but not any other aspects of it.
"""
dp_prefix_sequence = symbol("PS")
"""Visit the sequence represented by :class:`_expression.HasPrefixes`
or :class:`_expression.HasSuffixes`.
"""
dp_table_hint_list = symbol("TH")
"""Visit the ``_hints`` collection of a :class:`_expression.Select`
object.
"""
dp_setup_join_tuple = symbol("SJ")
dp_memoized_select_entities = symbol("ME")
dp_statement_hint_list = symbol("SH")
"""Visit the ``_statement_hints`` collection of a
:class:`_expression.Select`
object.
"""
dp_unknown_structure = symbol("UK")
"""Visit an unknown structure.
"""
dp_dml_ordered_values = symbol("DML_OV")
"""Visit the values() ordered tuple list of an
:class:`_expression.Update` object."""
dp_dml_values = symbol("DML_V")
"""Visit the values() dictionary of a :class:`.ValuesBase`
(e.g. Insert or Update) object.
"""
dp_dml_multi_values = symbol("DML_MV")
"""Visit the values() multi-valued list of dictionaries of an
:class:`_expression.Insert` object.
"""
dp_propagate_attrs = symbol("PA")
"""Visit the propagate attrs dict. This hardcodes to the particular
elements we care about right now."""
class ExtendedInternalTraversal(InternalTraversal):
"""Defines additional symbols that are useful in caching applications.
Traversals for :class:`_expression.ClauseElement` objects only need to use
those symbols present in :class:`.InternalTraversal`. However, for
additional caching use cases within the ORM, symbols dealing with the
:class:`.HasCacheKey` class are added here.
"""
dp_ignore = symbol("IG")
"""Specify an object that should be ignored entirely.
    This currently applies to function call argument caching, where some
    arguments should not be considered to be part of a cache key.
"""
dp_inspectable = symbol("IS")
"""Visit an inspectable object where the return value is a
:class:`.HasCacheKey` object."""
dp_multi = symbol("M")
"""Visit an object that may be a :class:`.HasCacheKey` or may be a
plain hashable object."""
dp_multi_list = symbol("MT")
"""Visit a tuple containing elements that may be :class:`.HasCacheKey` or
may be a plain hashable object."""
dp_has_cache_key_tuples = symbol("HT")
"""Visit a list of tuples which contain :class:`.HasCacheKey`
objects.
"""
dp_inspectable_list = symbol("IL")
"""Visit a list of inspectable objects which upon inspection are
HasCacheKey objects."""
class ExternalTraversal(object):
"""Base class for visitor objects which can traverse externally using
the :func:`.visitors.traverse` function.
Direct usage of the :func:`.visitors.traverse` function is usually
preferred.
"""
__traverse_options__ = {}
def traverse_single(self, obj, **kw):
for v in self.visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(self, obj):
"""Traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""Traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith("visit_"):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def visitor_iterator(self):
"""Iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, "_next", None)
def chain(self, visitor):
"""'Chain' an additional ClauseVisitor onto this ClauseVisitor.
The chained visitor will receive all visit events after this one.
"""
tail = list(self.visitor_iterator)[-1]
tail._next = visitor
return self
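# Hedged sketch (hypothetical visitor names): subclasses define ``visit_*``
# methods, which ``_visitor_dict`` indexes by the part after the prefix, and
# further visitors can be attached with ``chain()``:
#
#   class FindBindParams(ExternalTraversal):
#       def __init__(self):
#           self.binds = []
#       def visit_bindparam(self, bind):
#           self.binds.append(bind)
#
#   finder = FindBindParams()
#   finder.traverse(stmt)          # ``stmt`` is some existing ClauseElement
#   finder.chain(OtherVisitor())   # ``OtherVisitor`` receives events after it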
class CloningExternalTraversal(ExternalTraversal):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.cloned_traverse` function.
Direct usage of the :func:`.visitors.cloned_traverse` function is usually
preferred.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""Traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict
)
class ReplacingExternalTraversal(CloningExternalTraversal):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.replacement_traverse` function.
Direct usage of the :func:`.visitors.replacement_traverse` function is
usually preferred.
"""
def replace(self, elem):
"""Receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
|
# backwards compatibility
Visitable = Traversible
VisitableType = TraversibleType
ClauseVisitor = ExternalTraversal
CloningVisitor = CloningExternalTraversal
ReplacingCloningVisitor = ReplacingExternalTraversal
def iterate(obj, opts=util.immutabledict()):
r"""Traverse the given expression structure, returning an iterator.
Traversal is configured to be breadth-first.
The central API feature used by the :func:`.visitors.iterate`
function is the
:meth:`_expression.ClauseElement.get_children` method of
:class:`_expression.ClauseElement` objects. This method should return all
the :class:`_expression.ClauseElement` objects which are associated with a
particular :class:`_expression.ClauseElement` object. For example, a
:class:`.Case` structure will refer to a series of
:class:`_expression.ColumnElement` objects within its "whens" and "else\_"
member variables.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
"""
yield obj
children = obj.get_children(**opts)
if not children:
return
stack = deque([children])
while stack:
t_iterator = stack.popleft()
for t in t_iterator:
yield t
stack.append(t.get_children(**opts))
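# Small hedged example (assumes the usual ``sqlalchemy`` imports are
# available); iteration is breadth-first and yields the statement itself
# before its children:
#
#   from sqlalchemy import column, select
#   stmt = select(column("x")).where(column("x") > 5)
#   names = [elem.__visit_name__ for elem in iterate(stmt, {})]
#   # ``names`` begins with "select", followed by the child element names.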
def traverse_using(iterator, obj, visitors):
"""Visit the given expression structure using the given iterator of
objects.
:func:`.visitors.traverse_using` is usually called internally as the result
of the :func:`.visitors.traverse` function.
:param iterator: an iterable or sequence which will yield
:class:`_expression.ClauseElement`
structures; the iterator is assumed to be the
product of the :func:`.visitors.iterate` function.
:param obj: the :class:`_expression.ClauseElement`
that was used as the target of the
:func:`.iterate` function.
:param visitors: dictionary of visit functions. See :func:`.traverse`
for details on this dictionary.
.. seealso::
:func:`.traverse`
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""Traverse and visit the given expression structure using the default
iterator.
e.g.::
from sqlalchemy.sql import visitors
stmt = select(some_table).where(some_table.c.foo == 'bar')
def visit_bindparam(bind_param):
print("found bound value: %s" % bind_param.value)
visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})
The iteration of objects uses the :func:`.visitors.iterate` function,
which does a breadth-first traversal using a stack.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
:param visitors: dictionary of visit functions. The dictionary should
have strings as keys, each of which would correspond to the
``__visit_name__`` of a particular kind of SQL expression object, and
callable functions as values, each of which represents a visitor function
for that kind of object.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""Clone the given expression structure, allowing modifications by
visitors.
Traversal usage is the same as that of :func:`.visitors.traverse`.
The visitor functions present in the ``visitors`` dictionary may also
modify the internals of the given structure as the traversal proceeds.
The central API feature used by the :func:`.visitors.cloned_traverse`
and :func:`.visitors.replacement_traverse` functions, in addition to the
:meth:`_expression.ClauseElement.get_children`
function that is used to achieve
the iteration, is the :meth:`_expression.ClauseElement._copy_internals`
method.
For a :class:`_expression.ClauseElement`
structure to support cloning and replacement
traversals correctly, it needs to be able to pass a cloning function into
its internal members in order to make copies of them.
.. seealso::
:func:`.visitors.traverse`
:func:`.visitors.replacement_traverse`
"""
cloned = {}
stop_on = set(opts.get("stop_on", []))
def deferred_copy_internals(obj):
return cloned_traverse(obj, opts, visitors)
def clone(elem, **kw):
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
if "replace" in kw:
newelem = kw["replace"](elem)
if newelem is not None:
cloned[id(elem)] = newelem
return newelem
cloned[id(elem)] = newelem = elem._clone(clone=clone, **kw)
newelem._copy_internals(clone=clone, **kw)
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(
obj, deferred_copy_internals=deferred_copy_internals, **opts
)
clone = None # remove gc cycles
return obj
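# Hedged usage sketch: the visitor receives the already-copied element, so it
# may mutate that copy without affecting the original structure.
#
#   def visit_bindparam(bind):
#       bind.value = "replacement value"
#
#   new_stmt = cloned_traverse(stmt, {}, {"bindparam": visit_bindparam})
#   # ``new_stmt`` carries the new value; ``stmt`` (an existing ClauseElement)
#   # is left unchanged.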
def replacement_traverse(obj, opts, replace):
"""Clone the given expression structure, allowing element
replacement by a given replacement function.
This function is very similar to the :func:`.visitors.cloned_traverse`
function, except instead of being passed a dictionary of visitors, all
elements are unconditionally passed into the given replace function.
The replace function then has the option to return an entirely new object
which will replace the one given. If it returns ``None``, then the object
is kept in place.
The difference in usage between :func:`.visitors.cloned_traverse` and
:func:`.visitors.replacement_traverse` is that in the former case, an
already-cloned object is passed to the visitor function, and the visitor
function can then manipulate the internal state of the object.
In the case of the latter, the visitor function should only return an
entirely different object, or do nothing.
The use case for :func:`.visitors.replacement_traverse` is that of
replacing a FROM clause inside of a SQL structure with a different one,
as is a common use case within the ORM.
"""
cloned = {}
stop_on = {id(x) for x in opts.get("stop_on", [])}
def deferred_copy_internals(obj):
return replacement_traverse(obj, opts, replace)
def clone(elem, **kw):
if (
id(elem) in stop_on
or "no_replacement_traverse" in elem._annotations
):
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem
else:
# base "already seen" on id(), not hash, so that we don't
# replace an Annotated element with its non-annotated one, and
# vice versa
id_elem = id(elem)
if id_elem not in cloned:
if "replace" in kw:
newelem = kw["replace"](elem)
if newelem is not None:
cloned[id_elem] = newelem
return newelem
cloned[id_elem] = newelem = elem._clone(**kw)
newelem._copy_internals(clone=clone, **kw)
return cloned[id_elem]
if obj is not None:
obj = clone(
obj, deferred_copy_internals=deferred_copy_internals, **opts
)
clone = None # remove gc cycles
return obj
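# Hedged usage sketch: returning a new element replaces the original, while
# returning None keeps it in place.
#
#   def replace(elem):
#       if elem is old_table:       # ``old_table`` / ``new_table`` stand in
#           return new_table        # for real Table objects
#       return None
#
#   new_stmt = replacement_traverse(stmt, {}, replace)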
|
"""Traverse and visit the given expression structure."""
def replace(elem):
for v in self.visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
|
update_test.go
|
package issue
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/lighttiger2505/lab/commands/internal"
"github.com/lighttiger2505/lab/internal/api"
gitlab "github.com/xanzy/go-gitlab"
)
func Test_updateMethod_Process(t *testing.T) {
var issue = &gitlab.Issue{
IID: 12,
Title: "title",
State: "state",
Assignee: &gitlab.IssueAssignee{
ID: 24,
},
Description: "desc",
}
tests := []struct {
name string
method internal.Method
want string
wantErr bool
}{
{
name: "update all",
method: &updateMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("newtitle"),
Description: gitlab.String("newmessage"),
StateEvent: gitlab.String("newstate"),
AssigneeIDs: []int{13},
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "newtitle",
Message: "newmessage",
StateEvent: "newstate",
AssigneeID: 13,
},
project: "group/project",
id: 12,
},
want: "",
wantErr: false,
},
{
name: "update title only",
method: &updateMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("newtitle"),
Description: gitlab.String("desc"),
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "newtitle",
Message: "",
StateEvent: "",
AssigneeID: 0,
},
project: "group/project",
id: 12,
},
want: "",
wantErr: false,
},
{
name: "update message only",
method: &updateMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("title"),
Description: gitlab.String("newmessage"),
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "",
Message: "newmessage",
StateEvent: "",
AssigneeID: 0,
},
project: "group/project",
id: 12,
},
want: "",
wantErr: false,
},
{
name: "update state only",
method: &updateMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("title"),
Description: gitlab.String("desc"),
StateEvent: gitlab.String("newstate"),
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "",
Message: "",
StateEvent: "newstate",
AssigneeID: 0,
},
project: "group/project",
id: 12,
},
want: "",
wantErr: false,
},
{
name: "update assignee only",
method: &updateMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("title"),
Description: gitlab.String("desc"),
AssigneeIDs: []int{13},
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "",
Message: "",
StateEvent: "",
AssigneeID: 13,
},
project: "group/project",
id: 12,
},
want: "",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := tt.method
got, err := m.Process()
if (err != nil) != tt.wantErr {
t.Errorf("updateMethod.Process() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("updateMethod.Process() = %v, want %v", got, tt.want)
}
})
}
}
func Test_updateOnEditorMethod_Process(t *testing.T) {
var issue = &gitlab.Issue{
IID: 12,
Title: "title",
State: "state",
Assignee: &gitlab.IssueAssignee{
ID: 24,
},
|
name string
method internal.Method
want string
wantErr bool
}{
{
name: "update all",
method: &updateOnEditorMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("newtitle"),
Description: gitlab.String("newmessage"),
StateEvent: gitlab.String("newstate"),
AssigneeIDs: []int{13},
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "newtitle",
Message: "newmessage",
StateEvent: "newstate",
AssigneeID: 13,
},
project: "group/project",
id: 12,
editFunc: func(program, file string) error { return nil },
},
want: "",
wantErr: false,
},
{
name: "change title only",
method: &updateOnEditorMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("newtitle"),
Description: gitlab.String("desc"),
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "newtitle",
Message: "",
StateEvent: "",
AssigneeID: 0,
},
project: "group/project",
id: 12,
editFunc: func(program, file string) error { return nil },
},
want: "",
wantErr: false,
},
{
name: "change message only",
method: &updateOnEditorMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("title"),
Description: gitlab.String("newmessage"),
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "",
Message: "newmessage",
StateEvent: "",
AssigneeID: 0,
},
project: "group/project",
id: 12,
editFunc: func(program, file string) error { return nil },
},
want: "",
wantErr: false,
},
{
name: "change state only",
method: &updateOnEditorMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("title"),
Description: gitlab.String("desc"),
StateEvent: gitlab.String("newstate"),
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "",
Message: "",
StateEvent: "newstate",
AssigneeID: 0,
},
project: "group/project",
id: 12,
editFunc: func(program, file string) error { return nil },
},
want: "",
wantErr: false,
},
{
name: "change assignee only",
method: &updateOnEditorMethod{
client: &api.MockLabIssueClient{
MockGetIssue: func(pid int, repositoryName string) (*gitlab.Issue, error) {
return issue, nil
},
MockUpdateIssue: func(opt *gitlab.UpdateIssueOptions, pid int, repositoryName string) (*gitlab.Issue, error) {
got := opt
want := &gitlab.UpdateIssueOptions{
Title: gitlab.String("title"),
Description: gitlab.String("desc"),
AssigneeIDs: []int{13},
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("invalide arg (-got +want)\n%s", diff)
}
return issue, nil
},
},
opt: &CreateUpdateOption{
Title: "",
Message: "",
StateEvent: "",
AssigneeID: 13,
},
project: "group/project",
id: 12,
editFunc: func(program, file string) error { return nil },
},
want: "",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := tt.method
got, err := m.Process()
if (err != nil) != tt.wantErr {
t.Errorf("updateMethod.Process() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("updateMethod.Process() = %v, want %v", got, tt.want)
}
})
}
}
|
Description: "desc",
}
tests := []struct {
|
3throot.py
|
import sys
import stdio
|
c = float(sys.argv[1])
t = c
while abs(t - c/(t*t)) > MIN*t:
    t = (c/(t*t) + 2*t)/3
stdio.writeln(t)
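# Added explanation: the loop above is Newton's method for the cube root of c.
# For f(t) = t**3 - c the update is
#   t_new = t - f(t)/f'(t) = t - (t**3 - c)/(3*t**2) = (c/(t*t) + 2*t)/3
# which is exactly the assignment inside the loop; iteration stops once the
# error estimate abs(t - c/(t*t)) drops below MIN*t. For example, c = 8.0
# converges to t = 2.0 after a handful of iterations.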
|
MIN = 1e-10
|
client.rs
|
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(Debug)]
pub(crate) struct Handle<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
client: aws_smithy_client::Client<C, M, R>,
conf: crate::Config,
}
/// Client for Amazon Import/Export Snowball
///
/// Client for invoking operations on Amazon Import/Export Snowball. Each operation on Amazon Import/Export Snowball is a method on
/// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service.
///
/// # Examples
/// **Constructing a client and invoking an operation**
/// ```rust,no_run
/// # async fn docs() {
/// // create a shared configuration. This can be used & shared between multiple service clients.
/// let shared_config = aws_config::load_from_env().await;
/// let client = aws_sdk_snowball::Client::new(&shared_config);
/// // invoke an operation
/// /* let rsp = client
/// .<operation_name>().
/// .<param>("some value")
/// .send().await; */
/// # }
/// ```
/// **Constructing a client with custom configuration**
/// ```rust,no_run
/// use aws_config::RetryConfig;
/// # async fn docs() {
/// let shared_config = aws_config::load_from_env().await;
/// let config = aws_sdk_snowball::config::Builder::from(&shared_config)
/// .retry_config(RetryConfig::disabled())
/// .build();
/// let client = aws_sdk_snowball::Client::from_conf(config);
/// # }
/// ```
#[derive(std::fmt::Debug)]
pub struct Client<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
fn clone(&self) -> Self {
Self {
handle: self.handle.clone(),
}
}
}
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> {
fn from(client: aws_smithy_client::Client<C, M, R>) -> Self {
Self::with_config(client, crate::Config::builder().build())
}
}
impl<C, M, R> Client<C, M, R> {
/// Creates a client with the given service configuration.
pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
/// Returns the client's configuration.
pub fn conf(&self) -> &crate::Config {
&self.handle.conf
}
}
impl<C, M, R> Client<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Constructs a fluent builder for the `CancelCluster` operation.
///
/// See [`CancelCluster`](crate::client::fluent_builders::CancelCluster) for more information about the
/// operation and its arguments.
pub fn cancel_cluster(&self) -> fluent_builders::CancelCluster<C, M, R> {
fluent_builders::CancelCluster::new(self.handle.clone())
}
/// Constructs a fluent builder for the `CancelJob` operation.
///
/// See [`CancelJob`](crate::client::fluent_builders::CancelJob) for more information about the
/// operation and its arguments.
pub fn cancel_job(&self) -> fluent_builders::CancelJob<C, M, R> {
fluent_builders::CancelJob::new(self.handle.clone())
}
/// Constructs a fluent builder for the `CreateAddress` operation.
///
/// See [`CreateAddress`](crate::client::fluent_builders::CreateAddress) for more information about the
/// operation and its arguments.
pub fn create_address(&self) -> fluent_builders::CreateAddress<C, M, R> {
fluent_builders::CreateAddress::new(self.handle.clone())
}
/// Constructs a fluent builder for the `CreateCluster` operation.
///
/// See [`CreateCluster`](crate::client::fluent_builders::CreateCluster) for more information about the
/// operation and its arguments.
pub fn create_cluster(&self) -> fluent_builders::CreateCluster<C, M, R> {
fluent_builders::CreateCluster::new(self.handle.clone())
}
/// Constructs a fluent builder for the `CreateJob` operation.
///
/// See [`CreateJob`](crate::client::fluent_builders::CreateJob) for more information about the
/// operation and its arguments.
pub fn create_job(&self) -> fluent_builders::CreateJob<C, M, R> {
fluent_builders::CreateJob::new(self.handle.clone())
}
/// Constructs a fluent builder for the `CreateLongTermPricing` operation.
///
/// See [`CreateLongTermPricing`](crate::client::fluent_builders::CreateLongTermPricing) for more information about the
/// operation and its arguments.
pub fn create_long_term_pricing(&self) -> fluent_builders::CreateLongTermPricing<C, M, R> {
fluent_builders::CreateLongTermPricing::new(self.handle.clone())
}
/// Constructs a fluent builder for the `CreateReturnShippingLabel` operation.
///
/// See [`CreateReturnShippingLabel`](crate::client::fluent_builders::CreateReturnShippingLabel) for more information about the
/// operation and its arguments.
pub fn create_return_shipping_label(
&self,
) -> fluent_builders::CreateReturnShippingLabel<C, M, R> {
fluent_builders::CreateReturnShippingLabel::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeAddress` operation.
///
/// See [`DescribeAddress`](crate::client::fluent_builders::DescribeAddress) for more information about the
/// operation and its arguments.
pub fn describe_address(&self) -> fluent_builders::DescribeAddress<C, M, R> {
fluent_builders::DescribeAddress::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeAddresses` operation.
///
/// See [`DescribeAddresses`](crate::client::fluent_builders::DescribeAddresses) for more information about the
/// operation and its arguments.
pub fn describe_addresses(&self) -> fluent_builders::DescribeAddresses<C, M, R> {
fluent_builders::DescribeAddresses::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeCluster` operation.
///
/// See [`DescribeCluster`](crate::client::fluent_builders::DescribeCluster) for more information about the
/// operation and its arguments.
pub fn describe_cluster(&self) -> fluent_builders::DescribeCluster<C, M, R> {
fluent_builders::DescribeCluster::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeJob` operation.
///
/// See [`DescribeJob`](crate::client::fluent_builders::DescribeJob) for more information about the
/// operation and its arguments.
pub fn describe_job(&self) -> fluent_builders::DescribeJob<C, M, R> {
fluent_builders::DescribeJob::new(self.handle.clone())
}
/// Constructs a fluent builder for the `DescribeReturnShippingLabel` operation.
///
/// See [`DescribeReturnShippingLabel`](crate::client::fluent_builders::DescribeReturnShippingLabel) for more information about the
/// operation and its arguments.
pub fn describe_return_shipping_label(
&self,
) -> fluent_builders::DescribeReturnShippingLabel<C, M, R> {
fluent_builders::DescribeReturnShippingLabel::new(self.handle.clone())
}
/// Constructs a fluent builder for the `GetJobManifest` operation.
///
/// See [`GetJobManifest`](crate::client::fluent_builders::GetJobManifest) for more information about the
/// operation and its arguments.
pub fn get_job_manifest(&self) -> fluent_builders::GetJobManifest<C, M, R> {
fluent_builders::GetJobManifest::new(self.handle.clone())
}
/// Constructs a fluent builder for the `GetJobUnlockCode` operation.
///
/// See [`GetJobUnlockCode`](crate::client::fluent_builders::GetJobUnlockCode) for more information about the
/// operation and its arguments.
pub fn get_job_unlock_code(&self) -> fluent_builders::GetJobUnlockCode<C, M, R> {
fluent_builders::GetJobUnlockCode::new(self.handle.clone())
}
/// Constructs a fluent builder for the `GetSnowballUsage` operation.
///
/// See [`GetSnowballUsage`](crate::client::fluent_builders::GetSnowballUsage) for more information about the
/// operation and its arguments.
pub fn
|
(&self) -> fluent_builders::GetSnowballUsage<C, M, R> {
fluent_builders::GetSnowballUsage::new(self.handle.clone())
}
/// Constructs a fluent builder for the `GetSoftwareUpdates` operation.
///
/// See [`GetSoftwareUpdates`](crate::client::fluent_builders::GetSoftwareUpdates) for more information about the
/// operation and its arguments.
pub fn get_software_updates(&self) -> fluent_builders::GetSoftwareUpdates<C, M, R> {
fluent_builders::GetSoftwareUpdates::new(self.handle.clone())
}
/// Constructs a fluent builder for the `ListClusterJobs` operation.
///
/// See [`ListClusterJobs`](crate::client::fluent_builders::ListClusterJobs) for more information about the
/// operation and its arguments.
pub fn list_cluster_jobs(&self) -> fluent_builders::ListClusterJobs<C, M, R> {
fluent_builders::ListClusterJobs::new(self.handle.clone())
}
/// Constructs a fluent builder for the `ListClusters` operation.
///
/// See [`ListClusters`](crate::client::fluent_builders::ListClusters) for more information about the
/// operation and its arguments.
pub fn list_clusters(&self) -> fluent_builders::ListClusters<C, M, R> {
fluent_builders::ListClusters::new(self.handle.clone())
}
/// Constructs a fluent builder for the `ListCompatibleImages` operation.
///
/// See [`ListCompatibleImages`](crate::client::fluent_builders::ListCompatibleImages) for more information about the
/// operation and its arguments.
pub fn list_compatible_images(&self) -> fluent_builders::ListCompatibleImages<C, M, R> {
fluent_builders::ListCompatibleImages::new(self.handle.clone())
}
/// Constructs a fluent builder for the `ListJobs` operation.
///
/// See [`ListJobs`](crate::client::fluent_builders::ListJobs) for more information about the
/// operation and its arguments.
pub fn list_jobs(&self) -> fluent_builders::ListJobs<C, M, R> {
fluent_builders::ListJobs::new(self.handle.clone())
}
/// Constructs a fluent builder for the `ListLongTermPricing` operation.
///
/// See [`ListLongTermPricing`](crate::client::fluent_builders::ListLongTermPricing) for more information about the
/// operation and its arguments.
pub fn list_long_term_pricing(&self) -> fluent_builders::ListLongTermPricing<C, M, R> {
fluent_builders::ListLongTermPricing::new(self.handle.clone())
}
/// Constructs a fluent builder for the `UpdateCluster` operation.
///
/// See [`UpdateCluster`](crate::client::fluent_builders::UpdateCluster) for more information about the
/// operation and its arguments.
pub fn update_cluster(&self) -> fluent_builders::UpdateCluster<C, M, R> {
fluent_builders::UpdateCluster::new(self.handle.clone())
}
/// Constructs a fluent builder for the `UpdateJob` operation.
///
/// See [`UpdateJob`](crate::client::fluent_builders::UpdateJob) for more information about the
/// operation and its arguments.
pub fn update_job(&self) -> fluent_builders::UpdateJob<C, M, R> {
fluent_builders::UpdateJob::new(self.handle.clone())
}
/// Constructs a fluent builder for the `UpdateJobShipmentState` operation.
///
/// See [`UpdateJobShipmentState`](crate::client::fluent_builders::UpdateJobShipmentState) for more information about the
/// operation and its arguments.
pub fn update_job_shipment_state(&self) -> fluent_builders::UpdateJobShipmentState<C, M, R> {
fluent_builders::UpdateJobShipmentState::new(self.handle.clone())
}
/// Constructs a fluent builder for the `UpdateLongTermPricing` operation.
///
/// See [`UpdateLongTermPricing`](crate::client::fluent_builders::UpdateLongTermPricing) for more information about the
/// operation and its arguments.
pub fn update_long_term_pricing(&self) -> fluent_builders::UpdateLongTermPricing<C, M, R> {
fluent_builders::UpdateLongTermPricing::new(self.handle.clone())
}
}
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
    //! one of its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
/// Fluent builder constructing a request to `CancelCluster`.
///
/// <p>Cancels a cluster job. You can only cancel a cluster job while it's in the
/// <code>AwaitingQuorum</code> status. You'll have at least an hour after creating a cluster
/// job to cancel it.</p>
#[derive(std::fmt::Debug)]
pub struct CancelCluster<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::cancel_cluster_input::Builder,
}
impl<C, M, R> CancelCluster<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CancelCluster`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CancelClusterOutput,
aws_smithy_http::result::SdkError<crate::error::CancelClusterError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CancelClusterInputOperationOutputAlias,
crate::output::CancelClusterOutput,
crate::error::CancelClusterError,
crate::input::CancelClusterInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The 39-character ID for the cluster that you want to cancel, for example
/// <code>CID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn cluster_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.cluster_id(inp);
self
}
/// <p>The 39-character ID for the cluster that you want to cancel, for example
/// <code>CID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_cluster_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_cluster_id(input);
self
}
}
/// Fluent builder constructing a request to `CancelJob`.
///
/// <p>Cancels the specified job. You can only cancel a job before its <code>JobState</code>
/// value changes to <code>PreparingAppliance</code>. Requesting the <code>ListJobs</code> or
/// <code>DescribeJob</code> action returns a job's <code>JobState</code> as part of the
/// response element data returned.</p>
#[derive(std::fmt::Debug)]
pub struct CancelJob<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::cancel_job_input::Builder,
}
impl<C, M, R> CancelJob<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CancelJob`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CancelJobOutput,
aws_smithy_http::result::SdkError<crate::error::CancelJobError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CancelJobInputOperationOutputAlias,
crate::output::CancelJobOutput,
crate::error::CancelJobError,
crate::input::CancelJobInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The 39-character job ID for the job that you want to cancel, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The 39-character job ID for the job that you want to cancel, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
}
/// Fluent builder constructing a request to `CreateAddress`.
///
/// <p>Creates an address for a Snow device to be shipped to. In most regions,
/// addresses are validated at the time of creation. The address you provide must be located
/// within the serviceable area of your region. If the address is invalid or unsupported, then an
/// exception is thrown.</p>
#[derive(std::fmt::Debug)]
pub struct CreateAddress<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_address_input::Builder,
}
impl<C, M, R> CreateAddress<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateAddress`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateAddressOutput,
aws_smithy_http::result::SdkError<crate::error::CreateAddressError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateAddressInputOperationOutputAlias,
crate::output::CreateAddressOutput,
crate::error::CreateAddressError,
crate::input::CreateAddressInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The address that you want the Snow device shipped to.</p>
pub fn address(mut self, inp: crate::model::Address) -> Self {
self.inner = self.inner.address(inp);
self
}
/// <p>The address that you want the Snow device shipped to.</p>
pub fn set_address(mut self, input: std::option::Option<crate::model::Address>) -> Self {
self.inner = self.inner.set_address(input);
self
}
}
/// Fluent builder constructing a request to `CreateCluster`.
///
/// <p>Creates an empty cluster. Each cluster supports five nodes. You use the <a>CreateJob</a> action separately to create the jobs for each of these nodes. The
/// cluster does not ship until these five node jobs have been created.</p>
#[derive(std::fmt::Debug)]
pub struct CreateCluster<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_cluster_input::Builder,
}
impl<C, M, R> CreateCluster<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateCluster`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateClusterOutput,
aws_smithy_http::result::SdkError<crate::error::CreateClusterError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateClusterInputOperationOutputAlias,
crate::output::CreateClusterOutput,
crate::error::CreateClusterError,
crate::input::CreateClusterInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The type of job for this cluster. Currently, the only job type supported for clusters
/// is <code>LOCAL_USE</code>.</p>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
        /// Family Devices and Capacity) in the <i>Snowball Edge Developer Guide</i>.</p>
pub fn job_type(mut self, inp: crate::model::JobType) -> Self {
self.inner = self.inner.job_type(inp);
self
}
/// <p>The type of job for this cluster. Currently, the only job type supported for clusters
/// is <code>LOCAL_USE</code>.</p>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn set_job_type(mut self, input: std::option::Option<crate::model::JobType>) -> Self {
self.inner = self.inner.set_job_type(input);
self
}
/// <p>The resources associated with the cluster job. These resources include Amazon S3
/// buckets and optional Lambda functions written in the Python language.
/// </p>
pub fn resources(mut self, inp: crate::model::JobResource) -> Self {
self.inner = self.inner.resources(inp);
self
}
/// <p>The resources associated with the cluster job. These resources include Amazon S3
/// buckets and optional Lambda functions written in the Python language.
/// </p>
pub fn set_resources(
mut self,
input: std::option::Option<crate::model::JobResource>,
) -> Self {
self.inner = self.inner.set_resources(input);
self
}
/// <p>Specifies the service or services on the Snow Family device that your transferred data
/// will be exported from or imported into. Amazon Web Services Snow Family device clusters support Amazon S3 and NFS
/// (Network File System).</p>
pub fn on_device_service_configuration(
mut self,
inp: crate::model::OnDeviceServiceConfiguration,
) -> Self {
self.inner = self.inner.on_device_service_configuration(inp);
self
}
/// <p>Specifies the service or services on the Snow Family device that your transferred data
/// will be exported from or imported into. Amazon Web Services Snow Family device clusters support Amazon S3 and NFS
/// (Network File System).</p>
pub fn set_on_device_service_configuration(
mut self,
input: std::option::Option<crate::model::OnDeviceServiceConfiguration>,
) -> Self {
self.inner = self.inner.set_on_device_service_configuration(input);
self
}
/// <p>An optional description of this specific cluster, for example <code>Environmental Data
/// Cluster-01</code>.</p>
pub fn description(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(inp);
self
}
/// <p>An optional description of this specific cluster, for example <code>Environmental Data
/// Cluster-01</code>.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The ID for the address that you want the cluster shipped to.</p>
pub fn address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.address_id(inp);
self
}
/// <p>The ID for the address that you want the cluster shipped to.</p>
pub fn set_address_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_address_id(input);
self
}
/// <p>The <code>KmsKeyARN</code> value that you want to associate with this cluster.
/// <code>KmsKeyARN</code> values are created by using the <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html">CreateKey</a> API action in Key Management Service (KMS). </p>
pub fn kms_key_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.kms_key_arn(inp);
self
}
/// <p>The <code>KmsKeyARN</code> value that you want to associate with this cluster.
/// <code>KmsKeyARN</code> values are created by using the <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html">CreateKey</a> API action in Key Management Service (KMS). </p>
pub fn set_kms_key_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_kms_key_arn(input);
self
}
/// <p>The <code>RoleARN</code> that you want to associate with this cluster.
/// <code>RoleArn</code> values are created by using the <a href="https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html">CreateRole</a> API action in Identity and Access Management (IAM).</p>
pub fn role_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.role_arn(inp);
self
}
/// <p>The <code>RoleARN</code> that you want to associate with this cluster.
/// <code>RoleArn</code> values are created by using the <a href="https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html">CreateRole</a> API action in Identity and Access Management (IAM).</p>
pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_role_arn(input);
self
}
/// <p>The type of Snow Family Devices to use for this cluster. </p>
/// <note>
/// <p>For cluster jobs, Amazon Web Services Snow Family currently supports only the
/// <code>EDGE</code> device type.</p>
/// </note>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn snowball_type(mut self, inp: crate::model::SnowballType) -> Self {
self.inner = self.inner.snowball_type(inp);
self
}
/// <p>The type of Snow Family Devices to use for this cluster. </p>
/// <note>
/// <p>For cluster jobs, Amazon Web Services Snow Family currently supports only the
/// <code>EDGE</code> device type.</p>
/// </note>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn set_snowball_type(
mut self,
input: std::option::Option<crate::model::SnowballType>,
) -> Self {
self.inner = self.inner.set_snowball_type(input);
self
}
    /// <p>The shipping speed for each node in this cluster. This speed doesn't dictate how soon
    /// you'll get each Snowball Edge device; rather, it represents how quickly each device moves to
    /// its destination while in transit. Regional shipping speeds are as follows:</p>
/// <ul>
/// <li>
/// <p>In Australia, you have access to express shipping. Typically, Snow devices shipped
/// express are delivered in about a day.</p>
/// </li>
/// <li>
/// <p>In the European Union (EU), you have access to express shipping. Typically, Snow
/// devices shipped express are delivered in about a day. In addition, most countries in the
/// EU have access to standard shipping, which typically takes less than a week, one
/// way.</p>
/// </li>
/// <li>
/// <p>In India, Snow devices are delivered in one to seven days.</p>
/// </li>
/// <li>
/// <p>In the United States of America (US), you have access to one-day shipping and
/// two-day shipping.</p>
/// </li>
/// </ul>
pub fn shipping_option(mut self, inp: crate::model::ShippingOption) -> Self {
self.inner = self.inner.shipping_option(inp);
self
}
    /// <p>The shipping speed for each node in this cluster. This speed doesn't dictate how soon
    /// you'll get each Snowball Edge device; rather, it represents how quickly each device moves to
    /// its destination while in transit. Regional shipping speeds are as follows:</p>
/// <ul>
/// <li>
/// <p>In Australia, you have access to express shipping. Typically, Snow devices shipped
/// express are delivered in about a day.</p>
/// </li>
/// <li>
/// <p>In the European Union (EU), you have access to express shipping. Typically, Snow
/// devices shipped express are delivered in about a day. In addition, most countries in the
/// EU have access to standard shipping, which typically takes less than a week, one
/// way.</p>
/// </li>
/// <li>
/// <p>In India, Snow devices are delivered in one to seven days.</p>
/// </li>
/// <li>
/// <p>In the United States of America (US), you have access to one-day shipping and
/// two-day shipping.</p>
/// </li>
/// </ul>
pub fn set_shipping_option(
mut self,
input: std::option::Option<crate::model::ShippingOption>,
) -> Self {
self.inner = self.inner.set_shipping_option(input);
self
}
/// <p>The Amazon Simple Notification Service (Amazon SNS) notification settings for this
/// cluster.</p>
pub fn notification(mut self, inp: crate::model::Notification) -> Self {
self.inner = self.inner.notification(inp);
self
}
/// <p>The Amazon Simple Notification Service (Amazon SNS) notification settings for this
/// cluster.</p>
pub fn set_notification(
mut self,
input: std::option::Option<crate::model::Notification>,
) -> Self {
self.inner = self.inner.set_notification(input);
self
}
/// <p>The forwarding address ID for a cluster. This field is not supported in most
/// regions.</p>
pub fn forwarding_address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.forwarding_address_id(inp);
self
}
/// <p>The forwarding address ID for a cluster. This field is not supported in most
/// regions.</p>
pub fn set_forwarding_address_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_forwarding_address_id(input);
self
}
/// <p>The tax documents required in your Amazon Web Services Region.</p>
pub fn tax_documents(mut self, inp: crate::model::TaxDocuments) -> Self {
self.inner = self.inner.tax_documents(inp);
self
}
/// <p>The tax documents required in your Amazon Web Services Region.</p>
pub fn set_tax_documents(
mut self,
input: std::option::Option<crate::model::TaxDocuments>,
) -> Self {
self.inner = self.inner.set_tax_documents(input);
self
}
/// <p>Allows you to securely operate and manage Snow devices in a cluster remotely from outside
/// of your internal network. When set to <code>INSTALLED_AUTOSTART</code>, remote management will
/// automatically be available when the device arrives at your location. Otherwise, you need to
/// use the Snowball Client to manage the device.</p>
pub fn remote_management(mut self, inp: crate::model::RemoteManagement) -> Self {
self.inner = self.inner.remote_management(inp);
self
}
/// <p>Allows you to securely operate and manage Snow devices in a cluster remotely from outside
/// of your internal network. When set to <code>INSTALLED_AUTOSTART</code>, remote management will
/// automatically be available when the device arrives at your location. Otherwise, you need to
/// use the Snowball Client to manage the device.</p>
pub fn set_remote_management(
mut self,
input: std::option::Option<crate::model::RemoteManagement>,
) -> Self {
self.inner = self.inner.set_remote_management(input);
self
}
}
/// Fluent builder constructing a request to `CreateJob`.
///
/// <p>Creates a job to import or export data between Amazon S3 and your on-premises data
/// center. Your Amazon Web Services account must have the right trust policies and permissions in
/// place to create a job for a Snow device. If you're creating a job for a node in a cluster, you
/// only need to provide the <code>clusterId</code> value; the other job attributes are inherited
/// from the cluster. </p>
/// <note>
/// <p>Only the Snowball Edge device type is supported when ordering clustered jobs.</p>
/// <p>The device capacity is optional.</p>
/// <p>Availability of device types differ by Amazon Web Services Region. For more information
/// about Region availability, see <a href="https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/?p=ngi&loc=4">Amazon Web Services Regional Services</a>.</p>
/// </note>
///
/// <p></p>
///
/// <p class="title">
/// <b>Snow Family Devices and their capacities.</b>
/// </p>
/// <ul>
/// <li>
/// <p>Snow Family device type: <b>SNC1_SSD</b>
/// </p>
/// <ul>
/// <li>
/// <p>Capacity: T14</p>
/// </li>
/// <li>
/// <p>Description: Snowcone </p>
/// </li>
/// </ul>
///
/// <p></p>
/// </li>
/// <li>
/// <p>Snow Family device type: <b>SNC1_HDD</b>
/// </p>
/// <ul>
/// <li>
/// <p>Capacity: T8</p>
/// </li>
/// <li>
/// <p>Description: Snowcone </p>
/// </li>
/// </ul>
/// <p></p>
/// </li>
/// <li>
/// <p>Device type: <b>EDGE_S</b>
/// </p>
/// <ul>
/// <li>
/// <p>Capacity: T98</p>
/// </li>
/// <li>
/// <p>Description: Snowball Edge Storage Optimized for data transfer only </p>
/// </li>
/// </ul>
///
///
/// <p></p>
/// </li>
/// <li>
/// <p>Device type: <b>EDGE_CG</b>
/// </p>
/// <ul>
/// <li>
/// <p>Capacity: T42</p>
/// </li>
/// <li>
/// <p>Description: Snowball Edge Compute Optimized with GPU</p>
/// </li>
/// </ul>
/// <p></p>
/// </li>
/// <li>
/// <p>Device type: <b>EDGE_C</b>
/// </p>
/// <ul>
/// <li>
/// <p>Capacity: T42</p>
/// </li>
/// <li>
/// <p>Description: Snowball Edge Compute Optimized without GPU</p>
/// </li>
/// </ul>
/// <p></p>
/// </li>
/// <li>
/// <p>Device type: <b>EDGE</b>
/// </p>
/// <ul>
/// <li>
/// <p>Capacity: T100</p>
/// </li>
/// <li>
/// <p>Description: Snowball Edge Storage Optimized with EC2 Compute</p>
/// </li>
/// </ul>
/// <p></p>
/// </li>
/// <li>
/// <p>Device type: <b>STANDARD</b>
/// </p>
/// <ul>
/// <li>
/// <p>Capacity: T50</p>
/// </li>
/// <li>
/// <p>Description: Original Snowball device</p>
/// <note>
    /// <p>This device is only available in the Ningxia, Beijing, and Singapore Amazon Web Services
    /// Regions.</p>
/// </note>
/// </li>
/// </ul>
/// <p></p>
/// </li>
/// <li>
/// <p>Device type: <b>STANDARD</b>
/// </p>
/// <ul>
/// <li>
/// <p>Capacity: T80</p>
/// </li>
/// <li>
/// <p>Description: Original Snowball device</p>
/// <note>
    /// <p>This device is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Regions.</p>
/// </note>
/// </li>
/// </ul>
/// <p></p>
/// </li>
/// </ul>
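///
/// # Example
///
/// A minimal sketch of creating a job for a node in an existing cluster; per the description
/// above, only the cluster ID needs to be supplied in that case because the remaining
/// attributes are inherited from the cluster. The crate name `aws_sdk_snowball`, the
/// `create_job()` client method, and the ID value are assumptions/placeholders.
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_snowball::Client,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// let output = client
///     .create_job()
///     // A node job inherits job type, resources, ARNs, etc. from its cluster.
///     .cluster_id("CID123e4567-e89b-12d3-a456-426655440000")
///     .send()
///     .await?;
/// println!("created job: {:?}", output);
/// # Ok(())
/// # }
/// ```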
#[derive(std::fmt::Debug)]
pub struct CreateJob<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_job_input::Builder,
}
impl<C, M, R> CreateJob<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateJob`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateJobOutput,
aws_smithy_http::result::SdkError<crate::error::CreateJobError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateJobInputOperationOutputAlias,
crate::output::CreateJobOutput,
crate::error::CreateJobError,
crate::input::CreateJobInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>Defines the type of job that you're creating.
/// </p>
pub fn job_type(mut self, inp: crate::model::JobType) -> Self {
self.inner = self.inner.job_type(inp);
self
}
/// <p>Defines the type of job that you're creating.
/// </p>
pub fn set_job_type(mut self, input: std::option::Option<crate::model::JobType>) -> Self {
self.inner = self.inner.set_job_type(input);
self
}
/// <p>Defines the Amazon S3 buckets associated with this job.</p>
///
/// <p>With <code>IMPORT</code> jobs, you specify the bucket or buckets that your transferred
/// data will be imported into.</p>
///
/// <p>With <code>EXPORT</code> jobs, you specify the bucket or buckets that your transferred
/// data will be exported from. Optionally, you can also specify a <code>KeyRange</code> value. If
/// you choose to export a range, you define the length of the range by providing either an
/// inclusive <code>BeginMarker</code> value, an inclusive <code>EndMarker</code> value, or both.
/// Ranges are UTF-8 binary sorted.</p>
pub fn resources(mut self, inp: crate::model::JobResource) -> Self {
self.inner = self.inner.resources(inp);
self
}
/// <p>Defines the Amazon S3 buckets associated with this job.</p>
///
/// <p>With <code>IMPORT</code> jobs, you specify the bucket or buckets that your transferred
/// data will be imported into.</p>
///
/// <p>With <code>EXPORT</code> jobs, you specify the bucket or buckets that your transferred
/// data will be exported from. Optionally, you can also specify a <code>KeyRange</code> value. If
/// you choose to export a range, you define the length of the range by providing either an
/// inclusive <code>BeginMarker</code> value, an inclusive <code>EndMarker</code> value, or both.
/// Ranges are UTF-8 binary sorted.</p>
pub fn set_resources(
mut self,
input: std::option::Option<crate::model::JobResource>,
) -> Self {
self.inner = self.inner.set_resources(input);
self
}
/// <p>Specifies the service or services on the Snow Family device that your transferred data
/// will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File
/// System) and the Amazon Web Services Storage Gateway service Tape Gateway type.</p>
pub fn on_device_service_configuration(
mut self,
inp: crate::model::OnDeviceServiceConfiguration,
) -> Self {
self.inner = self.inner.on_device_service_configuration(inp);
self
}
/// <p>Specifies the service or services on the Snow Family device that your transferred data
/// will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File
/// System) and the Amazon Web Services Storage Gateway service Tape Gateway type.</p>
pub fn set_on_device_service_configuration(
mut self,
input: std::option::Option<crate::model::OnDeviceServiceConfiguration>,
) -> Self {
self.inner = self.inner.set_on_device_service_configuration(input);
self
}
/// <p>Defines an optional description of this specific job, for example <code>Important
/// Photos 2016-08-11</code>.</p>
pub fn description(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(inp);
self
}
/// <p>Defines an optional description of this specific job, for example <code>Important
/// Photos 2016-08-11</code>.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The ID for the address that you want the Snow device shipped to.</p>
pub fn address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.address_id(inp);
self
}
/// <p>The ID for the address that you want the Snow device shipped to.</p>
pub fn set_address_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_address_id(input);
self
}
/// <p>The <code>KmsKeyARN</code> that you want to associate with this job.
/// <code>KmsKeyARN</code>s are created using the <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html">CreateKey</a>
/// Key Management Service (KMS) API action.</p>
pub fn kms_key_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.kms_key_arn(inp);
self
}
/// <p>The <code>KmsKeyARN</code> that you want to associate with this job.
/// <code>KmsKeyARN</code>s are created using the <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html">CreateKey</a>
/// Key Management Service (KMS) API action.</p>
pub fn set_kms_key_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_kms_key_arn(input);
self
}
/// <p>The <code>RoleARN</code> that you want to associate with this job.
/// <code>RoleArn</code>s are created using the <a href="https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html">CreateRole</a>
/// Identity and Access Management (IAM) API action.</p>
pub fn role_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.role_arn(inp);
self
}
/// <p>The <code>RoleARN</code> that you want to associate with this job.
/// <code>RoleArn</code>s are created using the <a href="https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html">CreateRole</a>
/// Identity and Access Management (IAM) API action.</p>
pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_role_arn(input);
self
}
/// <p>If your job is being created in one of the US regions, you have the option of
/// specifying what size Snow device you'd like for this job. In all other regions, Snowballs come
/// with 80 TB in storage capacity.</p>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn snowball_capacity_preference(mut self, inp: crate::model::SnowballCapacity) -> Self {
self.inner = self.inner.snowball_capacity_preference(inp);
self
}
/// <p>If your job is being created in one of the US regions, you have the option of
/// specifying what size Snow device you'd like for this job. In all other regions, Snowballs come
/// with 80 TB in storage capacity.</p>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn set_snowball_capacity_preference(
mut self,
input: std::option::Option<crate::model::SnowballCapacity>,
) -> Self {
self.inner = self.inner.set_snowball_capacity_preference(input);
self
}
    /// <p>The shipping speed for this job. This speed doesn't dictate how soon you'll get the
    /// Snow device; rather, it represents how quickly the Snow device moves to its destination while
    /// in transit. Regional shipping speeds are as follows:</p>
///
/// <ul>
/// <li>
/// <p>In Australia, you have access to express shipping. Typically, Snow devices shipped
/// express are delivered in about a day.</p>
/// </li>
/// <li>
/// <p>In the European Union (EU), you have access to express shipping. Typically, Snow
/// devices shipped express are delivered in about a day. In addition, most countries in the
/// EU have access to standard shipping, which typically takes less than a week, one
/// way.</p>
/// </li>
/// <li>
/// <p>In India, Snow devices are delivered in one to seven days.</p>
/// </li>
/// <li>
/// <p>In the US, you have access to one-day shipping and two-day shipping.</p>
/// </li>
/// </ul>
pub fn shipping_option(mut self, inp: crate::model::ShippingOption) -> Self {
self.inner = self.inner.shipping_option(inp);
self
}
    /// <p>The shipping speed for this job. This speed doesn't dictate how soon you'll get the
    /// Snow device; rather, it represents how quickly the Snow device moves to its destination while
    /// in transit. Regional shipping speeds are as follows:</p>
///
/// <ul>
/// <li>
/// <p>In Australia, you have access to express shipping. Typically, Snow devices shipped
/// express are delivered in about a day.</p>
/// </li>
/// <li>
/// <p>In the European Union (EU), you have access to express shipping. Typically, Snow
/// devices shipped express are delivered in about a day. In addition, most countries in the
/// EU have access to standard shipping, which typically takes less than a week, one
/// way.</p>
/// </li>
/// <li>
/// <p>In India, Snow devices are delivered in one to seven days.</p>
/// </li>
/// <li>
/// <p>In the US, you have access to one-day shipping and two-day shipping.</p>
/// </li>
/// </ul>
pub fn set_shipping_option(
mut self,
input: std::option::Option<crate::model::ShippingOption>,
) -> Self {
self.inner = self.inner.set_shipping_option(input);
self
}
/// <p>Defines the Amazon Simple Notification Service (Amazon SNS) notification settings for
/// this job.</p>
pub fn notification(mut self, inp: crate::model::Notification) -> Self {
self.inner = self.inner.notification(inp);
self
}
/// <p>Defines the Amazon Simple Notification Service (Amazon SNS) notification settings for
/// this job.</p>
pub fn set_notification(
mut self,
input: std::option::Option<crate::model::Notification>,
) -> Self {
self.inner = self.inner.set_notification(input);
self
}
/// <p>The ID of a cluster. If you're creating a job for a node in a cluster, you need to
/// provide only this <code>clusterId</code> value. The other job attributes are inherited from
/// the cluster.</p>
pub fn cluster_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.cluster_id(inp);
self
}
/// <p>The ID of a cluster. If you're creating a job for a node in a cluster, you need to
/// provide only this <code>clusterId</code> value. The other job attributes are inherited from
/// the cluster.</p>
pub fn set_cluster_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_cluster_id(input);
self
}
/// <p>The type of Snow Family Devices to use for this job.
/// </p>
/// <note>
/// <p>For cluster jobs, Amazon Web Services Snow Family currently supports only the
/// <code>EDGE</code> device type.</p>
/// </note>
/// <p>The type of Amazon Web Services Snow device to use for this job. Currently, the only
/// supported device type for cluster jobs is <code>EDGE</code>.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/snowball/latest/developer-guide/device-differences.html">Snowball Edge Device
/// Options</a> in the Snowball Edge Developer Guide.</p>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn snowball_type(mut self, inp: crate::model::SnowballType) -> Self {
self.inner = self.inner.snowball_type(inp);
self
}
/// <p>The type of Snow Family Devices to use for this job.
/// </p>
/// <note>
/// <p>For cluster jobs, Amazon Web Services Snow Family currently supports only the
/// <code>EDGE</code> device type.</p>
/// </note>
/// <p>The type of Amazon Web Services Snow device to use for this job. Currently, the only
/// supported device type for cluster jobs is <code>EDGE</code>.</p>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/snowball/latest/developer-guide/device-differences.html">Snowball Edge Device
/// Options</a> in the Snowball Edge Developer Guide.</p>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn set_snowball_type(
mut self,
input: std::option::Option<crate::model::SnowballType>,
) -> Self {
self.inner = self.inner.set_snowball_type(input);
self
}
/// <p>The forwarding address ID for a job. This field is not supported in most
/// Regions.</p>
pub fn forwarding_address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.forwarding_address_id(inp);
self
}
/// <p>The forwarding address ID for a job. This field is not supported in most
/// Regions.</p>
pub fn set_forwarding_address_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_forwarding_address_id(input);
self
}
/// <p>The tax documents required in your Amazon Web Services Region.</p>
pub fn tax_documents(mut self, inp: crate::model::TaxDocuments) -> Self {
self.inner = self.inner.tax_documents(inp);
self
}
/// <p>The tax documents required in your Amazon Web Services Region.</p>
pub fn set_tax_documents(
mut self,
input: std::option::Option<crate::model::TaxDocuments>,
) -> Self {
self.inner = self.inner.set_tax_documents(input);
self
}
    /// <p>Defines the device configuration for a Snowcone job.</p>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn device_configuration(mut self, inp: crate::model::DeviceConfiguration) -> Self {
self.inner = self.inner.device_configuration(inp);
self
}
    /// <p>Defines the device configuration for a Snowcone job.</p>
///
/// <p>For more information, see
/// "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i> or
/// "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html" (Snow
/// Family Devices and Capacity) in the <i>Snowcone User Guide</i>.</p>
pub fn set_device_configuration(
mut self,
input: std::option::Option<crate::model::DeviceConfiguration>,
) -> Self {
self.inner = self.inner.set_device_configuration(input);
self
}
/// <p>Allows you to securely operate and manage Snowcone devices remotely from outside of your
/// internal network. When set to <code>INSTALLED_AUTOSTART</code>, remote management will
/// automatically be available when the device arrives at your location. Otherwise, you need to
/// use the Snowball Client to manage the device.</p>
pub fn remote_management(mut self, inp: crate::model::RemoteManagement) -> Self {
self.inner = self.inner.remote_management(inp);
self
}
/// <p>Allows you to securely operate and manage Snowcone devices remotely from outside of your
/// internal network. When set to <code>INSTALLED_AUTOSTART</code>, remote management will
/// automatically be available when the device arrives at your location. Otherwise, you need to
/// use the Snowball Client to manage the device.</p>
pub fn set_remote_management(
mut self,
input: std::option::Option<crate::model::RemoteManagement>,
) -> Self {
self.inner = self.inner.set_remote_management(input);
self
}
/// <p>The ID of the long-term pricing type for the device.</p>
pub fn long_term_pricing_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.long_term_pricing_id(inp);
self
}
/// <p>The ID of the long-term pricing type for the device.</p>
pub fn set_long_term_pricing_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_long_term_pricing_id(input);
self
}
}
/// Fluent builder constructing a request to `CreateLongTermPricing`.
///
/// <p>Creates a job with the long-term usage option for a device. The long-term usage is a
/// 1-year or 3-year long-term pricing type for the device. You are billed upfront, and Amazon Web Services provides discounts for long-term pricing.
/// </p>
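///
/// # Example
///
/// A minimal sketch assuming the crate is published as `aws_sdk_snowball`, that the client
/// exposes `create_long_term_pricing()`, and that the `LongTermPricingType` and
/// `SnowballType` variants mirror the documented values (1-year pricing for a Snowball Edge
/// device here).
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_snowball::Client,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_snowball::model::{LongTermPricingType, SnowballType};
///
/// let output = client
///     .create_long_term_pricing()
///     .long_term_pricing_type(LongTermPricingType::OneYear)
///     .is_long_term_pricing_auto_renew(true)
///     .snowball_type(SnowballType::Edge)
///     .send()
///     .await?;
/// println!("long-term pricing: {:?}", output);
/// # Ok(())
/// # }
/// ```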
#[derive(std::fmt::Debug)]
pub struct CreateLongTermPricing<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_long_term_pricing_input::Builder,
}
impl<C, M, R> CreateLongTermPricing<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateLongTermPricing`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateLongTermPricingOutput,
aws_smithy_http::result::SdkError<crate::error::CreateLongTermPricingError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateLongTermPricingInputOperationOutputAlias,
crate::output::CreateLongTermPricingOutput,
crate::error::CreateLongTermPricingError,
crate::input::CreateLongTermPricingInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The type of long-term pricing option you want for the device, either 1-year or 3-year
/// long-term pricing.</p>
pub fn long_term_pricing_type(mut self, inp: crate::model::LongTermPricingType) -> Self {
self.inner = self.inner.long_term_pricing_type(inp);
self
}
/// <p>The type of long-term pricing option you want for the device, either 1-year or 3-year
/// long-term pricing.</p>
pub fn set_long_term_pricing_type(
mut self,
input: std::option::Option<crate::model::LongTermPricingType>,
) -> Self {
self.inner = self.inner.set_long_term_pricing_type(input);
self
}
/// <p>Specifies whether the current long-term pricing type for the device should be
/// renewed.</p>
pub fn is_long_term_pricing_auto_renew(mut self, inp: bool) -> Self {
self.inner = self.inner.is_long_term_pricing_auto_renew(inp);
self
}
/// <p>Specifies whether the current long-term pricing type for the device should be
/// renewed.</p>
pub fn set_is_long_term_pricing_auto_renew(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.inner = self.inner.set_is_long_term_pricing_auto_renew(input);
self
}
/// <p>The type of Snow Family Devices to use for the long-term pricing job.</p>
pub fn snowball_type(mut self, inp: crate::model::SnowballType) -> Self {
self.inner = self.inner.snowball_type(inp);
self
}
/// <p>The type of Snow Family Devices to use for the long-term pricing job.</p>
pub fn set_snowball_type(
mut self,
input: std::option::Option<crate::model::SnowballType>,
) -> Self {
self.inner = self.inner.set_snowball_type(input);
self
}
}
/// Fluent builder constructing a request to `CreateReturnShippingLabel`.
///
/// <p>Creates a shipping label that will be used to return the Snow device to Amazon Web Services.</p>
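///
/// # Example
///
/// A minimal sketch; the crate name, the `create_return_shipping_label()` client method, the
/// job ID, and the `ShippingOption` variant are assumptions/placeholders.
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_snowball::Client,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_snowball::model::ShippingOption;
///
/// let output = client
///     .create_return_shipping_label()
///     .job_id("JID123e4567-e89b-12d3-a456-426655440000")
///     .shipping_option(ShippingOption::Express)
///     .send()
///     .await?;
/// println!("return shipping label: {:?}", output);
/// # Ok(())
/// # }
/// ```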
#[derive(std::fmt::Debug)]
pub struct CreateReturnShippingLabel<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::create_return_shipping_label_input::Builder,
}
impl<C, M, R> CreateReturnShippingLabel<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `CreateReturnShippingLabel`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateReturnShippingLabelOutput,
aws_smithy_http::result::SdkError<crate::error::CreateReturnShippingLabelError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::CreateReturnShippingLabelInputOperationOutputAlias,
crate::output::CreateReturnShippingLabelOutput,
crate::error::CreateReturnShippingLabelError,
crate::input::CreateReturnShippingLabelInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ID for a job that you want to create the return shipping label for; for example,
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The ID for a job that you want to create the return shipping label for; for example,
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
    /// <p>The shipping speed for a particular job. This speed doesn't dictate how soon the device
    /// is returned to Amazon Web Services; it represents how quickly the device moves to its
    /// destination while in transit.</p>
pub fn shipping_option(mut self, inp: crate::model::ShippingOption) -> Self {
self.inner = self.inner.shipping_option(inp);
self
}
    /// <p>The shipping speed for a particular job. This speed doesn't dictate how soon the device
    /// is returned to Amazon Web Services; it represents how quickly the device moves to its
    /// destination while in transit.</p>
pub fn set_shipping_option(
mut self,
input: std::option::Option<crate::model::ShippingOption>,
) -> Self {
self.inner = self.inner.set_shipping_option(input);
self
}
}
/// Fluent builder constructing a request to `DescribeAddress`.
///
/// <p>Takes an <code>AddressId</code> and returns specific details about that address in the
/// form of an <code>Address</code> object.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeAddress<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_address_input::Builder,
}
impl<C, M, R> DescribeAddress<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeAddress`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeAddressOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeAddressError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeAddressInputOperationOutputAlias,
crate::output::DescribeAddressOutput,
crate::error::DescribeAddressError,
crate::input::DescribeAddressInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The automatically generated ID for a specific address.</p>
pub fn address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.address_id(inp);
self
}
/// <p>The automatically generated ID for a specific address.</p>
pub fn set_address_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_address_id(input);
self
}
}
/// Fluent builder constructing a request to `DescribeAddresses`.
///
/// <p>Returns a specified number of <code>ADDRESS</code> objects. Calling this API in one of
/// the US regions will return addresses from the list of all addresses associated with this
/// account in all US regions.</p>
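///
/// # Example
///
/// A sketch of paging through addresses manually with `max_results` and `next_token`. The
/// crate name and the `describe_addresses()` client method are assumptions, and the
/// `next_token` output field is assumed to be a public `Option<String>` on
/// `DescribeAddressesOutput`, as is typical for this generation of the SDK.
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_snowball::Client,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// let mut next_token: Option<String> = None;
/// loop {
///     let page = client
///         .describe_addresses()
///         .max_results(50)
///         .set_next_token(next_token)
///         .send()
///         .await?;
///     println!("page of addresses: {:?}", page);
///     next_token = page.next_token; // assumed public output field
///     if next_token.is_none() {
///         break;
///     }
/// }
/// # Ok(())
/// # }
/// ```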
#[derive(std::fmt::Debug)]
pub struct DescribeAddresses<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_addresses_input::Builder,
}
impl<C, M, R> DescribeAddresses<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeAddresses`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeAddressesOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeAddressesError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeAddressesInputOperationOutputAlias,
crate::output::DescribeAddressesOutput,
crate::error::DescribeAddressesError,
crate::input::DescribeAddressesInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The number of <code>ADDRESS</code> objects to return.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>The number of <code>ADDRESS</code> objects to return.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// <code>ADDRESS</code> objects, you have the option of specifying a value for
/// <code>NextToken</code> as the starting point for your list of returned addresses.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// <code>ADDRESS</code> objects, you have the option of specifying a value for
/// <code>NextToken</code> as the starting point for your list of returned addresses.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `DescribeCluster`.
///
/// <p>Returns information about a specific cluster including shipping information, cluster
/// status, and other important metadata.</p>
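///
/// # Example
///
/// A minimal sketch; the crate name, the `describe_cluster()` client method, and the cluster
/// ID are assumptions/placeholders.
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_snowball::Client,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// let output = client
///     .describe_cluster()
///     .cluster_id("CID123e4567-e89b-12d3-a456-426655440000")
///     .send()
///     .await?;
/// println!("cluster metadata: {:?}", output);
/// # Ok(())
/// # }
/// ```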
#[derive(std::fmt::Debug)]
pub struct DescribeCluster<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_cluster_input::Builder,
}
impl<C, M, R> DescribeCluster<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeCluster`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeClusterOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeClusterError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeClusterInputOperationOutputAlias,
crate::output::DescribeClusterOutput,
crate::error::DescribeClusterError,
crate::input::DescribeClusterInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The automatically generated ID for a cluster.</p>
pub fn cluster_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.cluster_id(inp);
self
}
/// <p>The automatically generated ID for a cluster.</p>
pub fn set_cluster_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_cluster_id(input);
self
}
}
/// Fluent builder constructing a request to `DescribeJob`.
///
/// <p>Returns information about a specific job including shipping information, job status,
/// and other important metadata. </p>
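///
/// # Example
///
/// A minimal sketch that matches on the result instead of propagating it; the crate name,
/// the `describe_job()` client method, and the job ID are assumptions/placeholders.
///
/// ```no_run
/// # async fn example(client: &aws_sdk_snowball::Client) {
/// match client
///     .describe_job()
///     .job_id("JID123e4567-e89b-12d3-a456-426655440000")
///     .send()
///     .await
/// {
///     Ok(output) => println!("job metadata: {:?}", output),
///     Err(err) => eprintln!("DescribeJob failed: {:?}", err),
/// }
/// # }
/// ```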
#[derive(std::fmt::Debug)]
pub struct DescribeJob<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_job_input::Builder,
}
impl<C, M, R> DescribeJob<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeJob`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeJobOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeJobError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeJobInputOperationOutputAlias,
crate::output::DescribeJobOutput,
crate::error::DescribeJobError,
crate::input::DescribeJobInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The automatically generated ID for a job, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The automatically generated ID for a job, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
}
/// Fluent builder constructing a request to `DescribeReturnShippingLabel`.
///
/// <p>Information on the shipping label of a Snow device that is being returned to Amazon Web Services.</p>
#[derive(std::fmt::Debug)]
pub struct DescribeReturnShippingLabel<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_return_shipping_label_input::Builder,
}
impl<C, M, R> DescribeReturnShippingLabel<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `DescribeReturnShippingLabel`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeReturnShippingLabelOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeReturnShippingLabelError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeReturnShippingLabelInputOperationOutputAlias,
crate::output::DescribeReturnShippingLabelOutput,
crate::error::DescribeReturnShippingLabelError,
crate::input::DescribeReturnShippingLabelInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The automatically generated ID for a job, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The automatically generated ID for a job, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
}
/// Fluent builder constructing a request to `GetJobManifest`.
///
/// <p>Returns a link to an Amazon S3 presigned URL for the manifest file associated with the
/// specified <code>JobId</code> value. You can access the manifest file for up to 60 minutes
/// after this request has been made. To access the manifest file after 60 minutes have passed,
/// you'll have to make another call to the <code>GetJobManifest</code> action.</p>
///
/// <p>The manifest is an encrypted file that you can download after your job enters the
/// <code>WithCustomer</code> status. The manifest is decrypted by using the
    /// <code>UnlockCode</code> value when you pass both values to the Snow device through the
/// Snowball client when the client is started for the first time.</p>
///
///
/// <p>As a best practice, we recommend that you don't save a copy of an
/// <code>UnlockCode</code> value in the same location as the manifest file for that job. Saving
/// these separately helps prevent unauthorized parties from gaining access to the Snow device
/// associated with that job.</p>
///
///
/// <p>The credentials of a given job, including its manifest file and unlock code, expire 360
/// days after the job is created.</p>
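///
/// # Example
///
/// A minimal sketch; because the presigned URL expires after 60 minutes, fetch the manifest
/// shortly before you need it. The crate name, the `get_job_manifest()` client method, the
/// job ID, and the `manifest_uri` output field are assumptions/placeholders.
///
/// ```no_run
/// # async fn example(
/// #     client: &aws_sdk_snowball::Client,
/// # ) -> Result<(), Box<dyn std::error::Error>> {
/// let output = client
///     .get_job_manifest()
///     .job_id("JID123e4567-e89b-12d3-a456-426655440000")
///     .send()
///     .await?;
/// println!("manifest URI: {:?}", output.manifest_uri); // assumed public output field
/// # Ok(())
/// # }
/// ```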
#[derive(std::fmt::Debug)]
pub struct GetJobManifest<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_job_manifest_input::Builder,
}
impl<C, M, R> GetJobManifest<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetJobManifest`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetJobManifestOutput,
aws_smithy_http::result::SdkError<crate::error::GetJobManifestError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetJobManifestInputOperationOutputAlias,
crate::output::GetJobManifestOutput,
crate::error::GetJobManifestError,
crate::input::GetJobManifestInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ID for a job that you want to get the manifest file for, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The ID for a job that you want to get the manifest file for, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
}
/// Fluent builder constructing a request to `GetJobUnlockCode`.
///
    /// <p>Returns the <code>UnlockCode</code> value for the specified job. A particular
/// <code>UnlockCode</code> value can be accessed for up to 360 days after the associated job
/// has been created.</p>
///
/// <p>The <code>UnlockCode</code> value is a 29-character code with 25 alphanumeric
/// characters and 4 hyphens. This code is used to decrypt the manifest file when it is passed
/// along with the manifest to the Snow device through the Snowball client when the client is
/// started for the first time.</p>
///
/// <p>As a best practice, we recommend that you don't save a copy of the
/// <code>UnlockCode</code> in the same location as the manifest file for that job. Saving these
/// separately helps prevent unauthorized parties from gaining access to the Snow device
/// associated with that job.</p>
#[derive(std::fmt::Debug)]
pub struct GetJobUnlockCode<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_job_unlock_code_input::Builder,
}
impl<C, M, R> GetJobUnlockCode<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetJobUnlockCode`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetJobUnlockCodeOutput,
aws_smithy_http::result::SdkError<crate::error::GetJobUnlockCodeError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetJobUnlockCodeInputOperationOutputAlias,
crate::output::GetJobUnlockCodeOutput,
crate::error::GetJobUnlockCodeError,
crate::input::GetJobUnlockCodeInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ID for the job that you want to get the <code>UnlockCode</code> value for, for
/// example <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The ID for the job that you want to get the <code>UnlockCode</code> value for, for
/// example <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
}
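// Illustrative sketch: the unlock code for the same job is retrieved the same way.
// Per the guidance above, avoid storing it alongside the downloaded manifest.
//
//     let output = client
//         .get_job_unlock_code()
//         .job_id("JID123e4567-e89b-12d3-a456-426655440000")
//         .send()
//         .await?;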
/// Fluent builder constructing a request to `GetSnowballUsage`.
///
/// <p>Returns information about the Snow Family service limit for your account, and also the
/// number of Snow devices your account has in use.</p>
///
/// <p>The default service limit for the number of Snow devices that you can have at one time
/// is 1. If you want to increase your service limit, contact Amazon Web Services Support.</p>
#[derive(std::fmt::Debug)]
pub struct GetSnowballUsage<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_snowball_usage_input::Builder,
}
impl<C, M, R> GetSnowballUsage<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetSnowballUsage`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetSnowballUsageOutput,
aws_smithy_http::result::SdkError<crate::error::GetSnowballUsageError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetSnowballUsageInputOperationOutputAlias,
crate::output::GetSnowballUsageOutput,
crate::error::GetSnowballUsageError,
crate::input::GetSnowballUsageInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
}
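// Illustrative sketch: this operation takes no parameters, so checking the account's
// Snow device service limit and current usage is a single call.
//
//     let usage = client.get_snowball_usage().send().await?;
//     println!("{:?}", usage);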
/// Fluent builder constructing a request to `GetSoftwareUpdates`.
///
/// <p>Returns an Amazon S3 presigned URL for an update file associated with a specified
/// <code>JobId</code>.</p>
#[derive(std::fmt::Debug)]
pub struct GetSoftwareUpdates<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_software_updates_input::Builder,
}
impl<C, M, R> GetSoftwareUpdates<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetSoftwareUpdates`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetSoftwareUpdatesOutput,
aws_smithy_http::result::SdkError<crate::error::GetSoftwareUpdatesError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetSoftwareUpdatesInputOperationOutputAlias,
crate::output::GetSoftwareUpdatesOutput,
crate::error::GetSoftwareUpdatesError,
crate::input::GetSoftwareUpdatesInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ID for a job that you want to get the software update file for, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The ID for a job that you want to get the software update file for, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
}
/// Fluent builder constructing a request to `ListClusterJobs`.
///
/// <p>Returns an array of <code>JobListEntry</code> objects of the specified length. Each
/// <code>JobListEntry</code> object is for a job in the specified cluster and contains a job's
/// state, a job's ID, and other information.</p>
#[derive(std::fmt::Debug)]
pub struct ListClusterJobs<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_cluster_jobs_input::Builder,
}
impl<C, M, R> ListClusterJobs<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListClusterJobs`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListClusterJobsOutput,
aws_smithy_http::result::SdkError<crate::error::ListClusterJobsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListClusterJobsInputOperationOutputAlias,
crate::output::ListClusterJobsOutput,
crate::error::ListClusterJobsError,
crate::input::ListClusterJobsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The 39-character ID for the cluster that you want to list, for example
/// <code>CID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn cluster_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.cluster_id(inp);
self
}
/// <p>The 39-character ID for the cluster that you want to list, for example
/// <code>CID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_cluster_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_cluster_id(input);
self
}
/// <p>The number of <code>JobListEntry</code> objects to return.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>The number of <code>JobListEntry</code> objects to return.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// <code>JobListEntry</code> objects, you have the option of specifying <code>NextToken</code>
/// as the starting point for your returned list.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// <code>JobListEntry</code> objects, you have the option of specifying <code>NextToken</code>
/// as the starting point for your returned list.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
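// Illustrative sketch (assuming the usual generated accessors on the output type):
// paging through every job in a cluster by feeding each response's `next_token`
// back into the following request. The cluster ID is the placeholder from the docs above.
//
//     let mut next_token: Option<String> = None;
//     loop {
//         let resp = client
//             .list_cluster_jobs()
//             .cluster_id("CID123e4567-e89b-12d3-a456-426655440000")
//             .set_next_token(next_token.clone())
//             .send()
//             .await?;
//         // ... inspect the returned JobListEntry objects here ...
//         next_token = resp.next_token().map(str::to_string);
//         if next_token.is_none() {
//             break;
//         }
//     }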
/// Fluent builder constructing a request to `ListClusters`.
///
/// <p>Returns an array of <code>ClusterListEntry</code> objects of the specified length. Each
/// <code>ClusterListEntry</code> object contains a cluster's state, a cluster's ID, and other
/// important status information.</p>
#[derive(std::fmt::Debug)]
pub struct ListClusters<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_clusters_input::Builder,
}
impl<C, M, R> ListClusters<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListClusters`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListClustersOutput,
aws_smithy_http::result::SdkError<crate::error::ListClustersError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListClustersInputOperationOutputAlias,
crate::output::ListClustersOutput,
crate::error::ListClustersError,
crate::input::ListClustersInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The number of <code>ClusterListEntry</code> objects to return.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>The number of <code>ClusterListEntry</code> objects to return.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// <code>ClusterListEntry</code> objects, you have the option of specifying
/// <code>NextToken</code> as the starting point for your returned list.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// <code>ClusterListEntry</code> objects, you have the option of specifying
/// <code>NextToken</code> as the starting point for your returned list.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListCompatibleImages`.
///
/// <p>This action returns a list of the different Amazon EC2 Amazon Machine Images (AMIs)
/// that are owned by your Amazon Web Services account that would be supported for use on a Snow
/// device. Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM,
/// Ubuntu Server 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the
/// Amazon Web Services Marketplace.</p>
#[derive(std::fmt::Debug)]
pub struct ListCompatibleImages<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_compatible_images_input::Builder,
}
impl<C, M, R> ListCompatibleImages<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListCompatibleImages`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListCompatibleImagesOutput,
aws_smithy_http::result::SdkError<crate::error::ListCompatibleImagesError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListCompatibleImagesInputOperationOutputAlias,
crate::output::ListCompatibleImagesOutput,
crate::error::ListCompatibleImagesError,
crate::input::ListCompatibleImagesInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The maximum number of results for the list of compatible images. Currently, a Snowball
/// Edge device can store 10 AMIs.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>The maximum number of results for the list of compatible images. Currently, a Snowball
/// Edge device can store 10 AMIs.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// compatible images, you can specify a value for <code>NextToken</code> as the starting point
/// for your list of returned images.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// compatible images, you can specify a value for <code>NextToken</code> as the starting point
/// for your list of returned images.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListJobs`.
///
/// <p>Returns an array of <code>JobListEntry</code> objects of the specified length. Each
/// <code>JobListEntry</code> object contains a job's state, a job's ID, and a value that
/// indicates whether the job is a job part, in the case of export jobs. Calling this API action
/// in one of the US regions will return jobs from the list of all jobs associated with this
/// account in all US regions.</p>
#[derive(std::fmt::Debug)]
pub struct ListJobs<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_jobs_input::Builder,
}
impl<C, M, R> ListJobs<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListJobs`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListJobsOutput,
aws_smithy_http::result::SdkError<crate::error::ListJobsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListJobsInputOperationOutputAlias,
crate::output::ListJobsOutput,
crate::error::ListJobsError,
crate::input::ListJobsInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The number of <code>JobListEntry</code> objects to return.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>The number of <code>JobListEntry</code> objects to return.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// <code>JobListEntry</code> objects, you have the option of specifying <code>NextToken</code>
/// as the starting point for your returned list.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>HTTP requests are stateless. To identify what object comes "next" in the list of
/// <code>JobListEntry</code> objects, you have the option of specifying <code>NextToken</code>
/// as the starting point for your returned list.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListLongTermPricing`.
///
/// <p>Lists all long-term pricing types.</p>
#[derive(std::fmt::Debug)]
pub struct ListLongTermPricing<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_long_term_pricing_input::Builder,
}
impl<C, M, R> ListLongTermPricing<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `ListLongTermPricing`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListLongTermPricingOutput,
aws_smithy_http::result::SdkError<crate::error::ListLongTermPricingError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListLongTermPricingInputOperationOutputAlias,
crate::output::ListLongTermPricingOutput,
crate::error::ListLongTermPricingError,
crate::input::ListLongTermPricingInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The maximum number of <code>ListLongTermPricing</code> objects to return.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
/// <p>The maximum number of <code>ListLongTermPricing</code> objects to return.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>Because HTTP requests are stateless, this is the starting point for your next list of
/// <code>ListLongTermPricing</code> to return.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
/// <p>Because HTTP requests are stateless, this is the starting point for your next list of
/// <code>ListLongTermPricing</code> to return.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `UpdateCluster`.
///
/// <p>While a cluster's <code>ClusterState</code> value is in the <code>AwaitingQuorum</code>
/// state, you can update some of the information associated with a cluster. Once the cluster
/// changes to a different job state, usually 60 minutes after the cluster is created, this
/// action is no longer available.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateCluster<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_cluster_input::Builder,
}
impl<C, M, R> UpdateCluster<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateCluster`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateClusterOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateClusterError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateClusterInputOperationOutputAlias,
crate::output::UpdateClusterOutput,
crate::error::UpdateClusterError,
crate::input::UpdateClusterInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The cluster ID of the cluster that you want to update, for example
/// <code>CID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn cluster_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.cluster_id(inp);
self
}
/// <p>The cluster ID of the cluster that you want to update, for example
/// <code>CID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_cluster_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_cluster_id(input);
self
}
/// <p>The new role Amazon Resource Name (ARN) that you want to associate with this cluster.
/// To create a role ARN, use the <a href="https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html">CreateRole</a> API action in Identity and Access Management (IAM).</p>
pub fn role_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.role_arn(inp);
self
}
/// <p>The new role Amazon Resource Name (ARN) that you want to associate with this cluster.
/// To create a role ARN, use the <a href="https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html">CreateRole</a> API action in Identity and Access Management (IAM).</p>
pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_role_arn(input);
self
}
/// <p>The updated description of this cluster.</p>
pub fn description(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(inp);
self
}
/// <p>The updated description of this cluster.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The updated arrays of <a>JobResource</a> objects that can include updated
/// <a>S3Resource</a> objects or <a>LambdaResource</a> objects.</p>
pub fn resources(mut self, inp: crate::model::JobResource) -> Self {
self.inner = self.inner.resources(inp);
self
}
/// <p>The updated arrays of <a>JobResource</a> objects that can include updated
/// <a>S3Resource</a> objects or <a>LambdaResource</a> objects.</p>
pub fn set_resources(
mut self,
input: std::option::Option<crate::model::JobResource>,
) -> Self {
self.inner = self.inner.set_resources(input);
self
}
/// <p>Specifies the service or services on the Snow Family device that your transferred data
/// will be exported from or imported into. Amazon Web Services Snow Family device clusters support Amazon S3 and NFS
/// (Network File System).</p>
pub fn on_device_service_configuration(
mut self,
inp: crate::model::OnDeviceServiceConfiguration,
) -> Self {
self.inner = self.inner.on_device_service_configuration(inp);
self
}
/// <p>Specifies the service or services on the Snow Family device that your transferred data
/// will be exported from or imported into. Amazon Web Services Snow Family device clusters support Amazon S3 and NFS
/// (Network File System).</p>
pub fn set_on_device_service_configuration(
mut self,
input: std::option::Option<crate::model::OnDeviceServiceConfiguration>,
) -> Self {
self.inner = self.inner.set_on_device_service_configuration(input);
self
}
/// <p>The ID of the updated <a>Address</a> object.</p>
pub fn address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.address_id(inp);
self
}
/// <p>The ID of the updated <a>Address</a> object.</p>
pub fn set_address_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_address_id(input);
self
}
/// <p>The updated shipping option value of this cluster's <a>ShippingDetails</a>
/// object.</p>
pub fn shipping_option(mut self, inp: crate::model::ShippingOption) -> Self {
self.inner = self.inner.shipping_option(inp);
self
}
/// <p>The updated shipping option value of this cluster's <a>ShippingDetails</a>
/// object.</p>
pub fn set_shipping_option(
mut self,
input: std::option::Option<crate::model::ShippingOption>,
) -> Self {
self.inner = self.inner.set_shipping_option(input);
self
}
/// <p>The new or updated <a>Notification</a> object.</p>
pub fn notification(mut self, inp: crate::model::Notification) -> Self {
self.inner = self.inner.notification(inp);
self
}
/// <p>The new or updated <a>Notification</a> object.</p>
pub fn set_notification(
mut self,
input: std::option::Option<crate::model::Notification>,
) -> Self {
self.inner = self.inner.set_notification(input);
self
}
/// <p>The updated ID for the forwarding address for a cluster. This field is not
/// supported in most regions.</p>
pub fn forwarding_address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.forwarding_address_id(inp);
self
}
/// <p>The updated ID for the forwarding address for a cluster. This field is not
/// supported in most regions.</p>
pub fn set_forwarding_address_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_forwarding_address_id(input);
self
}
}
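// Illustrative sketch: while the cluster is still in the `AwaitingQuorum` state, its
// description can be updated like this (cluster ID is the placeholder from the docs above).
//
//     client
//         .update_cluster()
//         .cluster_id("CID123e4567-e89b-12d3-a456-426655440000")
//         .description("Updated cluster description")
//         .send()
//         .await?;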
/// Fluent builder constructing a request to `UpdateJob`.
///
/// <p>While a job's <code>JobState</code> value is <code>New</code>, you can update some of
/// the information associated with a job. Once the job changes to a different job state, usually
/// within 60 minutes of the job being created, this action is no longer available.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateJob<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_job_input::Builder,
}
impl<C, M, R> UpdateJob<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateJob`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateJobOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateJobError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateJobInputOperationOutputAlias,
crate::output::UpdateJobOutput,
crate::error::UpdateJobError,
crate::input::UpdateJobInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The job ID of the job that you want to update, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The job ID of the job that you want to update, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
/// <p>The new role Amazon Resource Name (ARN) that you want to associate with this job. To
    /// create a role ARN, use the <a href="https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html">CreateRole</a> API action in Identity and Access Management
    /// (IAM).</p>
pub fn role_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.role_arn(inp);
self
}
/// <p>The new role Amazon Resource Name (ARN) that you want to associate with this job. To
    /// create a role ARN, use the <a href="https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html">CreateRole</a> API action in Identity and Access Management
    /// (IAM).</p>
pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_role_arn(input);
self
}
/// <p>The new or updated <a>Notification</a> object.</p>
pub fn notification(mut self, inp: crate::model::Notification) -> Self {
self.inner = self.inner.notification(inp);
self
}
/// <p>The new or updated <a>Notification</a> object.</p>
pub fn set_notification(
mut self,
input: std::option::Option<crate::model::Notification>,
) -> Self {
self.inner = self.inner.set_notification(input);
self
}
    /// <p>The updated <a>JobResource</a> object.</p>
pub fn resources(mut self, inp: crate::model::JobResource) -> Self {
self.inner = self.inner.resources(inp);
self
}
    /// <p>The updated <a>JobResource</a> object.</p>
pub fn set_resources(
mut self,
input: std::option::Option<crate::model::JobResource>,
) -> Self {
self.inner = self.inner.set_resources(input);
self
}
/// <p>Specifies the service or services on the Snow Family device that your transferred data
/// will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File
/// System) and the Amazon Web Services Storage Gateway service Tape Gateway type.</p>
pub fn on_device_service_configuration(
mut self,
inp: crate::model::OnDeviceServiceConfiguration,
) -> Self {
self.inner = self.inner.on_device_service_configuration(inp);
self
}
/// <p>Specifies the service or services on the Snow Family device that your transferred data
/// will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File
/// System) and the Amazon Web Services Storage Gateway service Tape Gateway type.</p>
pub fn set_on_device_service_configuration(
mut self,
input: std::option::Option<crate::model::OnDeviceServiceConfiguration>,
) -> Self {
self.inner = self.inner.set_on_device_service_configuration(input);
self
}
/// <p>The ID of the updated <a>Address</a> object.</p>
pub fn address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.address_id(inp);
self
}
/// <p>The ID of the updated <a>Address</a> object.</p>
pub fn set_address_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_address_id(input);
self
}
/// <p>The updated shipping option value of this job's <a>ShippingDetails</a>
/// object.</p>
pub fn shipping_option(mut self, inp: crate::model::ShippingOption) -> Self {
self.inner = self.inner.shipping_option(inp);
self
}
/// <p>The updated shipping option value of this job's <a>ShippingDetails</a>
/// object.</p>
pub fn set_shipping_option(
mut self,
input: std::option::Option<crate::model::ShippingOption>,
) -> Self {
self.inner = self.inner.set_shipping_option(input);
self
}
/// <p>The updated description of this job's <a>JobMetadata</a> object.</p>
pub fn description(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(inp);
self
}
/// <p>The updated description of this job's <a>JobMetadata</a> object.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The updated <code>SnowballCapacityPreference</code> of this job's <a>JobMetadata</a> object. The 50 TB Snowballs are only available in the US
/// regions.</p>
///
    /// <p>For more information, see
    /// <a href="https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html">Snow
    /// Family Devices and Capacity</a> in the <i>Snowcone User Guide</i> or
    /// <a href="https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html">Snow
    /// Family Devices and Capacity</a> in the <i>Snowball Edge Developer Guide</i>.</p>
pub fn snowball_capacity_preference(mut self, inp: crate::model::SnowballCapacity) -> Self {
self.inner = self.inner.snowball_capacity_preference(inp);
self
}
/// <p>The updated <code>SnowballCapacityPreference</code> of this job's <a>JobMetadata</a> object. The 50 TB Snowballs are only available in the US
/// regions.</p>
///
    /// <p>For more information, see
    /// <a href="https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html">Snow
    /// Family Devices and Capacity</a> in the <i>Snowcone User Guide</i> or
    /// <a href="https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html">Snow
    /// Family Devices and Capacity</a> in the <i>Snowball Edge Developer Guide</i>.</p>
pub fn set_snowball_capacity_preference(
mut self,
input: std::option::Option<crate::model::SnowballCapacity>,
) -> Self {
self.inner = self.inner.set_snowball_capacity_preference(input);
self
}
/// <p>The updated ID for the forwarding address for a job. This field is not
/// supported in most regions.</p>
pub fn forwarding_address_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.forwarding_address_id(inp);
self
}
/// <p>The updated ID for the forwarding address for a job. This field is not
/// supported in most regions.</p>
pub fn set_forwarding_address_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_forwarding_address_id(input);
self
}
}
/// Fluent builder constructing a request to `UpdateJobShipmentState`.
///
/// <p>Updates the shipment state of a job when the shipment changes to a different state.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateJobShipmentState<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_job_shipment_state_input::Builder,
}
impl<C, M, R> UpdateJobShipmentState<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateJobShipmentState`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateJobShipmentStateOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateJobShipmentStateError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateJobShipmentStateInputOperationOutputAlias,
crate::output::UpdateJobShipmentStateOutput,
crate::error::UpdateJobShipmentStateError,
crate::input::UpdateJobShipmentStateInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The job ID of the job whose shipment date you want to update, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn job_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.job_id(inp);
self
}
/// <p>The job ID of the job whose shipment date you want to update, for example
/// <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_job_id(input);
self
}
/// <p>The state of a device when it is being shipped. </p>
/// <p>Set to <code>RECEIVED</code> when the device arrives at your location.</p>
/// <p>Set to <code>RETURNED</code> when you have returned the device to Amazon Web Services.</p>
pub fn shipment_state(mut self, inp: crate::model::ShipmentState) -> Self {
self.inner = self.inner.shipment_state(inp);
self
}
/// <p>The state of a device when it is being shipped. </p>
/// <p>Set to <code>RECEIVED</code> when the device arrives at your location.</p>
/// <p>Set to <code>RETURNED</code> when you have returned the device to Amazon Web Services.</p>
pub fn set_shipment_state(
mut self,
input: std::option::Option<crate::model::ShipmentState>,
) -> Self {
self.inner = self.inner.set_shipment_state(input);
self
}
}
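// Illustrative sketch: recording that a device has arrived at your location. The
// `ShipmentState::Received` variant name is assumed from the enum values listed above.
//
//     client
//         .update_job_shipment_state()
//         .job_id("JID123e4567-e89b-12d3-a456-426655440000")
//         .shipment_state(crate::model::ShipmentState::Received)
//         .send()
//         .await?;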
/// Fluent builder constructing a request to `UpdateLongTermPricing`.
///
/// <p>Updates the long-term pricing type.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateLongTermPricing<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::update_long_term_pricing_input::Builder,
}
impl<C, M, R> UpdateLongTermPricing<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `UpdateLongTermPricing`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateLongTermPricingOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateLongTermPricingError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::UpdateLongTermPricingInputOperationOutputAlias,
crate::output::UpdateLongTermPricingOutput,
crate::error::UpdateLongTermPricingError,
crate::input::UpdateLongTermPricingInputOperationRetryAlias,
>,
{
let input = self.inner.build().map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
let op = input
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p>The ID of the long-term pricing type for the device.</p>
pub fn long_term_pricing_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.long_term_pricing_id(inp);
self
}
/// <p>The ID of the long-term pricing type for the device.</p>
pub fn set_long_term_pricing_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_long_term_pricing_id(input);
self
}
/// <p>Specifies that a device that is ordered with long-term pricing should be replaced with a
/// new device.</p>
pub fn replacement_job(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.replacement_job(inp);
self
}
/// <p>Specifies that a device that is ordered with long-term pricing should be replaced with a
/// new device.</p>
pub fn set_replacement_job(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_replacement_job(input);
self
}
/// <p>If set to <code>true</code>, specifies that the current long-term pricing type for the
/// device should be automatically renewed before the long-term pricing contract expires.</p>
pub fn is_long_term_pricing_auto_renew(mut self, inp: bool) -> Self {
self.inner = self.inner.is_long_term_pricing_auto_renew(inp);
self
}
/// <p>If set to <code>true</code>, specifies that the current long-term pricing type for the
/// device should be automatically renewed before the long-term pricing contract expires.</p>
pub fn set_is_long_term_pricing_auto_renew(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.inner = self.inner.set_is_long_term_pricing_auto_renew(input);
self
}
}
}
impl<C> Client<C, crate::middleware::DefaultMiddleware, aws_smithy_client::retry::Standard> {
/// Creates a client with the given service config and connector override.
pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
let sleep_impl = conf.sleep_impl.clone();
let mut builder = aws_smithy_client::Builder::new()
.connector(conn)
.middleware(crate::middleware::DefaultMiddleware::new());
builder.set_retry_config(retry_config.into());
builder.set_timeout_config(timeout_config);
if let Some(sleep_impl) = sleep_impl {
builder.set_sleep_impl(Some(sleep_impl));
}
let client = builder.build();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
impl
Client<
aws_smithy_client::erase::DynConnector,
crate::middleware::DefaultMiddleware,
aws_smithy_client::retry::Standard,
>
{
/// Creates a new client from a shared config.
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn new(config: &aws_types::config::Config) -> Self {
Self::from_conf(config.into())
}
/// Creates a new client from the service [`Config`](crate::Config).
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_conf(conf: crate::Config) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
let sleep_impl = conf.sleep_impl.clone();
let mut builder = aws_smithy_client::Builder::dyn_https()
.middleware(crate::middleware::DefaultMiddleware::new());
builder.set_retry_config(retry_config.into());
builder.set_timeout_config(timeout_config);
// the builder maintains a try-state. To avoid suppressing the warning when sleep is unset,
// only set it if we actually have a sleep impl.
if let Some(sleep_impl) = sleep_impl {
builder.set_sleep_impl(Some(sleep_impl));
}
let client = builder.build();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
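// Illustrative sketch: constructing a client from a shared configuration in a consuming
// application (the `aws_config::load_from_env()` helper is an assumption, not part of this
// crate) and issuing a request. Retry behavior can be tuned via the `RetryConfig` noted above.
//
//     let shared_config = aws_config::load_from_env().await;
//     let client = Client::new(&shared_config);
//     let usage = client.get_snowball_usage().send().await?;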
|
get_snowball_usage
|
paths.go
|
// Code generated by goa v3.3.1, DO NOT EDIT.
//
// HTTP request path constructors for the catalog service.
|
package server
import (
"fmt"
)
// RefreshCatalogPath returns the URL path to the catalog service Refresh HTTP endpoint.
func RefreshCatalogPath(catalogName string) string {
return fmt.Sprintf("/catalog/%v/refresh", catalogName)
}
// RefreshAllCatalogPath returns the URL path to the catalog service RefreshAll HTTP endpoint.
func RefreshAllCatalogPath() string {
return "/catalog/refresh"
}
// CatalogErrorCatalogPath returns the URL path to the catalog service CatalogError HTTP endpoint.
func CatalogErrorCatalogPath(catalogName string) string {
return fmt.Sprintf("/catalog/%v/error", catalogName)
}
|
//
// Command:
// $ goa gen github.com/tektoncd/hub/api/design
|
validate.go
|
package module
import (
"fmt"
"github.com/liamg/peridot/internal/pkg/config"
"github.com/liamg/peridot/internal/pkg/variable"
)
func validateVariables(expectedVars []config.Variable, actual variable.Collection) error {
for _, expected := range expectedVars {
if expected.Required {
if !actual.Has(expected.Name) {
return fmt.Errorf("required variable '%s' is not defined", expected.Name)
}
}
}
return nil
}
func
|
(expectedVars []config.Variable, actual variable.Collection) variable.Collection {
merged := variable.NewCollection(nil)
for _, input := range expectedVars {
if actual.Has(input.Name) {
merged.Set(input.Name, actual.Get(input.Name).Interface())
} else if !input.Required && input.Default != nil {
merged.Set(input.Name, input.Default)
}
}
merged.MergeIn(config.BaseVariables())
return merged
}
|
applyVariableDefaults
|
dw.js
|
$("#tos").on("click", function(event) {
if ($("#tos").hasClass("index")) {
window.open("privacy/tou.html","_self");
} else {
window.open("../privacy/tou.html","_self");
}
});
$("#priv").on("click", function(event) {
|
if ($("#priv").hasClass("index")) {
window.open("Disclaimer/privacy.html","_self");
} else {
window.open("../Disclaimer/privacy.html","_self");
}
});
| |
xuechi_blood_add.go
|
package listener
import (
"fgame/fgame/core/event"
"fgame/fgame/cross/xuechi/pbutil"
battleeventtypes "fgame/fgame/game/battle/event/types"
gameevent "fgame/fgame/game/event"
"fgame/fgame/game/scene/scene"
)
// Replenish HP from the blood pool (xue chi)
func xueChiAdd(target event.EventTarget, data event.EventData) (err error) {
pl, ok := target.(scene.Player)
if !ok {
return
}
isXueChiSync := pbutil.BuildISXueChiSync(pl)
pl.SendMsg(isXueChiSync)
return
}
func init() {
gameeve
|
nt.AddEventListener(battleeventtypes.EventTypeBattlePlayerXueChiBloodAdd, event.EventListenerFunc(xueChiAdd))
}
|
|
CoinChange2.py
|
"""
Given coins of different denominations (coins) and a total amount (amount), write a function to compute the fewest number of coins needed to make up that amount. If no combination of the coins can make up the amount, return -1.
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/coin-change
Copyright belongs to LeetCode-CN (领扣网络). Contact them for authorization before commercial reprinting; for non-commercial reprinting, please cite the source.
"""
|
# This is a form of pruning (memoized recursion)
class Solution:
    def coinChange(self, coins, amount: int):
        # Memo table
memo = dict()
def dp(n):
            # Check the memo to avoid recomputing subproblems
if n in memo: return memo[n]
if n == 0: return 0
if n < 0: return -1
res = float('INF')
for coin in set(coins):
subproblem = dp(n - coin)
if subproblem == -1: continue
res = min(res, 1 + subproblem)
            # Record the result in the memo
memo[n] = res if res != float('INF') else -1
return memo[n]
return dp(amount)
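    # A minimal alternative sketch (added for comparison, not part of the original
    # solution): the same recurrence computed bottom-up with a DP table, which avoids
    # hitting Python's recursion limit for large amounts.
    def coinChangeDP(self, coins, amount: int):
        INF = float('inf')
        dp = [0] + [INF] * amount  # dp[i] = fewest coins needed to make amount i
        for i in range(1, amount + 1):
            for coin in coins:
                if coin <= i and dp[i - coin] + 1 < dp[i]:
                    dp[i] = dp[i - coin] + 1
        return dp[amount] if dp[amount] != INF else -1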
if __name__ == "__main__":
s = Solution()
coins = [186,419,83,408]
amount = 6249
# coins = [1,3,5]
# amount = 11
print(s.coinChange(coins,amount))
|
# The memo resolves the overlapping subproblems
|
web.py
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import salt.ext.tornado as tornado
import traceback
import types
from inspect import isclass
from io import BytesIO
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado import escape
from salt.ext.tornado import gen
from salt.ext.tornado import httputil
from salt.ext.tornado import iostream
from salt.ext.tornado import locale
from salt.ext.tornado.log import access_log, app_log, gen_log
from salt.ext.tornado import stack_context
from salt.ext.tornado import template
from salt.ext.tornado.escape import utf8, _unicode
from salt.ext.tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
ReversibleRouter, Rule, ReversibleRuleRouter,
URLSpec)
from salt.ext.tornado.util import (ObjectDict, raise_exc_info,
unicode_type, _websocket_mask, PY3)
url = URLSpec
if PY3:
import http.cookies as Cookie
import urllib.parse as urlparse
from urllib.parse import urlencode
else:
import Cookie
import urlparse
from urllib import urlencode
try:
import typing # noqa
# The following types are accepted by RequestHandler.set_header
# and related methods.
_HeaderTypes = typing.Union[bytes, unicode_type,
numbers.Integral, datetime.datetime]
except ImportError:
pass
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
|
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
This decorator is for callback-style asynchronous methods; for
coroutines, use the ``@gen.coroutine`` decorator without
``@asynchronous``. (It is legal for legacy reasons to use the two
decorators together provided ``@asynchronous`` is first, but
``@asynchronous`` will be ignored in this case)
This decorator should only be applied to the :ref:`HTTP verb
methods <verbs>`; its behavior is undefined for any other method.
This decorator does not *make* a method asynchronous; it tells
the framework that the method *is* asynchronous. For this decorator
to be useful the method must (at least sometimes) do something
asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call
`self.finish() <RequestHandler.finish>` to finish the HTTP
request. Without this decorator, the request is automatically
finished when the ``get()`` or ``post()`` method returns. Example:
.. testcode::
class MyRequestHandler(RequestHandler):
@asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
.. testoutput::
:hide:
.. versionchanged:: 3.1
The ability to use ``@gen.coroutine`` without ``@asynchronous``.
.. versionchanged:: 4.3 Returning anything but ``None`` or a
yieldable object from a method decorated with ``@asynchronous``
is an error. Such return values were previously ignored silently.
"""
# Delay the IOLoop import because it's not available on app engine.
from salt.ext.tornado.ioloop import IOLoop
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
with stack_context.ExceptionStackContext(
self._stack_context_handle_exception):
result = method(self, *args, **kwargs)
if result is not None:
result = gen.convert_yielded(result)
# If @asynchronous is used with @gen.coroutine, (but
# not @gen.engine), we can automatically finish the
# request when the future resolves. Additionally,
# the Future will swallow any exceptions so we need
# to throw them back out to the stack context to finish
# the request.
def future_complete(f):
f.result()
if not self._finished:
self.finish()
IOLoop.current().add_future(result, future_complete)
# Once we have done this, hide the Future from our
# caller (i.e. RequestHandler._when_complete), which
# would otherwise set up its own callback and
# exception handler (resulting in exceptions being
# logged twice).
return None
return result
return wrapper
def stream_request_body(cls):
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
* ``prepare`` and ``data_received`` may return Futures (such as via
``@gen.coroutine``), in which case the next method will not be called
until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
for example usage.
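A minimal sketch of such a handler (the class name and the counter
attribute are illustrative, not part of Tornado)::
    @stream_request_body
    class UploadHandler(RequestHandler):
        def prepare(self):
            self.bytes_read = 0
        def data_received(self, chunk):
            self.bytes_read += len(chunk)
        def put(self):
            self.write("received %d bytes" % self.bytes_read)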
"""
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r" % cls)
cls._stream_request_body = True
return cls
def _has_stream_request_body(cls):
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r" % cls)
return getattr(cls, '_stream_request_body', False)
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
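A minimal sketch (``FooHandler`` is an illustrative name)::
    class FooHandler(RequestHandler):
        @removeslash
        def get(self):
            self.write("foo")
    application = web.Application([
        (r"/foo/*", FooHandler),
    ])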
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
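A minimal sketch (``DirHandler`` is an illustrative name)::
    class DirHandler(RequestHandler):
        @addslash
        def get(self):
            self.write("directory index")
    application = web.Application([
        (r"/dir/?", DirHandler),
    ])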
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
class _ApplicationRouter(ReversibleRuleRouter):
"""Routing implementation used internally by `Application`.
Provides a binding between `Application` and `RequestHandler`.
This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
* it allows using `RequestHandler` subclasses as a `~.routing.Rule` target and
* it allows using a list/tuple of rules as a `~.routing.Rule` target.
``process_rule`` implementation will substitute this list with an appropriate
`_ApplicationRouter` instance.
"""
def __init__(self, application, rules=None):
assert isinstance(application, Application)
self.application = application
super(_ApplicationRouter, self).__init__(rules)
def process_rule(self, rule):
rule = super(_ApplicationRouter, self).process_rule(rule)
if isinstance(rule.target, (list, tuple)):
rule.target = _ApplicationRouter(self.application, rule.target)
return rule
def get_target_delegate(self, target, request, **target_params):
if isclass(target) and issubclass(target, RequestHandler):
return self.application.get_handler_delegate(request, target, **target_params)
return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
class Application(ReversibleRouter):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.current().start()
The constructor for this class takes in a list of `~.routing.Rule`
objects or tuples of values corresponding to the arguments of
`~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
the values in square brackets being optional. The default matcher is
`~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
instead of ``(PathMatches(regexp), target)``.
A common routing target is a `RequestHandler` subclass, but you can also
use lists of rules as a target, which create a nested routing configuration::
application = web.Application([
(HostMatches("example.com"), [
(r"/", MainPageHandler),
(r"/feed", FeedHandler),
]),
])
In addition to this you can use nested `~.routing.Router` instances,
`~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
(see `~.routing` module docs for more information).
When we receive requests, we iterate over the list in order and
instantiate an instance of the first request class whose regexp
matches the request path. The request class can be specified as
either a class object or a (fully-qualified) name.
A dictionary may be passed as the third element (``target_kwargs``)
of the tuple, which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
If there's no match for the current request's host, then ``default_host``
parameter value is matched against host regular expressions.
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
.. versionchanged:: 4.5
Integration with the new `tornado.routing` module.
"""
def __init__(self, handlers=None, default_host=None, transforms=None,
**settings):
if transforms is None:
self.transforms = []
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.default_host = default_host
self.settings = settings
self.ui_modules = {'linkify': _linkify,
'xsrf_form_html': _xsrf_form_html,
'Template': TemplateModule,
}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args['path'] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if self.settings.get('debug'):
self.settings.setdefault('autoreload', True)
self.settings.setdefault('compiled_template_cache', False)
self.settings.setdefault('static_hash_cache', False)
self.settings.setdefault('serve_traceback', True)
self.wildcard_router = _ApplicationRouter(self, handlers)
self.default_router = _ApplicationRouter(self, [
Rule(AnyMatches(), self.wildcard_router)
])
# Automatically reload modified modules
if self.settings.get('autoreload'):
from salt.ext.tornado import autoreload
autoreload.start()
def listen(self, port, address="", **kwargs):
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.current().start()`` to start the server.
Returns the `.HTTPServer` object.
.. versionchanged:: 4.3
Now returns the `.HTTPServer` object.
"""
# import is here rather than top level because HTTPServer
# is not importable on appengine
from salt.ext.tornado.httpserver import HTTPServer
server = HTTPServer(self, **kwargs)
server.listen(port, address)
return server
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
host_matcher = HostMatches(host_pattern)
rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
self.default_router.rules.insert(-1, rule)
if self.default_host is not None:
self.wildcard_router.add_rules([(
DefaultHostMatches(self, host_matcher.host_pattern),
host_handlers
)])
def add_transform(self, transform_class):
self.transforms.append(transform_class)
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(self, request):
# Legacy HTTPServer interface
dispatcher = self.find_handler(request)
return dispatcher.execute()
def find_handler(self, request, **kwargs):
route = self.default_router.find_handler(request)
if route is not None:
return route
if self.settings.get('default_handler_class'):
return self.get_handler_delegate(
request,
self.settings['default_handler_class'],
self.settings.get('default_handler_args', {}))
return self.get_handler_delegate(
request, ErrorHandler, {'status_code': 404})
def get_handler_delegate(self, request, target_class, target_kwargs=None,
path_args=None, path_kwargs=None):
"""Returns `~.httputil.HTTPMessageDelegate` that can serve a request
for application and `RequestHandler` subclass.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg RequestHandler target_class: a `RequestHandler` class.
:arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
:arg list path_args: positional arguments for ``target_class`` HTTP method that
will be executed while handling a request (``get``, ``post`` or any other).
:arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
"""
return _HandlerDelegate(
self, request, target_class, target_kwargs, path_args, path_kwargs)
def reverse_url(self, name, *args):
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
reversed_url = self.default_router.reverse_url(name, *args)
if reversed_url is not None:
return reversed_url
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
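For example, a sketch that silences access logging entirely
(``handlers`` is a placeholder for your URL spec list)::
    application = web.Application(
        handlers, log_function=lambda handler: None)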
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
class _HandlerDelegate(httputil.HTTPMessageDelegate):
def __init__(self, application, request, handler_class, handler_kwargs,
path_args, path_kwargs):
self.application = application
self.connection = request.connection
self.request = request
self.handler_class = handler_class
self.handler_kwargs = handler_kwargs or {}
self.path_args = path_args or []
self.path_kwargs = path_kwargs or {}
self.chunks = []
self.stream_request_body = _has_stream_request_body(self.handler_class)
def headers_received(self, start_line, headers):
if self.stream_request_body:
self.request.body = Future()
return self.execute()
def data_received(self, data):
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
def finish(self):
if self.stream_request_body:
self.request.body.set_result(None)
else:
self.request.body = b''.join(self.chunks)
self.request._parse_body()
self.execute()
def on_connection_close(self):
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None
def execute(self):
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get('static_hash_cache', True):
StaticFileHandler.reset()
self.handler = self.handler_class(self.application, self.request,
**self.handler_kwargs)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here,
# leaving it to be logged when the Future is GC'd).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future (because of the requirement to remain compatible
# with WSGI)
self.handler._execute(transforms, *self.path_args,
**self.path_kwargs)
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
To customize the response sent with an `HTTPError`, override
`RequestHandler.write_error`.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
determined automatically from ``status_code``, but can be supplied
explicitly when using a non-standard numeric code.
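A short sketch of raising it from a handler (the argument name ``q``
is illustrative)::
    def get(self):
        q = self.get_argument("q", None)
        if q is None:
            raise HTTPError(400, "missing argument %s", "q")
        self.write(q)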
"""
def __init__(self, status_code=500, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get('reason', None)
if log_message and not args:
self.log_message = log_message.replace('%', '%%')
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, 'Unknown'))
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
class Finish(Exception):
"""An exception that ends the request without producing an error response.
When `Finish` is raised in a `RequestHandler`, the request will
end (calling `RequestHandler.finish` if it hasn't already been
called), but the error-handling methods (including
`RequestHandler.write_error`) will not be called.
If `Finish()` was created with no arguments, the pending response
will be sent as-is. If `Finish()` was given an argument, that
argument will be passed to `RequestHandler.finish()`.
This can be a more convenient way to implement custom error pages
than overriding ``write_error`` (especially in library code)::
if self.current_user is None:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
raise Finish()
.. versionchanged:: 4.3
Arguments passed to ``Finish()`` will be passed on to
`RequestHandler.finish`.
"""
pass
class MissingArgumentError(HTTPError):
"""Exception raised by `RequestHandler.get_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
.. versionadded:: 3.1
"""
def __init__(self, arg_name):
super(MissingArgumentError, self).__init__(
400, 'Missing argument %s' % arg_name)
self.arg_name = arg_name
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
`RedirectHandler` supports regular expression substitutions. E.g., to
swap the first and second parts of a path while preserving the remainder::
application = web.Application([
(r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
])
The final URL is formatted with `str.format` and the substrings that match
the capturing groups. In the above example, a request to "/a/b/c" would be
formatted like::
str.format("/{1}/{0}/{2}", "a", "b", "c") # -> "/b/a/c"
Use Python's :ref:`format string syntax <formatstrings>` to customize how
values are substituted.
.. versionchanged:: 4.5
Added support for substitutions into the destination URL.
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self, *args):
self.redirect(self._url.format(*args), permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To serve a file like ``index.html`` automatically when a directory is
requested, set ``static_handler_args=dict(default_filename="index.html")``
in your application settings, or add ``default_filename`` as an initializer
argument for your ``StaticFileHandler``.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video).
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
class method. Instance methods may use the attributes ``self.path``,
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {} # type: typing.Dict
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
return self.get(path, include_body=False)
@gen.coroutine
def get(self, path, include_body=True):
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(
self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self):
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
def set_headers(self):
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified,
content_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() +
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self):
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root, path):
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root, absolute_path):
"""Validate and return the absolute path.
``root`` is the configured path for the `StaticFileHandler`,
and ``path`` is the result of `get_absolute_path`
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
# os.path.abspath strips a trailing /.
# We must add it back to `root` so that we only match files
# in a directory named `root` instead of files starting with
# that prefix.
root = os.path.abspath(root)
if not root.endswith(os.path.sep):
# abspath always removes a trailing slash, except when
# root is '/'. This is an unusual case, but several projects
# have independently discovered this technique to disable
# Tornado's path validation and (hopefully) do their own,
# so we need to support it.
root += os.path.sep
# The trailing slash also needs to be temporarily added back
# to the requested path so a request to root/ will match.
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory",
self.path)
if (os.path.isdir(absolute_path) and
self.default_filename is not None):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
@classmethod
def get_content_version(cls, abspath):
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(
stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
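A sketch of an override that marks stylesheets cacheable for one hour
(the one-hour figure is arbitrary)::
    def get_cache_time(self, path, modified, mime_type):
        return 3600 if mime_type == "text/css" else 0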
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
a filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings['static_path'], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path):
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app)),
])
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
of just a whitelist. (the whitelist is still used for certain
non-text mime types).
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"application/json", "application/xhtml+xml",
"image/svg+xml"])
# Python's GzipFile defaults to level 9, while most other gzip
# tools (including gzip itself) default to 6, which is probably a
# better CPU/size tradeoff.
GZIP_LEVEL = 6
# Responses that are too short are unlikely to benefit from gzipping
# after considering the "Content-Encoding: gzip" header and the header
# inside the gzip encoding.
# Note that responses written in multiple chunks will be compressed
# regardless of size.
MIN_LENGTH = 1024
def __init__(self, request):
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
# TODO: can/should this type be inherited from the superclass?
if 'Vary' in headers:
headers['Vary'] += ', Accept-Encoding'
else:
headers['Vary'] = 'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = self._compressible_type(ctype) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
compresslevel=self.GZIP_LEVEL)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
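A minimal sketch (handler and cookie names are illustrative; it assumes
the application defines ``login_url`` and ``cookie_secret`` settings)::
    class ProfileHandler(RequestHandler):
        def get_current_user(self):
            return self.get_secure_cookie("user")
        @authenticated
        def get(self):
            self.write(b"Hello, " + self.current_user)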
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
Subclasses of UIModule must override the `render` method.
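A minimal sketch (the module and template names are illustrative)::
    class Entry(UIModule):
        def render(self, entry):
            return self.render_string("module-entry.html", entry=entry)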
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.locale = handler.locale
@property
def current_user(self):
return self.handler.current_user
def render(self, *args, **kwargs):
"""Override in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Override to return a JavaScript string
to be embedded in the page."""
return None
def javascript_files(self):
"""Override to return a list of JavaScript files needed by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def embedded_css(self):
"""Override to return a CSS string
that will be embedded in the page."""
return None
def css_files(self):
"""Override to return a list of CSS files required by this module.
If the return values are relative paths, they will be passed to
`RequestHandler.static_url`; otherwise they will be used as-is.
"""
return None
def html_head(self):
"""Override to return an HTML string that will be put in the <head/>
element.
"""
return None
def html_body(self):
"""Override to return an HTML string that will be put at the end of
the <body/> element.
"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None,
key_version=None):
if version is None:
version = DEFAULT_SIGNED_VALUE_VERSION
if clock is None:
clock = time.time
timestamp = utf8(str(int(clock())))
value = base64.b64encode(utf8(value))
if version == 1:
signature = _create_signature_v1(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
elif version == 2:
# The v2 format consists of a version number and a series of
# length-prefixed fields "%d:%s", the last of which is a
# signature, all separated by pipes. All numbers are in
# decimal format with no leading zeros. The signature is an
# HMAC-SHA256 of the whole string up to that point, including
# the final pipe.
#
# The fields are:
# - format version (i.e. 2; no length prefix)
# - key version (integer, default is 0)
# - timestamp (integer seconds since epoch)
# - name (not encoded; assumed to be ~alphanumeric)
# - value (base64-encoded)
# - signature (hex-encoded; no length prefix)
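#
# A schematic example for name "foo" and value "bar" (the timestamp
# and signature shown here are placeholders, not real output):
#   2|1:0|10:1521963691|3:foo|4:YmFy|<hex HMAC-SHA256 of everything before it>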
def format_field(s):
return utf8("%d:" % len(s)) + utf8(s)
to_sign = b"|".join([
b"2",
format_field(str(key_version or 0)),
format_field(timestamp),
format_field(name),
format_field(value),
b''])
if isinstance(secret, dict):
assert key_version is not None, 'Key version must be set when sign key dict is used'
assert version >= 2, 'Version must be at least 2 for key version support'
secret = secret[key_version]
signature = _create_signature_v2(secret, to_sign)
return to_sign + signature
else:
raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value):
# Figures out what version value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(secret, name, value, max_age_days=31,
clock=None, min_version=None):
if clock is None:
clock = time.time
if min_version is None:
min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
if min_version > 2:
raise ValueError("Unsupported min_version %d" % min_version)
if not value:
return None
value = utf8(value)
version = _get_version(value)
if version < min_version:
return None
if version == 1:
return _decode_signed_value_v1(secret, name, value,
max_age_days, clock)
elif version == 2:
return _decode_signed_value_v2(secret, name, value,
max_age_days, clock)
else:
return None
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature_v1(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < clock() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > clock() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r",
value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _decode_fields_v2(value):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
try:
key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
except ValueError:
return None
signed_string = value[:-len(passed_sig)]
if isinstance(secret, dict):
try:
secret = secret[key_version]
except KeyError:
return None
expected_sig = _create_signature_v2(secret, signed_string)
if not _time_independent_equals(passed_sig, expected_sig):
return None
if name_field != utf8(name):
return None
timestamp = int(timestamp)
if timestamp < clock() - max_age_days * 86400:
# The signature has expired.
return None
try:
return base64.b64decode(value_field)
except Exception:
return None
def get_signature_key_version(value):
value = utf8(value)
version = _get_version(value)
if version < 2:
return None
try:
key_version, _, _, _, _ = _decode_fields_v2(value)
except ValueError:
return None
return key_version
def _create_signature_v1(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
def _create_signature_v2(secret, s):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
hash.update(utf8(s))
return utf8(hash.hexdigest())
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
|
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # type: typing.Dict[str, template.BaseLoader]
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self._prepared_future = None
self._headers = None # type: httputil.HTTPHeaders
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self,
application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
self.request.connection.set_close_callback(self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization. Called for each request.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
self.request.body.exception()
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
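A minimal sketch (the header value is illustrative)::
    def set_default_headers(self):
        self.set_header("Server", "MyServer/1.0")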
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d" % status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
# type: (str, _HeaderTypes) -> None
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
# type: (str, _HeaderTypes) -> None
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
def _convert_header_value(self, value):
# type: (_HeaderTypes) -> str
# Convert the input value to a str. This type check is a bit
# subtle: The bytes case only executes on python 3, and the
# unicode case only executes on python 2, because the other
# cases are covered by the first match for str.
if isinstance(value, str):
retval = value
elif isinstance(value, bytes): # py3
# Non-ascii characters in headers are not well supported,
# but if you pass bytes, use latin1 so they pass through as-is.
retval = value.decode('latin1')
elif isinstance(value, unicode_type): # py2
# TODO: This is inconsistent with the use of latin1 above,
# but it's been that way for a long time. Should it change?
retval = escape.utf8(value)
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request.
if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
raise ValueError("Unsafe header value %r", retval)
return retval
_ARG_DEFAULT = object()
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
# `get_argument`.)
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments,
strip)
def get_body_arguments(self, name, strip=True):
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default,
self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
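# Illustrative sketch (hypothetical handler): how the accessors above behave.
# get_argument without a default raises MissingArgumentError when the argument
# is absent; with a default it returns the default instead.
#
#     class SearchHandler(RequestHandler):
#         def get(self):
#             q = self.get_argument("q")             # raises MissingArgumentError if absent
#             page = self.get_argument("page", "1")  # optional, defaults to "1"
#             tags = self.get_arguments("tag")       # always a (possibly empty) list
#             self.write({"q": q, "page": page, "tags": tags})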
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
"""An alias for
`self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See https://docs.python.org/2/library/cookie.html#Cookie.Morsel
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
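# Illustrative sketch (hypothetical handler): setting a cookie with extra
# Morsel attributes; max_age, httponly and secure are handled by the keyword
# loop above, and expires_days fills in the Expires attribute.
#
#     class LoginHandler(RequestHandler):
#         def post(self):
#             self.set_cookie("session", "abc123", expires_days=7,
#                             httponly=True, secure=True, max_age=604800)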
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
secret = self.application.settings["cookie_secret"]
key_version = None
if isinstance(secret, dict):
if self.application.settings.get("key_version") is None:
raise Exception("key_version setting must be used for secret_key dicts")
key_version = self.application.settings["key_version"]
return create_signed_value(secret, name, value, version=version,
key_version=key_version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
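# Illustrative sketch (hypothetical handlers): the usual signed-cookie round
# trip. Requires the cookie_secret application setting; get_secure_cookie
# returns bytes, or None if the signature does not validate.
#
#     class LoginHandler(RequestHandler):
#         def post(self):
#             self.set_secure_cookie("user", self.get_argument("name"))
#
#     class MeHandler(RequestHandler):
#         def get(self):
#             user = self.get_secure_cookie("user")  # bytes or None
#             if user is None:
#                 raise HTTPError(403)
#             self.write(user)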
def get_secure_cookie_key_version(self, name, value=None):
"""Returns the signing key version of the secure cookie.
The version is returned as int.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return get_signature_key_version(value)
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", utf8(url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
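# Illustrative sketch (hypothetical handler): writing a JSON response. Passing
# a dict sets Content-Type to application/json; to use a different type, call
# set_header *after* write(), as noted in the docstring above.
#
#     class StatusHandler(RequestHandler):
#         def get(self):
#             self.write({"status": "ok", "uptime": 1234})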
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
if js_files:
# Maintain order of JavaScript files given by modules
js = self.render_linked_js(js_files)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = self.render_embed_js(js_embed)
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
css = self.render_linked_css(css_files)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = self.render_embed_css(css_embed)
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_linked_js(self, js_files):
"""Default method used to render the final js links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
def render_embed_js(self, js_embed):
"""Default method used to render the final embedded js for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
def render_linked_css(self, css_files):
"""Default method used to render the final css links for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
def render_embed_css(self, css_embed):
"""Default method used to render the final embedded css for the
rendered webpage.
Override this method in a sub-classed controller to change the output.
"""
return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
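# Illustrative sketch (hypothetical handler and template names): render()
# sends the generated template as the response, while render_string() only
# returns the UTF-8 bytes, e.g. for composing an email body.
#
#     class ProfileHandler(RequestHandler):
#         def get(self, username):
#             self.render("profile.html", username=username)
#
#     class MailHandler(RequestHandler):
#         def get(self):
#             body = self.render_string("mail.txt", user=self.current_user)
#             self.write(body)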
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
pgettext=self.locale.pgettext,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` and ``template_whitespace`` application
settings. If a ``template_loader`` application setting is
supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None):
"""Flushes the current output buffer to the network.
The ``callback`` argument, if given, can be used for flow control:
it will be run when all flushed data has been written to the socket.
Note that only one flush callback can be outstanding at a time;
if another flush occurs before the previous flush's callback
has been run, the previous callback will be discarded.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers,
chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
chunk = None
# Finalize the cookie headers (which have been stored in a side
# object so an outgoing cookie could be overwritten before it
# is sent).
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
self.add_header("Set-Cookie", cookie.OutputString(None))
start_line = httputil.ResponseStartLine('',
self._status_code,
self._reason)
return self.request.connection.write_headers(
start_line, self._headers, chunk, callback=callback)
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method != "HEAD":
return self.request.connection.write(chunk, callback=callback)
else:
future = Future()
future.set_result(None)
return future
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
self.set_etag_header()
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if (self._status_code in (204, 304) or
(self._status_code >= 100 and self._status_code < 200)):
assert not self._write_buffer, "Cannot send body with %s" % self._status_code
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
if hasattr(self.request, "connection"):
# Now that the request is finished, clear the callback we
# set on the HTTPConnection (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.set_close_callback(None)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
self._break_cycles()
def _break_cycles(self):
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
# If we get an error between writing headers and finishing,
# we are unlikely to be able to finish due to a
# Content-Length mismatch. Try anyway to release the
# socket.
try:
self.finish()
except Exception:
gen_log.error("Failed to flush partial response",
exc_info=True)
return
self.clear()
reason = kwargs.get('reason')
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
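# Illustrative sketch (hypothetical subclass and template): a custom error
# page. kwargs may carry an exc_info triple when the error came from an
# uncaught exception; delegating to the default implementation keeps the
# debug-mode traceback behaviour.
#
#     class MyHandler(RequestHandler):
#         def write_error(self, status_code, **kwargs):
#             if self.settings.get("serve_traceback") and "exc_info" in kwargs:
#                 return super(MyHandler, self).write_error(status_code, **kwargs)
#             self.render("error.html", code=status_code, reason=self._reason)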
@property
def locale(self):
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
.. versionchanged:: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
This is set in one of two ways:
* A subclass may override `get_current_user()`, which will be called
automatically the first time ``self.current_user`` is accessed.
`get_current_user()` will only be called once per request,
and is cached for future access::
def get_current_user(self):
user_cookie = self.get_secure_cookie("user")
if user_cookie:
return json.loads(user_cookie)
return None
* It may be set as a normal variable, typically from an overridden
`prepare()`::
@gen.coroutine
def prepare(self):
user_id_cookie = self.get_secure_cookie("user_id")
if user_id_cookie:
self.current_user = yield load_user(user_id_cookie)
Note that `prepare()` may be a coroutine while `get_current_user()`
may not, so the latter form is necessary if loading the user requires
asynchronous operations.
The user object may be any type of the application's choosing.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie.
This method may not be a coroutine.
"""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
.. versionchanged:: 4.3
The ``xsrf_cookie_kwargs`` `Application` setting may be
used to supply additional cookie options (which will be
passed directly to `set_cookie`). For example,
``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
will set the ``secure`` and ``httponly`` flags on the
``_xsrf`` cookie.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d",
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days,
**cookie_kwargs)
return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(self, cookie):
"""Convert a cookie string into a the tuple form returned by
_get_raw_xsrf_token.
"""
try:
m = _signed_value_version_re.match(utf8(cookie))
if m:
version = int(m.group(1))
if version == 2:
_, mask, masked_token, timestamp = cookie.split("|")
mask = binascii.a2b_hex(utf8(mask))
token = _websocket_mask(
mask, binascii.a2b_hex(utf8(masked_token)))
timestamp = int(timestamp)
return version, token, timestamp
else:
# Treat unknown versions as not present instead of failing.
raise Exception("Unknown xsrf cookie version")
else:
version = 1
try:
token = binascii.a2b_hex(utf8(cookie))
except (binascii.Error, TypeError):
token = utf8(cookie)
# We don't have a usable timestamp in older versions.
timestamp = int(time.time())
return (version, token, timestamp)
except Exception:
# Catch exceptions and return nothing instead of failing.
gen_log.debug("Uncaught exception in _decode_xsrf_token",
exc_info=True)
return None, None, None
def check_xsrf_cookie(self):
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
``X-Requested-With: XMLHTTPRequest`` was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
.. versionchanged:: 3.2.2
Added support for cookie version 2. Both versions 1 and 2 are
supported.
"""
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not token:
raise HTTPError(403, "'_xsrf' argument has invalid format")
if not _time_independent_equals(utf8(token), utf8(expected_token)):
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
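# Illustrative sketch (hypothetical template): with the xsrf_cookies setting
# enabled, every POST form should embed the token, e.g.:
#
#     <form action="/login" method="post">
#       {% module xsrf_form_html() %}
#       <input type="text" name="username"/>
#       <input type="submit" value="Sign in"/>
#     </form>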
def static_url(self, path, include_host=None, **kwargs):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get("static_handler_class",
StaticFileHandler).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
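# Illustrative sketch (hypothetical file names): typical static_url use.
# Requires the static_path setting; the helper is exposed to templates through
# get_template_namespace above.
#
#     In a template:
#         <link rel="stylesheet" href="{{ static_url('css/site.css') }}"/>
#     In a handler, e.g. for an absolute URL in an email:
#         url = self.static_url("img/logo.png", include_host=True)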
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
computed_etag = utf8(self._headers.get("Etag", ""))
# Find all weak and strong etag values from If-None-Match header
# because RFC 7232 allows multiple etag values in a single header.
etags = re.findall(
br'\*|(?:W/)?"[^"]*"',
utf8(self.request.headers.get("If-None-Match", ""))
)
if not computed_etag or not etags:
return False
match = False
if etags[0] == b'*':
match = True
else:
# Use a weak comparison when comparing entity-tags.
def val(x):
return x[2:] if x.startswith(b'W/') else x
for etag in etags:
if val(etag) == val(computed_etag):
match = True
break
return match
def _stack_context_handle_exception(self, type, value, traceback):
try:
# For historical reasons _handle_request_exception only takes
# the exception value instead of the full triple,
# so re-raise the exception to ensure that it's in
# sys.exc_info()
raise_exc_info((type, value, traceback))
except Exception:
self._handle_request_exception(value)
return True
@gen.coroutine
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
result = self.prepare()
if result is not None:
result = yield result
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
self._prepared_future.set_result(None)
if self._finished:
return
if _has_stream_request_body(self.__class__):
# In streaming mode request.body is a Future that signals
# the body has been completely received. The Future has no
# result; the data has been passed to self.data_received
# instead.
try:
yield self.request.body
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if result is not None:
result = yield result
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
try:
self._handle_request_exception(e)
except Exception:
app_log.error("Exception in exception handler", exc_info=True)
if (self._prepared_future is not None and
not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it
# now (to unblock the HTTP server). Note that this is not
# in a finally block to avoid GC issues prior to Python 3.4.
self._prepared_future.set_result(None)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
def _handle_request_exception(self, e):
if isinstance(e, Finish):
# Not an error; just finish the request without logging.
if not self._finished:
self.finish(*e.args)
return
try:
self.log_exception(*sys.exc_info())
except Exception:
# An error here should still get a best-effort send_error()
# to avoid leaking the connection.
app_log.error("Error in exception logger", exc_info=True)
if self._finished:
# Extra errors after the request has been finished should
# be logged, but there is no reason to continue to try and
# send a response.
return
if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = ([value.status_code, self._request_summary()] +
list(value.args))
gen_log.warning(format, *args)
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
|
main.go
|
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Program skylab_swarming_worker executes a Skylab task via Lucifer.
//
// skylab_swarming_worker uses lucifer_run_job to actually run the autotest
// job. Once lucifer_run_job is kicked off, skylab_swarming_worker handles Lucifer
// events, translating them to task updates and runtime status updates of the
// swarming bot. If the swarming task is canceled, lucifer_swarming_worker aborts
// the Lucifer run.
//
// The following environment variables control skylab_swarming_worker
// execution.
//
// Per-bot variables:
//
// ADMIN_SERVICE: Admin service host, e.g. foo.appspot.com.
// AUTOTEST_DIR: Path to the autotest checkout on server.
// LUCIFER_TOOLS_DIR: Path to the lucifer installation.
// PARSER_PATH: Path to the autotest_status_parser installation.
// SKYLAB_DUT_ID: skylab_inventory id of the DUT that belongs to this bot.
//
// Per-task variables:
//
// SWARMING_TASK_ID: task id of the swarming task being serviced.
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"time"
"go.chromium.org/luci/common/errors"
lflag "go.chromium.org/luci/common/flag"
"go.chromium.org/luci/common/logging/gologger"
"infra/cmd/skylab_swarming_worker/internal/annotations"
"infra/cmd/skylab_swarming_worker/internal/fifo"
"infra/cmd/skylab_swarming_worker/internal/lucifer"
"infra/cmd/skylab_swarming_worker/internal/swmbot"
"infra/cmd/skylab_swarming_worker/internal/swmbot/harness"
"infra/cros/dutstate"
)
// Task names.
const (
repairTaskName = "repair"
deployTaskName = "deploy"
auditTaskName = "audit"
setStateNeedsRepairTaskName = "set_needs_repair"
setStateReservedTaskName = "set_reserved"
setStateManualRepairTaskName = "set_manual_repair"
setStateNeedsReplacementTaskName = "set_needs_replacement"
setStateNeedsManualRepairTaskName = "set_needs_manual_repair"
)
const gcpProject = "chromeos-skylab"
func main() {
log.SetPrefix(fmt.Sprintf("%s: ", filepath.Base(os.Args[0])))
log.Printf("Starting with args: %s", os.Args)
a := parseArgs()
if err := mainInner(a); err != nil {
log.Fatalf("Error: %s", err)
}
log.Printf("Exited successfully")
}
type args struct {
adminService string
deadline time.Time
actions string
isolatedOutdir string
logdogAnnotationURL string
sideEffectsConfig string
taskName string
xClientTest bool
xKeyvals map[string]string
xProvisionLabels []string
xTestArgs string
}
func parseArgs() *args {
a := &args{}
flag.StringVar(&a.taskName, "task-name", "",
"Name of the task to run. For autotest, this is the NAME attribute in control file")
flag.StringVar(&a.logdogAnnotationURL, "logdog-annotation-url", "",
"LogDog annotation URL, like logdog://HOST/PROJECT/PREFIX/+/annotations")
flag.StringVar(&a.adminService, "admin-service", "",
"Admin service host, e.g. foo.appspot.com")
flag.BoolVar(&a.xClientTest, "client-test", false,
"This is a client side test")
flag.Var(lflag.CommaList(&a.xProvisionLabels), "provision-labels",
"Labels to provision, comma separated")
flag.Var(lflag.JSONMap(&a.xKeyvals), "keyvals",
"JSON string of job keyvals")
flag.StringVar(&a.xTestArgs, "test-args", "",
"Test args (meaning depends on test)")
flag.StringVar(&a.actions, "actions", "",
"Actions to execute for a task")
flag.StringVar(&a.isolatedOutdir, "isolated-outdir", "",
"Directory to place isolated output into. Generate no isolated output if not set.")
flag.StringVar(&a.sideEffectsConfig, "side-effect-config", "",
"JSONpb string of side_effects.Config to be dropped into the results directory. No file is created if empty.")
flag.Var(lflag.Time(&a.deadline), "deadline",
"Soft deadline for completion, formatted as stiptime. Wrap-up actions may outlive this deadline.")
flag.Parse()
return a
}
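// Illustrative invocation (hypothetical values; every flag used below is
// defined in parseArgs above):
//
//	skylab_swarming_worker \
//		-task-name admin_repair \
//		-admin-service foo.appspot.com \
//		-logdog-annotation-url logdog://HOST/PROJECT/PREFIX/+/annotations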
func mainInner(a *args) error {
ctx := context.Background()
// Set up Go logger for LUCI libraries.
ctx = gologger.StdConfig.Use(ctx)
b := swmbot.GetInfo()
log.Printf("Swarming bot config: %#v", b)
annotWriter, err := openLogDogWriter(ctx, a.logdogAnnotationURL)
if err != nil {
return err
}
defer annotWriter.Close()
i, err := harness.Open(ctx, b, harnessOptions(a)...)
log.Printf("mainInner: harness info object (%#v)", i)
if err != nil {
return err
}
defer i.Close(ctx)
var luciferErr error
switch {
case a.taskName == setStateNeedsRepairTaskName:
setStateForDUTs(i, dutstate.NeedsRepair)
case a.taskName == setStateReservedTaskName:
setStateForDUTs(i, dutstate.Reserved)
case a.taskName == setStateManualRepairTaskName:
setStateForDUTs(i, dutstate.ManualRepair)
case a.taskName == setStateNeedsReplacementTaskName:
setStateForDUTs(i, dutstate.NeedsReplacement)
case a.taskName == setStateNeedsManualRepairTaskName:
setStateForDUTs(i, dutstate.NeedsManualRepair)
case isSupportedLuciferTask(a):
luciferErr = luciferFlow(ctx, a, i, annotWriter)
default:
luciferErr = errors.Reason("skylab_swarming_worker failed to recognize task type").Err()
}
if err := i.Close(ctx); err != nil {
return err
}
return luciferErr
}
func setStateForDUTs(i *harness.Info, state dutstate.State) {
for _, dh := range i.DUTs {
dh.LocalState.HostState = state
}
}
func luciferFlow(ctx context.Context, a *args, i *harness.Info, annotWriter writeCloser) error {
var fifoPath string
if a.logdogAnnotationURL != "" {
// Set up FIFO, pipe, and goroutines like so:
//
// worker -> LogDog pipe
// ^
// lucifer -> FIFO -go-/
//
// Both the worker and Lucifer need to write to LogDog.
fifoPath = filepath.Join(i.TaskResultsDir.Path, "logdog.fifo")
fc, err := fifo.NewCopier(annotWriter, fifoPath)
if err != nil {
return err
}
defer fc.Close()
}
// We want to run tasks sequentially to avoid:
// 1. Unexpected pressure on skylab drones, since drone capacity is
// determined based on the number of bots.
// 2. Unexpected pressure on the servohost. DUTs under a scheduling unit are
// likely behind the same servohost due to testing locale requirements,
// so doing actions that touch servo-usb (e.g. staging an image) in parallel
// may cause them to time out or fail because of servo-usb or labstation
// performance limitations.
var errs []error
for _, dh := range i.DUTs {
ta := lucifer.TaskArgs{
// Swarming task number 5670d0e630f66c10 failed due to a path length that was too long (108 chars).
// Let's kick the can down the road and use a shorter suffix for the abort socket.
// TODO(gregorynisbet): Come up with a permanent solution for short paths.
AbortSock: filepath.Join(dh.ResultsDir, "sk"),
GCPProject: gcpProject,
ResultsDir: dh.ResultsDir,
LogDogFile: fifoPath,
}
luciferErr := runLuciferTask(ctx, dh, a, ta)
if luciferErr != nil {
// Attempt to parse results regardless of lucifer errors.
luciferErr = errors.Annotate(luciferErr, "run lucifer task").Err()
|
errs = append(errs, luciferErr)
}
}
annotations.BuildStep(annotWriter, "Epilog")
annotations.StepLink(annotWriter, "Task results (Stainless)", i.Info.Task.StainlessURL())
annotations.StepClosed(annotWriter)
if len(errs) > 0 {
return errors.Annotate(errors.MultiError(errs), "lucifer flow").Err()
}
return nil
}
func harnessOptions(a *args) []harness.Option {
var ho []harness.Option
if updatesInventory(a) {
ho = append(ho, harness.UpdateInventory(getTaskName(a)))
}
return ho
}
func isSupportedLuciferTask(a *args) bool {
return isAdminTask(a) || isDeployTask(a) || isAuditTask(a)
}
// updatesInventory returns true if the task (repair/deploy/audit)
// should update the inventory, and false otherwise.
func updatesInventory(a *args) bool {
if isRepairTask(a) || isDeployTask(a) || isAuditTask(a) {
return true
}
return false
}
// getTaskName returns the task name (repair/deploy/audit) for the task.
func getTaskName(a *args) string {
switch {
case isRepairTask(a):
return repairTaskName
case isDeployTask(a):
return deployTaskName
case isAuditTask(a):
return auditTaskName
default:
return ""
}
}
func runLuciferTask(ctx context.Context, dh *harness.DUTHarness, a *args, ta lucifer.TaskArgs) error {
if !a.deadline.IsZero() {
var c context.CancelFunc
ctx, c = context.WithDeadline(ctx, a.deadline)
defer c()
}
switch {
case isAuditTask(a):
return runAuditTask(ctx, dh, a.actions, ta)
case isAdminTask(a):
n, _ := getAdminTask(a.taskName)
return runAdminTask(ctx, dh, n, ta)
case isDeployTask(a):
return runDeployTask(ctx, dh, a.actions, ta)
default:
panic("Unsupported task type")
}
}
// getAdminTask returns the admin task name if the given task is an
// admin task. If the given task is not an admin task, ok will be
// false.
func getAdminTask(name string) (task string, ok bool) {
if strings.HasPrefix(name, "admin_") {
return strings.TrimPrefix(name, "admin_"), true
}
return "", false
}
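// For example, getAdminTask("admin_repair") returns ("repair", true), while
// getAdminTask("deploy") returns ("", false); deploy tasks are matched
// separately by isDeployTask below.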
// isAdminTask determines whether the args specify an admin task
func isAdminTask(a *args) bool {
_, isAdmin := getAdminTask(a.taskName)
return isAdmin
}
// isDeployTask determines if the given task name corresponds to a deploy task.
func isDeployTask(a *args) bool {
return a.taskName == deployTaskName
}
// isAuditTask determines if the given task name corresponds to an audit task.
func isAuditTask(a *args) bool {
task, _ := getAdminTask(a.taskName)
return task == auditTaskName
}
// isRepairTask determines if the given task name corresponds to a repair task.
func isRepairTask(a *args) bool {
task, _ := getAdminTask(a.taskName)
return task == repairTaskName
}
// runAdminTask runs an admin task. name is the name of the task.
func runAdminTask(ctx context.Context, dh *harness.DUTHarness, name string, ta lucifer.TaskArgs) (err error) {
r := lucifer.AdminTaskArgs{
TaskArgs: ta,
Host: dh.DUTHostname,
Task: name,
}
cmd := lucifer.AdminTaskCommand(dh.BotInfo.LuciferConfig(), r)
if _, err := runLuciferCommand(ctx, cmd, dh, r.AbortSock); err != nil {
return errors.Annotate(err, "run admin task").Err()
}
return nil
}
// runDeployTask runs a deploy task using lucifer.
//
// actions is a possibly empty comma-separated list of deploy actions to run
func runDeployTask(ctx context.Context, dh *harness.DUTHarness, actions string, ta lucifer.TaskArgs) error {
r := lucifer.DeployTaskArgs{
TaskArgs: ta,
Host: dh.DUTHostname,
Actions: actions,
}
cmd := lucifer.DeployTaskCommand(dh.BotInfo.LuciferConfig(), r)
if _, err := runLuciferCommand(ctx, cmd, dh, r.AbortSock); err != nil {
return errors.Annotate(err, "run deploy task").Err()
}
return nil
}
// runAuditTask runs an audit task using lucifer.
//
// actions is a possibly empty comma-separated list of audit actions to run
func runAuditTask(ctx context.Context, dh *harness.DUTHarness, actions string, ta lucifer.TaskArgs) error {
r := lucifer.AuditTaskArgs{
TaskArgs: ta,
Host: dh.DUTHostname,
Actions: actions,
}
cmd := lucifer.AuditTaskCommand(dh.BotInfo.LuciferConfig(), r)
if _, err := runLuciferCommand(ctx, cmd, dh, r.AbortSock); err != nil {
return errors.Annotate(err, "run audit task").Err()
}
return nil
}
|
log.Printf("Encountered error on %s. Error: %s", dh.DUTHostname, luciferErr)
|
_core.py
|
import asyncio
import os
from datetime import datetime
from pathlib import Path
from telethon.tl.types import InputMessagesFilterDocument
from astro.config import Config
from astro import CMD_HELP
from astro.utils import admin_cmd, load_module, remove_plugin
NAME = Config.NAME
DELETE_TIMEOUT = 5
thumb_image_path = "./resources/astro.jpeg"
DEFAULTUSER = str(NAME) if NAME else "ASTRO USER"
@astro.on(admin_cmd(pattern=r"send (?P<shortname>\w+)", outgoing=True))
@astro.on(sudo_cmd(pattern=r"send (?P<shortname>\w+)", allow_sudo=True))
async def send(event):
ok = await eor(event, "Sending...")
if event.fwd_from:
return
hmm = bot.uid
message_id = event.message.id
thumb = thumb_image_path
input_str = event.pattern_match.group(1)
the_plugin_file = "./astro/plugins/{}.py".format(input_str)
if os.path.exists(the_plugin_file):
await ok.delete()
start = datetime.now()
pro = await event.client.send_file(
event.chat_id,
the_plugin_file,
force_document=True,
allow_cache=False,
thumb=thumb,
reply_to=message_id,
)
end = datetime.now()
time_taken_in_s = (end - start).seconds
await pro.edit(
f"**► Plugin Name:** `{input_str}`\n**► Uploaded by:** [{DEFAULTUSER}](tg://user?id={hmm})\n\n© @Astro_HelpChat"
)
await asyncio.sleep(DELETE_TIMEOUT)
else:
await ok.edit("**404**: `No Such Plugin!`")
@astro.on(admin_cmd(pattern="install"))
async def insta
|
t):
if event.fwd_from:
return
if event.reply_to_msg_id:
try:
downloaded_file_name = (
await event.client.download_media( # pylint:disable=E0602
await event.get_reply_message(),
"astro/plugins/", # pylint:disable=E0602
)
)
if "(" not in downloaded_file_name:
path1 = Path(downloaded_file_name)
shortname = path1.stem
load_module(shortname.replace(".py", ""))
await event.edit(
"astro Succesfully Installed The Plugin `{}`".format(
os.path.basename(downloaded_file_name)
)
)
else:
os.remove(downloaded_file_name)
await event.edit(
"**Error!**\nPlugin cannot be installed!\nMight have been pre-installed."
)
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
os.remove(downloaded_file_name)
await asyncio.sleep(DELETE_TIMEOUT)
await event.delete()
@astro.on(admin_cmd(pattern=r"unload (?P<shortname>\w+)$"))
async def unload(event):
if event.fwd_from:
return
shortname = event.pattern_match["shortname"]
try:
remove_plugin(shortname)
await event.edit(f"astro has successfully unloaded {shortname}")
except Exception as e:
await event.edit(
"astro could not unload {} because of the following error.\n{}".format(
shortname, str(e)
)
)
@astro.on(admin_cmd(pattern=r"load (?P<shortname>\w+)$"))
async def load(event):
if event.fwd_from:
return
shortname = event.pattern_match["shortname"]
try:
try:
remove_plugin(shortname)
except BaseException:
pass
load_module(shortname)
await event.edit(f"astro has successfully loaded {shortname}")
except Exception as e:
await event.edit(
f"astro could not load {shortname} because of the following error.\n{str(e)}"
)
@astro.on(admin_cmd(pattern=r"installall$"))
async def install(event):
if event.fwd_from:
return
documentss = await event.client.get_messages(
event.chat_id, None, search=".py", filter=InputMessagesFilterDocument
)
total = int(documentss.total)
total_doxx = range(0, total)
b = await event.client.send_message(
event.chat_id,
f"**Installing {total} plugins...**\n`This msg will be deleted after the installation gets completed`",
)
text = "**Installing Plugins...**\n\n"
a = await event.client.send_message(event.chat_id, text)
if total == 0:
await a.edit("**No plugins to install.**")
await event.delete()
return
for ixo in total_doxx:
mxo = documentss[ixo].id
downloaded_file_name = await event.client.download_media(
await event.client.get_messages(event.chat_id, ids=mxo), "astro/plugins/"
)
if "(" not in downloaded_file_name:
path1 = Path(downloaded_file_name)
shortname = path1.stem
try:
load_module(shortname.replace(".py", ""))
text += f"**• Installed** `{(os.path.basename(downloaded_file_name))}` **successfully.**\n"
except BaseException:
text += f"**• Error installing** `{(os.path.basename(downloaded_file_name))}`\n"
else:
text += f"**• Plugin** `{(os.path.basename(downloaded_file_name))}` **already installed.**\n"
await a.edit(f"{text}\n**Installed every plugin.**")
await event.delete()
await b.delete()
CMD_HELP.update(
{
"core": ".load <plugin name>\nUse - Load the plugin.\
\n\n.unload <plugin name>\nUse - Unload the plugin.\
\n\n.install <reply to plugin file (.py)>\nUse - Install the plugin.\
\n\n.installall\nUse - Install all the plugins in the group/channel where it is used.\
\n\n.send <plugin name>\nUse - Send the plugin."
}
)
|
ll(even
|
mpidr_el1.rs
|
// SPDX-License-Identifier: Apache-2.0 OR MIT
//
// Copyright (c) 2018-2020 by the author(s)
//
// Author(s):
// - Andre Richter <[email protected]>
//! Multiprocessor Affinity Register - EL1
//!
//! In a multiprocessor system, provides an additional PE identification mechanism for scheduling
//! purposes.
use register::cpu::RegisterReadOnly;
pub struct
|
;
impl RegisterReadOnly<u64, ()> for Reg {
sys_coproc_read_raw!(u64, "MPIDR_EL1");
}
pub static MPIDR_EL1: Reg = Reg {};
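// Hedged usage sketch (not part of the generated definition above): the
// `RegisterReadOnly` trait exposes `get()`, so a scheduler can derive a core
// index from the affinity bits. Masking only Aff0 (bits [7:0]) is an
// assumption about the SoC topology, not something this file guarantees.
pub fn core_id() -> u64 {
    MPIDR_EL1.get() & 0xff
}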
|
Reg
|
cache_matcher_test.go
|
package matching_test
import (
"github.com/SpectoLabs/hoverfly/core/cache"
"github.com/SpectoLabs/hoverfly/core/matching"
"github.com/SpectoLabs/hoverfly/core/matching/matchers"
"github.com/SpectoLabs/hoverfly/core/models"
. "github.com/onsi/gomega"
"testing"
)
func Test_CacheMatcher_GetCachedResponse_WillReturnErrorIfCacheIsNil(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{}
_, err := unit.GetCachedResponse(&models.RequestDetails{})
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(Equal("No cache set"))
}
func Test_CacheMatcher_GetAllResponses_WillReturnErrorIfCacheIsNil(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{}
_, err := unit.GetAllResponses()
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(Equal("No cache set"))
}
func Test_CacheMatcher_SaveRequestMatcherResponsePair_WillReturnErrorIfCacheIsNil(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{}
cachedResponse, err := unit.SaveRequestMatcherResponsePair(models.RequestDetails{}, nil, nil)
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(Equal("No cache set"))
Expect(cachedResponse).To(BeNil())
}
func Test_CacheMatcher_SaveRequestMatcherResponsePair_CanSaveNilPairs(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{
RequestCache: cache.NewDefaultLRUCache(),
}
cachedResponse, err := unit.SaveRequestMatcherResponsePair(models.RequestDetails{}, nil, nil)
Expect(err).To(BeNil())
Expect(cachedResponse.MatchingPair).To(BeNil())
}
func
|
(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{}
err := unit.FlushCache()
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(Equal("No cache set"))
}
func Test_CacheMatcher_PreloadCache_WillReturnErrorIfCacheIsNil(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{}
simulation := models.Simulation{}
err := unit.PreloadCache(&simulation)
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(Equal("No cache set"))
}
func Test_CacheMatcher_PreloadCache_WillNotCacheIncompleteRequestMatchers(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{
RequestCache: cache.NewDefaultLRUCache(),
}
simulation := models.NewSimulation()
simulation.AddPair(&models.RequestMatcherResponsePair{
RequestMatcher: models.RequestMatcher{
Body: []models.RequestFieldMatchers{
{
Matcher: matchers.Regex,
Value: "loose",
},
},
},
Response: models.ResponseDetails{
Status: 200,
Body: "body",
},
})
err := unit.PreloadCache(simulation)
Expect(err).To(BeNil())
Expect(unit.RequestCache.RecordsCount()).To(Equal(0))
}
func Test_CacheMatcher_PreloadCache_WillPreemptivelyCacheFullExactMatchRequestMatchers(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{
RequestCache: cache.NewDefaultLRUCache(),
}
simulation := models.NewSimulation()
simulation.AddPair(&models.RequestMatcherResponsePair{
RequestMatcher: models.RequestMatcher{
Body: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "body",
},
},
Destination: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "destination",
},
},
Method: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "method",
},
},
Path: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "path",
},
},
DeprecatedQuery: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "query",
},
},
Scheme: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "scheme",
},
},
},
Response: models.ResponseDetails{
Status: 200,
Body: "body",
},
})
err := unit.PreloadCache(simulation)
Expect(err).To(BeNil())
Expect(unit.RequestCache.RecordsCount()).To(Equal(1))
}
func Test_CacheMatcher_PreloadCache_WillNotPreemptivelyCacheRequestMatchersWithoutExactMatches(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{
RequestCache: cache.NewDefaultLRUCache(),
}
simulation := models.NewSimulation()
simulation.AddPair(&models.RequestMatcherResponsePair{
RequestMatcher: models.RequestMatcher{
Destination: []models.RequestFieldMatchers{
{
Matcher: matchers.Regex,
Value: "destination",
},
},
},
Response: models.ResponseDetails{
Status: 200,
Body: "body",
},
})
err := unit.PreloadCache(simulation)
Expect(err).To(BeNil())
Expect(unit.RequestCache.RecordsCount()).To(Equal(0))
}
func Test_CacheMatcher_PreloadCache_WillCheckAllRequestMatchersInSimulation(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{
RequestCache: cache.NewDefaultLRUCache(),
}
simulation := models.NewSimulation()
simulation.AddPair(&models.RequestMatcherResponsePair{
RequestMatcher: models.RequestMatcher{
Destination: []models.RequestFieldMatchers{
{
Matcher: matchers.Regex,
Value: "destination",
},
},
},
Response: models.ResponseDetails{
Status: 200,
Body: "body",
},
})
simulation.AddPair(&models.RequestMatcherResponsePair{
RequestMatcher: models.RequestMatcher{
Body: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "body",
},
},
Destination: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "destination",
},
},
Method: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "method",
},
},
Path: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "path",
},
},
DeprecatedQuery: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "query",
},
},
Scheme: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "scheme",
},
},
},
Response: models.ResponseDetails{
Status: 200,
Body: "body",
},
})
err := unit.PreloadCache(simulation)
Expect(err).To(BeNil())
Expect(unit.RequestCache.RecordsCount()).To(Equal(1))
}
func Test_CacheMatcher_PreloadCache_WillNotCacheMatchersWithHeaders(t *testing.T) {
RegisterTestingT(t)
unit := matching.CacheMatcher{
RequestCache: cache.NewDefaultLRUCache(),
}
simulation := models.NewSimulation()
simulation.AddPair(&models.RequestMatcherResponsePair{
RequestMatcher: models.RequestMatcher{
Destination: []models.RequestFieldMatchers{
{
Matcher: matchers.Regex,
Value: "destination",
},
},
},
Response: models.ResponseDetails{
Status: 200,
Body: "body",
},
})
simulation.AddPair(&models.RequestMatcherResponsePair{
RequestMatcher: models.RequestMatcher{
Body: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "body",
},
},
Destination: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "destination",
},
},
Headers: map[string][]models.RequestFieldMatchers{
"Headers": {
{
Matcher: matchers.Exact,
Value: "value",
},
},
},
Method: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "method",
},
},
Path: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "path",
},
},
DeprecatedQuery: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "query",
},
},
Scheme: []models.RequestFieldMatchers{
{
Matcher: matchers.Exact,
Value: "scheme",
},
},
},
Response: models.ResponseDetails{
Status: 200,
Body: "body",
},
})
err := unit.PreloadCache(simulation)
Expect(err).To(BeNil())
Expect(unit.RequestCache.RecordsCount()).To(Equal(0))
}
|
Test_CacheMatcher_FlushCache_WillReturnErrorIfCacheIsNil
|
json.rs
|
// Copyright 2018-2020, Wayfair GmbH
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::registry::Registry;
use crate::tremor_const_fn;
use simd_json::to_owned_value;
pub fn load(registry: &mut Registry)
|
#[cfg(test)]
mod test {
use crate::registry::fun;
use simd_json::BorrowedValue as Value;
macro_rules! assert_val {
($e:expr, $r:expr) => {
assert_eq!($e, Ok(Value::from($r)))
};
}
#[test]
fn decode() {
let f = fun("json", "decode");
let v = Value::from(r#"["this","is","a","cake"]"#);
assert_val!(f(&[&v]), Value::from(vec!["this", "is", "a", "cake"]));
}
#[test]
fn encode() {
let f = fun("json", "encode");
let v = Value::from(vec!["this", "is", "a", "cake"]);
assert_val!(f(&[&v]), Value::from(r#"["this","is","a","cake"]"#));
}
#[test]
fn encode_pretty() {
let f = fun("json", "encode_pretty");
let v = Value::from(vec!["this", "is", "a", "cake"]);
assert_val!(
f(&[&v]),
Value::from(
r#"[
"this",
"is",
"a",
"cake"
]"#
)
);
}
}
|
{
registry
.insert(tremor_const_fn! (json::decode(_context, _input: String) {
// We need to clone here since we do not want to destroy the
// original value
let mut s: String = _input.to_string();
println!("{}", &s);
// Screw you rust
let mut bytes = unsafe{s.as_bytes_mut()};
// We need to do this since otherwise we depend on the clone of s
to_owned_value(&mut bytes).map_err(to_runtime_error).map(Value::from)
}))
.insert(tremor_const_fn! (json::encode(_context, _input) {
simd_json::to_string(_input).map(Value::from).map_err(to_runtime_error)
}))
.insert(tremor_const_fn! (json::encode_pretty(_context, _input) {
simd_json::to_string_pretty(_input).map(Value::from).map_err(to_runtime_error)
}));
}
|
workload.go
|
package tpcc
import (
"context"
"database/sql"
"fmt"
"math"
"math/rand"
"sort"
"strings"
"sync"
"time"
"github.com/pingcap/go-tpc/pkg/load"
"github.com/pingcap/go-tpc/pkg/measurement"
"github.com/pingcap/go-tpc/pkg/workload"
)
type contextKey string
const stateKey = contextKey("tpcc")
var tables = []string{tableItem, tableCustomer, tableDistrict, tableHistory,
tableNewOrder, tableOrderLine, tableOrders, tableStock, tableWareHouse}
type txn struct {
name string
action func(ctx context.Context, threadID int) error
weight int
keyingTime float64
thinkingTime float64
}
type tpccState struct {
*workload.TpcState
index int
decks []int
loaders map[string]*load.CSVBatchLoader
newOrderStmts map[string]*sql.Stmt
orderStatusStmts map[string]*sql.Stmt
deliveryStmts map[string]*sql.Stmt
stockLevelStmt map[string]*sql.Stmt
paymentStmts map[string]*sql.Stmt
}
// Config is the configuration for tpcc workload
type Config struct {
DBName string
Threads int
Parts int
Warehouses int
UseFK bool
Isolation int
CheckAll bool
NoCheck bool
	// whether to include wait times (keying time & thinking time)
Wait bool
MaxMeasureLatency time.Duration
// for prepare sub-command only
OutputType string
OutputDir string
SpecifiedTables string
}
// Workloader is TPCC workload
type Workloader struct {
db *sql.DB
cfg *Config
createTableWg sync.WaitGroup
initLoadTime string
ddlManager *ddlManager
txns []txn
// stats
rtMeasurement *measurement.Measurement
waitTimeMeasurement *measurement.Measurement
}
// NewWorkloader creates the tpc-c workloader
func NewWorkloader(db *sql.DB, cfg *Config) (workload.Workloader, error) {
if db == nil && cfg.OutputType == "" {
panic(fmt.Errorf("failed to connect to database when loading data"))
}
if cfg.Parts > cfg.Warehouses {
panic(fmt.Errorf("number warehouses %d must >= partition %d", cfg.Warehouses, cfg.Parts))
}
resetMaxLat := func(m *measurement.Measurement) {
m.MaxLatency = cfg.MaxMeasureLatency
}
w := &Workloader{
db: db,
cfg: cfg,
initLoadTime: time.Now().Format(timeFormat),
ddlManager: newDDLManager(cfg.Parts, cfg.UseFK),
rtMeasurement: measurement.NewMeasurement(resetMaxLat),
waitTimeMeasurement: measurement.NewMeasurement(resetMaxLat),
}
w.txns = []txn{
{name: "new_order", action: w.runNewOrder, weight: 45, keyingTime: 18, thinkingTime: 12},
{name: "payment", action: w.runPayment, weight: 43, keyingTime: 3, thinkingTime: 12},
{name: "order_status", action: w.runOrderStatus, weight: 4, keyingTime: 2, thinkingTime: 10},
{name: "delivery", action: w.runDelivery, weight: 4, keyingTime: 2, thinkingTime: 5},
{name: "stock_level", action: w.runStockLevel, weight: 4, keyingTime: 2, thinkingTime: 5},
}
if w.db != nil {
w.createTableWg.Add(cfg.Threads)
}
return w, nil
}
// Name implements Workloader interface
func (w *Workloader) Name() string {
return "tpcc"
}
// InitThread implements Workloader interface
func (w *Workloader) InitThread(ctx context.Context, threadID int) context.Context {
s := &tpccState{
TpcState: workload.NewTpcState(ctx, w.db),
index: 0,
decks: make([]int, 0, 23),
}
for index, txn := range w.txns {
for i := 0; i < txn.weight; i++ {
s.decks = append(s.decks, index)
}
}
s.index = len(s.decks) - 1
ctx = context.WithValue(ctx, stateKey, s)
return ctx
}
// CleanupThread implements Workloader interface
func (w *Workloader) CleanupThread(ctx context.Context, threadID int) {
s := getTPCCState(ctx)
closeStmts(s.newOrderStmts)
closeStmts(s.paymentStmts)
closeStmts(s.deliveryStmts)
closeStmts(s.stockLevelStmt)
closeStmts(s.orderStatusStmts)
if s.Conn != nil {
s.Conn.Close()
}
	for k := range s.loaders {
s.loaders[k].Close(ctx)
}
}
// Prepare implements Workloader interface
func (w *Workloader) Prepare(ctx context.Context, threadID int) error {
if w.db != nil {
if threadID == 0 {
if err := w.ddlManager.createTables(ctx); err != nil {
return err
}
}
w.createTableWg.Done()
w.createTableWg.Wait()
}
return prepareWorkload(ctx, w, w.cfg.Threads, w.cfg.Warehouses, threadID)
}
func getTPCCState(ctx context.Context) *tpccState {
s := ctx.Value(stateKey).(*tpccState)
return s
}
// Run implements Workloader interface
func (w *Workloader) Run(ctx context.Context, threadID int) error {
s := getTPCCState(ctx)
if s.newOrderStmts == nil {
s.newOrderStmts = map[string]*sql.Stmt{
newOrderSelectCustomer: prepareStmt(ctx, s.Conn, newOrderSelectCustomer),
newOrderSelectDistrict: prepareStmt(ctx, s.Conn, newOrderSelectDistrict),
newOrderUpdateDistrict: prepareStmt(ctx, s.Conn, newOrderUpdateDistrict),
newOrderInsertOrder: prepareStmt(ctx, s.Conn, newOrderInsertOrder),
newOrderInsertNewOrder: prepareStmt(ctx, s.Conn, newOrderInsertNewOrder),
// batch select items
// batch select stock for update
newOrderUpdateStock: prepareStmt(ctx, s.Conn, newOrderUpdateStock),
// batch insert order_line
}
for i := 5; i <= 15; i++ {
s.newOrderStmts[newOrderSelectItemSQLs[i]] = prepareStmt(ctx, s.Conn, newOrderSelectItemSQLs[i])
s.newOrderStmts[newOrderSelectStockSQLs[i]] = prepareStmt(ctx, s.Conn, newOrderSelectStockSQLs[i])
s.newOrderStmts[newOrderInsertOrderLineSQLs[i]] = prepareStmt(ctx, s.Conn, newOrderInsertOrderLineSQLs[i])
}
s.paymentStmts = map[string]*sql.Stmt{
paymentUpdateWarehouse: prepareStmt(ctx, s.Conn, paymentUpdateWarehouse),
paymentSelectWarehouse: prepareStmt(ctx, s.Conn, paymentSelectWarehouse),
paymentUpdateDistrict: prepareStmt(ctx, s.Conn, paymentUpdateDistrict),
paymentSelectDistrict: prepareStmt(ctx, s.Conn, paymentSelectDistrict),
paymentSelectCustomerListByLast: prepareStmt(ctx, s.Conn, paymentSelectCustomerListByLast),
paymentSelectCustomerForUpdate: prepareStmt(ctx, s.Conn, paymentSelectCustomerForUpdate),
paymentSelectCustomerData: prepareStmt(ctx, s.Conn, paymentSelectCustomerData),
paymentUpdateCustomerWithData: prepareStmt(ctx, s.Conn, paymentUpdateCustomerWithData),
paymentUpdateCustomer: prepareStmt(ctx, s.Conn, paymentUpdateCustomer),
paymentInsertHistory: prepareStmt(ctx, s.Conn, paymentInsertHistory),
}
s.orderStatusStmts = map[string]*sql.Stmt{
orderStatusSelectCustomerCntByLast: prepareStmt(ctx, s.Conn, orderStatusSelectCustomerCntByLast),
orderStatusSelectCustomerByLast: prepareStmt(ctx, s.Conn, orderStatusSelectCustomerByLast),
orderStatusSelectCustomerByID: prepareStmt(ctx, s.Conn, orderStatusSelectCustomerByID),
orderStatusSelectLatestOrder: prepareStmt(ctx, s.Conn, orderStatusSelectLatestOrder),
orderStatusSelectOrderLine: prepareStmt(ctx, s.Conn, orderStatusSelectOrderLine),
}
s.deliveryStmts = map[string]*sql.Stmt{
deliverySelectNewOrder: prepareStmt(ctx, s.Conn, deliverySelectNewOrder),
deliveryDeleteNewOrder: prepareStmt(ctx, s.Conn, deliveryDeleteNewOrder),
deliveryUpdateOrder: prepareStmt(ctx, s.Conn, deliveryUpdateOrder),
deliverySelectOrders: prepareStmt(ctx, s.Conn, deliverySelectOrders),
deliveryUpdateOrderLine: prepareStmt(ctx, s.Conn, deliveryUpdateOrderLine),
deliverySelectSumAmount: prepareStmt(ctx, s.Conn, deliverySelectSumAmount),
deliveryUpdateCustomer: prepareStmt(ctx, s.Conn, deliveryUpdateCustomer),
}
s.stockLevelStmt = map[string]*sql.Stmt{
stockLevelSelectDistrict: prepareStmt(ctx, s.Conn, stockLevelSelectDistrict),
stockLevelCount: prepareStmt(ctx, s.Conn, stockLevelCount),
}
}
// refer 5.2.4.2
if s.index == len(s.decks) {
s.index = 0
s.R.Shuffle(len(s.decks), func(i, j int) {
s.decks[i], s.decks[j] = s.decks[j], s.decks[i]
})
}
txnIndex := s.decks[s.R.Intn(len(s.decks))]
txn := w.txns[txnIndex]
// For each transaction type, the Keying Time is constant
// and must be a minimum of 18 seconds for New Order,
// 3 seconds for Payment,
// and 2 seconds each for Order-Status, Delivery, and Stock-Level.
if w.cfg.Wait {
start := time.Now()
time.Sleep(time.Duration(txn.keyingTime * float64(time.Second)))
w.waitTimeMeasurement.Measure(fmt.Sprintf("keyingTime-%s", txn.name), time.Now().Sub(start), nil)
}
start := time.Now()
err := txn.action(ctx, threadID)
w.rtMeasurement.Measure(txn.name, time.Now().Sub(start), err)
// 5.2.5.4, For each transaction type, think time is taken independently from a negative exponential distribution.
// Think time, T t , is computed from the following equation: Tt = -log(r) * (mean think time),
// r = random number uniformly distributed between 0 and 1
if w.cfg.Wait {
start := time.Now()
thinkTime := -math.Log(rand.Float64()) * txn.thinkingTime
if thinkTime > txn.thinkingTime*10 {
thinkTime = txn.thinkingTime * 10
}
time.Sleep(time.Duration(thinkTime * float64(time.Second)))
w.waitTimeMeasurement.Measure(fmt.Sprintf("thinkingTime-%s", txn.name), time.Now().Sub(start), nil)
}
// TODO: add check
return err
}
// Cleanup implements Workloader interface
func (w *Workloader) Cleanup(ctx context.Context, threadID int) error {
if threadID == 0 {
if err := w.ddlManager.dropTable(ctx); err != nil {
return err
}
}
return nil
}
func outputRtMeasurement(prefix string, opMeasurement map[string]*measurement.Histogram) {
keys := make([]string, 0, len(opMeasurement))
for k := range opMeasurement {
keys = append(keys, k)
}
sort.Strings(keys)
for _, op := range keys {
hist := opMeasurement[op]
if !hist.Empty() {
info := hist.GetInfo()
op = strings.ToUpper(op)
elapsedVec.WithLabelValues(op).Set(info.Elapsed)
sumVec.WithLabelValues(op).Set(info.Sum)
countVec.WithLabelValues(op).Set(float64(info.Count))
opsVec.WithLabelValues(op).Set(info.Ops)
avgVec.WithLabelValues(op).Set(info.Avg)
p50Vec.WithLabelValues(op).Set(info.P50)
p90Vec.WithLabelValues(op).Set(info.P90)
p99Vec.WithLabelValues(op).Set(info.P99)
p999Vec.WithLabelValues(op).Set(info.P999)
maxVec.WithLabelValues(op).Set(info.Max)
fmt.Printf("%s%-6s - %s\n", prefix, op, hist.Summary())
}
}
}
func outputWaitTimesMeasurement(prefix string, opMeasurement map[string]*measurement.Histogram) {
keys := make([]string, len(opMeasurement))
|
}
sort.Strings(keys)
for _, op := range keys {
hist := opMeasurement[op]
if !hist.Empty() {
fmt.Printf("%s%-6s - %.1fs\n", prefix, strings.ToUpper(op), float64(hist.GetInfo().Avg)/1000)
}
}
}
func (w *Workloader) OutputStats(ifSummaryReport bool) {
w.rtMeasurement.Output(ifSummaryReport, outputRtMeasurement)
if w.cfg.Wait {
w.waitTimeMeasurement.Output(ifSummaryReport, outputWaitTimesMeasurement)
}
if ifSummaryReport {
hist, e := w.rtMeasurement.OpSumMeasurement["new_order"]
if e && !hist.Empty() {
result := hist.GetInfo()
const specWarehouseFactor = 12.86
tpmC := result.Ops * 60
efc := 100 * tpmC / (specWarehouseFactor * float64(w.cfg.Warehouses))
fmt.Printf("tpmC: %.1f, efficiency: %.1f%%\n", tpmC, efc)
}
}
}
// DBName returns the name of test db.
func (w *Workloader) DBName() string {
return w.cfg.DBName
}
func (w *Workloader) beginTx(ctx context.Context) (*sql.Tx, error) {
s := getTPCCState(ctx)
tx, err := s.Conn.BeginTx(ctx, &sql.TxOptions{
Isolation: sql.IsolationLevel(w.cfg.Isolation),
})
return tx, err
}
func prepareStmts(ctx context.Context, conn *sql.Conn, queries []string) []*sql.Stmt {
stmts := make([]*sql.Stmt, len(queries))
for i, query := range queries {
if len(query) == 0 {
continue
}
stmts[i] = prepareStmt(ctx, conn, query)
}
return stmts
}
func prepareStmt(ctx context.Context, conn *sql.Conn, query string) *sql.Stmt {
stmt, err := conn.PrepareContext(ctx, query)
if err != nil {
panic(err)
}
return stmt
}
func closeStmts(stmts map[string]*sql.Stmt) {
for _, stmt := range stmts {
if stmt == nil {
continue
}
stmt.Close()
}
}
|
var i = 0
for k := range opMeasurement {
keys[i] = k
i += 1
|
index.ts
|
// remove type imports from http for Deno compatibility
// see https://github.com/octokit/octokit.js/issues/24#issuecomment-817361886
// import { IncomingMessage, ServerResponse } from "http";
type IncomingMessage = any;
type ServerResponse = any;
import { createNodeMiddleware as oauthNodeMiddleware } from "@octokit/oauth-app";
import { createNodeMiddleware as webhooksNodeMiddleware } from "@octokit/webhooks";
import { App } from "../../index";
import { onUnhandledRequestDefault } from "./on-unhandled-request-default";
import { Options } from "../../types";
export type MiddlewareOptions = {
pathPrefix?: string;
log?: Options["log"];
onUnhandledRequest?: (
request: IncomingMessage,
response: ServerResponse
) => void;
};
function noop() {}
export function createNodeMiddleware(
app: App,
options: MiddlewareOptions = {}
) {
const log = Object.assign(
{
debug: noop,
info: noop,
warn: console.warn.bind(console),
error: console.error.bind(console),
},
options.log
);
const optionsWithDefaults = {
onUnhandledRequest: onUnhandledRequestDefault,
pathPrefix: "/api/github",
...options,
log,
};
const webhooksMiddleware = webhooksNodeMiddleware(app.webhooks, {
path: optionsWithDefaults.pathPrefix + "/webhooks",
log,
onUnhandledRequest: optionsWithDefaults.onUnhandledRequest,
});
const oauthMiddleware = oauthNodeMiddleware(app.oauth, {
pathPrefix: optionsWithDefaults.pathPrefix + "/oauth",
onUnhandledRequest: optionsWithDefaults.onUnhandledRequest,
});
return middleware.bind(null, optionsWithDefaults, {
webhooksMiddleware,
oauthMiddleware,
});
}
export async function
|
(
options: Required<MiddlewareOptions>,
{ webhooksMiddleware, oauthMiddleware }: any,
request: IncomingMessage,
response: ServerResponse,
next?: Function
) {
const { pathname } = new URL(request.url as string, "http://localhost");
if (pathname === `${options.pathPrefix}/webhooks`) {
return webhooksMiddleware(request, response, next);
}
if (pathname.startsWith(`${options.pathPrefix}/oauth/`)) {
return oauthMiddleware(request, response, next);
}
const isExpressMiddleware = typeof next === "function";
if (isExpressMiddleware) {
// @ts-ignore `next` must be a function as we check two lines above
return next();
}
return options.onUnhandledRequest(request, response);
}
|
middleware
|
archive_api.js
|
//The Sentinel Project archives every source saved on each datapoint.
//This API is useful to retrieve an archived source in case the original link was deleted.
var express = require("express");
function load_archiveApi(app, websitemodel) {
var WebsiteModel = websitemodel;
// retrieve by id
app.get('/api/archiveurl/:id', function (req, res) {
return WebsiteModel.findById(req.params.id, function (err, archive) {
if (!err && archive) {
//remove javascript tag, in case it was still in the data
archive.content=archive.content.replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,' ');
return res.jsonp(archive);
} else {
console.log(err);
return res.send(null);
}
});
});
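    // Hedged client usage sketch (the host and the document id below are
    // placeholders, not taken from this project): an archived page is fetched
    // by its database id with a plain GET against the route defined above.
    //
    //   fetch('http://localhost:3000/api/archiveurl/507f1f77bcf86cd799439011')
    //     .then(function (res) { return res.json(); })
    //     .then(function (archive) { console.log(archive.url, archive.content); });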
// retrieve by url, do not specify the http:// part
app.get('/api/archiveurl/url/:url', function (req, res) {
var url = 'http://'+req.params.url;
        return WebsiteModel.findOne({url: url}, function (err, archive) {
if (!err && archive) {
//remove javascript tag, in case it was still in the data
archive.content=archive.content.replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,' ');
|
console.log(err);
return res.send(null);
}
});
});
}
exports.load_archiveApi = load_archiveApi;
|
return res.jsonp(archive);
} else {
|
UI_assign_group_window.py
|
# -*- coding: utf-8 -*-
from PySide2 import QtCore, QtGui, QtWidgets
import json
import core_functions as cf
import numpy as np
from UI_labeled_slider import LabeledSlider
class Ui_AssignGroup(object):
def setupUi(self, AssignGroups):
# Note: this is not how it should be done but currently I don't know
# how to do it differently. This is only needed to be able to emit
# signals to the main window
|
def retranslateUi(self, AssignGroups):
_translate = QtCore.QCoreApplication.translate
AssignGroups.setWindowTitle(_translate("AssignGroups", "Assign Groups"))
if not self.include_all_scans:
self.select_scan_number_label.setText(
_translate("AssignGroups", "Select Scan")
)
self.no_groups_label.setText(
_translate("AssignGroups", "Select Number of Groups")
)
self.available_devices_label.setText(
_translate(
"AssignGroups",
"Available Devices for Assignment "
+ str(self.parameters["device_number"]),
)
)
self.group_name_label.setText(_translate("AssignGroups", "Group Name"))
self.device_assignment_label.setText(
_translate("AssignGroups", "Assign Devices (seperated by ,)")
)
self.group_color_label.setText(_translate("AssignGroups", "Color"))
if not self.autodetect_spectrum:
self.spectrum_file_label.setText(_translate("AssignGroups", "Spectrum"))
|
AssignGroups.setObjectName("AssignGroups")
AssignGroups.setWindowTitle("Group Assignement Dialog")
AssignGroups.resize(509, 317)
AssignGroups.setStyleSheet(
"QWidget {\n"
" background-color: rgb(44, 49, 60);\n"
" color: rgb(255, 255, 255);\n"
' font: 63 10pt "Segoe UI";\n'
"}\n"
"QPushButton {\n"
" border: 2px solid rgb(52, 59, 72);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QPushButton:hover {\n"
" background-color: rgb(57, 65, 80);\n"
" border: 2px solid rgb(61, 70, 86);\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(43, 50, 61);\n"
"}\n"
"QPushButton:checked {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(85, 170, 255);\n"
"}"
"QLineEdit {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QDoubleSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
)
self.verticalLayout = QtWidgets.QVBoxLayout(AssignGroups)
self.verticalLayout.setContentsMargins(25, 10, 25, 10)
self.verticalLayout.setObjectName("verticalLayout")
# # Device settings
# self.device_settings_header_label = QtWidgets.QLabel(AssignGroups)
# self.device_settings_header_label.setMinimumSize(QtCore.QSize(0, 20))
# self.device_settings_header_label.setStyleSheet(
# 'font: 75 bold 10pt "Segoe UI";'
# )
# self.device_settings_header_label.setObjectName("device_settings_header_label")
# self.verticalLayout.addWidget(self.device_settings_header_label)
# self.header_line_1 = QtWidgets.QFrame()
# self.header_line_1.setFrameShape(QtWidgets.QFrame.HLine)
# self.header_line_1.setFrameShadow(QtWidgets.QFrame.Sunken)
# self.verticalLayout.addWidget(self.header_line_1)
# self.header_line_1.setStyleSheet(
# "QFrame {\n" " border: 2px solid rgb(52, 59, 72);\n" "}\n"
# )
# self.manualRowCountGridLayout = 1
# Define dialog in which parameters should be entered
# dialog = QtWidgets.QDialog()
# dialog.setWindowTitle("Group Assignement Dialog")
# Select the scan that shall be evaluated
if not self.include_all_scans:
self.select_scan_number_label = QtWidgets.QLabel()
self.select_scan_number_label.setObjectName("select_scan_number_label")
self.verticalLayout.addWidget(self.select_scan_number_label)
self.select_scan_number_ComboBox = QtWidgets.QComboBox()
self.select_scan_number_ComboBox.setObjectName(
"select_scan_number_ComboBox"
)
for i in range(self.parameters["no_of_scans"]):
self.select_scan_number_ComboBox.addItem(str(int(i + 1)))
self.select_scan_number_ComboBox.setCurrentIndex(0)
self.verticalLayout.addWidget(self.select_scan_number_ComboBox)
# Select the number of groups to define
self.no_groups_label = QtWidgets.QLabel()
self.verticalLayout.addWidget(self.no_groups_label)
self.no_groups_LabeledSlider = LabeledSlider(
1,
int(np.size(np.unique(self.parameters["device_number"]))),
interval=1,
orientation=QtCore.Qt.Horizontal,
)
self.verticalLayout.addWidget(self.no_groups_LabeledSlider)
self.available_devices_label = QtWidgets.QLabel()
self.verticalLayout.addWidget(self.available_devices_label)
# if np.size(self.paths) == 1:
# verticalLayout.addWidget(self.no_groups_LabeledSlider)
        # Define the group assignment fields
self.group_definition_gridLayout = QtWidgets.QGridLayout()
self.group_definition_gridLayout.setSpacing(10)
# Group names and its container
self.group_name_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(self.group_name_label, 1, 0, 1, 1)
self.group_name_LineEdit_container = np.empty(0, dtype="object")
self.group_name_LineEdit_container = np.append(
self.group_name_LineEdit_container, QtWidgets.QLineEdit()
)
self.group_definition_gridLayout.addWidget(
self.group_name_LineEdit_container[0], 2, 0
)
# Enter device numbers and its container
self.device_assignment_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(
self.device_assignment_label, 1, 1, 1, 1
)
self.device_assignment_LineEdit_container = np.empty(0, dtype="object")
self.device_assignment_LineEdit_container = np.append(
self.device_assignment_LineEdit_container, QtWidgets.QLineEdit()
)
self.group_definition_gridLayout.addWidget(
self.device_assignment_LineEdit_container[0], 2, 1
)
# Assign a spectrum file to the group
if not self.autodetect_spectrum:
self.spectrum_file_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(
self.spectrum_file_label, 1, 2, 1, 1
)
self.group_spectrum_PushButton_container = np.empty(0, dtype="object")
self.group_spectrum_PushButton_container = np.append(
self.group_spectrum_PushButton_container, QtWidgets.QPushButton("")
)
self.group_spectrum_PushButton_container[0].setStyleSheet(
"background-color: red"
)
self.group_definition_gridLayout.addWidget(
self.group_spectrum_PushButton_container[0], 2, 2
)
# Definition of a plotting color for the group
self.group_color_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(self.group_color_label, 1, 3, 1, 1)
self.group_colors_PushButton_container = np.empty(0, dtype="object")
self.group_colors_PushButton_container = np.append(
self.group_colors_PushButton_container, QtWidgets.QPushButton("")
)
self.group_colors_PushButton_container[0].setStyleSheet(
"background-color: " + str(self.group_color[0])
)
self.group_definition_gridLayout.addWidget(
self.group_colors_PushButton_container[0], 2, 3
)
# Define the bottom pushbuttons that allows to close and save the dialog
self.leave_horizontalLayout = QtWidgets.QHBoxLayout()
self.close_pushButton = QtWidgets.QPushButton("Close")
self.save_pushButton = QtWidgets.QPushButton("Save")
self.leave_horizontalLayout.addWidget(self.close_pushButton)
self.leave_horizontalLayout.addWidget(self.save_pushButton)
self.verticalLayout.addLayout(self.group_definition_gridLayout)
self.verticalLayout.addLayout(self.leave_horizontalLayout)
self.setLayout(self.verticalLayout)
self.retranslateUi(AssignGroups)
QtCore.QMetaObject.connectSlotsByName(AssignGroups)
|
interface.go
|
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package restxmlserviceiface provides an interface to enable mocking the REST XML Service service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package restxmlserviceiface
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/model/api/codegentest/service/restxmlservice"
)
// RESTXMLServiceAPI provides an interface to enable mocking the
// restxmlservice.RESTXMLService service client's API operation,
// paginators, and waiters. This makes unit testing your code that calls out
// to the SDK's service client's calls easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // REST XML Service.
// func myFunc(svc restxmlserviceiface.RESTXMLServiceAPI) bool {
// // Make svc.EmptyStream request
// }
//
// func main() {
// sess := session.New()
// svc := restxmlservice.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockRESTXMLServiceClient struct {
// restxmlserviceiface.RESTXMLServiceAPI
// }
// func (m *mockRESTXMLServiceClient) EmptyStream(input *restxmlservice.EmptyStreamInput) (*restxmlservice.EmptyStreamOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockRESTXMLServiceClient{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
|
// tooling to generate mocks to satisfy the interfaces.
type RESTXMLServiceAPI interface {
EmptyStream(*restxmlservice.EmptyStreamInput) (*restxmlservice.EmptyStreamOutput, error)
EmptyStreamWithContext(aws.Context, *restxmlservice.EmptyStreamInput, ...request.Option) (*restxmlservice.EmptyStreamOutput, error)
EmptyStreamRequest(*restxmlservice.EmptyStreamInput) (*request.Request, *restxmlservice.EmptyStreamOutput)
GetEventStream(*restxmlservice.GetEventStreamInput) (*restxmlservice.GetEventStreamOutput, error)
GetEventStreamWithContext(aws.Context, *restxmlservice.GetEventStreamInput, ...request.Option) (*restxmlservice.GetEventStreamOutput, error)
GetEventStreamRequest(*restxmlservice.GetEventStreamInput) (*request.Request, *restxmlservice.GetEventStreamOutput)
OtherOperation(*restxmlservice.OtherOperationInput) (*restxmlservice.OtherOperationOutput, error)
OtherOperationWithContext(aws.Context, *restxmlservice.OtherOperationInput, ...request.Option) (*restxmlservice.OtherOperationOutput, error)
OtherOperationRequest(*restxmlservice.OtherOperationInput) (*request.Request, *restxmlservice.OtherOperationOutput)
}
var _ RESTXMLServiceAPI = (*restxmlservice.RESTXMLService)(nil)
|
// and waiters. It's suggested to use the pattern above for testing, or using
|
rustls.rs
|
use real_tokio_rustls::rustls::ClientConfig;
use real_tokio_rustls::webpki::DNSNameRef;
use real_tokio_rustls::{client::TlsStream, TlsConnector};
use tungstenite::client::{uri_mode, IntoClientRequest};
use tungstenite::handshake::client::Request;
use tungstenite::stream::Mode;
use tungstenite::Error;
use crate::stream::Stream as StreamSwitcher;
use crate::{client_async_with_config, domain, Response, WebSocketConfig, WebSocketStream};
use super::TokioAdapter;
/// A stream that might be protected with TLS.
pub type MaybeTlsStream<S> = StreamSwitcher<TokioAdapter<S>, TokioAdapter<TlsStream<S>>>;
pub type AutoStream<S> = MaybeTlsStream<S>;
pub type Connector = TlsConnector;
async fn wrap_stream<S>(
socket: S,
domain: String,
connector: Option<Connector>,
mode: Mode,
) -> Result<AutoStream<S>, Error>
where
S: 'static + tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin,
{
match mode {
Mode::Plain => Ok(StreamSwitcher::Plain(TokioAdapter::new(socket))),
Mode::Tls => {
let stream = {
let connector = if let Some(connector) = connector {
connector
} else {
let mut config = ClientConfig::new();
config
.root_store
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
TlsConnector::from(std::sync::Arc::new(config))
};
let domain = DNSNameRef::try_from_ascii_str(&domain)
.map_err(|err| Error::Tls(err.into()))?;
connector.connect(domain, socket).await?
};
Ok(StreamSwitcher::Tls(TokioAdapter::new(stream)))
}
}
}
/// Creates a WebSocket handshake from a request and a stream,
/// upgrading the stream to TLS if required and using the given
/// connector and WebSocket configuration.
pub async fn client_async_tls_with_connector_and_config<R, S>(
request: R,
stream: S,
connector: Option<Connector>,
config: Option<WebSocketConfig>,
) -> Result<(WebSocketStream<AutoStream<S>>, Response), Error>
where
R: IntoClientRequest + Unpin,
S: 'static + tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin,
AutoStream<S>: Unpin,
|
{
let request: Request = request.into_client_request()?;
let domain = domain(&request)?;
// Make sure we check domain and mode first. URL must be valid.
let mode = uri_mode(request.uri())?;
let stream = wrap_stream(stream, domain, connector, mode).await?;
client_async_with_config(request, stream, config).await
}
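// Hedged usage sketch (the host, path and TcpStream set-up below are
// assumptions for illustration, not part of this module): callers establish
// the TCP connection themselves and let the helper above negotiate TLS and
// perform the WebSocket handshake.
//
// async fn connect_example() -> Result<(), Error> {
//     let tcp = tokio::net::TcpStream::connect("example.com:443").await?;
//     let (_ws_stream, _response) = client_async_tls_with_connector_and_config(
//         "wss://example.com/socket",
//         tcp,
//         None,
//         None,
//     )
//     .await?;
//     Ok(())
// }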
|
|
log.go
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package log
import (
"fmt"
"os"
"path/filepath"
"strings"
"gopkg.in/ini.v1"
"github.com/inconshreveable/log15"
"github.com/inconshreveable/log15/term"
)
var Root log15.Logger
var loggersToClose []DisposableHandler
func init() {
loggersToClose = make([]DisposableHandler, 0)
Root = log15.Root()
}
func New(logger string, ctx ...interface{}) Logger {
params := append([]interface{}{"logger", logger}, ctx...)
return Root.New(params...)
}
func Trace(format string, v ...interface{}) {
	Root.Debug(fmt.Sprintf(format, v...))
}
func Debug(format string, v ...interface{}) {
	Root.Debug(fmt.Sprintf(format, v...))
}
func Debug2(message string, v ...interface{}) {
Root.Debug(message, v...)
}
func Info(format string, v ...interface{}) {
	Root.Info(fmt.Sprintf(format, v...))
}
func Info2(message string, v ...interface{}) {
Root.Info(message, v...)
}
func Warn(format string, v ...interface{}) {
	Root.Warn(fmt.Sprintf(format, v...))
}
func Warn2(message string, v ...interface{}) {
Root.Warn(message, v...)
}
func Error(skip int, format string, v ...interface{}) {
	Root.Error(fmt.Sprintf(format, v...))
}
func Error2(message string, v ...interface{}) {
Root.Error(message, v...)
}
func Critical(skip int, format string, v ...interface{}) {
	Root.Crit(fmt.Sprintf(format, v...))
}
func Fatal(skip int, format string, v ...interface{}) {
	Root.Crit(fmt.Sprintf(format, v...))
Close()
os.Exit(1)
}
func Close() {
for _, logger := range loggersToClose {
logger.Close()
}
loggersToClose = make([]DisposableHandler, 0)
}
var logLevels = map[string]log15.Lvl{
"trace": log15.LvlDebug,
"debug": log15.LvlDebug,
"info": log15.LvlInfo,
"warn": log15.LvlWarn,
"error": log15.LvlError,
"critical": log15.LvlCrit,
}
func
|
(key string, defaultName string, cfg *ini.File) (string, log15.Lvl) {
levelName := cfg.Section(key).Key("level").MustString("info")
levelName = strings.ToLower(levelName)
level := getLogLevelFromString(levelName)
return levelName, level
}
func getLogLevelFromString(levelName string) log15.Lvl {
level, ok := logLevels[levelName]
if !ok {
Root.Error("Unknown log level", "level", levelName)
return log15.LvlError
}
return level
}
func getFilters(filterStrArray []string) map[string]log15.Lvl {
filterMap := make(map[string]log15.Lvl)
for _, filterStr := range filterStrArray {
parts := strings.Split(filterStr, ":")
filterMap[parts[0]] = getLogLevelFromString(parts[1])
}
return filterMap
}
func getLogFormat(format string) log15.Format {
switch format {
case "console":
if term.IsTty(os.Stdout.Fd()) {
return log15.TerminalFormat()
}
return log15.LogfmtFormat()
case "text":
return log15.LogfmtFormat()
case "json":
return log15.JsonFormat()
default:
return log15.LogfmtFormat()
}
}
func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) {
Close()
defaultLevelName, _ := getLogLevelFromConfig("log", "info", cfg)
defaultFilters := getFilters(cfg.Section("log").Key("filters").Strings(" "))
handlers := make([]log15.Handler, 0)
for _, mode := range modes {
mode = strings.TrimSpace(mode)
sec, err := cfg.GetSection("log." + mode)
if err != nil {
Root.Error("Unknown log mode", "mode", mode)
}
// Log level.
_, level := getLogLevelFromConfig("log."+mode, defaultLevelName, cfg)
modeFilters := getFilters(sec.Key("filters").Strings(" "))
format := getLogFormat(sec.Key("format").MustString(""))
var handler log15.Handler
// Generate log configuration.
switch mode {
case "console":
handler = log15.StreamHandler(os.Stdout, format)
case "file":
fileName := sec.Key("file_name").MustString(filepath.Join(logsPath, "grafana.log"))
os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
fileHandler := NewFileWriter()
fileHandler.Filename = fileName
fileHandler.Format = format
fileHandler.Rotate = sec.Key("log_rotate").MustBool(true)
fileHandler.Maxlines = sec.Key("max_lines").MustInt(1000000)
fileHandler.Maxsize = 1 << uint(sec.Key("max_size_shift").MustInt(28))
fileHandler.Daily = sec.Key("daily_rotate").MustBool(true)
fileHandler.Maxdays = sec.Key("max_days").MustInt64(7)
fileHandler.Init()
loggersToClose = append(loggersToClose, fileHandler)
handler = fileHandler
case "syslog":
sysLogHandler := NewSyslog()
sysLogHandler.Format = format
sysLogHandler.Network = sec.Key("network").MustString("")
sysLogHandler.Address = sec.Key("address").MustString("")
sysLogHandler.Facility = sec.Key("facility").MustString("local7")
sysLogHandler.Tag = sec.Key("tag").MustString("")
if err := sysLogHandler.Init(); err != nil {
Root.Error("Failed to init syslog log handler", "error", err)
os.Exit(1)
}
loggersToClose = append(loggersToClose, sysLogHandler)
handler = sysLogHandler
}
for key, value := range defaultFilters {
if _, exist := modeFilters[key]; !exist {
modeFilters[key] = value
}
}
handler = LogFilterHandler(level, modeFilters, handler)
handlers = append(handlers, handler)
}
Root.SetHandler(log15.MultiHandler(handlers...))
}
func LogFilterHandler(maxLevel log15.Lvl, filters map[string]log15.Lvl, h log15.Handler) log15.Handler {
return log15.FilterHandler(func(r *log15.Record) (pass bool) {
if len(filters) > 0 {
for i := 0; i < len(r.Ctx); i += 2 {
key := r.Ctx[i].(string)
if key == "logger" {
loggerName, strOk := r.Ctx[i+1].(string)
if strOk {
if filterLevel, ok := filters[loggerName]; ok {
return r.Lvl <= filterLevel
}
}
}
}
}
return r.Lvl <= maxLevel
}, h)
}
|
getLogLevelFromConfig
|
list_hashes.rs
|
use ckb_app_config::{cli, CKBAppConfig, ExitCode};
use ckb_chain_spec::ChainSpec;
use ckb_resource::{Resource, AVAILABLE_SPECS};
use ckb_types::{packed::CellOutput, prelude::*, H256};
use clap::ArgMatches;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::convert::{TryFrom, TryInto};
use std::path::PathBuf;
#[derive(Clone, Debug, Serialize, Deserialize)]
struct SystemCell {
pub path: String,
pub tx_hash: H256,
pub index: usize,
pub data_hash: H256,
pub type_hash: Option<H256>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
struct DepGroupCell {
pub included_cells: Vec<String>,
pub tx_hash: H256,
pub index: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
struct SpecHashes {
pub spec_hash: H256,
pub genesis: H256,
pub cellbase: H256,
pub system_cells: Vec<SystemCell>,
pub dep_groups: Vec<DepGroupCell>,
}
impl TryFrom<ChainSpec> for SpecHashes {
type Error = ExitCode;
fn try_from(mut spec: ChainSpec) -> Result<Self, Self::Error> {
let hash_option = spec.genesis.hash.take();
let consensus = spec.build_consensus().map_err(to_config_error)?;
if let Some(hash) = hash_option {
let genesis_hash: H256 = consensus.genesis_hash().unpack();
if hash != genesis_hash {
eprintln!(
"Genesis hash unmatched in {} chainspec config file:\n\
in file {:#x},\n\
actual {:#x}",
spec.name, hash, genesis_hash
);
}
}
let block = consensus.genesis_block();
let cellbase = &block.transactions()[0];
let dep_group_tx = &block.transactions()[1];
// Zip name with the transaction outputs. System cells start from 1 in the genesis cellbase outputs.
let cells_hashes = spec
.genesis
.system_cells
.iter()
.map(|system_cell| &system_cell.file)
.zip(
cellbase
.outputs()
.into_iter()
.zip(cellbase.outputs_data().into_iter())
.skip(1),
)
.enumerate()
.map(|(index_minus_one, (resource, (output, data)))| {
let data_hash: H256 = CellOutput::calc_data_hash(&data.raw_data()).unpack();
let type_hash: Option<H256> = output
.type_()
.to_opt()
.map(|script| script.calc_script_hash().unpack());
SystemCell {
path: resource.to_string(),
tx_hash: cellbase.hash().unpack(),
index: index_minus_one + 1,
data_hash,
type_hash,
}
})
.collect();
let dep_groups = spec
.genesis
.dep_groups
.iter()
.enumerate()
.map(|(index, dep_group)| DepGroupCell {
included_cells: dep_group
.files
.iter()
.map(|res| res.to_string())
.collect::<Vec<_>>(),
tx_hash: dep_group_tx.hash().unpack(),
index,
})
.collect::<Vec<_>>();
Ok(SpecHashes {
spec_hash: spec.hash.unpack(),
genesis: consensus.genesis_hash().unpack(),
cellbase: cellbase.hash().unpack(),
system_cells: cells_hashes,
dep_groups,
})
}
}
pub fn list_hashes<'m>(root_dir: PathBuf, matches: &ArgMatches<'m>) -> Result<(), ExitCode> {
let mut specs = Vec::new();
if matches.is_present(cli::ARG_BUNDLED) {
println!("# Generated by: ckb list-hashes -b");
for env in AVAILABLE_SPECS {
let spec = ChainSpec::load_from(&Resource::bundled(format!("specs/{}.toml", env)))
.map_err(to_config_error)?;
let spec_name = spec.name.clone();
let spec_hashes: SpecHashes = spec.try_into()?;
specs.push((spec_name, spec_hashes));
}
} else {
println!("# Generated by: ckb list-hashes");
let mut resource = Resource::ckb_config(&root_dir);
if !resource.exists() {
resource = Resource::bundled_ckb_config();
}
let mut config: CKBAppConfig = toml::from_slice(&resource.get()?)?;
config.chain.spec.absolutize(&root_dir);
let chain_spec = ChainSpec::load_from(&config.chain.spec).map_err(to_config_error)?;
let spec_name = chain_spec.name.clone();
let spec_hashes: SpecHashes = chain_spec.try_into()?;
specs.push((spec_name, spec_hashes));
}
println!();
let length = specs.len();
for (index, (name, spec_hashes)) in specs.into_iter().enumerate() {
|
map.insert(name, spec_hashes);
print!("{}", toml::to_string(&map).unwrap());
if index + 1 < length {
println!("\n");
}
}
Ok(())
}
fn to_config_error(err: Box<dyn std::error::Error>) -> ExitCode {
eprintln!("{:?}", err);
ExitCode::Config
}
|
println!("# Spec: {}", name);
let mut map = BTreeMap::default();
|
PropertyInspector.tsx
|
import * as React from "react";
import CheckBox from "../../components/CheckBox";
import Heading from "../../components/Heading";
import HotKeySelect from "../../components/HotKeySelect";
import SdpiItem from "../../components/SdpiItem/SdpiItem";
interface Props {
}
interface State {
isShiftEnabled?: boolean,
}
class PropertyInspector extends React.Component<Props, State> {
constructor(props: Readonly<Props>) {
super(props);
this.state = {};
}
|
onShiftCheckBoxClick = (value: boolean) => {
this.setState({ isShiftEnabled: value });
};
render() {
const { isShiftEnabled = false } = this.state;
return (
<div>
<Heading>When Shift is held down...</Heading>
<SdpiItem id="shift-enabled" label="Enabled">
<CheckBox checked={isShiftEnabled} id="shift-enabled" onChange={this.onShiftCheckBoxClick}/>
</SdpiItem>
<SdpiItem id="shift-hotkey" label="Hotkey">
<HotKeySelect/>
</SdpiItem>
</div>
);
}
}
export default PropertyInspector;
| |
stmpxpermin.rs
|
#[doc = "Reader of register STMPXPERMIN"]
pub type R = crate::R<u32, super::STMPXPERMIN>;
#[doc = "Writer for register STMPXPERMIN"]
pub type W = crate::W<u32, super::STMPXPERMIN>;
#[doc = "Register STMPXPERMIN `reset()`'s with value 0xffff"]
impl crate::ResetValue for super::STMPXPERMIN {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0xffff
}
}
#[doc = "Reader of field `RESERVED16`"]
pub type RESERVED16_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `RESERVED16`"]
pub struct RESERVED16_W<'a> {
w: &'a mut W,
}
impl<'a> RESERVED16_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xffff << 16)) | (((value as u32) & 0xffff) << 16);
self.w
}
}
#[doc = "Reader of field `VALUE`"]
pub type VALUE_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `VALUE`"]
pub struct VALUE_W<'a> {
w: &'a mut W,
}
impl<'a> VALUE_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
self.w
}
}
impl R {
#[doc = "Bits 16:31 - 31:16\\]
Software should not rely on the value of a reserved field. Writing any value other than the reset value may result in undefined behavior."]
#[inline(always)]
pub fn reserved16(&self) -> RESERVED16_R {
RESERVED16_R::new(((self.bits >> 16) & 0xffff) as u16)
}
#[doc = "Bits 0:15 - 15:0\\]
Each time STMPXPER is updated, the value is also loaded into this register, provided that the value is smaller than the current value in this register. When written, the register is reset to 0xFFFF (65535), regardless of the value written. The minimum value can be used to detect extra WCLK pulses (this registers value will be significantly smaller than STMPXPER.VALUE)."]
#[inline(always)]
pub fn value(&self) -> VALUE_R
|
}
impl W {
#[doc = "Bits 16:31 - 31:16\\]
Software should not rely on the value of a reserved field. Writing any value other than the reset value may result in undefined behavior."]
#[inline(always)]
pub fn reserved16(&mut self) -> RESERVED16_W {
RESERVED16_W { w: self }
}
#[doc = "Bits 0:15 - 15:0\\]
Each time STMPXPER is updated, the value is also loaded into this register, provided that the value is smaller than the current value in this register. When written, the register is reset to 0xFFFF (65535), regardless of the value written. The minimum value can be used to detect extra WCLK pulses (this registers value will be significantly smaller than STMPXPER.VALUE)."]
#[inline(always)]
pub fn value(&mut self) -> VALUE_W {
VALUE_W { w: self }
}
}
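// Hedged usage sketch in svd2rust style (the `I2S0` peripheral handle below is
// an assumption, not taken from this file): read the smallest WCLK period seen
// so far, then write any value to re-arm the minimum detector back to 0xFFFF,
// as described in the field documentation above.
//
// let min_period = p.I2S0.stmpxpermin.read().value().bits();
// p.I2S0.stmpxpermin.write(|w| unsafe { w.value().bits(0) });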
|
{
VALUE_R::new((self.bits & 0xffff) as u16)
}
|
dp.py
|
import numpy as np
import scipy.integrate as integrate
import scipy.interpolate as interpolate
def calculate_parameters(axis, dose, cax=False):
|
"""
A function to calculate the relevant
descriptive parameters of dose profiles.
"""
interpolated_axis = np.linspace(axis[0], axis[-1], len(axis) * 100)
akima_dose_interpolator = interpolate.Akima1DInterpolator(axis, dose)
    interpolated_dose = np.flip(akima_dose_interpolator(interpolated_axis))
D0 = (
interpolated_dose[int(len(interpolated_dose) / 2)]
+ interpolated_dose[int(len(interpolated_dose) / 2) - 1]
) / 2
XL20 = interpolated_axis[: int(len(interpolated_axis) / 2)][
(
np.abs(
interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.2 * max(dose)
)
).argmin()
]
XL50 = interpolated_axis[: int(len(interpolated_axis) / 2)][
(
np.abs(
interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.5 * max(dose)
)
).argmin()
]
XL80 = interpolated_axis[: int(len(interpolated_axis) / 2)][
(
np.abs(
interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.8 * max(dose)
)
).argmin()
]
XR20 = interpolated_axis[int(len(interpolated_axis) / 2) :][
(
np.abs(
interpolated_dose[
int(len(interpolated_axis) / 2) : len(interpolated_axis)
]
- 0.2 * max(dose)
)
).argmin()
]
XR50 = interpolated_axis[int(len(interpolated_axis) / 2) :][
(
np.abs(
interpolated_dose[
int(len(interpolated_axis) / 2) : len(interpolated_axis)
]
- 0.5 * max(dose)
)
).argmin()
]
XR80 = interpolated_axis[int(len(interpolated_axis) / 2) :][
(
np.abs(
interpolated_dose[
int(len(interpolated_axis) / 2) : len(interpolated_axis)
]
- 0.8 * max(dose)
)
).argmin()
]
HWB = round(abs(XR50 - XL50), 3)
CAXdev = round(XL50 + 0.5 * HWB, 3)
Dose80 = [value for value in dose if value >= 0.8 * max(dose)]
    if cax:
return CAXdev
flat_krieger = round(
max([value for value in dose if value >= 0.95 * max(dose)])
- min([value for value in dose if value >= 0.95 * max(dose)]) / D0,
5,
)
flat_stddev = round(np.std(Dose80), 3)
if len(Dose80) % 2 != 0:
Dose80 = (
Dose80[0 : int(len(Dose80) / 2)]
+ Dose80[int(len(Dose80) / 2) + 1 : len(Dose80)]
)
S = round(
max(
[Dose80[i - 1] / Dose80[len(Dose80) - i] for i in range(1, len(Dose80) + 1)]
),
3,
)
Lpenumbra = round(abs(XL80 - XL20 + CAXdev), 3)
Rpenumbra = round(abs(XR80 - XR20 + CAXdev), 3)
XL20index = np.where(interpolated_axis == XL20)[0][0]
XL80index = np.where(interpolated_axis == XL80)[0][0]
XR20index = np.where(interpolated_axis == XR20)[0][0]
XR80index = np.where(interpolated_axis == XR80)[0][0]
Lintegral = round(
abs(
integrate.simps(
interpolated_dose[XL20index:XL80index],
interpolated_axis[XL20index:XL80index],
)
),
3,
)
Rintegral = round(
abs(
integrate.simps(
interpolated_dose[XR80index:XR20index],
interpolated_axis[XR80index:XR20index],
)
),
3,
)
if CAXdev > 150:
raise Exception
return [
HWB,
CAXdev,
flat_krieger,
flat_stddev,
S,
Lpenumbra,
Rpenumbra,
Lintegral,
Rintegral,
]
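# Hedged usage sketch (the flat-topped profile below is synthetic test data,
# not a measurement): evaluate a symmetric profile and print the half-width
# and CAX deviation returned by calculate_parameters.
if __name__ == "__main__":
    axis = np.linspace(-100, 100, 201)  # position in mm
    dose = 1 / (1 + np.exp(0.5 * (np.abs(axis) - 50)))  # flat top, soft edges
    hwb, caxdev, *_ = calculate_parameters(axis, dose)
    print(f"HWB: {hwb} mm, CAX deviation: {caxdev} mm")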
|