file_name | prefix | suffix | middle
---|---|---|---|
pso.rs
|
//! A typed high-level pipeline interface.
use std::borrow::Borrow;
use std::mem;
use std::marker::PhantomData;
use {hal, handle};
use hal::image::{self, ImageLayout};
use hal::pass::{AttachmentOps, AttachmentLoadOp, AttachmentStoreOp};
use format::{self, Format};
use {Backend, Device, Primitive, Supports, Transfer, Graphics, Encoder};
pub use hal::pso::{Rasterizer, CreationError, InstanceRate};
#[derive(Debug)]
pub struct RawDescriptorSet<B: Backend> {
pub(crate) resource: B::DescriptorSet,
pub(crate) pool: handle::raw::DescriptorPool<B>,
}
impl<B: Backend> RawDescriptorSet<B> {
pub fn resource(&self) -> &B::DescriptorSet { &self.resource }
}
pub trait Descriptors<B: Backend>: Sized {
type Data: Sized;
fn from_raw(handle::raw::DescriptorSetLayout<B>, RawDescriptorSet<B>) -> (Self, Self::Data);
fn layout_bindings() -> Vec<hal::pso::DescriptorSetLayoutBinding>;
fn layout(&self) -> &B::DescriptorSetLayout;
fn set(&self) -> &B::DescriptorSet;
}
pub trait BindDesc {
const TYPE: hal::pso::DescriptorType;
const COUNT: usize;
}
pub trait Bind<B: Backend>: BindDesc {
type Handle: 'static + Clone;
fn write<'a, I>(views: I) -> hal::pso::DescriptorWrite<'a, B, (Option<u64>, Option<u64>)>
where
I: IntoIterator,
I::Item: Borrow<&'a Self::Handle>;
fn require<'a>(
&'a Self::Handle,
&mut Vec<(&'a handle::raw::Buffer<B>, hal::buffer::State)>,
&mut Vec<(&'a handle::raw::Image<B>, image::Subresource, hal::image::State)>,
&mut handle::Bag<B>,
);
}
macro_rules! define_descriptors {
([$( $array_len:expr ),*] $( $name:ident, )*) => {
$(
impl<T: BindDesc> BindDesc for [T; $array_len] {
const TYPE: hal::pso::DescriptorType = T::TYPE;
const COUNT: usize = $array_len * T::COUNT;
}
impl<B, T> Bind<B> for [T; $array_len]
where B: Backend, T: Bind<B>
{
type Handle = T::Handle;
fn write<'a, I>(handles: I) -> hal::pso::DescriptorWrite<'a, B, (Option<u64>, Option<u64>)>
where
I: IntoIterator,
I::Item: Borrow<&'a Self::Handle>
{
T::write(handles)
}
fn require<'a>(
handle: &'a Self::Handle,
buffers: &mut Vec<(&'a handle::raw::Buffer<B>, hal::buffer::State)>,
images: &mut Vec<(&'a handle::raw::Image<B>, image::Subresource, hal::image::State)>,
others: &mut handle::Bag<B>
) {
T::require(handle, buffers, images, others)
}
}
)*
$(
pub struct $name;
impl BindDesc for $name {
const TYPE: hal::pso::DescriptorType = hal::pso::DescriptorType::$name;
const COUNT: usize = 1;
}
)*
}
}
// TODO: type-safe formats
define_descriptors! {
[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]
SampledImage,
Sampler,
}
impl<B: Backend> Bind<B> for SampledImage {
type Handle = handle::raw::ImageView<B>;
fn write<'a, I>(_views: I) -> hal::pso::DescriptorWrite<'a, B, (Option<u64>, Option<u64>)>
where
I: IntoIterator,
I::Item: Borrow<&'a Self::Handle>,
{
hal::pso::DescriptorWrite::SampledImage(&[])
/* views
.into_iter()
.map(|view| {
let layout = ImageLayout::ShaderReadOnlyOptimal;
(view.borrow().resource(), layout)
}).collect())*/
}
fn require<'a>(
view: &'a Self::Handle,
_: &mut Vec<(&'a handle::raw::Buffer<B>, hal::buffer::State)>,
images: &mut Vec<(&'a handle::raw::Image<B>, image::Subresource, hal::image::State)>,
_: &mut handle::Bag<B>,
) {
let img = view.info();
let levels = img.info().mip_levels;
let layers = img.info().kind.get_num_layers();
let state = (image::Access::SHADER_READ, ImageLayout::ShaderReadOnlyOptimal);
for level in 0..levels {
for layer in 0..layers {
images.push((img, (level, layer), state));
}
}
}
}
impl<B: Backend> Bind<B> for Sampler {
type Handle = handle::raw::Sampler<B>;
fn write<'a, I>(_samplers: I) -> hal::pso::DescriptorWrite<'a, B, (Option<u64>, Option<u64>)>
where
I: IntoIterator,
I::Item: Borrow<&'a Self::Handle>,
{
hal::pso::DescriptorWrite::Sampler(&[])
/*
samplers
.into_iter()
.map(|sampler| sampler.borrow().resource())
.collect())*/
}
fn require<'a>(
sampler: &'a Self::Handle,
_: &mut Vec<(&'a handle::raw::Buffer<B>, hal::buffer::State)>,
_: &mut Vec<(&'a handle::raw::Image<B>, image::Subresource, hal::image::State)>,
others: &mut handle::Bag<B>,
) {
others.add(sampler.clone());
}
}
pub struct DescriptorSetBindRef<'a, 'b, B: Backend, T: Bind<B>> {
pub set: &'a B::DescriptorSet,
pub binding: usize,
pub handles: &'b mut [Option<T::Handle>],
}
pub struct DescriptorSetsUpdate<'a, B: Backend> {
device: &'a mut Device<B>,
writes: Vec<hal::pso::DescriptorSetWrite<'a, B, (Option<u64>, Option<u64>)>>,
}
impl<'a, B: Backend> DescriptorSetsUpdate<'a, B> {
pub(crate) fn new(device: &'a mut Device<B>) -> Self {
DescriptorSetsUpdate { device, writes: Vec::new() }
}
pub fn write<'b, T: Bind<B>, I>(
mut self,
bind_ref: DescriptorSetBindRef<'a, 'b, B, T>,
array_offset: usize,
handles: I,
) -> Self
where
I: IntoIterator,
I::Item: Borrow<&'a T::Handle>,
{
let handles: Vec<_> = handles.into_iter().map(|handle| *handle.borrow()).collect();
for (slot, &handle) in bind_ref.handles[array_offset..].iter_mut().zip(handles.iter()) {
*slot = Some(handle.clone());
}
self.writes.push(hal::pso::DescriptorSetWrite {
set: bind_ref.set,
binding: bind_ref.binding,
array_offset,
write: T::write(handles)
});
self
}
pub fn finish(self) {
use hal::Device;
self.device.raw.write_descriptor_sets(self.writes);
}
}
pub trait GraphicsPipelineInit<B: Backend> {
type Pipeline;
fn create<'a>(
self,
&mut Device<B>,
hal::pso::GraphicsShaderSet<'a, B>,
Primitive,
Rasterizer
) -> Result<Self::Pipeline, CreationError>;
}
pub trait GraphicsPipelineMeta<B: Backend> {
fn layout(&self) -> &B::PipelineLayout;
fn render_pass(&self) -> &B::RenderPass;
}
pub trait GraphicsPipelineData<B: Backend> {
type Pipeline;
fn begin_renderpass<'a, 'b, C>(
self,
encoder: &'a mut Encoder<'b, B, C>,
pipeline: &'a Self::Pipeline
) -> hal::command::RenderPassInlineEncoder<'a, B, hal::command::Primary>
where Self: 'a, 'b: 'a, C: Supports<Transfer> + Supports<Graphics>;
}
pub trait Component<'a, B: Backend> {
type Init: 'a;
type Data: 'a;
fn descriptor_layout<'b>(&'b Self::Init) -> Option<&'b B::DescriptorSetLayout>
where 'a: 'b
{
None
}
fn attachment(&Self::Init) -> Option<Attachment> {
None
}
fn append_desc(
Self::Init,
&mut hal::pso::GraphicsPipelineDesc<B>,
) {}
fn require<'b>(
&'b Self::Data,
&mut Vec<(&'b handle::raw::Buffer<B>, hal::buffer::State)>,
&mut Vec<(&'b handle::raw::Image<B>, image::Subresource, hal::image::State)>,
&mut handle::Bag<B>,
) where 'a: 'b {}
fn vertex_buffer<'b>(&'b Self::Data) -> Option<(&'b B::Buffer, hal::pso::BufferOffset)>
where 'a: 'b
{
None
}
fn descriptor_set<'b>(&'b Self::Data) -> Option<&'b B::DescriptorSet>
where 'a: 'b
{
None
}
}
pub struct Attachment {
pub format: Format,
pub ops: AttachmentOps,
pub stencil_ops: AttachmentOps,
pub required_layout: ImageLayout,
}
pub struct RenderTarget<F: format::AsFormat>(PhantomData<F>);
impl<'a, B, F> Component<'a, B> for RenderTarget<F>
where
B: Backend,
F: 'a + format::AsFormat,
{
type Init = hal::pso::ColorBlendDesc;
type Data = &'a handle::ImageView<B, F>;
fn attachment(_: &Self::Init) -> Option<Attachment> {
Some(Attachment {
format: F::SELF,
// TODO: AttachmentLoadOp::Clear
ops: AttachmentOps::new(AttachmentLoadOp::Load, AttachmentStoreOp::Store),
stencil_ops: AttachmentOps::DONT_CARE,
required_layout: ImageLayout::ColorAttachmentOptimal,
})
}
fn append_desc(
init: Self::Init,
pipeline_desc: &mut hal::pso::GraphicsPipelineDesc<B>,
) {
pipeline_desc.blender.targets.push(init);
}
fn require<'b>(
data: &'b Self::Data,
_: &mut Vec<(&'b handle::raw::Buffer<B>, hal::buffer::State)>,
images: &mut Vec<(&'b handle::raw::Image<B>, image::Subresource, hal::image::State)>,
_: &mut handle::Bag<B>,
) where 'a: 'b {
let img = data.as_ref().info();
let levels = img.info().mip_levels;
let layers = img.info().kind.get_num_layers();
// TODO: READ not always necessary
let state = (image::Access::COLOR_ATTACHMENT_READ | image::Access::COLOR_ATTACHMENT_WRITE,
ImageLayout::ColorAttachmentOptimal);
for level in 0..levels {
for layer in 0..layers {
images.push((img, (level, layer), state));
}
}
}
}
pub trait Structure: Sized {
fn elements() -> Vec<hal::pso::Element<Format>>;
}
/// Helper trait to support variable instance rate.
pub trait ToInstanceRate {
/// The associated init type for PSO component.
type Init;
/// Get an actual instance rate value from the init.
fn get_rate(init: &Self::Init) -> InstanceRate;
}
/// Helper phantom type for per-vertex attributes.
pub enum NonInstanced {}
/// Helper phantom type for per-instance attributes.
pub enum Instanced {}
impl ToInstanceRate for InstanceRate {
type Init = InstanceRate;
fn get_rate(init: &Self::Init) -> InstanceRate { *init }
}
impl ToInstanceRate for Instanced {
type Init = ();
fn get_rate(_: &Self::Init) -> InstanceRate { 1 }
}
impl ToInstanceRate for NonInstanced {
type Init = ();
fn get_rate(_: &Self::Init) -> InstanceRate { 0 }
}
pub struct VertexBuffer<T: Structure, I=NonInstanced>(PhantomData<(T, I)>);
impl<'a, B, T, I> Component<'a, B> for VertexBuffer<T, I>
where B: Backend, T: 'a + Structure, I: ToInstanceRate, I::Init: 'a
{
type Init = I::Init;
type Data = &'a handle::Buffer<B, T>;
fn append_desc(
init: Self::Init,
pipeline_desc: &mut hal::pso::GraphicsPipelineDesc<B>,
) {
let binding = pipeline_desc.vertex_buffers.len() as u32;
pipeline_desc.vertex_buffers.push(hal::pso::VertexBufferDesc {
stride: mem::size_of::<T>() as u32,
rate: I::get_rate(&init),
});
let mut location = 0;
for element in T::elements() {
pipeline_desc.attributes.push(hal::pso::AttributeDesc {
location,
binding,
element,
});
location += 1;
}
}
fn require<'b>(
data: &'b Self::Data,
buffers: &mut Vec<(&'b handle::raw::Buffer<B>, hal::buffer::State)>,
_: &mut Vec<(&'b handle::raw::Image<B>, image::Subresource, hal::image::State)>,
_: &mut handle::Bag<B>,
) where 'a: 'b {
buffers.push((data.as_ref(), hal::buffer::Access::VERTEX_BUFFER_READ));
}
fn vertex_buffer<'b>(data: &'b Self::Data) -> Option<(&'b B::Buffer, hal::pso::BufferOffset)>
where 'a: 'b
{
// TODO: offset
Some((data.as_ref().resource(), 0))
}
}
pub type InstanceBuffer<T> = VertexBuffer<T, Instanced>;
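// Illustrative sketch (not part of this module): a hypothetical vertex type
// implementing `Structure` so it can drive `VertexBuffer<T>`. Field offsets and
// the exact `Format` variants depend on the gfx-hal version in use.
//
// #[repr(C)]
// struct Vertex { pos: [f32; 2], uv: [f32; 2] }
//
// impl Structure for Vertex {
//     fn elements() -> Vec<hal::pso::Element<Format>> {
//         vec![
//             hal::pso::Element { format: Format::Rg32Float, offset: 0 },
//             hal::pso::Element { format: Format::Rg32Float, offset: 8 },
//         ]
//     }
// }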
|
webAppSitePushSettings.go
|
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20180201
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Push settings for the App.
type WebAppSitePushSettings struct {
pulumi.CustomResourceState
// Gets or sets a JSON string containing a list of dynamic tags that will be evaluated from user claims in the push registration endpoint.
DynamicTagsJson pulumi.StringPtrOutput `pulumi:"dynamicTagsJson"`
// Gets or sets a flag indicating whether the Push endpoint is enabled.
IsPushEnabled pulumi.BoolOutput `pulumi:"isPushEnabled"`
// Kind of resource.
Kind pulumi.StringPtrOutput `pulumi:"kind"`
// Resource Name.
Name pulumi.StringOutput `pulumi:"name"`
// Gets or sets a JSON string containing a list of tags that are whitelisted for use by the push registration endpoint.
TagWhitelistJson pulumi.StringPtrOutput `pulumi:"tagWhitelistJson"`
// Gets or sets a JSON string containing a list of tags that require user authentication to be used in the push registration endpoint.
// Tags can consist of alphanumeric characters and the following:
// '_', '@', '#', '.', ':', '-'.
// Validation should be performed at the PushRequestHandler.
TagsRequiringAuth pulumi.StringPtrOutput `pulumi:"tagsRequiringAuth"`
// Resource type.
Type pulumi.StringOutput `pulumi:"type"`
}
// NewWebAppSitePushSettings registers a new resource with the given unique name, arguments, and options.
func NewWebAppSitePushSettings(ctx *pulumi.Context,
name string, args *WebAppSitePushSettingsArgs, opts ...pulumi.ResourceOption) (*WebAppSitePushSettings, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.IsPushEnabled == nil {
return nil, errors.New("invalid value for required argument 'IsPushEnabled'")
}
if args.Name == nil {
return nil, errors.New("invalid value for required argument 'Name'")
}
if args.ResourceGroupName == nil {
return nil, errors.New("invalid value for required argument 'ResourceGroupName'")
}
aliases := pulumi.Aliases([]pulumi.Alias{
{
Type: pulumi.String("azure-nextgen:web:WebAppSitePushSettings"),
},
{
Type: pulumi.String("azure-nextgen:web/latest:WebAppSitePushSettings"),
},
{
Type: pulumi.String("azure-nextgen:web/v20160801:WebAppSitePushSettings"),
},
{
Type: pulumi.String("azure-nextgen:web/v20181101:WebAppSitePushSettings"),
},
{
Type: pulumi.String("azure-nextgen:web/v20190801:WebAppSitePushSettings"),
},
{
Type: pulumi.String("azure-nextgen:web/v20200601:WebAppSitePushSettings"),
},
{
Type: pulumi.String("azure-nextgen:web/v20200901:WebAppSitePushSettings"),
},
{
Type: pulumi.String("azure-nextgen:web/v20201001:WebAppSitePushSettings"),
},
})
opts = append(opts, aliases)
var resource WebAppSitePushSettings
err := ctx.RegisterResource("azure-nextgen:web/v20180201:WebAppSitePushSettings", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetWebAppSitePushSettings gets an existing WebAppSitePushSettings resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetWebAppSitePushSettings(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *WebAppSitePushSettingsState, opts ...pulumi.ResourceOption) (*WebAppSitePushSettings, error) {
var resource WebAppSitePushSettings
err := ctx.ReadResource("azure-nextgen:web/v20180201:WebAppSitePushSettings", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering WebAppSitePushSettings resources.
type webAppSitePushSettingsState struct {
// Gets or sets a JSON string containing a list of dynamic tags that will be evaluated from user claims in the push registration endpoint.
DynamicTagsJson *string `pulumi:"dynamicTagsJson"`
// Gets or sets a flag indicating whether the Push endpoint is enabled.
IsPushEnabled *bool `pulumi:"isPushEnabled"`
// Kind of resource.
Kind *string `pulumi:"kind"`
// Resource Name.
Name *string `pulumi:"name"`
// Gets or sets a JSON string containing a list of tags that are whitelisted for use by the push registration endpoint.
TagWhitelistJson *string `pulumi:"tagWhitelistJson"`
// Gets or sets a JSON string containing a list of tags that require user authentication to be used in the push registration endpoint.
// Tags can consist of alphanumeric characters and the following:
// '_', '@', '#', '.', ':', '-'.
// Validation should be performed at the PushRequestHandler.
TagsRequiringAuth *string `pulumi:"tagsRequiringAuth"`
// Resource type.
Type *string `pulumi:"type"`
}
type WebAppSitePushSettingsState struct {
// Gets or sets a JSON string containing a list of dynamic tags that will be evaluated from user claims in the push registration endpoint.
DynamicTagsJson pulumi.StringPtrInput
// Gets or sets a flag indicating whether the Push endpoint is enabled.
IsPushEnabled pulumi.BoolPtrInput
// Kind of resource.
Kind pulumi.StringPtrInput
// Resource Name.
Name pulumi.StringPtrInput
// Gets or sets a JSON string containing a list of tags that are whitelisted for use by the push registration endpoint.
TagWhitelistJson pulumi.StringPtrInput
// Gets or sets a JSON string containing a list of tags that require user authentication to be used in the push registration endpoint.
// Tags can consist of alphanumeric characters and the following:
// '_', '@', '#', '.', ':', '-'.
// Validation should be performed at the PushRequestHandler.
TagsRequiringAuth pulumi.StringPtrInput
// Resource type.
Type pulumi.StringPtrInput
}
func (WebAppSitePushSettingsState) ElementType() reflect.Type {
return reflect.TypeOf((*webAppSitePushSettingsState)(nil)).Elem()
}
type webAppSitePushSettingsArgs struct {
// Gets or sets a JSON string containing a list of dynamic tags that will be evaluated from user claims in the push registration endpoint.
DynamicTagsJson *string `pulumi:"dynamicTagsJson"`
// Gets or sets a flag indicating whether the Push endpoint is enabled.
IsPushEnabled bool `pulumi:"isPushEnabled"`
// Kind of resource.
Kind *string `pulumi:"kind"`
// Name of web app.
Name string `pulumi:"name"`
// Name of the resource group to which the resource belongs.
ResourceGroupName string `pulumi:"resourceGroupName"`
// Gets or sets a JSON string containing a list of tags that are whitelisted for use by the push registration endpoint.
TagWhitelistJson *string `pulumi:"tagWhitelistJson"`
// Gets or sets a JSON string containing a list of tags that require user authentication to be used in the push registration endpoint.
// Tags can consist of alphanumeric characters and the following:
// '_', '@', '#', '.', ':', '-'.
// Validation should be performed at the PushRequestHandler.
TagsRequiringAuth *string `pulumi:"tagsRequiringAuth"`
}
// The set of arguments for constructing a WebAppSitePushSettings resource.
type WebAppSitePushSettingsArgs struct {
// Gets or sets a JSON string containing a list of dynamic tags that will be evaluated from user claims in the push registration endpoint.
DynamicTagsJson pulumi.StringPtrInput
// Gets or sets a flag indicating whether the Push endpoint is enabled.
IsPushEnabled pulumi.BoolInput
// Kind of resource.
Kind pulumi.StringPtrInput
// Name of web app.
Name pulumi.StringInput
// Name of the resource group to which the resource belongs.
ResourceGroupName pulumi.StringInput
// Gets or sets a JSON string containing a list of tags that are whitelisted for use by the push registration endpoint.
TagWhitelistJson pulumi.StringPtrInput
// Gets or sets a JSON string containing a list of tags that require user authentication to be used in the push registration endpoint.
// Tags can consist of alphanumeric characters and the following:
// '_', '@', '#', '.', ':', '-'.
// Validation should be performed at the PushRequestHandler.
TagsRequiringAuth pulumi.StringPtrInput
}
func (WebAppSitePushSettingsArgs) ElementType() reflect.Type {
return reflect.TypeOf((*webAppSitePushSettingsArgs)(nil)).Elem()
}
type WebAppSitePushSettingsInput interface {
pulumi.Input
ToWebAppSitePushSettingsOutput() WebAppSitePushSettingsOutput
ToWebAppSitePushSettingsOutputWithContext(ctx context.Context) WebAppSitePushSettingsOutput
}
func (*WebAppSitePushSettings) ElementType() reflect.Type {
return reflect.TypeOf((*WebAppSitePushSettings)(nil))
}
func (i *WebAppSitePushSettings) ToWebAppSitePushSettingsOutput() WebAppSitePushSettingsOutput {
return i.ToWebAppSitePushSettingsOutputWithContext(context.Background())
}
func (i *WebAppSitePushSettings) ToWebAppSitePushSettingsOutputWithContext(ctx context.Context) WebAppSitePushSettingsOutput {
return pulumi.ToOutputWithContext(ctx, i).(WebAppSitePushSettingsOutput)
}
type WebAppSitePushSettingsOutput struct {
*pulumi.OutputState
}
func (WebAppSitePushSettingsOutput) ElementType() reflect.Type {
return reflect.TypeOf((*WebAppSitePushSettings)(nil))
}
func (o WebAppSitePushSettingsOutput) ToWebAppSitePushSettingsOutput() WebAppSitePushSettingsOutput {
return o
}
func (o WebAppSitePushSettingsOutput) ToWebAppSitePushSettingsOutputWithContext(ctx context.Context) WebAppSitePushSettingsOutput {
return o
}
func init() {
pulumi.RegisterOutputType(WebAppSitePushSettingsOutput{})
}
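// Illustrative usage in a Pulumi program (resource names are placeholders):
//
// settings, err := NewWebAppSitePushSettings(ctx, "push", &WebAppSitePushSettingsArgs{
//     Name:              pulumi.String("my-app"),
//     ResourceGroupName: pulumi.String("my-rg"),
//     IsPushEnabled:     pulumi.Bool(true),
// })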
|
NumberConverter.ts
|
class NumberConverter implements Fayde.Data.IValueConverter {
Minimum = 0;
Maximum = 100;
Convert (value: any, targetType: IType, parameter: any, culture: any): any {
return value.toString();
}
ConvertBack (value: any, targetType: IType, parameter: any, culture: any): any {
var min = parseFloat(this.Minimum.toString());
var max = parseFloat(this.Maximum.toString());
var num = parseFloat(value);
num = Math.min(Math.max(num, min), max);
return num;
}
}
Fayde.Data.IValueConverter_.mark(NumberConverter);
export = NumberConverter;
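// Illustrative behaviour with the defaults Minimum = 0, Maximum = 100:
// ConvertBack("150", ...) clamps to 100; ConvertBack("-5", ...) clamps to 0.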
|
jaccard.rs
|
extern crate name_match;
use name_match::jaccard::compare;
use name_match::prelude::*;
use std::fmt;
fn main() {
let name_1 = "James Bay";
let name_2 = "James Sancho Adam";
let name_matcher = compare::JaccardMatcher::default();
let score = name_matcher.get_score(name_1, name_2);
println!("Jaccard Similarity = {}", score);
}
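// Illustrative: if JaccardMatcher tokenises on whitespace, the two names share
// one token ("James") out of four distinct tokens, so the printed score would
// be |A ∩ B| / |A ∪ B| = 1/4 = 0.25 (the actual value depends on name_match's
// tokenisation).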
|
word_prevalence_calc.py
|
import argparse
import re
import sys
from collections import Counter

sys.path.append("..")
from ml_humor_recogniser import read_data
from screenplay import Line


def run(data, output):
    screenplays = read_data(data)
    txt = screenplays_to_txt(screenplays)
    word_counts = get_word_counts(txt)
    word_probabilities = get_probabilities(word_counts)
    write_to_file(word_probabilities, output)
    # TODO take care of UNKs


def screenplays_to_txt(screenplays):
    result = ''
    for screenplay in screenplays:
        for line in screenplay:
            if isinstance(line, Line):
                result += ('\n' + line.txt)
    return result


def get_word_counts(txt):
    """
    Counts word occurrences in "txt".
    To deal with unknown words, the word list is split 90/10: after counting the words in the
    bigger set, every word in the smaller set that was not already seen is counted as "UNK".
    :param txt:
    :return: a {'word': integer} dictionary mapping each word to the number of times it appears in the txt.
    """
    counts = Counter()
    all_words = re.split(r'[\s\,\.\?\!\;\:"]', txt.lower())
    all_words = [w for w in all_words if w]
    size = len(all_words)
    most_words, rest = all_words[:int(size*0.9)], all_words[int(size*0.9):]
    for word in most_words:
        counts[word] += 1
    for word in rest:
        if word in counts:
            counts[word] += 1
        else:
            counts['UNK'] += 1
    return counts


def get_probabilities(word_counts):
    probabilities = {}
    total_num_of_words = sum(count for _, count in word_counts.items())
    for word in word_counts.keys():
        probabilities[word] = word_counts[word] / total_num_of_words
    return probabilities


def write_to_file(word_probabilities, output):
    with open(output, 'w') as f:
        for word, prob in word_probabilities.items():
            f.write("%s %.9f\n" % (word, prob))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="A script to calculate the probabilities of words occurring in a "
                                                 "screenplay.")
    parser.add_argument('data', help='The folder where the training data is located. Training data is .merged '
                                     'files, created by the data_merger.py module and contain screenplays, '
                                     'laugh times & dialog times.')
    parser.add_argument('output', help='Output file.')
    args = parser.parse_args()
    run(args.data, args.output)
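# Illustrative example (not part of the script): get_probabilities turns raw
# counts into maximum-likelihood estimates, e.g.
#   get_probabilities(Counter({'the': 3, 'UNK': 1}))  ->  {'the': 0.75, 'UNK': 0.25}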
|
dsv.js
|
function objectConverter(columns) {
return new Function("d", "return {" + columns.map(function (name, i) {
return JSON.stringify(name) + ": d[" + i + "]";
}).join(",") + "}");
}
function customConverter(columns, f) {
var object = objectConverter(columns);
return function (row, i) {
return f(object(row), i, columns);
};
} // Compute unique columns in order of discovery.
function inferColumns(rows) {
var columnSet = Object.create(null),
columns = [];
rows.forEach(function (row) {
for (var column in row) {
if (!(column in columnSet)) {
columns.push(columnSet[column] = column);
}
}
});
return columns;
}
export default function (delimiter) {
var reFormat = new RegExp("[\"" + delimiter + "\n]"),
delimiterCode = delimiter.charCodeAt(0);
function parse(text, f) {
var convert,
columns,
rows = parseRows(text, function (row, i) {
if (convert) return convert(row, i - 1);
columns = row, convert = f ? customConverter(row, f) : objectConverter(row);
});
rows.columns = columns;
return rows;
}
function parseRows(text, f) {
var EOL = {},
// sentinel value for end-of-line
EOF = {},
// sentinel value for end-of-file
rows = [],
// output rows
N = text.length,
I = 0,
// current character index
n = 0,
// the current line number
t,
// the current token
eol; // is the current token followed by EOL?
function token() {
if (I >= N) return EOF; // special case: end of file
if (eol) return eol = false, EOL; // special case: end of line
// special case: quotes
var j = I,
c;
if (text.charCodeAt(j) === 34) {
var i = j;
while (i++ < N) {
if (text.charCodeAt(i) === 34) {
if (text.charCodeAt(i + 1) !== 34) break;
++i;
}
}
I = i + 2;
c = text.charCodeAt(i + 1);
if (c === 13) {
eol = true;
if (text.charCodeAt(i + 2) === 10) ++I;
} else if (c === 10) {
eol = true;
}
return text.slice(j + 1, i).replace(/""/g, "\"");
} // common case: find next delimiter or newline
while (I < N) {
var k = 1;
c = text.charCodeAt(I++);
if (c === 10) eol = true; // \n
else if (c === 13) {
eol = true;
if (text.charCodeAt(I) === 10) ++I, ++k;
} // \r|\r\n
else if (c !== delimiterCode) continue;
return text.slice(j, I - k);
} // special case: last token before EOF
return text.slice(j);
}
while ((t = token()) !== EOF) {
var a = [];
while (t !== EOL && t !== EOF) {
a.push(t);
t = token();
}
if (f && (a = f(a, n++)) == null) continue;
rows.push(a);
}
return rows;
}
function format(rows, columns) {
if (columns == null) columns = inferColumns(rows);
return [columns.map(formatValue).join(delimiter)].concat(rows.map(function (row) {
return columns.map(function (column) {
return formatValue(row[column]);
}).join(delimiter);
})).join("\n");
}
function formatRows(rows) {
return rows.map(formatRow).join("\n");
}
function formatRow(row) {
return row.map(formatValue).join(delimiter);
}
function formatValue(text) {
return text == null ? "" : reFormat.test(text += "") ? "\"" + text.replace(/\"/g, "\"\"") + "\"" : text;
}
return {
parse: parse,
parseRows: parseRows,
format: format,
formatRows: formatRows
};
}
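// Illustrative usage (assuming the default export is imported as `dsv`):
// var csv = dsv(",");
// csv.parse("a,b\n1,2");                      // [{a: "1", b: "2"}], with .columns = ["a", "b"]
// csv.formatRows([["a", "b"], ["1", "2"]]);   // "a,b\n1,2"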
|
modal-dialog-instance.service.ts
|
import { ComponentRef } from '@angular/core';
import { ModalDialogComponent } from './modal-dialog.component';
export class ModalDialogInstanceService {
/**
* Used to make sure there is exactly one instance of Modal Dialog
*/
private componentRef: ComponentRef<ModalDialogComponent>;
/**
* Closes existing modal dialog
*/
closeAnyExistingModalDialog() {
if (this.componentRef) {
this.componentRef.destroy();
}
}
/**
* Save component ref for future comparison
* @param componentRef
*/
saveExistingModalDialog(componentRef: ComponentRef<ModalDialogComponent>) {
this.componentRef = componentRef;
}
}
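// Illustrative (assumed) usage: a dialog-opening service would call
// closeAnyExistingModalDialog() before creating a new ComponentRef, then
// saveExistingModalDialog(newRef), so at most one dialog instance stays alive.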
|
view-loyalty-point-routing.module.ts
|
import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { ViewLoyaltyPointPage } from './view-loyalty-point.page';
const routes: Routes = [
{
path: '',
component: ViewLoyaltyPointPage
}
];
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule],
})
export class ViewLoyaltyPointPageRoutingModule {}
|
middleware_gzip_test.go
|
package server
import (
"compress/gzip"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
func TestGZipMiddleware(t *testing.T) {
hello := "Hello, world!"
handler := func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(hello))
}
// test server
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
r.Header.Set("Accept-Encoding", "gzip")
GZipMiddleware(w, r, handler)
}))
defer ts.Close()
// get response
tr := &http.Transport{
// IMPORTANT: You should set DisableCompression to true, or http.Get will
// decompress the response automatically.
DisableCompression: true,
}
client := &http.Client{Transport: tr}
resp, err := client.Get(ts.URL)
if err != nil {
t.Error(err)
}
defer resp.Body.Close()
// check header
acceptEncodingHeader := resp.Header.Get("Content-Encoding")
if acceptEncodingHeader != "gzip" {
t.Errorf("Content-Encoding is not set properly, got %v", acceptEncodingHeader)
}
// check body
reader, err := gzip.NewReader(resp.Body)
if err != nil {
t.Error(err)
}
defer reader.Close()
bytes, err := ioutil.ReadAll(reader)
if err != nil {
t.Error(err)
}
str := string(bytes)
if str != hello {
t.Errorf("expect %v, got %v", hello, str)
}
}
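// Illustrative note: the handler above never sets Content-Encoding itself;
// GZipMiddleware is expected to set that header and wrap the ResponseWriter,
// which is exactly what the two checks assert.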
|
pyunit_h2ono_progress.py
|
from __future__ import print_function
import sys
sys.path.insert(1, "../../../")
from tests import pyunit_utils
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator

try:
    from io import StringIO  # py3
except ImportError:
    from StringIO import StringIO  # py2


def h2ono_progress():
    """
    Python API test: h2o.no_progress()

    The command is verified by eyeballing the pyunit test output file and making sure no
    progress bars are there. Here, we assume the command ran well if there is no error message.
    """
    try:  # only works with Python 3
        s = StringIO()
        sys.stdout = s  # redirect output
        h2o.no_progress()  # progress bars are shown by default
        run_test()
        # progress is displayed using the word "progress" and "%"; neither should appear here
        assert not s.getvalue(), "Nothing should have been printed, instead got " + s.getvalue()
    finally:
        sys.stdout = sys.__stdout__  # restore old stdout


def run_test():
    training_data = h2o.import_file(pyunit_utils.locate("smalldata/logreg/benign.csv"))
    Y = 3
    X = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]
    model = H2OGeneralizedLinearEstimator(family="binomial", alpha=0, Lambda=1e-5)
    model.train(x=X, y=Y, training_frame=training_data)


if __name__ == "__main__":
    pyunit_utils.standalone_test(h2ono_progress)
else:
    h2ono_progress()
|
updatetype.go
|
/**
* MIT License
*
* Copyright (c) 2020 CNES
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file has been automatically generated by fr.cnes.mo:StubGenerator_go
* It has then been slightly transformed to match the underlying type uint8 originally defined in the mal.
* The mal should eventually use the standard generated definition of the type.
*/
package mal
import (
"fmt"
)
// Defines UpdateType type
// the generator would define the type as uint32 instead of uint8
//type UpdateType uint32
const (
UPDATETYPE_CREATION_OVAL = iota
UPDATETYPE_CREATION_NVAL = 1
UPDATETYPE_UPDATE_OVAL
UPDATETYPE_UPDATE_NVAL = 2
UPDATETYPE_MODIFICATION_OVAL
UPDATETYPE_MODIFICATION_NVAL = 3
UPDATETYPE_DELETION_OVAL
UPDATETYPE_DELETION_NVAL = 4
)
// Conversion table OVAL->NVAL
var nvalTable_UpdateType = []uint32 {
UPDATETYPE_CREATION_NVAL,
UPDATETYPE_UPDATE_NVAL,
UPDATETYPE_MODIFICATION_NVAL,
UPDATETYPE_DELETION_NVAL,
}
// Conversion map NVAL->OVAL
var ovalMap_UpdateType map[uint32]uint32
var (
UPDATETYPE_CREATION = UpdateType(UPDATETYPE_CREATION_NVAL)
UPDATETYPE_UPDATE = UpdateType(UPDATETYPE_UPDATE_NVAL)
UPDATETYPE_MODIFICATION = UpdateType(UPDATETYPE_MODIFICATION_NVAL)
UPDATETYPE_DELETION = UpdateType(UPDATETYPE_DELETION_NVAL)
)
var NullUpdateType *UpdateType = nil
func init() {
ovalMap_UpdateType = make(map[uint32]uint32)
for oval, nval := range nvalTable_UpdateType {
ovalMap_UpdateType[nval] = uint32(oval)
}
}
func (receiver UpdateType) GetNumericValue() uint32 {
// return uint32(receiver)
return uint32(uint8(receiver))
}
func (receiver UpdateType) GetOrdinalValue() (uint32, error) {
nval := receiver.GetNumericValue()
oval, ok := ovalMap_UpdateType[nval]
if !ok {
return 0, fmt.Errorf("Invalid UpdateType value: %d", nval)
}
return oval, nil
}
func UpdateTypeFromNumericValue(nval uint32) (UpdateType, error) {
_, ok := ovalMap_UpdateType[nval]
if !ok {
return UpdateType(0), fmt.Errorf("Invalid numeric value for UpdateType: %v", nval)
}
return UpdateType(nval), nil
}
func UpdateTypeFromOrdinalValue(oval uint32) (UpdateType, error) {
if oval >= uint32(len(nvalTable_UpdateType)) {
return UpdateType(0), fmt.Errorf("Invalid ordinal value for UpdateType: %v", oval)
}
return UpdateType(nvalTable_UpdateType[oval]), nil
}
// ================================================================================
// Defines UpdateType type as a MAL Element
//const UPDATETYPE_TYPE_SHORT_FORM Integer = 22
//const UPDATETYPE_SHORT_FORM Long = 0x65000001000016
// Registers UpdateType type for polymorphism handling
func init() {
RegisterMALElement(UPDATETYPE_SHORT_FORM, NullUpdateType)
}
// Returns the absolute short form of the element type.
func (receiver *UpdateType) GetShortForm() Long {
return UPDATETYPE_SHORT_FORM
}
// Returns the number of the area this element type belongs to.
func (receiver *UpdateType) GetAreaNumber() UShort {
return AREA_NUMBER
}
// Returns the version of the area this element type belongs to.
func (receiver *UpdateType) GetAreaVersion() UOctet {
return AREA_VERSION
}
// Returns the number of the service this element type belongs to.
func (receiver *UpdateType) GetServiceNumber() UShort {
return NULL_SERVICE_NUMBER
}
// Returns the relative short form of the element type.
func (receiver *UpdateType) GetTypeShortForm() Integer {
return UPDATETYPE_TYPE_SHORT_FORM
}
// Allows the creation of an element in a generic way, i.e., using the MAL Element polymorphism.
func (receiver *UpdateType) CreateElement() Element {
return NullUpdateType
}
func (receiver *UpdateType) IsNull() bool {
return receiver == nil
}
func (receiver *UpdateType) Null() Element {
return NullUpdateType
}
// Encodes this element using the supplied encoder.
// @param encoder The encoder to use, must not be null.
func (receiver *UpdateType) Encode(encoder Encoder) error {
specific := encoder.LookupSpecific(UPDATETYPE_SHORT_FORM)
if specific != nil {
return specific(receiver, encoder)
}
oval, err := receiver.GetOrdinalValue()
if err != nil {
return err
}
value := NewUOctet(uint8(oval))
return encoder.EncodeUOctet(value)
}
// Decodes an instance of this element type using the supplied decoder.
// @param decoder The decoder to use, must not be null.
// @return the decoded instance, may be not the same instance as this Element.
func (receiver *UpdateType) Decode(decoder Decoder) (Element, error) {
specific := decoder.LookupSpecific(UPDATETYPE_SHORT_FORM)
if specific != nil {
return specific(decoder)
}
elem, err := decoder.DecodeUOctet()
if err != nil {
return receiver.Null(), err
}
value, err := UpdateTypeFromOrdinalValue(uint32(uint8(*elem)))
return &value, err
}
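// Illustrative round trip using the helpers above:
//
// ut, _ := UpdateTypeFromOrdinalValue(1) // UPDATETYPE_UPDATE (numeric value 2)
// oval, _ := ut.GetOrdinalValue()        // 1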
|
main2.js
|
define(['./chunk-1825b42d.js'], function (__chunk_1) { 'use strict';
var main2 = __chunk_1.d.map(d => d + 2);
return main2;
});
|
constants.ts
|
export const ECMA_VERSION = 11;
|
watervolume.js
|
print("Loading WaterPlane Volume Trigger script");
var rigidbody = me.rigidbody;
var volumetrigger = me.volumetrigger;
var entitiesInside = new Array();
if (server.IsRunning())
{
if(rigidbody && volumetrigger)
{
ConnectSignals();
}
else
{
print("Missing needed Components, Check that you have RigidBody and VolumeTrigger");
me.ComponentAdded.connect(CheckComponent);
}
}
//Checking if needed components are added after EC_Script to Entity
function CheckComponent(component, type)
{
if (component.typeName == "EC_VolumeTrigger")
volumetrigger = component;
else if (component.typeName == "EC_RigidBody")
rigidbody = component;
if (rigidbody && volumetrigger)
{
ConnectSignals();
me.ComponentAdded.disconnect(CheckComponent);
}
}
function ConnectSignals()
{
print("found needed Components");
// Hook to physics update
rigidbody.GetPhysicsWorld().Updated.connect(ServerUpdatePhysics);
// Hook to volume trigger signals
volumetrigger.EntityEnter.connect(EntityEnter);
volumetrigger.EntityLeave.connect(EntityLeave);
}
function ServerUpdatePhysics(frametime)
{
for (var i=0 ; i<volumetrigger.GetNumEntitiesInside() ; i++)
{
var entity = volumetrigger.GetEntityInside(i);
var mag = volumetrigger.GetEntityInsidePercent(entity);
var rb = entity.rigidbody;
if (rb && mag > 0.1)
{
var impulseVec = new float3(0, 0, mag * 0.8);
rb.ApplyImpulse(impulseVec);
}
}
}
function EntityEnter (entity)
{
var rb = entity.rigidbody;
rb.angularDamping = 0.25;
rb.linearDamping = 0.15;
}
function EntityLeave (entity)
{
var rb = entity.rigidbody;
rb.angularDamping = 0;
rb.linearDamping = 0;
}
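// Illustrative note: the impulse in ServerUpdatePhysics scales with the
// entity's inside-percentage (mag), so deeper entities are pushed harder along
// +Z, approximating buoyancy; the damping set in EntityEnter slows motion
// while inside the volume.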
|
views.py
|
from titlecase import titlecase

from django.conf import settings
from django.db.models import Q
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page

from discovery.csv import get_memberships, get_membership_name, BaseCSVView
from discovery.cache import track_page_load

from categories import models as categories
from vendors import models as vendors
from contracts import models as contracts

import csv
import time


# Filters:
#
#     naics={CODE},...
#     memberships={PIID},...
#     countries={CODE},...
#     states={CODE},...
#
class ContractCSV(BaseCSVView):
    def __init__(self, **kwargs):
        super(ContractCSV, self).__init__(**kwargs)

        # Filters
        self.vendor = None

        self.naics_param = 'naics'
        self.naics = []

        self.memberships_param = 'memberships'
        self.memberships = []

        self.countries_param = 'countries'
        self.countries = []

        self.states_param = 'states'
        self.states = []

        # Queries
        self.setaside_data = categories.SetAside.objects.all().order_by('far_order')
        self.contract_data = contracts.Contract.objects.all().order_by('-date_signed')

    def _render_vendor(self, writer):
        writer.writerow((self.vendor.name,))
        writer.writerow(('SAM registration expires: ', self.vendor.sam_expiration_date.strftime("%m/%d/%Y")))
        writer.writerow(('', ))
        writer.writerow(('DUNS', self.vendor.duns))
        writer.writerow(('CAGE Code', self.vendor.cage))
        writer.writerow(('', ))
        writer.writerow(('Address',))
        writer.writerow((titlecase(self.vendor.sam_location.address),))
        writer.writerow((titlecase(self.vendor.sam_location.city) + ', ' + self.vendor.sam_location.state.upper() + ', ' + self.vendor.sam_location.zipcode,))
        writer.writerow(('', ))

    def _process_vendor(self, writer, duns):
        self.vendor = vendors.Vendor.objects.get(duns=duns)
        self.contract_data = self.contract_data.filter(vendor=self.vendor)
        self._render_vendor(writer)

    def _render_naics(self, writer):
        naics_data = categories.Naics.objects.filter(code__in=self.naics)
        writer.writerow(('Contract NAICS codes:', 'Code', 'Description'))
        for naics in naics_data:
            writer.writerow(('', naics.code, naics.description))
        writer.writerow(('', ))

    def _process_naics(self, writer):
        self.naics = self.get_params(self.naics_param)
        if len(self.naics) > 0:
            naics_data = categories.Naics.objects.filter(code__in=self.naics)
            sin_codes = {}
            for naics in naics_data:
                for sin_code in list(naics.sin.all().values_list('code', flat=True)):
                    sin_codes[sin_code] = True
            psc_codes = list(categories.PSC.objects.filter(sin__code__in=sin_codes.keys()).distinct().values_list('code', flat=True))
            self.contract_data = self.contract_data.filter(Q(PSC__in=psc_codes) | Q(NAICS__in=self.naics))
            self._render_naics(writer)

    def _render_memberships(self, writer):
        membership_map = get_memberships(self.vendor)
        membership_rows = []
        labels = ['Vendor vehicle memberships:', 'Filter', 'Contract PIID', 'Name', 'Contact name', 'Contact phone', 'Contact email']
        labels.extend([sa_obj.name for sa_obj in self.setaside_data])
        writer.writerow(labels)
        for piid, info in membership_map.items():
            setasides = []
            for sa in self.setaside_data:
                if sa.code in info['setasides']:
                    setasides.append('X')
                else:
                    setasides.append('')
            filter_data = [
                '',
                'X' if piid in self.memberships else '',
                piid,
                get_membership_name(membership_map, piid),
                ",".join(info['contacts']),
                ",".join(info['phones']),
                ",".join(info['emails'])
            ]
            filter_data.extend(setasides)
            writer.writerow(filter_data)
        writer.writerow(('', ))

    def _process_memberships(self, writer):
        self.memberships = self.get_params(self.memberships_param)
        if len(self.memberships) > 0:
            self.contract_data = self.contract_data.filter(base_piid__in=self.memberships)
            self._render_memberships(writer)

    def _render_countries(self, writer):
        writer.writerow(('Contract place of performance countries:', 'Code'))
        for country in self.countries:
            writer.writerow(('', country))
        writer.writerow(('', ))

    def _process_countries(self, writer):
        self.countries = self.get_params(self.countries_param)
        if len(self.countries) > 0:
            self.contract_data = self.contract_data.filter(place_of_performance__country_code__in=self.countries)
            self._render_countries(writer)

    def _render_states(self, writer):
        writer.writerow(('Contract place of performance states:', 'Code'))
        for state in self.states:
            writer.writerow(('', state))
        writer.writerow(('', ))

    def _process_states(self, writer):
        self.states = self.get_params(self.states_param)
        if len(self.states) > 0:
            self.contract_data = self.contract_data.filter(place_of_performance__state__in=self.states)
            self._render_states(writer)

    def _render_contracts(self, writer):
        writer.writerow(("Work performed by a vendor is often reported under a different NAICS code due to FPDS restrictions.",))
        writer.writerow(('', ))
        writer.writerow(('Date Signed', 'PIID', 'Agency', 'Type', 'Value ($)', 'Email POC', 'Place of Performance', 'NAIC', 'PSC', 'Status'))
        for contract in self.contract_data.iterator():
            pricing_type = ''
            status = ''
            if contract.pricing_type:
                pricing_type = contract.pricing_type.name
            if contract.status:
                status = contract.status.name
            writer.writerow((contract.date_signed.strftime("%m/%d/%Y"), contract.piid, titlecase(contract.agency.name), pricing_type, contract.obligated_amount, (contract.point_of_contact or "").lower(), contract.place_of_performance, contract.NAICS, contract.PSC, status))
        writer.writerow(('', ))

    @method_decorator(cache_page(settings.PAGE_CACHE_LIFETIME, cache='page_cache'))
    def get(self, request, *args, **kwargs):
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="vendor_contracts.csv"'
        writer = csv.writer(response)

        writer.writerow(('GSA Discovery vendor contract research results',))
        writer.writerow(('URL: ' + self.request.build_absolute_uri(),))
        writer.writerow(('Time: ' + time.strftime('%b %d, %Y %l:%M%p %Z'),))
        writer.writerow(('', ))

        self._process_vendor(writer, kwargs['vendor_duns'])
        self._process_naics(writer)
        self._process_memberships(writer)
        self._process_countries(writer)
        self._process_states(writer)
        self._render_contracts(writer)

        track_page_load(request)
        return response
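# Illustrative request (the URL pattern is assumed, not defined in this file):
#   GET /vendor/<vendor_duns>/contracts/csv?naics=541511&states=VA,MD
# Each *_param above is read via self.get_params(...) and narrows contract_data
# before the final CSV is rendered.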
|
context.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Context object of incoming request
"""
class Context(object):
    """
    Context stores model-relevant worker information.
    Some properties are fixed at load time and others are set per request.
    """

    def __init__(self, model_name, model_dir, manifest, batch_size, gpu, mms_version):
        self.model_name = model_name
        self.manifest = manifest
        self._system_properties = {
            "model_dir": model_dir,
            "gpu_id": gpu,
            "batch_size": batch_size,
            "server_name": "MMS",
            "server_version": mms_version
        }
        self.request_ids = None
        self.request_processor = RequestProcessor(dict())
        self._metrics = None

    @property
    def system_properties(self):
        return self._system_properties

    @property
    def request_processor(self):
        return self._request_processor

    @request_processor.setter
    def request_processor(self, request_processor):
        self._request_processor = request_processor

    @property
    def metrics(self):
        return self._metrics

    @metrics.setter
    def metrics(self, metrics):
        self._metrics = metrics

    def set_response_content_type(self, request_id, value):
        self._request_processor.add_response_property(request_id, {'content-type': value})

    def get_response_content_type(self, request_id):
        response_headers = self._request_processor.get_response_header().get(request_id)
        if response_headers is not None:
            return response_headers.get('content-type')
        return None

    def __eq__(self, other):
        return isinstance(other, Context) and self.__dict__ == other.__dict__


class RequestProcessor(object):
    """
    Request processor
    """

    def __init__(self, request_header):
        self._status_code = 200
        self._reason_phrase = None
        self._response_header = {}
        self._request_header = request_header

    def get_request_property(self, key):
        return self._request_header.get(key)

    def report_status(self, code, reason_phrase=None):
        self._status_code = code
        self._reason_phrase = reason_phrase

    def add_response_property(self, key, value):
        self._response_header[key] = value

    def get_response_header(self):
        return self._response_header
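# Illustrative usage (hypothetical values):
#   ctx = Context("resnet", "/models/resnet", manifest, 4, 0, "1.0")
#   ctx.set_response_content_type("req-1", "application/json")
#   ctx.get_response_content_type("req-1")  # -> "application/json"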
|
ext.rs
|
// This file is part of Substrate.
// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Concrete externalities implementation.
use crate::{
StorageKey, StorageValue, OverlayedChanges,
backend::Backend, overlayed_changes::OverlayedExtensions,
};
use hash_db::Hasher;
use sp_core::{
storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey},
hexdisplay::HexDisplay,
};
use sp_trie::{trie_types::Layout, empty_child_trie_root};
use sp_externalities::{
Externalities, Extensions, Extension, ExtensionStore,
};
use codec::{Decode, Encode, EncodeAppend};
use sp_std::{fmt, any::{Any, TypeId}, vec::Vec, vec, boxed::Box};
use crate::{warn, trace, log_error};
#[cfg(feature = "std")]
use crate::changes_trie::State as ChangesTrieState;
use crate::StorageTransactionCache;
#[cfg(feature = "std")]
use std::error;
const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime";
const BENCHMARKING_FN: &str = "\
This is a special fn only for benchmarking where a database commit happens from the runtime.
For that reason client started transactions before calling into runtime are not allowed.
Without client transactions the loop condition guarantees the success of the tx close.";
#[cfg(feature = "std")]
fn guard() -> sp_panic_handler::AbortGuard {
sp_panic_handler::AbortGuard::force_abort()
}
#[cfg(not(feature = "std"))]
fn guard() -> () {
()
}
/// Errors that can occur when interacting with the externalities.
#[cfg(feature = "std")]
#[derive(Debug, Copy, Clone)]
pub enum Error<B, E> {
/// Failure to load state data from the backend.
#[allow(unused)]
Backend(B),
/// Failure to execute a function.
#[allow(unused)]
Executor(E),
}
#[cfg(feature = "std")]
impl<B: fmt::Display, E: fmt::Display> fmt::Display for Error<B, E> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Backend(ref e) => write!(f, "Storage backend error: {}", e),
Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e),
}
}
}
#[cfg(feature = "std")]
impl<B: error::Error, E: error::Error> error::Error for Error<B, E> {
fn description(&self) -> &str {
match *self {
Error::Backend(..) => "backend error",
Error::Executor(..) => "executor error",
}
}
}
/// Wraps a read-only backend, call executor, and current overlayed changes.
pub struct Ext<'a, H, N, B>
where
H: Hasher,
B: 'a + Backend<H>,
N: crate::changes_trie::BlockNumber,
{
/// The overlayed changes to write to.
overlay: &'a mut OverlayedChanges,
/// The storage backend to read from.
backend: &'a B,
/// The cache for the storage transactions.
storage_transaction_cache: &'a mut StorageTransactionCache<B::Transaction, H, N>,
/// Changes trie state to read from.
#[cfg(feature = "std")]
changes_trie_state: Option<ChangesTrieState<'a, H, N>>,
/// Pseudo-unique id used for tracing.
pub id: u16,
/// Dummy usage of N arg.
_phantom: sp_std::marker::PhantomData<N>,
/// Extensions registered with this instance.
#[cfg(feature = "std")]
extensions: Option<OverlayedExtensions<'a>>,
}
impl<'a, H, N, B> Ext<'a, H, N, B>
where
H: Hasher,
B: Backend<H>,
N: crate::changes_trie::BlockNumber,
{
/// Create a new `Ext`.
#[cfg(not(feature = "std"))]
pub fn new(
overlay: &'a mut OverlayedChanges,
storage_transaction_cache: &'a mut StorageTransactionCache<B::Transaction, H, N>,
backend: &'a B,
) -> Self {
Ext {
overlay,
backend,
id: 0,
storage_transaction_cache,
_phantom: Default::default(),
}
}
/// Create a new `Ext` from overlayed changes and read-only backend
#[cfg(feature = "std")]
pub fn new(
overlay: &'a mut OverlayedChanges,
storage_transaction_cache: &'a mut StorageTransactionCache<B::Transaction, H, N>,
backend: &'a B,
changes_trie_state: Option<ChangesTrieState<'a, H, N>>,
extensions: Option<&'a mut Extensions>,
) -> Self {
Self {
overlay,
backend,
changes_trie_state,
storage_transaction_cache,
id: rand::random(),
_phantom: Default::default(),
extensions: extensions.map(OverlayedExtensions::new),
}
}
/// Invalidates the currently cached storage root and the db transaction.
///
/// Called when there are changes that likely will invalidate the storage root.
fn mark_dirty(&mut self) {
self.storage_transaction_cache.reset();
}
}
#[cfg(test)]
impl<'a, H, N, B> Ext<'a, H, N, B>
where
H: Hasher,
H::Out: Ord + 'static,
B: 'a + Backend<H>,
N: crate::changes_trie::BlockNumber,
{
pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> {
use std::collections::HashMap;
self.backend.pairs().iter()
.map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec())))
.chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())))
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
.collect()
}
}
impl<'a, H, N, B> Externalities for Ext<'a, H, N, B>
where
H: Hasher,
H::Out: Ord + 'static + codec::Codec,
B: Backend<H>,
N: crate::changes_trie::BlockNumber,
{
fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) {
self.overlay.set_offchain_storage(key, value)
}
fn storage(&self, key: &[u8]) -> Option<StorageValue> {
let _guard = guard();
let result = self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(||
self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL));
trace!(target: "state", "{:04x}: Get {}={:?}",
self.id,
HexDisplay::from(&key),
result.as_ref().map(HexDisplay::from)
);
result
}
fn storage_hash(&self, key: &[u8]) -> Option<Vec<u8>> {
let _guard = guard();
let result = self.overlay
.storage(key)
.map(|x| x.map(|x| H::hash(x)))
.unwrap_or_else(|| self.backend.storage_hash(key).expect(EXT_NOT_ALLOWED_TO_FAIL));
trace!(target: "state", "{:04x}: Hash {}={:?}",
self.id,
HexDisplay::from(&key),
result,
);
result.map(|r| r.encode())
}
fn child_storage(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Option<StorageValue> {
let _guard = guard();
let result = self.overlay
.child_storage(child_info, key)
.map(|x| x.map(|x| x.to_vec()))
.unwrap_or_else(||
self.backend.child_storage(child_info, key)
.expect(EXT_NOT_ALLOWED_TO_FAIL)
);
trace!(target: "state", "{:04x}: GetChild({}) {}={:?}",
self.id,
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&key),
result.as_ref().map(HexDisplay::from)
);
result
}
fn child_storage_hash(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Option<Vec<u8>> {
let _guard = guard();
let result = self.overlay
.child_storage(child_info, key)
.map(|x| x.map(|x| H::hash(x)))
.unwrap_or_else(||
self.backend.child_storage_hash(child_info, key)
.expect(EXT_NOT_ALLOWED_TO_FAIL)
);
trace!(target: "state", "{:04x}: ChildHash({}) {}={:?}",
self.id,
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&key),
result,
);
result.map(|r| r.encode())
}
fn exists_storage(&self, key: &[u8]) -> bool {
let _guard = guard();
let result = match self.overlay.storage(key) {
Some(x) => x.is_some(),
_ => self.backend.exists_storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL),
};
trace!(target: "state", "{:04x}: Exists {}={:?}",
self.id,
HexDisplay::from(&key),
result,
);
result
}
fn exists_child_storage(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> bool {
let _guard = guard();
let result = match self.overlay.child_storage(child_info, key) {
Some(x) => x.is_some(),
_ => self.backend
.exists_child_storage(child_info, key)
.expect(EXT_NOT_ALLOWED_TO_FAIL),
};
trace!(target: "state", "{:04x}: ChildExists({}) {}={:?}",
self.id,
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&key),
result,
);
result
}
fn next_storage_key(&self, key: &[u8]) -> Option<StorageKey> {
let next_backend_key = self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL);
let next_overlay_key_change = self.overlay.next_storage_key_change(key);
match (next_backend_key, next_overlay_key_change) {
(Some(backend_key), Some(overlay_key)) if &backend_key[..] < overlay_key.0 => Some(backend_key),
(backend_key, None) => backend_key,
(_, Some(overlay_key)) => if overlay_key.1.value().is_some() {
Some(overlay_key.0.to_vec())
} else {
self.next_storage_key(&overlay_key.0[..])
},
}
}
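// Illustrative walk-through (not from the source): if the backend holds keys
// [a, c] and the overlay sets b = Some(..) and c = None, then starting from `a`
// the overlay's `b` wins (it sorts first and has a value); continuing from `b`,
// the deleted `c` is skipped by recursing past it, and the iteration ends.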
fn next_child_storage_key(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Option<StorageKey> {
let next_backend_key = self.backend
.next_child_storage_key(child_info, key)
.expect(EXT_NOT_ALLOWED_TO_FAIL);
let next_overlay_key_change = self.overlay.next_child_storage_key_change(
child_info.storage_key(),
key
);
match (next_backend_key, next_overlay_key_change) {
(Some(backend_key), Some(overlay_key)) if &backend_key[..] < overlay_key.0 => Some(backend_key),
(backend_key, None) => backend_key,
(_, Some(overlay_key)) => if overlay_key.1.value().is_some() {
Some(overlay_key.0.to_vec())
} else {
self.next_child_storage_key(
child_info,
&overlay_key.0[..],
)
},
}
}
fn place_storage(&mut self, key: StorageKey, value: Option<StorageValue>) {
trace!(target: "state", "{:04x}: Put {}={:?}",
self.id,
HexDisplay::from(&key),
value.as_ref().map(HexDisplay::from)
);
let _guard = guard();
if is_child_storage_key(&key) {
warn!(target: "trie", "Refuse to directly set child storage key");
return;
}
self.mark_dirty();
self.overlay.set_storage(key, value);
}
fn place_child_storage(
&mut self,
child_info: &ChildInfo,
key: StorageKey,
value: Option<StorageValue>,
) {
trace!(target: "state", "{:04x}: PutChild({}) {}={:?}",
self.id,
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&key),
value.as_ref().map(HexDisplay::from)
);
let _guard = guard();
self.mark_dirty();
self.overlay.set_child_storage(child_info, key, value);
}
fn kill_child_storage(
&mut self,
child_info: &ChildInfo,
limit: Option<u32>,
) -> (bool, u32) {
trace!(target: "state", "{:04x}: KillChild({})",
self.id,
HexDisplay::from(&child_info.storage_key()),
);
let _guard = guard();
self.mark_dirty();
self.overlay.clear_child_storage(child_info);
let mut num_deleted: u32 = 0;
if let Some(limit) = limit {
let mut all_deleted = true;
self.backend.apply_to_child_keys_while(child_info, |key| {
if num_deleted == limit {
all_deleted = false;
return false;
}
if let Some(num) = num_deleted.checked_add(1) {
num_deleted = num;
} else {
all_deleted = false;
return false;
}
self.overlay.set_child_storage(child_info, key.to_vec(), None);
true
});
(all_deleted, num_deleted)
} else {
self.backend.apply_to_child_keys_while(child_info, |key| {
num_deleted = num_deleted.saturating_add(1);
self.overlay.set_child_storage(child_info, key.to_vec(), None);
true
});
(true, num_deleted)
}
}
fn clear_prefix(&mut self, prefix: &[u8]) {
trace!(target: "state", "{:04x}: ClearPrefix {}",
self.id,
HexDisplay::from(&prefix),
);
let _guard = guard();
if sp_core::storage::well_known_keys::starts_with_child_storage_key(prefix) {
warn!(target: "trie", "Refuse to directly clear a prefix that is part of or contains a child storage key");
return;
}
self.mark_dirty();
self.overlay.clear_prefix(prefix);
self.backend.for_keys_with_prefix(prefix, |key| {
self.overlay.set_storage(key.to_vec(), None);
});
}
fn clear_child_prefix(
&mut self,
child_info: &ChildInfo,
prefix: &[u8],
) {
trace!(target: "state", "{:04x}: ClearChildPrefix({}) {}",
self.id,
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&prefix),
);
let _guard = guard();
self.mark_dirty();
self.overlay.clear_child_prefix(child_info, prefix);
self.backend.for_child_keys_with_prefix(child_info, prefix, |key| {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
});
}
fn storage_append(
&mut self,
key: Vec<u8>,
value: Vec<u8>,
) {
trace!(target: "state", "{:04x}: Append {}={}",
self.id,
HexDisplay::from(&key),
HexDisplay::from(&value),
);
let _guard = guard();
self.mark_dirty();
let backend = &mut self.backend;
let current_value = self.overlay.value_mut_or_insert_with(
&key,
|| backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default()
);
StorageAppend::new(current_value).append(value);
}
fn storage_root(&mut self) -> Vec<u8> {
let _guard = guard();
if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root {
trace!(target: "state", "{:04x}: Root(cached) {}",
self.id,
HexDisplay::from(&root.as_ref()),
);
return root.encode();
}
let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache);
trace!(target: "state", "{:04x}: Root {}", self.id, HexDisplay::from(&root.as_ref()));
root.encode()
}
fn child_storage_root(
&mut self,
child_info: &ChildInfo,
) -> Vec<u8> {
let _guard = guard();
let storage_key = child_info.storage_key();
let prefixed_storage_key = child_info.prefixed_storage_key();
if self.storage_transaction_cache.transaction_storage_root.is_some() {
let root = self
.storage(prefixed_storage_key.as_slice())
.and_then(|k| Decode::decode(&mut &k[..]).ok())
.unwrap_or_else(
|| empty_child_trie_root::<Layout<H>>()
);
trace!(target: "state", "{:04x}: ChildRoot({})(cached) {}",
self.id,
HexDisplay::from(&storage_key),
HexDisplay::from(&root.as_ref()),
);
root.encode()
} else {
let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) {
let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref)));
Some(self.backend.child_storage_root(info, delta))
} else {
None
};
if let Some((root, is_empty, _)) = root {
let root = root.encode();
// We store the update in the overlay so that the 'self.storage_transaction'
// cache can be used. This is brittle: it relies on Ext only querying the trie
// backend for the storage root.
// A better design would be to manage a 'child_storage_transaction' in a
// similar way to 'storage_transaction', but for each child trie.
if is_empty {
self.overlay.set_storage(prefixed_storage_key.into_inner(), None);
} else {
self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone()));
}
trace!(target: "state", "{:04x}: ChildRoot({}) {}",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&root.as_ref()),
);
root
} else {
// empty overlay
let root = self
.storage(prefixed_storage_key.as_slice())
.and_then(|k| Decode::decode(&mut &k[..]).ok())
.unwrap_or_else(
|| empty_child_trie_root::<Layout<H>>()
);
trace!(target: "state", "{:04x}: ChildRoot({})(no_change) {}",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&root.as_ref()),
);
root.encode()
}
}
}
#[cfg(not(feature = "std"))]
fn storage_changes_root(&mut self, _parent_hash: &[u8]) -> Result<Option<Vec<u8>>, ()> {
Ok(None)
}
#[cfg(feature = "std")]
fn storage_changes_root(&mut self, parent_hash: &[u8]) -> Result<Option<Vec<u8>>, ()> {
let _guard = guard();
if let Some(ref root) = self.storage_transaction_cache.changes_trie_transaction_storage_root {
trace!(
target: "state",
"{:04x}: ChangesRoot({})(cached) {:?}",
self.id,
HexDisplay::from(&parent_hash),
root,
);
Ok(Some(root.encode()))
} else {
let root = self.overlay.changes_trie_root(
self.backend,
self.changes_trie_state.as_ref(),
Decode::decode(&mut &parent_hash[..]).map_err(|e|
trace!(
target: "state",
"Failed to decode changes root parent hash: {}",
e,
)
)?,
true,
self.storage_transaction_cache,
);
trace!(
target: "state",
"{:04x}: ChangesRoot({}) {:?}",
self.id,
HexDisplay::from(&parent_hash),
root,
);
root.map(|r| r.map(|o| o.encode()))
}
}
fn storage_start_transaction(&mut self) {
self.overlay.start_transaction()
}
fn storage_rollback_transaction(&mut self) -> Result<(), ()> {
self.mark_dirty();
self.overlay.rollback_transaction().map_err(|_| ())
}
fn storage_commit_transaction(&mut self) -> Result<(), ()> {
self.overlay.commit_transaction().map_err(|_| ())
}
fn wipe(&mut self) {
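// Benchmarking helper (see the `BENCHMARKING_FN` expect messages): discard any
// open overlay transactions, then reset both overlay and backend to a clean state.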
for _ in 0..self.overlay.transaction_depth() {
self.overlay.rollback_transaction().expect(BENCHMARKING_FN);
}
self.overlay.drain_storage_changes(
&self.backend,
#[cfg(feature = "std")]
None,
Default::default(),
self.storage_transaction_cache,
).expect(EXT_NOT_ALLOWED_TO_FAIL);
self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL);
self.mark_dirty();
self.overlay
.enter_runtime()
.expect("We have reset the overlay above, so we can not be in the runtime; qed");
}
fn commit(&mut self) {
for _ in 0..self.overlay.transaction_depth() {
self.overlay.commit_transaction().expect(BENCHMARKING_FN);
}
let changes = self.overlay.drain_storage_changes(
&self.backend,
#[cfg(feature = "std")]
None,
Default::default(),
self.storage_transaction_cache,
).expect(EXT_NOT_ALLOWED_TO_FAIL);
self.backend.commit(
changes.transaction_storage_root,
changes.transaction,
changes.main_storage_changes,
changes.child_storage_changes,
).expect(EXT_NOT_ALLOWED_TO_FAIL);
self.mark_dirty();
self.overlay
.enter_runtime()
.expect("We have reset the overlay above, so we can not be in the runtime; qed");
}
fn read_write_count(&self) -> (u32, u32, u32, u32) {
self.backend.read_write_count()
}
fn reset_read_write_count(&mut self) {
self.backend.reset_read_write_count()
}
fn get_whitelist(&self) -> Vec<TrackedStorageKey> {
self.backend.get_whitelist()
}
fn set_whitelist(&mut self, new: Vec<TrackedStorageKey>) {
self.backend.set_whitelist(new)
}
}
/// Implement `Encode` by forwarding the stored raw vec.
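/// Used by `StorageAppend` so that already-SCALE-encoded bytes can be appended
/// to a stored `Vec` without being encoded a second time.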
struct EncodeOpaqueValue(Vec<u8>);
impl Encode for EncodeOpaqueValue {
fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
f(&self.0)
}
}
/// Auxiliary structure for appending a value to a storage item.
pub(crate) struct StorageAppend<'a>(&'a mut Vec<u8>);
impl<'a> StorageAppend<'a> {
/// Create a new instance using the given `storage` reference.
pub fn new(storage: &'a mut Vec<u8>) -> Self {
Self(storage)
}
/// Append the given `value` to the storage item.
///
/// If appending fails, `[value]` is stored in the storage item.
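///
/// A minimal sketch of the behavior (mirrors the `storage_append_works` test below):
/// ```ignore
/// let mut raw = Vec::new();
/// StorageAppend::new(&mut raw).append(1u32.encode());
/// StorageAppend::new(&mut raw).append(2u32.encode());
/// assert_eq!(Vec::<u32>::decode(&mut &raw[..]).unwrap(), vec![1, 2]);
/// ```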
pub fn append(&mut self, value: Vec<u8>) {
let value = vec![EncodeOpaqueValue(value)];
let item = sp_std::mem::take(self.0);
*self.0 = match Vec::<EncodeOpaqueValue>::append_or_new(item, &value) {
Ok(item) => item,
Err(_) => {
log_error!(
target: "runtime",
"Failed to append value, resetting storage item to `[value]`.",
);
value.encode()
}
};
}
}
#[cfg(not(feature = "std"))]
impl<'a, H, N, B> ExtensionStore for Ext<'a, H, N, B>
where
H: Hasher,
H::Out: Ord + 'static + codec::Codec,
B: Backend<H>,
N: crate::changes_trie::BlockNumber,
{
fn extension_by_type_id(&mut self, _type_id: TypeId) -> Option<&mut dyn Any> {
None
}
fn register_extension_with_type_id(
&mut self,
_type_id: TypeId,
_extension: Box<dyn Extension>,
) -> Result<(), sp_externalities::Error> {
Err(sp_externalities::Error::ExtensionsAreNotSupported)
}
fn deregister_extension_by_type_id(
&mut self,
_type_id: TypeId,
) -> Result<(), sp_externalities::Error> {
Err(sp_externalities::Error::ExtensionsAreNotSupported)
}
}
#[cfg(feature = "std")]
impl<'a, H, N, B> ExtensionStore for Ext<'a, H, N, B>
where
H: Hasher,
B: 'a + Backend<H>,
N: crate::changes_trie::BlockNumber,
{
fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> {
self.extensions.as_mut().and_then(|exts| exts.get_mut(type_id))
}
fn register_extension_with_type_id(
&mut self,
type_id: TypeId,
extension: Box<dyn Extension>,
) -> Result<(), sp_externalities::Error> {
if let Some(ref mut extensions) = self.extensions {
extensions.register(type_id, extension)
} else {
Err(sp_externalities::Error::ExtensionsAreNotSupported)
}
}
fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> {
if let Some(ref mut extensions) = self.extensions {
if extensions.deregister(type_id) {
Ok(())
} else {
Err(sp_externalities::Error::ExtensionIsNotRegistered(type_id))
}
} else {
Err(sp_externalities::Error::ExtensionsAreNotSupported)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use hex_literal::hex;
use num_traits::Zero;
use codec::Encode;
use sp_core::{
H256,
Blake2Hasher,
map,
storage::{
Storage,
StorageChild,
well_known_keys::EXTRINSIC_INDEX,
},
};
use crate::{
changes_trie::{
Configuration as ChangesTrieConfiguration,
InMemoryStorage as TestChangesTrieStorage,
}, InMemoryBackend,
};
type TestBackend = InMemoryBackend<Blake2Hasher>;
type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>;
fn prepare_overlay_with_changes() -> OverlayedChanges {
let mut changes = OverlayedChanges::default();
changes.set_collect_extrinsics(true);
changes.set_extrinsic_index(1);
changes.set_storage(vec![1], Some(vec![100]));
changes.set_storage(EXTRINSIC_INDEX.to_vec(), Some(3u32.encode()));
changes.set_offchain_storage(b"k1", Some(b"v1"));
changes.set_offchain_storage(b"k2", Some(b"v2"));
changes
}
fn changes_trie_config() -> ChangesTrieConfiguration {
ChangesTrieConfiguration {
digest_interval: 0,
digest_levels: 0,
}
}
#[test]
fn storage_changes_root_is_none_when_storage_is_not_provided() {
let mut overlay = prepare_overlay_with_changes();
let mut cache = StorageTransactionCache::default();
let backend = TestBackend::default();
let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None);
}
#[test]
fn storage_changes_root_is_none_when_state_is_not_provided() {
let mut overlay = prepare_overlay_with_changes();
let mut cache = StorageTransactionCache::default();
let backend = TestBackend::default();
let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None);
}
#[test]
fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() {
let mut overlay = prepare_overlay_with_changes();
let mut cache = StorageTransactionCache::default();
let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]);
let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage));
let backend = TestBackend::default();
let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None);
assert_eq!(
ext.storage_changes_root(&H256::default().encode()).unwrap(),
Some(hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").to_vec()),
);
}
#[test]
fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() {
let mut overlay = prepare_overlay_with_changes();
let mut cache = StorageTransactionCache::default();
overlay.set_collect_extrinsics(false);
overlay.set_storage(vec![1], None);
let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]);
let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage));
let backend = TestBackend::default();
let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None);
assert_eq!(
ext.storage_changes_root(&H256::default().encode()).unwrap(),
Some(hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").to_vec()),
);
}
#[test]
fn next_storage_key_works() {
let mut cache = StorageTransactionCache::default();
let mut overlay = OverlayedChanges::default();
overlay.set_storage(vec![20], None);
overlay.set_storage(vec![30], Some(vec![31]));
let backend = Storage {
top: map![
vec![10] => vec![10],
vec![20] => vec![20],
vec![40] => vec![40]
],
children_default: map![]
}.into();
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
// next_backend < next_overlay
assert_eq!(ext.next_storage_key(&[5]), Some(vec![10]));
// next_backend == next_overlay but next_overlay is a delete
assert_eq!(ext.next_storage_key(&[10]), Some(vec![30]));
// next_overlay < next_backend
assert_eq!(ext.next_storage_key(&[20]), Some(vec![30]));
// next_backend exists but next_overlay doesn't
assert_eq!(ext.next_storage_key(&[30]), Some(vec![40]));
drop(ext);
overlay.set_storage(vec![50], Some(vec![50]));
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
// next_overlay exists but next_backend doesn't
assert_eq!(ext.next_storage_key(&[40]), Some(vec![50]));
}
#[test]
fn next_child_storage_key_works() {
let child_info = ChildInfo::new_default(b"Child1");
let child_info = &child_info;
let mut cache = StorageTransactionCache::default();
let mut overlay = OverlayedChanges::default();
overlay.set_child_storage(child_info, vec![20], None);
overlay.set_child_storage(child_info, vec![30], Some(vec![31]));
let backend = Storage {
top: map![],
children_default: map![
child_info.storage_key().to_vec() => StorageChild {
data: map![
vec![10] => vec![10],
vec![20] => vec![20],
vec![40] => vec![40]
],
child_info: child_info.to_owned(),
}
],
}.into();
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
// next_backend < next_overlay
assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10]));
// next_backend == next_overlay but next_overlay is a delete
assert_eq!(ext.next_child_storage_key(child_info, &[10]), Some(vec![30]));
// next_overlay < next_backend
assert_eq!(ext.next_child_storage_key(child_info, &[20]), Some(vec![30]));
// next_backend exists but next_overlay doesn't
assert_eq!(ext.next_child_storage_key(child_info, &[30]), Some(vec![40]));
drop(ext);
overlay.set_child_storage(child_info, vec![50], Some(vec![50]));
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
// next_overlay exists but next_backend doesn't
assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50]));
}
#[test]
fn child_storage_works() {
let child_info = ChildInfo::new_default(b"Child1");
let child_info = &child_info;
let mut cache = StorageTransactionCache::default();
let mut overlay = OverlayedChanges::default();
overlay.set_child_storage(child_info, vec![20], None);
overlay.set_child_storage(child_info, vec![30], Some(vec![31]));
let backend = Storage {
top: map![],
children_default: map![
child_info.storage_key().to_vec() => StorageChild {
data: map![
vec![10] => vec![10],
vec![20] => vec![20],
vec![30] => vec![40]
],
child_info: child_info.to_owned(),
}
],
}.into();
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10]));
assert_eq!(
ext.child_storage_hash(child_info, &[10]),
Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()),
);
assert_eq!(ext.child_storage(child_info, &[20]), None);
assert_eq!(
ext.child_storage_hash(child_info, &[20]),
None,
);
assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![31]));
assert_eq!(
ext.child_storage_hash(child_info, &[30]),
Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()),
);
}
#[test]
fn clear_prefix_cannot_delete_a_child_root() {
let child_info = ChildInfo::new_default(b"Child1");
let child_info = &child_info;
let mut cache = StorageTransactionCache::default();
let mut overlay = OverlayedChanges::default();
let backend = Storage {
top: map![],
children_default: map![
child_info.storage_key().to_vec() => StorageChild {
data: map![
vec![30] => vec![40]
],
child_info: child_info.to_owned(),
}
],
}.into();
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
use sp_core::storage::well_known_keys;
let mut ext = ext;
let mut not_under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec();
not_under_prefix[4] = 88;
not_under_prefix.extend(b"path");
ext.set_storage(not_under_prefix.clone(), vec![10]);
ext.clear_prefix(&[]);
ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4]);
let mut under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec();
under_prefix.extend(b"path");
ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4]);
assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![40]));
assert_eq!(ext.storage(not_under_prefix.as_slice()), Some(vec![10]));
ext.clear_prefix(&not_under_prefix[..5]);
assert_eq!(ext.storage(not_under_prefix.as_slice()), None);
}
#[test]
fn storage_append_works() {
let mut data = Vec::new();
let mut append = StorageAppend::new(&mut data);
append.append(1u32.encode());
append.append(2u32.encode());
drop(append);
assert_eq!(Vec::<u32>::decode(&mut &data[..]).unwrap(), vec![1, 2]);
// Initialize with some invalid data
let mut data = vec![1];
let mut append = StorageAppend::new(&mut data);
append.append(1u32.encode());
append.append(2u32.encode());
drop(append);
assert_eq!(Vec::<u32>::decode(&mut &data[..]).unwrap(), vec![1, 2]);
}
}
|
migrate_served_from_test.go
|
/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testlib
import (
"reflect"
"testing"
"golang.org/x/net/context"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
"vitess.io/vitess/go/vt/vttablet/tmclient"
"vitess.io/vitess/go/vt/wrangler"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
func TestMigrateServedFrom(t *testing.T) {
ctx := context.Background()
ts := memorytopo.NewServer("cell1", "cell2")
wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
vp := NewVtctlPipe(t, ts)
defer vp.Close()
// create the source keyspace tablets
sourceMaster := NewFakeTablet(t, wr, "cell1", 10, topodatapb.TabletType_MASTER, nil,
TabletKeyspaceShard(t, "source", "0"))
sourceReplica := NewFakeTablet(t, wr, "cell1", 11, topodatapb.TabletType_REPLICA, nil,
TabletKeyspaceShard(t, "source", "0"))
sourceRdonly := NewFakeTablet(t, wr, "cell1", 12, topodatapb.TabletType_RDONLY, nil,
TabletKeyspaceShard(t, "source", "0"))
// create the destination keyspace, served from source
// double-check it has all entries in the map
if err := vp.Run([]string{"CreateKeyspace", "-served_from", "master:source,replica:source,rdonly:source", "dest"}); err != nil
|
ki, err := ts.GetKeyspace(ctx, "dest")
if err != nil {
t.Fatalf("GetKeyspace failed: %v", err)
}
if len(ki.ServedFroms) != 3 {
t.Fatalf("bad initial dest ServedFroms: %+v", ki.ServedFroms)
}
// create the destination keyspace tablets
destMaster := NewFakeTablet(t, wr, "cell1", 20, topodatapb.TabletType_MASTER, nil,
TabletKeyspaceShard(t, "dest", "0"))
destReplica := NewFakeTablet(t, wr, "cell1", 21, topodatapb.TabletType_REPLICA, nil,
TabletKeyspaceShard(t, "dest", "0"))
destRdonly := NewFakeTablet(t, wr, "cell1", 22, topodatapb.TabletType_RDONLY, nil,
TabletKeyspaceShard(t, "dest", "0"))
// sourceRdonly will see the refresh
sourceRdonly.StartActionLoop(t, wr)
defer sourceRdonly.StopActionLoop(t)
// sourceReplica will see the refresh
sourceReplica.StartActionLoop(t, wr)
defer sourceReplica.StopActionLoop(t)
// sourceMaster will see the refresh, and has to respond to it
// also will be asked about its replication position.
sourceMaster.FakeMysqlDaemon.CurrentMasterPosition = mysql.Position{
GTIDSet: mysql.MariadbGTIDSet{
5: mysql.MariadbGTID{
Domain: 5,
Server: 456,
Sequence: 892,
},
},
}
sourceMaster.StartActionLoop(t, wr)
defer sourceMaster.StopActionLoop(t)
// destRdonly will see the refresh
destRdonly.StartActionLoop(t, wr)
defer destRdonly.StopActionLoop(t)
// destReplica will see the refresh
destReplica.StartActionLoop(t, wr)
defer destReplica.StopActionLoop(t)
destMaster.StartActionLoop(t, wr)
defer destMaster.StopActionLoop(t)
// Override with a fake VREngine after the Agent is initialized by the action loop.
dbClient := binlogplayer.NewMockDBClient(t)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
destMaster.Agent.VREngine = vreplication.NewTestEngine(ts, "", destMaster.FakeMysqlDaemon, dbClientFactory, dbClient.DBName(), nil)
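// On Open, the engine first loads any existing streams for its db_name; expect
// that query and return an empty result so no streams are started.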
dbClient.ExpectRequest("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil)
if err := destMaster.Agent.VREngine.Open(context.Background()); err != nil {
t.Fatal(err)
}
// select pos, state, message from _vt.vreplication
dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{
sqltypes.NewVarBinary("MariaDB/5-456-892"),
sqltypes.NewVarBinary("Running"),
sqltypes.NewVarBinary(""),
}}}, nil)
expectDeleteVRepl(dbClient)
// simulate the clone by fixing the dest shard record
if err := vp.Run([]string{"SourceShardAdd", "--tables", "gone1,gone2", "dest/0", "1", "source/0"}); err != nil {
t.Fatalf("SourceShardAdd failed: %v", err)
}
// migrate rdonly over in a cell
if err := vp.Run([]string{"MigrateServedFrom", "--cells", "cell1", "dest/0", "rdonly"}); err != nil {
t.Fatalf("MigrateServedFrom(rdonly) cell2 failed: %v", err)
}
// check it's gone from keyspace
ki, err = ts.GetKeyspace(ctx, "dest")
if err != nil {
t.Fatalf("GetKeyspace failed: %v", err)
}
if len(ki.ServedFroms) != 2 || ki.GetServedFrom(topodatapb.TabletType_RDONLY) != nil {
t.Fatalf("bad initial dest ServedFroms: %v", ki.ServedFroms)
}
// check the source shard has the right blacklisted tables
si, err := ts.GetShard(ctx, "source", "0")
if err != nil {
t.Fatalf("GetShard failed: %v", err)
}
expected := []*topodatapb.Shard_TabletControl{
{
TabletType: topodatapb.TabletType_RDONLY,
Cells: []string{"cell1"},
BlacklistedTables: []string{"gone1", "gone2"},
},
}
if len(si.TabletControls) != 1 || !reflect.DeepEqual(si.TabletControls, expected) {
t.Fatalf("rdonly type doesn't have right blacklisted tables. Expected: %v, got: %v", expected, si.TabletControls)
}
// reverse the rdonly migration for the cell
if err := vp.Run([]string{"MigrateServedFrom", "--cells", "cell1", "--reverse", "dest/0", "rdonly"}); err != nil {
t.Fatalf("MigrateServedFrom(rdonly) cell2 failed: %v", err)
}
// check it's gone from keyspace
ki, err = ts.GetKeyspace(ctx, "dest")
if err != nil {
t.Fatalf("GetKeyspace failed: %v", err)
}
if len(ki.ServedFroms) != 3 {
t.Fatalf("bad initial dest ServedFroms: %v", ki.ServedFroms)
}
// check the source shard has the right blacklisted tables
si, err = ts.GetShard(ctx, "source", "0")
if err != nil {
t.Fatalf("GetShard failed: %v", err)
}
if len(si.TabletControls) != 0 {
t.Fatalf("rdonly type doesn't have right blacklisted tables. Expected: nil, got: %v", si.TabletControls)
}
// Now migrate rdonly over
if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "rdonly"}); err != nil {
t.Fatalf("MigrateServedFrom(rdonly) failed: %v", err)
}
// check it's gone from keyspace
ki, err = ts.GetKeyspace(ctx, "dest")
if err != nil {
t.Fatalf("GetKeyspace failed: %v", err)
}
if len(ki.ServedFroms) != 2 || ki.GetServedFrom(topodatapb.TabletType_RDONLY) != nil {
t.Fatalf("bad initial dest ServedFroms: %v", ki.ServedFroms)
}
// check the source shard has the right blacklisted tables
si, err = ts.GetShard(ctx, "source", "0")
if err != nil {
t.Fatalf("GetShard failed: %v", err)
}
expected = []*topodatapb.Shard_TabletControl{
{
TabletType: topodatapb.TabletType_RDONLY,
BlacklistedTables: []string{"gone1", "gone2"},
},
}
if len(si.TabletControls) != 1 || !reflect.DeepEqual(si.TabletControls, expected) {
t.Fatalf("rdonly type doesn't have right blacklisted tables. Expected: %v, got: %v", expected, si.TabletControls)
}
// migrate replica over
if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "replica"}); err != nil {
t.Fatalf("MigrateServedFrom(replica) failed: %v", err)
}
// check it's gone from keyspace
ki, err = ts.GetKeyspace(ctx, "dest")
if err != nil {
t.Fatalf("GetKeyspace failed: %v", err)
}
if len(ki.ServedFroms) != 1 || ki.GetServedFrom(topodatapb.TabletType_REPLICA) != nil {
t.Fatalf("bad initial dest ServedFrom: %+v", ki.ServedFroms)
}
// check the source shard has the right blacklisted tables
si, err = ts.GetShard(ctx, "source", "0")
if err != nil {
t.Fatalf("GetShard failed: %v", err)
}
if len(si.TabletControls) != 2 || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{
{
TabletType: topodatapb.TabletType_RDONLY,
BlacklistedTables: []string{"gone1", "gone2"},
},
{
TabletType: topodatapb.TabletType_REPLICA,
BlacklistedTables: []string{"gone1", "gone2"},
},
}) {
t.Fatalf("replica type doesn't have right blacklisted tables")
}
// migrate master over
if err := vp.Run([]string{"MigrateServedFrom", "dest/0", "master"}); err != nil {
t.Fatalf("MigrateServedFrom(master) failed: %v", err)
}
// make sure ServedFromMap is empty
ki, err = ts.GetKeyspace(ctx, "dest")
if err != nil {
t.Fatalf("GetKeyspace failed: %v", err)
}
if len(ki.ServedFroms) > 0 {
t.Fatalf("dest keyspace still is ServedFrom: %+v", ki.ServedFroms)
}
// check the source shard has the right blacklisted tables
si, err = ts.GetShard(ctx, "source", "0")
if err != nil {
t.Fatalf("GetShard failed: %v", err)
}
if len(si.TabletControls) != 3 || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{
{
TabletType: topodatapb.TabletType_RDONLY,
BlacklistedTables: []string{"gone1", "gone2"},
},
{
TabletType: topodatapb.TabletType_REPLICA,
BlacklistedTables: []string{"gone1", "gone2"},
},
{
TabletType: topodatapb.TabletType_MASTER,
BlacklistedTables: []string{"gone1", "gone2"},
},
}) {
t.Fatalf("master type doesn't have right blacklisted tables")
}
}
|
about_strings.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import Koan
class AboutStrings(Koan):
def test_double_quoted_strings_are_strings(self):
string = "Hello, world."
self.assertEqual(True, isinstance(string, str))
def test_single_quoted_strings_are_also_strings(self):
string = 'Goodbye, world.'
self.assertEqual(True, isinstance(string, str))
def test_triple_quote_strings_are_also_strings(self):
string = """Howdy, world!"""
self.assertEqual(True, isinstance(string, str))
def test_triple_single_quotes_work_too(self):
string = '''Bonjour tout le monde!'''
self.assertEqual(True, isinstance(string, str))
def test_raw_strings_are_also_strings(self):
string = r"Konnichi wa, world!"
self.assertEqual(True, isinstance(string, str))
def test_use_single_quotes_to_create_string_with_double_quotes(self):
string = 'He said, "Go Away."'
self.assertEqual(string, string)
def test_use_double_quotes_to_create_strings_with_single_quotes(self):
string = "Don't"
self.assertEqual(string, string)
def test_use_backslash_for_escaping_quotes_in_strings(self):
a = "He said, \"Don't\""
b = 'He said, "Don\'t"'
self.assertEqual(True, (a == b))
def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
string = "It was the best of times,\n\
It was the worst of times."
self.assertEqual(52, len(string))
def test_triple_quoted_strings_can_span_lines(self):
string = """
Howdy,
world!
"""
self.assertEqual(15, len(string))
def test_triple_quoted_strings_need_less_escaping(self):
a = "Hello \"world\"."
b = """Hello "world"."""
self.assertEqual(True, (a == b))
def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
string = """Hello "world\""""
self.assertEqual('Hello "world"', string)
def test_plus_concatenates_strings(self):
string = "Hello, " + "world"
self.assertEqual("Hello, world", string)
def test_adjacent_strings_are_concatenated_automatically(self):
string = "Hello" ", " "world"
self.assertEqual("Hello, world", string)
def test_plus_will_not_modify_original_strings(self):
hi = "Hello, "
there = "world"
string = hi + there
self.assertEqual("Hello, ", hi)
self.assertEqual("world", there)
def test_plus_equals_will_append_to_end_of_string(self):
hi = "Hello, "
there = "world"
hi += there
self.assertEqual("Hello, world", hi)
def test_plus_equals_also_leaves_original_string_unmodified(self):
original = "Hello, "
hi = original
there = "world"
hi += there
self.assertEqual("Hello, ", original)
def test_most_strings_interpret_escape_characters(self):
string = "\n"
self.assertEqual('\n', string)
self.assertEqual("""\n""", string)
self.assertEqual(1, len(string))
|
output.rs
|
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdatePartnerStatusOutput {
/// <p>The name of the database that receives data from the partner.</p>
pub database_name: std::option::Option<std::string::String>,
/// <p>The name of the partner that is authorized to send data.</p>
pub partner_name: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for UpdatePartnerStatusOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdatePartnerStatusOutput");
formatter.field("database_name", &self.database_name);
formatter.field("partner_name", &self.partner_name);
formatter.finish()
}
}
/// See [`UpdatePartnerStatusOutput`](crate::output::UpdatePartnerStatusOutput)
pub mod update_partner_status_output {
/// A builder for [`UpdatePartnerStatusOutput`](crate::output::UpdatePartnerStatusOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) database_name: std::option::Option<std::string::String>,
pub(crate) partner_name: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the database that receives data from the partner.</p>
pub fn database_name(mut self, input: impl Into<std::string::String>) -> Self {
self.database_name = Some(input.into());
self
}
pub fn set_database_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.database_name = input;
self
}
/// <p>The name of the partner that is authorized to send data.</p>
pub fn partner_name(mut self, input: impl Into<std::string::String>) -> Self {
self.partner_name = Some(input.into());
self
}
pub fn set_partner_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.partner_name = input;
self
}
/// Consumes the builder and constructs a [`UpdatePartnerStatusOutput`](crate::output::UpdatePartnerStatusOutput)
pub fn build(self) -> crate::output::UpdatePartnerStatusOutput {
crate::output::UpdatePartnerStatusOutput {
database_name: self.database_name,
partner_name: self.partner_name,
}
}
}
}
impl UpdatePartnerStatusOutput {
/// Creates a new builder-style object to manufacture [`UpdatePartnerStatusOutput`](crate::output::UpdatePartnerStatusOutput)
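/// A minimal usage sketch (illustrative only; field values are placeholders):
/// ```ignore
/// let output = UpdatePartnerStatusOutput::builder()
///     .database_name("dev")
///     .partner_name("example_partner")
///     .build();
/// ```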
pub fn builder() -> crate::output::update_partner_status_output::Builder {
crate::output::update_partner_status_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RotateEncryptionKeyOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for RotateEncryptionKeyOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RotateEncryptionKeyOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`RotateEncryptionKeyOutput`](crate::output::RotateEncryptionKeyOutput)
pub mod rotate_encryption_key_output {
/// A builder for [`RotateEncryptionKeyOutput`](crate::output::RotateEncryptionKeyOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`RotateEncryptionKeyOutput`](crate::output::RotateEncryptionKeyOutput)
pub fn build(self) -> crate::output::RotateEncryptionKeyOutput {
crate::output::RotateEncryptionKeyOutput {
cluster: self.cluster,
}
}
}
}
impl RotateEncryptionKeyOutput {
/// Creates a new builder-style object to manufacture [`RotateEncryptionKeyOutput`](crate::output::RotateEncryptionKeyOutput)
pub fn builder() -> crate::output::rotate_encryption_key_output::Builder {
crate::output::rotate_encryption_key_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RevokeSnapshotAccessOutput {
/// <p>Describes a snapshot.</p>
pub snapshot: std::option::Option<crate::model::Snapshot>,
}
impl std::fmt::Debug for RevokeSnapshotAccessOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RevokeSnapshotAccessOutput");
formatter.field("snapshot", &self.snapshot);
formatter.finish()
}
}
/// See [`RevokeSnapshotAccessOutput`](crate::output::RevokeSnapshotAccessOutput)
pub mod revoke_snapshot_access_output {
/// A builder for [`RevokeSnapshotAccessOutput`](crate::output::RevokeSnapshotAccessOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) snapshot: std::option::Option<crate::model::Snapshot>,
}
impl Builder {
/// <p>Describes a snapshot.</p>
pub fn snapshot(mut self, input: crate::model::Snapshot) -> Self {
self.snapshot = Some(input);
self
}
pub fn set_snapshot(mut self, input: std::option::Option<crate::model::Snapshot>) -> Self {
self.snapshot = input;
self
}
/// Consumes the builder and constructs a [`RevokeSnapshotAccessOutput`](crate::output::RevokeSnapshotAccessOutput)
pub fn build(self) -> crate::output::RevokeSnapshotAccessOutput {
crate::output::RevokeSnapshotAccessOutput {
snapshot: self.snapshot,
}
}
}
}
impl RevokeSnapshotAccessOutput {
/// Creates a new builder-style object to manufacture [`RevokeSnapshotAccessOutput`](crate::output::RevokeSnapshotAccessOutput)
pub fn builder() -> crate::output::revoke_snapshot_access_output::Builder {
crate::output::revoke_snapshot_access_output::Builder::default()
}
}
/// <p>Describes an endpoint authorization for authorizing Redshift-managed VPC endpoint access to a cluster across Amazon Web Services accounts.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RevokeEndpointAccessOutput {
/// <p>The Amazon Web Services account ID of the cluster owner.</p>
pub grantor: std::option::Option<std::string::String>,
/// <p>The Amazon Web Services account ID of the grantee of the cluster.</p>
pub grantee: std::option::Option<std::string::String>,
/// <p>The cluster identifier.</p>
pub cluster_identifier: std::option::Option<std::string::String>,
/// <p>The time (UTC) when the authorization was created.</p>
pub authorize_time: std::option::Option<smithy_types::Instant>,
/// <p>The status of the cluster.</p>
pub cluster_status: std::option::Option<std::string::String>,
/// <p>The status of the authorization action.</p>
pub status: std::option::Option<crate::model::AuthorizationStatus>,
/// <p>Indicates whether all VPCs in the grantee account are allowed access to the cluster.</p>
pub allowed_all_vp_cs: bool,
/// <p>The VPCs allowed access to the cluster.</p>
pub allowed_vp_cs: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The number of Redshift-managed VPC endpoints created for the authorization.</p>
pub endpoint_count: i32,
}
impl std::fmt::Debug for RevokeEndpointAccessOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RevokeEndpointAccessOutput");
formatter.field("grantor", &self.grantor);
formatter.field("grantee", &self.grantee);
formatter.field("cluster_identifier", &self.cluster_identifier);
formatter.field("authorize_time", &self.authorize_time);
formatter.field("cluster_status", &self.cluster_status);
formatter.field("status", &self.status);
formatter.field("allowed_all_vp_cs", &self.allowed_all_vp_cs);
formatter.field("allowed_vp_cs", &self.allowed_vp_cs);
formatter.field("endpoint_count", &self.endpoint_count);
formatter.finish()
}
}
/// See [`RevokeEndpointAccessOutput`](crate::output::RevokeEndpointAccessOutput)
pub mod revoke_endpoint_access_output {
/// A builder for [`RevokeEndpointAccessOutput`](crate::output::RevokeEndpointAccessOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) grantor: std::option::Option<std::string::String>,
pub(crate) grantee: std::option::Option<std::string::String>,
pub(crate) cluster_identifier: std::option::Option<std::string::String>,
pub(crate) authorize_time: std::option::Option<smithy_types::Instant>,
pub(crate) cluster_status: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::AuthorizationStatus>,
pub(crate) allowed_all_vp_cs: std::option::Option<bool>,
pub(crate) allowed_vp_cs: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) endpoint_count: std::option::Option<i32>,
}
impl Builder {
/// <p>The Amazon Web Services account ID of the cluster owner.</p>
pub fn grantor(mut self, input: impl Into<std::string::String>) -> Self {
self.grantor = Some(input.into());
self
}
pub fn set_grantor(mut self, input: std::option::Option<std::string::String>) -> Self {
self.grantor = input;
self
}
/// <p>The Amazon Web Services account ID of the grantee of the cluster.</p>
pub fn grantee(mut self, input: impl Into<std::string::String>) -> Self {
self.grantee = Some(input.into());
self
}
pub fn set_grantee(mut self, input: std::option::Option<std::string::String>) -> Self {
self.grantee = input;
self
}
/// <p>The cluster identifier.</p>
pub fn cluster_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_identifier = Some(input.into());
self
}
pub fn set_cluster_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_identifier = input;
self
}
/// <p>The time (UTC) when the authorization was created.</p>
pub fn authorize_time(mut self, input: smithy_types::Instant) -> Self {
self.authorize_time = Some(input);
self
}
pub fn set_authorize_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.authorize_time = input;
self
}
/// <p>The status of the cluster.</p>
pub fn cluster_status(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_status = Some(input.into());
self
}
pub fn set_cluster_status(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_status = input;
self
}
/// <p>The status of the authorization action.</p>
pub fn status(mut self, input: crate::model::AuthorizationStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::AuthorizationStatus>,
) -> Self {
self.status = input;
self
}
/// <p>Indicates whether all VPCs in the grantee account are allowed access to the cluster.</p>
pub fn allowed_all_vp_cs(mut self, input: bool) -> Self {
self.allowed_all_vp_cs = Some(input);
self
}
pub fn set_allowed_all_vp_cs(mut self, input: std::option::Option<bool>) -> Self {
self.allowed_all_vp_cs = input;
self
}
pub fn allowed_vp_cs(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.allowed_vp_cs.unwrap_or_default();
v.push(input.into());
self.allowed_vp_cs = Some(v);
self
}
pub fn set_allowed_vp_cs(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.allowed_vp_cs = input;
self
}
/// <p>The number of Redshift-managed VPC endpoints created for the authorization.</p>
pub fn endpoint_count(mut self, input: i32) -> Self {
self.endpoint_count = Some(input);
self
}
pub fn set_endpoint_count(mut self, input: std::option::Option<i32>) -> Self {
self.endpoint_count = input;
self
}
/// Consumes the builder and constructs a [`RevokeEndpointAccessOutput`](crate::output::RevokeEndpointAccessOutput)
pub fn build(self) -> crate::output::RevokeEndpointAccessOutput {
crate::output::RevokeEndpointAccessOutput {
grantor: self.grantor,
grantee: self.grantee,
cluster_identifier: self.cluster_identifier,
authorize_time: self.authorize_time,
cluster_status: self.cluster_status,
status: self.status,
allowed_all_vp_cs: self.allowed_all_vp_cs.unwrap_or_default(),
allowed_vp_cs: self.allowed_vp_cs,
endpoint_count: self.endpoint_count.unwrap_or_default(),
}
}
}
}
impl RevokeEndpointAccessOutput {
/// Creates a new builder-style object to manufacture [`RevokeEndpointAccessOutput`](crate::output::RevokeEndpointAccessOutput)
pub fn builder() -> crate::output::revoke_endpoint_access_output::Builder {
crate::output::revoke_endpoint_access_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RevokeClusterSecurityGroupIngressOutput {
/// <p>Describes a security group.</p>
pub cluster_security_group: std::option::Option<crate::model::ClusterSecurityGroup>,
}
impl std::fmt::Debug for RevokeClusterSecurityGroupIngressOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RevokeClusterSecurityGroupIngressOutput");
formatter.field("cluster_security_group", &self.cluster_security_group);
formatter.finish()
}
}
/// See [`RevokeClusterSecurityGroupIngressOutput`](crate::output::RevokeClusterSecurityGroupIngressOutput)
pub mod revoke_cluster_security_group_ingress_output {
/// A builder for [`RevokeClusterSecurityGroupIngressOutput`](crate::output::RevokeClusterSecurityGroupIngressOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_security_group: std::option::Option<crate::model::ClusterSecurityGroup>,
}
impl Builder {
/// <p>Describes a security group.</p>
pub fn cluster_security_group(mut self, input: crate::model::ClusterSecurityGroup) -> Self {
self.cluster_security_group = Some(input);
self
}
pub fn set_cluster_security_group(
mut self,
input: std::option::Option<crate::model::ClusterSecurityGroup>,
) -> Self {
self.cluster_security_group = input;
self
}
/// Consumes the builder and constructs a [`RevokeClusterSecurityGroupIngressOutput`](crate::output::RevokeClusterSecurityGroupIngressOutput)
pub fn build(self) -> crate::output::RevokeClusterSecurityGroupIngressOutput {
crate::output::RevokeClusterSecurityGroupIngressOutput {
cluster_security_group: self.cluster_security_group,
}
}
}
}
impl RevokeClusterSecurityGroupIngressOutput {
/// Creates a new builder-style object to manufacture [`RevokeClusterSecurityGroupIngressOutput`](crate::output::RevokeClusterSecurityGroupIngressOutput)
pub fn builder() -> crate::output::revoke_cluster_security_group_ingress_output::Builder {
crate::output::revoke_cluster_security_group_ingress_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResumeClusterOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for ResumeClusterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ResumeClusterOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`ResumeClusterOutput`](crate::output::ResumeClusterOutput)
pub mod resume_cluster_output {
/// A builder for [`ResumeClusterOutput`](crate::output::ResumeClusterOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`ResumeClusterOutput`](crate::output::ResumeClusterOutput)
pub fn build(self) -> crate::output::ResumeClusterOutput {
crate::output::ResumeClusterOutput {
cluster: self.cluster,
}
}
}
}
impl ResumeClusterOutput {
/// Creates a new builder-style object to manufacture [`ResumeClusterOutput`](crate::output::ResumeClusterOutput)
pub fn builder() -> crate::output::resume_cluster_output::Builder {
crate::output::resume_cluster_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RestoreTableFromClusterSnapshotOutput {
/// <p>Describes the status of a <a>RestoreTableFromClusterSnapshot</a>
/// operation.</p>
pub table_restore_status: std::option::Option<crate::model::TableRestoreStatus>,
}
impl std::fmt::Debug for RestoreTableFromClusterSnapshotOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RestoreTableFromClusterSnapshotOutput");
formatter.field("table_restore_status", &self.table_restore_status);
formatter.finish()
}
}
/// See [`RestoreTableFromClusterSnapshotOutput`](crate::output::RestoreTableFromClusterSnapshotOutput)
pub mod restore_table_from_cluster_snapshot_output {
/// A builder for [`RestoreTableFromClusterSnapshotOutput`](crate::output::RestoreTableFromClusterSnapshotOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) table_restore_status: std::option::Option<crate::model::TableRestoreStatus>,
}
impl Builder {
/// <p>Describes the status of a <a>RestoreTableFromClusterSnapshot</a>
/// operation.</p>
pub fn table_restore_status(mut self, input: crate::model::TableRestoreStatus) -> Self {
self.table_restore_status = Some(input);
self
}
pub fn set_table_restore_status(
mut self,
input: std::option::Option<crate::model::TableRestoreStatus>,
) -> Self {
self.table_restore_status = input;
self
}
/// Consumes the builder and constructs a [`RestoreTableFromClusterSnapshotOutput`](crate::output::RestoreTableFromClusterSnapshotOutput)
pub fn build(self) -> crate::output::RestoreTableFromClusterSnapshotOutput {
crate::output::RestoreTableFromClusterSnapshotOutput {
table_restore_status: self.table_restore_status,
}
}
}
}
impl RestoreTableFromClusterSnapshotOutput {
/// Creates a new builder-style object to manufacture [`RestoreTableFromClusterSnapshotOutput`](crate::output::RestoreTableFromClusterSnapshotOutput)
pub fn builder() -> crate::output::restore_table_from_cluster_snapshot_output::Builder {
crate::output::restore_table_from_cluster_snapshot_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RestoreFromClusterSnapshotOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for RestoreFromClusterSnapshotOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RestoreFromClusterSnapshotOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`RestoreFromClusterSnapshotOutput`](crate::output::RestoreFromClusterSnapshotOutput)
pub mod restore_from_cluster_snapshot_output {
/// A builder for [`RestoreFromClusterSnapshotOutput`](crate::output::RestoreFromClusterSnapshotOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`RestoreFromClusterSnapshotOutput`](crate::output::RestoreFromClusterSnapshotOutput)
pub fn build(self) -> crate::output::RestoreFromClusterSnapshotOutput {
crate::output::RestoreFromClusterSnapshotOutput {
cluster: self.cluster,
}
}
}
}
impl RestoreFromClusterSnapshotOutput {
/// Creates a new builder-style object to manufacture [`RestoreFromClusterSnapshotOutput`](crate::output::RestoreFromClusterSnapshotOutput)
pub fn builder() -> crate::output::restore_from_cluster_snapshot_output::Builder {
crate::output::restore_from_cluster_snapshot_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResizeClusterOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for ResizeClusterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ResizeClusterOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`ResizeClusterOutput`](crate::output::ResizeClusterOutput)
pub mod resize_cluster_output {
/// A builder for [`ResizeClusterOutput`](crate::output::ResizeClusterOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`ResizeClusterOutput`](crate::output::ResizeClusterOutput)
pub fn build(self) -> crate::output::ResizeClusterOutput {
crate::output::ResizeClusterOutput {
cluster: self.cluster,
}
}
}
}
impl ResizeClusterOutput {
/// Creates a new builder-style object to manufacture [`ResizeClusterOutput`](crate::output::ResizeClusterOutput)
pub fn builder() -> crate::output::resize_cluster_output::Builder {
crate::output::resize_cluster_output::Builder::default()
}
}
/// <p></p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResetClusterParameterGroupOutput {
/// <p>The name of the cluster parameter group.</p>
pub parameter_group_name: std::option::Option<std::string::String>,
/// <p>The status of the parameter group. For example, if you made a change to a parameter
/// group name-value pair, then the change could be pending a reboot of an associated
/// cluster.</p>
pub parameter_group_status: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ResetClusterParameterGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ResetClusterParameterGroupOutput");
formatter.field("parameter_group_name", &self.parameter_group_name);
formatter.field("parameter_group_status", &self.parameter_group_status);
formatter.finish()
}
}
/// See [`ResetClusterParameterGroupOutput`](crate::output::ResetClusterParameterGroupOutput)
pub mod reset_cluster_parameter_group_output {
/// A builder for [`ResetClusterParameterGroupOutput`](crate::output::ResetClusterParameterGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) parameter_group_name: std::option::Option<std::string::String>,
pub(crate) parameter_group_status: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the cluster parameter group.</p>
pub fn parameter_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.parameter_group_name = Some(input.into());
self
}
pub fn set_parameter_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.parameter_group_name = input;
self
}
/// <p>The status of the parameter group. For example, if you made a change to a parameter
/// group name-value pair, then the change could be pending a reboot of an associated
/// cluster.</p>
pub fn parameter_group_status(mut self, input: impl Into<std::string::String>) -> Self {
self.parameter_group_status = Some(input.into());
self
}
pub fn set_parameter_group_status(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.parameter_group_status = input;
self
}
/// Consumes the builder and constructs a [`ResetClusterParameterGroupOutput`](crate::output::ResetClusterParameterGroupOutput)
pub fn build(self) -> crate::output::ResetClusterParameterGroupOutput {
crate::output::ResetClusterParameterGroupOutput {
parameter_group_name: self.parameter_group_name,
parameter_group_status: self.parameter_group_status,
}
}
}
}
impl ResetClusterParameterGroupOutput {
/// Creates a new builder-style object to manufacture [`ResetClusterParameterGroupOutput`](crate::output::ResetClusterParameterGroupOutput)
pub fn builder() -> crate::output::reset_cluster_parameter_group_output::Builder {
crate::output::reset_cluster_parameter_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RejectDataShareOutput {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub data_share_arn: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub producer_arn: std::option::Option<std::string::String>,
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub allow_publicly_accessible_consumers: bool,
/// <p>A value that specifies when the datashare has an association between a producer and data consumers.</p>
pub data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl std::fmt::Debug for RejectDataShareOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RejectDataShareOutput");
formatter.field("data_share_arn", &self.data_share_arn);
formatter.field("producer_arn", &self.producer_arn);
formatter.field(
"allow_publicly_accessible_consumers",
&self.allow_publicly_accessible_consumers,
);
formatter.field("data_share_associations", &self.data_share_associations);
formatter.finish()
}
}
/// See [`RejectDataShareOutput`](crate::output::RejectDataShareOutput)
pub mod reject_data_share_output {
/// A builder for [`RejectDataShareOutput`](crate::output::RejectDataShareOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_share_arn: std::option::Option<std::string::String>,
pub(crate) producer_arn: std::option::Option<std::string::String>,
pub(crate) allow_publicly_accessible_consumers: std::option::Option<bool>,
pub(crate) data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl Builder {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub fn data_share_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.data_share_arn = Some(input.into());
self
}
pub fn set_data_share_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.data_share_arn = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub fn producer_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.producer_arn = Some(input.into());
self
}
pub fn set_producer_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.producer_arn = input;
self
}
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub fn allow_publicly_accessible_consumers(mut self, input: bool) -> Self {
self.allow_publicly_accessible_consumers = Some(input);
self
}
pub fn set_allow_publicly_accessible_consumers(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.allow_publicly_accessible_consumers = input;
self
}
pub fn data_share_associations(
mut self,
input: impl Into<crate::model::DataShareAssociation>,
) -> Self {
let mut v = self.data_share_associations.unwrap_or_default();
v.push(input.into());
self.data_share_associations = Some(v);
self
}
pub fn set_data_share_associations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
) -> Self {
self.data_share_associations = input;
self
}
/// Consumes the builder and constructs a [`RejectDataShareOutput`](crate::output::RejectDataShareOutput)
pub fn build(self) -> crate::output::RejectDataShareOutput {
crate::output::RejectDataShareOutput {
data_share_arn: self.data_share_arn,
producer_arn: self.producer_arn,
allow_publicly_accessible_consumers: self
.allow_publicly_accessible_consumers
.unwrap_or_default(),
data_share_associations: self.data_share_associations,
}
}
}
}
impl RejectDataShareOutput {
/// Creates a new builder-style object to manufacture [`RejectDataShareOutput`](crate::output::RejectDataShareOutput)
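    ///
    /// A minimal sketch of constructing this output by hand (for example, as a test
    /// double); the ARN below is a placeholder following the documented format, not a
    /// real resource:
    /// ```ignore
    /// let output = crate::output::RejectDataShareOutput::builder()
    ///     .data_share_arn("arn:aws:redshift:us-east-2:123456789012:datashare:example-guid/example-share")
    ///     .allow_publicly_accessible_consumers(false)
    ///     .build();
    /// // `allow_publicly_accessible_consumers` falls back to `false` (the bool
    /// // default) when it is never set on the builder.
    /// assert!(!output.allow_publicly_accessible_consumers);
    /// ```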
pub fn builder() -> crate::output::reject_data_share_output::Builder {
crate::output::reject_data_share_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RebootClusterOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for RebootClusterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RebootClusterOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`RebootClusterOutput`](crate::output::RebootClusterOutput)
pub mod reboot_cluster_output {
/// A builder for [`RebootClusterOutput`](crate::output::RebootClusterOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`RebootClusterOutput`](crate::output::RebootClusterOutput)
pub fn build(self) -> crate::output::RebootClusterOutput {
crate::output::RebootClusterOutput {
cluster: self.cluster,
}
}
}
}
impl RebootClusterOutput {
/// Creates a new builder-style object to manufacture [`RebootClusterOutput`](crate::output::RebootClusterOutput)
pub fn builder() -> crate::output::reboot_cluster_output::Builder {
crate::output::reboot_cluster_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PurchaseReservedNodeOfferingOutput {
/// <p>Describes a reserved node. You can call the <a>DescribeReservedNodeOfferings</a> API to obtain the available reserved node
/// offerings. </p>
pub reserved_node: std::option::Option<crate::model::ReservedNode>,
}
impl std::fmt::Debug for PurchaseReservedNodeOfferingOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PurchaseReservedNodeOfferingOutput");
formatter.field("reserved_node", &self.reserved_node);
formatter.finish()
}
}
/// See [`PurchaseReservedNodeOfferingOutput`](crate::output::PurchaseReservedNodeOfferingOutput)
pub mod purchase_reserved_node_offering_output {
/// A builder for [`PurchaseReservedNodeOfferingOutput`](crate::output::PurchaseReservedNodeOfferingOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) reserved_node: std::option::Option<crate::model::ReservedNode>,
}
impl Builder {
/// <p>Describes a reserved node. You can call the <a>DescribeReservedNodeOfferings</a> API to obtain the available reserved node
/// offerings. </p>
pub fn reserved_node(mut self, input: crate::model::ReservedNode) -> Self {
self.reserved_node = Some(input);
self
}
pub fn set_reserved_node(
mut self,
input: std::option::Option<crate::model::ReservedNode>,
) -> Self {
self.reserved_node = input;
self
}
/// Consumes the builder and constructs a [`PurchaseReservedNodeOfferingOutput`](crate::output::PurchaseReservedNodeOfferingOutput)
pub fn build(self) -> crate::output::PurchaseReservedNodeOfferingOutput {
crate::output::PurchaseReservedNodeOfferingOutput {
reserved_node: self.reserved_node,
}
}
}
}
impl PurchaseReservedNodeOfferingOutput {
/// Creates a new builder-style object to manufacture [`PurchaseReservedNodeOfferingOutput`](crate::output::PurchaseReservedNodeOfferingOutput)
pub fn builder() -> crate::output::purchase_reserved_node_offering_output::Builder {
crate::output::purchase_reserved_node_offering_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PauseClusterOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for PauseClusterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PauseClusterOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`PauseClusterOutput`](crate::output::PauseClusterOutput)
pub mod pause_cluster_output {
/// A builder for [`PauseClusterOutput`](crate::output::PauseClusterOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`PauseClusterOutput`](crate::output::PauseClusterOutput)
pub fn build(self) -> crate::output::PauseClusterOutput {
crate::output::PauseClusterOutput {
cluster: self.cluster,
}
}
}
}
impl PauseClusterOutput {
/// Creates a new builder-style object to manufacture [`PauseClusterOutput`](crate::output::PauseClusterOutput)
pub fn builder() -> crate::output::pause_cluster_output::Builder {
crate::output::pause_cluster_output::Builder::default()
}
}
/// <p>Describes a usage limit object for a cluster. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyUsageLimitOutput {
/// <p>The identifier of the usage limit.</p>
pub usage_limit_id: std::option::Option<std::string::String>,
/// <p>The identifier of the cluster with a usage limit.</p>
pub cluster_identifier: std::option::Option<std::string::String>,
/// <p>The Amazon Redshift feature to which the limit applies.</p>
pub feature_type: std::option::Option<crate::model::UsageLimitFeatureType>,
/// <p>The type of limit. Depending on the feature type, this can be based on a time duration or data size.</p>
pub limit_type: std::option::Option<crate::model::UsageLimitLimitType>,
/// <p>The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB).</p>
pub amount: i64,
/// <p>The time period that the amount applies to. A <code>weekly</code> period begins on Sunday. The default is <code>monthly</code>. </p>
pub period: std::option::Option<crate::model::UsageLimitPeriod>,
/// <p>The action that Amazon Redshift takes when the limit is reached. Possible values are: </p>
/// <ul>
/// <li>
/// <p>
/// <b>log</b> - To log an event in a system table. The default is log.</p>
/// </li>
/// <li>
/// <p>
/// <b>emit-metric</b> - To emit CloudWatch metrics.</p>
/// </li>
/// <li>
/// <p>
/// <b>disable</b> - To disable the feature until the next usage period begins.</p>
/// </li>
/// </ul>
pub breach_action: std::option::Option<crate::model::UsageLimitBreachAction>,
/// <p>A list of tag instances.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl std::fmt::Debug for ModifyUsageLimitOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyUsageLimitOutput");
formatter.field("usage_limit_id", &self.usage_limit_id);
formatter.field("cluster_identifier", &self.cluster_identifier);
formatter.field("feature_type", &self.feature_type);
formatter.field("limit_type", &self.limit_type);
formatter.field("amount", &self.amount);
formatter.field("period", &self.period);
formatter.field("breach_action", &self.breach_action);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
/// See [`ModifyUsageLimitOutput`](crate::output::ModifyUsageLimitOutput)
pub mod modify_usage_limit_output {
/// A builder for [`ModifyUsageLimitOutput`](crate::output::ModifyUsageLimitOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) usage_limit_id: std::option::Option<std::string::String>,
pub(crate) cluster_identifier: std::option::Option<std::string::String>,
pub(crate) feature_type: std::option::Option<crate::model::UsageLimitFeatureType>,
pub(crate) limit_type: std::option::Option<crate::model::UsageLimitLimitType>,
pub(crate) amount: std::option::Option<i64>,
pub(crate) period: std::option::Option<crate::model::UsageLimitPeriod>,
pub(crate) breach_action: std::option::Option<crate::model::UsageLimitBreachAction>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>The identifier of the usage limit.</p>
pub fn usage_limit_id(mut self, input: impl Into<std::string::String>) -> Self {
self.usage_limit_id = Some(input.into());
self
}
pub fn set_usage_limit_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.usage_limit_id = input;
self
}
/// <p>The identifier of the cluster with a usage limit.</p>
pub fn cluster_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_identifier = Some(input.into());
self
}
pub fn set_cluster_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_identifier = input;
self
}
/// <p>The Amazon Redshift feature to which the limit applies.</p>
pub fn feature_type(mut self, input: crate::model::UsageLimitFeatureType) -> Self {
self.feature_type = Some(input);
self
}
pub fn set_feature_type(
mut self,
input: std::option::Option<crate::model::UsageLimitFeatureType>,
) -> Self {
self.feature_type = input;
self
}
/// <p>The type of limit. Depending on the feature type, this can be based on a time duration or data size.</p>
pub fn limit_type(mut self, input: crate::model::UsageLimitLimitType) -> Self {
self.limit_type = Some(input);
self
}
pub fn set_limit_type(
mut self,
input: std::option::Option<crate::model::UsageLimitLimitType>,
) -> Self {
self.limit_type = input;
self
}
/// <p>The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB).</p>
pub fn amount(mut self, input: i64) -> Self {
self.amount = Some(input);
self
}
pub fn set_amount(mut self, input: std::option::Option<i64>) -> Self {
self.amount = input;
self
}
/// <p>The time period that the amount applies to. A <code>weekly</code> period begins on Sunday. The default is <code>monthly</code>. </p>
pub fn period(mut self, input: crate::model::UsageLimitPeriod) -> Self {
self.period = Some(input);
self
}
pub fn set_period(
mut self,
input: std::option::Option<crate::model::UsageLimitPeriod>,
) -> Self {
self.period = input;
self
}
/// <p>The action that Amazon Redshift takes when the limit is reached. Possible values are: </p>
/// <ul>
/// <li>
/// <p>
/// <b>log</b> - To log an event in a system table. The default is log.</p>
/// </li>
/// <li>
/// <p>
/// <b>emit-metric</b> - To emit CloudWatch metrics.</p>
/// </li>
/// <li>
/// <p>
/// <b>disable</b> - To disable the feature until the next usage period begins.</p>
/// </li>
/// </ul>
pub fn breach_action(mut self, input: crate::model::UsageLimitBreachAction) -> Self {
self.breach_action = Some(input);
self
}
pub fn set_breach_action(
mut self,
input: std::option::Option<crate::model::UsageLimitBreachAction>,
) -> Self {
self.breach_action = input;
self
}
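        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).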
pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`ModifyUsageLimitOutput`](crate::output::ModifyUsageLimitOutput)
pub fn build(self) -> crate::output::ModifyUsageLimitOutput {
crate::output::ModifyUsageLimitOutput {
usage_limit_id: self.usage_limit_id,
cluster_identifier: self.cluster_identifier,
feature_type: self.feature_type,
limit_type: self.limit_type,
amount: self.amount.unwrap_or_default(),
period: self.period,
breach_action: self.breach_action,
tags: self.tags,
}
}
}
}
impl ModifyUsageLimitOutput {
/// Creates a new builder-style object to manufacture [`ModifyUsageLimitOutput`](crate::output::ModifyUsageLimitOutput)
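    ///
    /// A minimal sketch with placeholder values; `feature_type` and `limit_type`
    /// take enums from `crate::model` and are left unset here:
    /// ```ignore
    /// let output = crate::output::ModifyUsageLimitOutput::builder()
    ///     .usage_limit_id("example-usage-limit-id")
    ///     .cluster_identifier("my-cluster")
    ///     .amount(10) // minutes or terabytes, depending on the limit type
    ///     .build();
    /// // `amount` falls back to 0 (`i64::default()`) when it is never set.
    /// ```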
pub fn builder() -> crate::output::modify_usage_limit_output::Builder {
crate::output::modify_usage_limit_output::Builder::default()
}
}
/// <p>Describes a snapshot schedule. You can set a regular interval for creating
/// snapshots of a cluster. You can also schedule snapshots for specific dates. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifySnapshotScheduleOutput {
/// <p>A list of ScheduleDefinitions.</p>
pub schedule_definitions: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>A unique identifier for the schedule.</p>
pub schedule_identifier: std::option::Option<std::string::String>,
/// <p>The description of the schedule.</p>
pub schedule_description: std::option::Option<std::string::String>,
/// <p>An optional set of tags describing the schedule.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>The times when the snapshot schedule is next invoked.</p>
pub next_invocations: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
/// <p>The number of clusters associated with the schedule.</p>
pub associated_cluster_count: std::option::Option<i32>,
/// <p>A list of clusters associated with the schedule. A maximum of 100 clusters is returned.</p>
pub associated_clusters:
std::option::Option<std::vec::Vec<crate::model::ClusterAssociatedToSchedule>>,
}
impl std::fmt::Debug for ModifySnapshotScheduleOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifySnapshotScheduleOutput");
formatter.field("schedule_definitions", &self.schedule_definitions);
formatter.field("schedule_identifier", &self.schedule_identifier);
formatter.field("schedule_description", &self.schedule_description);
formatter.field("tags", &self.tags);
formatter.field("next_invocations", &self.next_invocations);
formatter.field("associated_cluster_count", &self.associated_cluster_count);
formatter.field("associated_clusters", &self.associated_clusters);
formatter.finish()
}
}
/// See [`ModifySnapshotScheduleOutput`](crate::output::ModifySnapshotScheduleOutput)
pub mod modify_snapshot_schedule_output {
/// A builder for [`ModifySnapshotScheduleOutput`](crate::output::ModifySnapshotScheduleOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) schedule_definitions: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) schedule_identifier: std::option::Option<std::string::String>,
pub(crate) schedule_description: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
pub(crate) next_invocations: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
pub(crate) associated_cluster_count: std::option::Option<i32>,
pub(crate) associated_clusters:
std::option::Option<std::vec::Vec<crate::model::ClusterAssociatedToSchedule>>,
}
impl Builder {
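        /// Appends an item to `schedule_definitions`.
        ///
        /// To override the contents of this collection use [`set_schedule_definitions`](Self::set_schedule_definitions).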
pub fn schedule_definitions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.schedule_definitions.unwrap_or_default();
v.push(input.into());
self.schedule_definitions = Some(v);
self
}
pub fn set_schedule_definitions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.schedule_definitions = input;
self
}
/// <p>A unique identifier for the schedule.</p>
pub fn schedule_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.schedule_identifier = Some(input.into());
self
}
pub fn set_schedule_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.schedule_identifier = input;
self
}
/// <p>The description of the schedule.</p>
pub fn schedule_description(mut self, input: impl Into<std::string::String>) -> Self {
self.schedule_description = Some(input.into());
self
}
pub fn set_schedule_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.schedule_description = input;
self
}
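        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).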
pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
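        /// Appends an item to `next_invocations`.
        ///
        /// To override the contents of this collection use [`set_next_invocations`](Self::set_next_invocations).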
pub fn next_invocations(mut self, input: impl Into<smithy_types::Instant>) -> Self {
let mut v = self.next_invocations.unwrap_or_default();
v.push(input.into());
self.next_invocations = Some(v);
self
}
pub fn set_next_invocations(
mut self,
input: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
) -> Self {
self.next_invocations = input;
self
}
/// <p>The number of clusters associated with the schedule.</p>
pub fn associated_cluster_count(mut self, input: i32) -> Self {
self.associated_cluster_count = Some(input);
self
}
pub fn set_associated_cluster_count(mut self, input: std::option::Option<i32>) -> Self {
self.associated_cluster_count = input;
self
}
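        /// Appends an item to `associated_clusters`.
        ///
        /// To override the contents of this collection use [`set_associated_clusters`](Self::set_associated_clusters).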
pub fn associated_clusters(
mut self,
input: impl Into<crate::model::ClusterAssociatedToSchedule>,
) -> Self {
let mut v = self.associated_clusters.unwrap_or_default();
v.push(input.into());
self.associated_clusters = Some(v);
self
}
pub fn set_associated_clusters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ClusterAssociatedToSchedule>>,
) -> Self {
self.associated_clusters = input;
self
}
/// Consumes the builder and constructs a [`ModifySnapshotScheduleOutput`](crate::output::ModifySnapshotScheduleOutput)
pub fn build(self) -> crate::output::ModifySnapshotScheduleOutput {
crate::output::ModifySnapshotScheduleOutput {
schedule_definitions: self.schedule_definitions,
schedule_identifier: self.schedule_identifier,
schedule_description: self.schedule_description,
tags: self.tags,
next_invocations: self.next_invocations,
associated_cluster_count: self.associated_cluster_count,
associated_clusters: self.associated_clusters,
}
}
}
}
impl ModifySnapshotScheduleOutput {
/// Creates a new builder-style object to manufacture [`ModifySnapshotScheduleOutput`](crate::output::ModifySnapshotScheduleOutput)
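    ///
    /// A minimal sketch showing that vector-valued fields are built up by repeated
    /// appends; the schedule definitions are placeholders:
    /// ```ignore
    /// let output = crate::output::ModifySnapshotScheduleOutput::builder()
    ///     .schedule_identifier("my-snapshot-schedule")
    ///     .schedule_definitions("rate(12 hours)")
    ///     .schedule_definitions("rate(1 day)")
    ///     .build();
    /// assert_eq!(output.schedule_definitions.unwrap().len(), 2);
    /// ```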
pub fn builder() -> crate::output::modify_snapshot_schedule_output::Builder {
crate::output::modify_snapshot_schedule_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifySnapshotCopyRetentionPeriodOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for ModifySnapshotCopyRetentionPeriodOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifySnapshotCopyRetentionPeriodOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`ModifySnapshotCopyRetentionPeriodOutput`](crate::output::ModifySnapshotCopyRetentionPeriodOutput)
pub mod modify_snapshot_copy_retention_period_output {
/// A builder for [`ModifySnapshotCopyRetentionPeriodOutput`](crate::output::ModifySnapshotCopyRetentionPeriodOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`ModifySnapshotCopyRetentionPeriodOutput`](crate::output::ModifySnapshotCopyRetentionPeriodOutput)
pub fn build(self) -> crate::output::ModifySnapshotCopyRetentionPeriodOutput {
crate::output::ModifySnapshotCopyRetentionPeriodOutput {
cluster: self.cluster,
}
}
}
}
impl ModifySnapshotCopyRetentionPeriodOutput {
/// Creates a new builder-style object to manufacture [`ModifySnapshotCopyRetentionPeriodOutput`](crate::output::ModifySnapshotCopyRetentionPeriodOutput)
pub fn builder() -> crate::output::modify_snapshot_copy_retention_period_output::Builder {
crate::output::modify_snapshot_copy_retention_period_output::Builder::default()
}
}
/// <p>Describes a scheduled action. You can use a scheduled action to trigger some Amazon Redshift API operations on a schedule.
/// For information about which API operations can be scheduled, see <a>ScheduledActionType</a>. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyScheduledActionOutput {
/// <p>The name of the scheduled action. </p>
pub scheduled_action_name: std::option::Option<std::string::String>,
/// <p>A JSON format string of the Amazon Redshift API operation with input parameters. </p>
/// <p>"<code>{\"ResizeCluster\":{\"NodeType\":\"ds2.8xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}</code>". </p>
pub target_action: std::option::Option<crate::model::ScheduledActionType>,
/// <p>The schedule for a one-time (at format) or recurring (cron format) scheduled action.
/// Schedule invocations must be separated by at least one hour.</p>
/// <p>Format of at expressions is "<code>at(yyyy-mm-ddThh:mm:ss)</code>". For example, "<code>at(2016-03-04T17:27:00)</code>".</p>
/// <p>Format of cron expressions is "<code>cron(Minutes Hours Day-of-month Month Day-of-week Year)</code>".
/// For example, "<code>cron(0 10 ? * MON *)</code>". For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions">Cron Expressions</a>
/// in the <i>Amazon CloudWatch Events User Guide</i>.</p>
pub schedule: std::option::Option<std::string::String>,
/// <p>The IAM role to assume to run the scheduled action.
/// This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action.
/// This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf.
/// For more information about the IAM role to use with the Amazon Redshift scheduler, see
/// <a href="https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html">Using Identity-Based Policies for Amazon Redshift</a>
/// in the <i>Amazon Redshift Cluster Management Guide</i>.
/// </p>
pub iam_role: std::option::Option<std::string::String>,
/// <p>The description of the scheduled action. </p>
pub scheduled_action_description: std::option::Option<std::string::String>,
/// <p>The state of the scheduled action. For example, <code>DISABLED</code>. </p>
pub state: std::option::Option<crate::model::ScheduledActionState>,
/// <p>List of times when the scheduled action will run. </p>
pub next_invocations: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
/// <p>The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger. </p>
pub start_time: std::option::Option<smithy_types::Instant>,
/// <p>The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger. </p>
pub end_time: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for ModifyScheduledActionOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyScheduledActionOutput");
formatter.field("scheduled_action_name", &self.scheduled_action_name);
formatter.field("target_action", &self.target_action);
formatter.field("schedule", &self.schedule);
formatter.field("iam_role", &self.iam_role);
formatter.field(
"scheduled_action_description",
&self.scheduled_action_description,
);
formatter.field("state", &self.state);
formatter.field("next_invocations", &self.next_invocations);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.finish()
}
}
/// See [`ModifyScheduledActionOutput`](crate::output::ModifyScheduledActionOutput)
pub mod modify_scheduled_action_output {
/// A builder for [`ModifyScheduledActionOutput`](crate::output::ModifyScheduledActionOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) scheduled_action_name: std::option::Option<std::string::String>,
pub(crate) target_action: std::option::Option<crate::model::ScheduledActionType>,
pub(crate) schedule: std::option::Option<std::string::String>,
pub(crate) iam_role: std::option::Option<std::string::String>,
pub(crate) scheduled_action_description: std::option::Option<std::string::String>,
pub(crate) state: std::option::Option<crate::model::ScheduledActionState>,
pub(crate) next_invocations: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
pub(crate) start_time: std::option::Option<smithy_types::Instant>,
pub(crate) end_time: std::option::Option<smithy_types::Instant>,
}
impl Builder {
/// <p>The name of the scheduled action. </p>
pub fn scheduled_action_name(mut self, input: impl Into<std::string::String>) -> Self {
self.scheduled_action_name = Some(input.into());
self
}
pub fn set_scheduled_action_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.scheduled_action_name = input;
self
}
/// <p>A JSON format string of the Amazon Redshift API operation with input parameters. </p>
/// <p>"<code>{\"ResizeCluster\":{\"NodeType\":\"ds2.8xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}</code>". </p>
pub fn target_action(mut self, input: crate::model::ScheduledActionType) -> Self {
self.target_action = Some(input);
self
}
pub fn set_target_action(
mut self,
input: std::option::Option<crate::model::ScheduledActionType>,
) -> Self {
self.target_action = input;
self
}
/// <p>The schedule for a one-time (at format) or recurring (cron format) scheduled action.
/// Schedule invocations must be separated by at least one hour.</p>
/// <p>Format of at expressions is "<code>at(yyyy-mm-ddThh:mm:ss)</code>". For example, "<code>at(2016-03-04T17:27:00)</code>".</p>
/// <p>Format of cron expressions is "<code>cron(Minutes Hours Day-of-month Month Day-of-week Year)</code>".
/// For example, "<code>cron(0 10 ? * MON *)</code>". For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions">Cron Expressions</a>
/// in the <i>Amazon CloudWatch Events User Guide</i>.</p>
pub fn schedule(mut self, input: impl Into<std::string::String>) -> Self {
self.schedule = Some(input.into());
self
}
pub fn set_schedule(mut self, input: std::option::Option<std::string::String>) -> Self {
self.schedule = input;
self
}
/// <p>The IAM role to assume to run the scheduled action.
/// This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action.
/// This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf.
/// For more information about the IAM role to use with the Amazon Redshift scheduler, see
/// <a href="https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html">Using Identity-Based Policies for Amazon Redshift</a>
/// in the <i>Amazon Redshift Cluster Management Guide</i>.
/// </p>
pub fn iam_role(mut self, input: impl Into<std::string::String>) -> Self {
self.iam_role = Some(input.into());
self
}
pub fn set_iam_role(mut self, input: std::option::Option<std::string::String>) -> Self {
self.iam_role = input;
self
}
/// <p>The description of the scheduled action. </p>
pub fn scheduled_action_description(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.scheduled_action_description = Some(input.into());
self
}
pub fn set_scheduled_action_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.scheduled_action_description = input;
self
}
/// <p>The state of the scheduled action. For example, <code>DISABLED</code>. </p>
pub fn state(mut self, input: crate::model::ScheduledActionState) -> Self {
self.state = Some(input);
self
}
pub fn set_state(
mut self,
input: std::option::Option<crate::model::ScheduledActionState>,
) -> Self {
self.state = input;
self
}
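        /// Appends an item to `next_invocations`.
        ///
        /// To override the contents of this collection use [`set_next_invocations`](Self::set_next_invocations).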
pub fn next_invocations(mut self, input: impl Into<smithy_types::Instant>) -> Self {
let mut v = self.next_invocations.unwrap_or_default();
v.push(input.into());
self.next_invocations = Some(v);
self
}
pub fn set_next_invocations(
mut self,
input: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
) -> Self {
self.next_invocations = input;
self
}
/// <p>The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger. </p>
pub fn start_time(mut self, input: smithy_types::Instant) -> Self {
self.start_time = Some(input);
self
}
pub fn set_start_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.start_time = input;
self
}
/// <p>The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger. </p>
pub fn end_time(mut self, input: smithy_types::Instant) -> Self {
self.end_time = Some(input);
self
}
pub fn set_end_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.end_time = input;
self
}
/// Consumes the builder and constructs a [`ModifyScheduledActionOutput`](crate::output::ModifyScheduledActionOutput)
pub fn build(self) -> crate::output::ModifyScheduledActionOutput {
crate::output::ModifyScheduledActionOutput {
scheduled_action_name: self.scheduled_action_name,
target_action: self.target_action,
schedule: self.schedule,
iam_role: self.iam_role,
scheduled_action_description: self.scheduled_action_description,
state: self.state,
next_invocations: self.next_invocations,
start_time: self.start_time,
end_time: self.end_time,
}
}
}
}
impl ModifyScheduledActionOutput {
/// Creates a new builder-style object to manufacture [`ModifyScheduledActionOutput`](crate::output::ModifyScheduledActionOutput)
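    ///
    /// A minimal sketch using the documented one-time `at(...)` schedule format;
    /// all values are placeholders:
    /// ```ignore
    /// let output = crate::output::ModifyScheduledActionOutput::builder()
    ///     .scheduled_action_name("my-scheduled-action")
    ///     .schedule("at(2016-03-04T17:27:00)")
    ///     .build();
    /// ```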
pub fn builder() -> crate::output::modify_scheduled_action_output::Builder {
crate::output::modify_scheduled_action_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyEventSubscriptionOutput {
/// <p>Describes event subscriptions.</p>
pub event_subscription: std::option::Option<crate::model::EventSubscription>,
}
impl std::fmt::Debug for ModifyEventSubscriptionOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyEventSubscriptionOutput");
formatter.field("event_subscription", &self.event_subscription);
formatter.finish()
}
}
/// See [`ModifyEventSubscriptionOutput`](crate::output::ModifyEventSubscriptionOutput)
pub mod modify_event_subscription_output {
/// A builder for [`ModifyEventSubscriptionOutput`](crate::output::ModifyEventSubscriptionOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) event_subscription: std::option::Option<crate::model::EventSubscription>,
}
impl Builder {
/// <p>Describes event subscriptions.</p>
pub fn event_subscription(mut self, input: crate::model::EventSubscription) -> Self {
self.event_subscription = Some(input);
self
}
pub fn set_event_subscription(
mut self,
input: std::option::Option<crate::model::EventSubscription>,
) -> Self {
self.event_subscription = input;
self
}
/// Consumes the builder and constructs a [`ModifyEventSubscriptionOutput`](crate::output::ModifyEventSubscriptionOutput)
pub fn build(self) -> crate::output::ModifyEventSubscriptionOutput {
crate::output::ModifyEventSubscriptionOutput {
event_subscription: self.event_subscription,
}
}
}
}
impl ModifyEventSubscriptionOutput {
/// Creates a new builder-style object to manufacture [`ModifyEventSubscriptionOutput`](crate::output::ModifyEventSubscriptionOutput)
pub fn builder() -> crate::output::modify_event_subscription_output::Builder {
crate::output::modify_event_subscription_output::Builder::default()
}
}
/// <p>Describes a Redshift-managed VPC endpoint.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyEndpointAccessOutput {
/// <p>The cluster identifier of the cluster associated with the endpoint.</p>
pub cluster_identifier: std::option::Option<std::string::String>,
/// <p>The Amazon Web Services account ID of the owner of the cluster.</p>
pub resource_owner: std::option::Option<std::string::String>,
/// <p>The subnet group name where Amazon Redshift chooses to deploy the endpoint.</p>
pub subnet_group_name: std::option::Option<std::string::String>,
/// <p>The status of the endpoint.</p>
pub endpoint_status: std::option::Option<std::string::String>,
/// <p>The name of the endpoint.</p>
pub endpoint_name: std::option::Option<std::string::String>,
/// <p>The time (UTC) that the endpoint was created.</p>
pub endpoint_create_time: std::option::Option<smithy_types::Instant>,
/// <p>The port number on which the cluster accepts incoming connections.</p>
pub port: i32,
/// <p>The DNS address of the endpoint.</p>
pub address: std::option::Option<std::string::String>,
/// <p>The security groups associated with the endpoint.</p>
pub vpc_security_groups:
std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
/// <p>The connection endpoint for connecting to an Amazon Redshift cluster through the proxy.</p>
pub vpc_endpoint: std::option::Option<crate::model::VpcEndpoint>,
}
impl std::fmt::Debug for ModifyEndpointAccessOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyEndpointAccessOutput");
formatter.field("cluster_identifier", &self.cluster_identifier);
formatter.field("resource_owner", &self.resource_owner);
formatter.field("subnet_group_name", &self.subnet_group_name);
formatter.field("endpoint_status", &self.endpoint_status);
formatter.field("endpoint_name", &self.endpoint_name);
formatter.field("endpoint_create_time", &self.endpoint_create_time);
formatter.field("port", &self.port);
formatter.field("address", &self.address);
formatter.field("vpc_security_groups", &self.vpc_security_groups);
formatter.field("vpc_endpoint", &self.vpc_endpoint);
formatter.finish()
}
}
/// See [`ModifyEndpointAccessOutput`](crate::output::ModifyEndpointAccessOutput)
pub mod modify_endpoint_access_output {
/// A builder for [`ModifyEndpointAccessOutput`](crate::output::ModifyEndpointAccessOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_identifier: std::option::Option<std::string::String>,
pub(crate) resource_owner: std::option::Option<std::string::String>,
pub(crate) subnet_group_name: std::option::Option<std::string::String>,
pub(crate) endpoint_status: std::option::Option<std::string::String>,
pub(crate) endpoint_name: std::option::Option<std::string::String>,
pub(crate) endpoint_create_time: std::option::Option<smithy_types::Instant>,
pub(crate) port: std::option::Option<i32>,
pub(crate) address: std::option::Option<std::string::String>,
pub(crate) vpc_security_groups:
std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
pub(crate) vpc_endpoint: std::option::Option<crate::model::VpcEndpoint>,
}
impl Builder {
/// <p>The cluster identifier of the cluster associated with the endpoint.</p>
pub fn cluster_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_identifier = Some(input.into());
self
}
pub fn set_cluster_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_identifier = input;
self
}
/// <p>The Amazon Web Services account ID of the owner of the cluster.</p>
pub fn resource_owner(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_owner = Some(input.into());
self
}
pub fn set_resource_owner(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.resource_owner = input;
self
}
/// <p>The subnet group name where Amazon Redshift chooses to deploy the endpoint.</p>
pub fn subnet_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.subnet_group_name = Some(input.into());
self
}
pub fn set_subnet_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.subnet_group_name = input;
self
}
/// <p>The status of the endpoint.</p>
pub fn endpoint_status(mut self, input: impl Into<std::string::String>) -> Self {
self.endpoint_status = Some(input.into());
self
}
pub fn set_endpoint_status(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.endpoint_status = input;
self
}
/// <p>The name of the endpoint.</p>
pub fn endpoint_name(mut self, input: impl Into<std::string::String>) -> Self {
self.endpoint_name = Some(input.into());
self
}
pub fn set_endpoint_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.endpoint_name = input;
self
}
/// <p>The time (UTC) that the endpoint was created.</p>
pub fn endpoint_create_time(mut self, input: smithy_types::Instant) -> Self {
self.endpoint_create_time = Some(input);
self
}
pub fn set_endpoint_create_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.endpoint_create_time = input;
self
}
/// <p>The port number on which the cluster accepts incoming connections.</p>
pub fn port(mut self, input: i32) -> Self {
self.port = Some(input);
self
}
pub fn set_port(mut self, input: std::option::Option<i32>) -> Self {
self.port = input;
self
}
/// <p>The DNS address of the endpoint.</p>
pub fn address(mut self, input: impl Into<std::string::String>) -> Self {
self.address = Some(input.into());
self
}
pub fn set_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.address = input;
self
}
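        /// Appends an item to `vpc_security_groups`.
        ///
        /// To override the contents of this collection use [`set_vpc_security_groups`](Self::set_vpc_security_groups).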
pub fn vpc_security_groups(
mut self,
input: impl Into<crate::model::VpcSecurityGroupMembership>,
) -> Self {
let mut v = self.vpc_security_groups.unwrap_or_default();
v.push(input.into());
self.vpc_security_groups = Some(v);
self
}
pub fn set_vpc_security_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
) -> Self {
self.vpc_security_groups = input;
self
}
/// <p>The connection endpoint for connecting to an Amazon Redshift cluster through the proxy.</p>
pub fn vpc_endpoint(mut self, input: crate::model::VpcEndpoint) -> Self {
self.vpc_endpoint = Some(input);
self
}
pub fn set_vpc_endpoint(
mut self,
input: std::option::Option<crate::model::VpcEndpoint>,
) -> Self {
self.vpc_endpoint = input;
self
}
/// Consumes the builder and constructs a [`ModifyEndpointAccessOutput`](crate::output::ModifyEndpointAccessOutput)
pub fn build(self) -> crate::output::ModifyEndpointAccessOutput {
crate::output::ModifyEndpointAccessOutput {
cluster_identifier: self.cluster_identifier,
resource_owner: self.resource_owner,
subnet_group_name: self.subnet_group_name,
endpoint_status: self.endpoint_status,
endpoint_name: self.endpoint_name,
endpoint_create_time: self.endpoint_create_time,
port: self.port.unwrap_or_default(),
address: self.address,
vpc_security_groups: self.vpc_security_groups,
vpc_endpoint: self.vpc_endpoint,
}
}
}
}
impl ModifyEndpointAccessOutput {
/// Creates a new builder-style object to manufacture [`ModifyEndpointAccessOutput`](crate::output::ModifyEndpointAccessOutput)
pub fn builder() -> crate::output::modify_endpoint_access_output::Builder {
crate::output::modify_endpoint_access_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyClusterSubnetGroupOutput {
/// <p>Describes a subnet group.</p>
pub cluster_subnet_group: std::option::Option<crate::model::ClusterSubnetGroup>,
}
impl std::fmt::Debug for ModifyClusterSubnetGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyClusterSubnetGroupOutput");
formatter.field("cluster_subnet_group", &self.cluster_subnet_group);
formatter.finish()
}
}
/// See [`ModifyClusterSubnetGroupOutput`](crate::output::ModifyClusterSubnetGroupOutput)
pub mod modify_cluster_subnet_group_output {
/// A builder for [`ModifyClusterSubnetGroupOutput`](crate::output::ModifyClusterSubnetGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_subnet_group: std::option::Option<crate::model::ClusterSubnetGroup>,
}
impl Builder {
/// <p>Describes a subnet group.</p>
pub fn cluster_subnet_group(mut self, input: crate::model::ClusterSubnetGroup) -> Self {
self.cluster_subnet_group = Some(input);
self
}
pub fn set_cluster_subnet_group(
mut self,
input: std::option::Option<crate::model::ClusterSubnetGroup>,
) -> Self {
self.cluster_subnet_group = input;
self
}
/// Consumes the builder and constructs a [`ModifyClusterSubnetGroupOutput`](crate::output::ModifyClusterSubnetGroupOutput)
pub fn build(self) -> crate::output::ModifyClusterSubnetGroupOutput {
crate::output::ModifyClusterSubnetGroupOutput {
cluster_subnet_group: self.cluster_subnet_group,
}
}
}
}
impl ModifyClusterSubnetGroupOutput {
/// Creates a new builder-style object to manufacture [`ModifyClusterSubnetGroupOutput`](crate::output::ModifyClusterSubnetGroupOutput)
pub fn builder() -> crate::output::modify_cluster_subnet_group_output::Builder {
crate::output::modify_cluster_subnet_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyClusterSnapshotScheduleOutput {}
impl std::fmt::Debug for ModifyClusterSnapshotScheduleOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyClusterSnapshotScheduleOutput");
formatter.finish()
}
}
/// See [`ModifyClusterSnapshotScheduleOutput`](crate::output::ModifyClusterSnapshotScheduleOutput)
pub mod modify_cluster_snapshot_schedule_output {
/// A builder for [`ModifyClusterSnapshotScheduleOutput`](crate::output::ModifyClusterSnapshotScheduleOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`ModifyClusterSnapshotScheduleOutput`](crate::output::ModifyClusterSnapshotScheduleOutput)
pub fn build(self) -> crate::output::ModifyClusterSnapshotScheduleOutput {
crate::output::ModifyClusterSnapshotScheduleOutput {}
}
}
}
impl ModifyClusterSnapshotScheduleOutput {
/// Creates a new builder-style object to manufacture [`ModifyClusterSnapshotScheduleOutput`](crate::output::ModifyClusterSnapshotScheduleOutput)
pub fn builder() -> crate::output::modify_cluster_snapshot_schedule_output::Builder {
crate::output::modify_cluster_snapshot_schedule_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyClusterSnapshotOutput {
/// <p>Describes a snapshot.</p>
pub snapshot: std::option::Option<crate::model::Snapshot>,
}
impl std::fmt::Debug for ModifyClusterSnapshotOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyClusterSnapshotOutput");
formatter.field("snapshot", &self.snapshot);
formatter.finish()
}
}
/// See [`ModifyClusterSnapshotOutput`](crate::output::ModifyClusterSnapshotOutput)
pub mod modify_cluster_snapshot_output {
/// A builder for [`ModifyClusterSnapshotOutput`](crate::output::ModifyClusterSnapshotOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) snapshot: std::option::Option<crate::model::Snapshot>,
}
impl Builder {
/// <p>Describes a snapshot.</p>
pub fn snapshot(mut self, input: crate::model::Snapshot) -> Self {
self.snapshot = Some(input);
self
}
pub fn set_snapshot(mut self, input: std::option::Option<crate::model::Snapshot>) -> Self {
self.snapshot = input;
self
}
/// Consumes the builder and constructs a [`ModifyClusterSnapshotOutput`](crate::output::ModifyClusterSnapshotOutput)
pub fn build(self) -> crate::output::ModifyClusterSnapshotOutput {
crate::output::ModifyClusterSnapshotOutput {
snapshot: self.snapshot,
}
}
}
}
impl ModifyClusterSnapshotOutput {
/// Creates a new builder-style object to manufacture [`ModifyClusterSnapshotOutput`](crate::output::ModifyClusterSnapshotOutput)
pub fn builder() -> crate::output::modify_cluster_snapshot_output::Builder {
crate::output::modify_cluster_snapshot_output::Builder::default()
}
}
/// <p>The output from modifying a cluster parameter group, including the name of the parameter group and its status.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyClusterParameterGroupOutput {
/// <p>The name of the cluster parameter group.</p>
pub parameter_group_name: std::option::Option<std::string::String>,
/// <p>The status of the parameter group. For example, if you made a change to a parameter
/// group name-value pair, then the change could be pending a reboot of an associated
/// cluster.</p>
pub parameter_group_status: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ModifyClusterParameterGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyClusterParameterGroupOutput");
formatter.field("parameter_group_name", &self.parameter_group_name);
formatter.field("parameter_group_status", &self.parameter_group_status);
formatter.finish()
}
}
/// See [`ModifyClusterParameterGroupOutput`](crate::output::ModifyClusterParameterGroupOutput)
pub mod modify_cluster_parameter_group_output {
/// A builder for [`ModifyClusterParameterGroupOutput`](crate::output::ModifyClusterParameterGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) parameter_group_name: std::option::Option<std::string::String>,
pub(crate) parameter_group_status: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the cluster parameter group.</p>
pub fn parameter_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.parameter_group_name = Some(input.into());
self
}
pub fn set_parameter_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.parameter_group_name = input;
self
}
/// <p>The status of the parameter group. For example, if you made a change to a parameter
/// group name-value pair, then the change could be pending a reboot of an associated
/// cluster.</p>
pub fn parameter_group_status(mut self, input: impl Into<std::string::String>) -> Self {
self.parameter_group_status = Some(input.into());
self
}
pub fn set_parameter_group_status(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.parameter_group_status = input;
self
}
/// Consumes the builder and constructs a [`ModifyClusterParameterGroupOutput`](crate::output::ModifyClusterParameterGroupOutput)
pub fn build(self) -> crate::output::ModifyClusterParameterGroupOutput {
crate::output::ModifyClusterParameterGroupOutput {
parameter_group_name: self.parameter_group_name,
parameter_group_status: self.parameter_group_status,
}
}
}
}
impl ModifyClusterParameterGroupOutput {
/// Creates a new builder-style object to manufacture [`ModifyClusterParameterGroupOutput`](crate::output::ModifyClusterParameterGroupOutput)
pub fn builder() -> crate::output::modify_cluster_parameter_group_output::Builder {
crate::output::modify_cluster_parameter_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyClusterMaintenanceOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for ModifyClusterMaintenanceOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyClusterMaintenanceOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`ModifyClusterMaintenanceOutput`](crate::output::ModifyClusterMaintenanceOutput)
pub mod modify_cluster_maintenance_output {
/// A builder for [`ModifyClusterMaintenanceOutput`](crate::output::ModifyClusterMaintenanceOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`ModifyClusterMaintenanceOutput`](crate::output::ModifyClusterMaintenanceOutput)
pub fn build(self) -> crate::output::ModifyClusterMaintenanceOutput {
crate::output::ModifyClusterMaintenanceOutput {
cluster: self.cluster,
}
}
}
}
impl ModifyClusterMaintenanceOutput {
/// Creates a new builder-style object to manufacture [`ModifyClusterMaintenanceOutput`](crate::output::ModifyClusterMaintenanceOutput)
pub fn builder() -> crate::output::modify_cluster_maintenance_output::Builder {
crate::output::modify_cluster_maintenance_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyClusterIamRolesOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for ModifyClusterIamRolesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyClusterIamRolesOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`ModifyClusterIamRolesOutput`](crate::output::ModifyClusterIamRolesOutput)
pub mod modify_cluster_iam_roles_output {
/// A builder for [`ModifyClusterIamRolesOutput`](crate::output::ModifyClusterIamRolesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`ModifyClusterIamRolesOutput`](crate::output::ModifyClusterIamRolesOutput)
pub fn build(self) -> crate::output::ModifyClusterIamRolesOutput {
crate::output::ModifyClusterIamRolesOutput {
cluster: self.cluster,
}
}
}
}
impl ModifyClusterIamRolesOutput {
/// Creates a new builder-style object to manufacture [`ModifyClusterIamRolesOutput`](crate::output::ModifyClusterIamRolesOutput)
pub fn builder() -> crate::output::modify_cluster_iam_roles_output::Builder {
crate::output::modify_cluster_iam_roles_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyClusterDbRevisionOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for ModifyClusterDbRevisionOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyClusterDbRevisionOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`ModifyClusterDbRevisionOutput`](crate::output::ModifyClusterDbRevisionOutput)
pub mod modify_cluster_db_revision_output {
/// A builder for [`ModifyClusterDbRevisionOutput`](crate::output::ModifyClusterDbRevisionOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`ModifyClusterDbRevisionOutput`](crate::output::ModifyClusterDbRevisionOutput)
pub fn build(self) -> crate::output::ModifyClusterDbRevisionOutput {
crate::output::ModifyClusterDbRevisionOutput {
cluster: self.cluster,
}
}
}
}
impl ModifyClusterDbRevisionOutput {
/// Creates a new builder-style object to manufacture [`ModifyClusterDbRevisionOutput`](crate::output::ModifyClusterDbRevisionOutput)
pub fn builder() -> crate::output::modify_cluster_db_revision_output::Builder {
crate::output::modify_cluster_db_revision_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyClusterOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for ModifyClusterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyClusterOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`ModifyClusterOutput`](crate::output::ModifyClusterOutput)
pub mod modify_cluster_output {
/// A builder for [`ModifyClusterOutput`](crate::output::ModifyClusterOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`ModifyClusterOutput`](crate::output::ModifyClusterOutput)
pub fn build(self) -> crate::output::ModifyClusterOutput {
crate::output::ModifyClusterOutput {
cluster: self.cluster,
}
}
}
}
impl ModifyClusterOutput {
/// Creates a new builder-style object to manufacture [`ModifyClusterOutput`](crate::output::ModifyClusterOutput)
pub fn builder() -> crate::output::modify_cluster_output::Builder {
crate::output::modify_cluster_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyAuthenticationProfileOutput {
/// <p>The name of the authentication profile that was replaced.</p>
pub authentication_profile_name: std::option::Option<std::string::String>,
/// <p>The updated content of the authentication profile in JSON format.</p>
pub authentication_profile_content: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ModifyAuthenticationProfileOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyAuthenticationProfileOutput");
formatter.field(
"authentication_profile_name",
&self.authentication_profile_name,
);
formatter.field(
"authentication_profile_content",
&self.authentication_profile_content,
);
formatter.finish()
}
}
/// See [`ModifyAuthenticationProfileOutput`](crate::output::ModifyAuthenticationProfileOutput)
pub mod modify_authentication_profile_output {
/// A builder for [`ModifyAuthenticationProfileOutput`](crate::output::ModifyAuthenticationProfileOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) authentication_profile_name: std::option::Option<std::string::String>,
pub(crate) authentication_profile_content: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the authentication profile that was replaced.</p>
pub fn authentication_profile_name(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.authentication_profile_name = Some(input.into());
self
}
pub fn set_authentication_profile_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.authentication_profile_name = input;
self
}
/// <p>The updated content of the authentication profile in JSON format.</p>
pub fn authentication_profile_content(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.authentication_profile_content = Some(input.into());
self
}
pub fn set_authentication_profile_content(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.authentication_profile_content = input;
self
}
/// Consumes the builder and constructs a [`ModifyAuthenticationProfileOutput`](crate::output::ModifyAuthenticationProfileOutput)
pub fn build(self) -> crate::output::ModifyAuthenticationProfileOutput {
crate::output::ModifyAuthenticationProfileOutput {
authentication_profile_name: self.authentication_profile_name,
authentication_profile_content: self.authentication_profile_content,
}
}
}
}
impl ModifyAuthenticationProfileOutput {
/// Creates a new builder-style object to manufacture [`ModifyAuthenticationProfileOutput`](crate::output::ModifyAuthenticationProfileOutput)
pub fn builder() -> crate::output::modify_authentication_profile_output::Builder {
crate::output::modify_authentication_profile_output::Builder::default()
}
}
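
// A minimal, hand-written sketch (not part of the generated code) of the
// builder round-trip shared by every output type in this module: fluent
// setters accept `impl Into<String>`, and `build()` moves the accumulated
// `Option`s into the output struct. All field values here are hypothetical.
#[cfg(test)]
mod modify_authentication_profile_output_builder_sketch {
    #[test]
    fn builder_round_trip() {
        let out = crate::output::ModifyAuthenticationProfileOutput::builder()
            .authentication_profile_name("example-profile") // hypothetical name
            .authentication_profile_content(r#"{"AllowDBUserOverride":"1"}"#) // hypothetical JSON
            .build();
        assert_eq!(
            out.authentication_profile_name.as_deref(),
            Some("example-profile")
        );
        // Fields that were never set stay `None` rather than defaulting:
        let empty = crate::output::ModifyAuthenticationProfileOutput::builder().build();
        assert!(empty.authentication_profile_name.is_none());
    }
}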
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ModifyAquaConfigurationOutput {
/// <p>The updated AQUA configuration of the cluster. </p>
pub aqua_configuration: std::option::Option<crate::model::AquaConfiguration>,
}
impl std::fmt::Debug for ModifyAquaConfigurationOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ModifyAquaConfigurationOutput");
formatter.field("aqua_configuration", &self.aqua_configuration);
formatter.finish()
}
}
/// See [`ModifyAquaConfigurationOutput`](crate::output::ModifyAquaConfigurationOutput)
pub mod modify_aqua_configuration_output {
/// A builder for [`ModifyAquaConfigurationOutput`](crate::output::ModifyAquaConfigurationOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) aqua_configuration: std::option::Option<crate::model::AquaConfiguration>,
}
impl Builder {
/// <p>The updated AQUA configuration of the cluster. </p>
pub fn aqua_configuration(mut self, input: crate::model::AquaConfiguration) -> Self {
self.aqua_configuration = Some(input);
self
}
pub fn set_aqua_configuration(
mut self,
input: std::option::Option<crate::model::AquaConfiguration>,
) -> Self {
self.aqua_configuration = input;
self
}
/// Consumes the builder and constructs a [`ModifyAquaConfigurationOutput`](crate::output::ModifyAquaConfigurationOutput)
pub fn build(self) -> crate::output::ModifyAquaConfigurationOutput {
crate::output::ModifyAquaConfigurationOutput {
aqua_configuration: self.aqua_configuration,
}
}
}
}
impl ModifyAquaConfigurationOutput {
/// Creates a new builder-style object to manufacture [`ModifyAquaConfigurationOutput`](crate::output::ModifyAquaConfigurationOutput)
pub fn builder() -> crate::output::modify_aqua_configuration_output::Builder {
crate::output::modify_aqua_configuration_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetReservedNodeExchangeOfferingsOutput {
/// <p>An optional parameter that specifies the starting point for returning a set of
/// response records. When the results of a <code>GetReservedNodeExchangeOfferings</code>
/// request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the
/// marker field of the response. You can retrieve the next set of response records by
/// providing the returned marker value in the marker parameter and retrying the request.
/// </p>
pub marker: std::option::Option<std::string::String>,
/// <p>Returns an array of <a>ReservedNodeOffering</a> objects.</p>
pub reserved_node_offerings:
std::option::Option<std::vec::Vec<crate::model::ReservedNodeOffering>>,
}
impl std::fmt::Debug for GetReservedNodeExchangeOfferingsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetReservedNodeExchangeOfferingsOutput");
formatter.field("marker", &self.marker);
formatter.field("reserved_node_offerings", &self.reserved_node_offerings);
formatter.finish()
}
}
/// See [`GetReservedNodeExchangeOfferingsOutput`](crate::output::GetReservedNodeExchangeOfferingsOutput)
pub mod get_reserved_node_exchange_offerings_output {
/// A builder for [`GetReservedNodeExchangeOfferingsOutput`](crate::output::GetReservedNodeExchangeOfferingsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) reserved_node_offerings:
std::option::Option<std::vec::Vec<crate::model::ReservedNodeOffering>>,
}
impl Builder {
/// <p>An optional parameter that specifies the starting point for returning a set of
/// response records. When the results of a <code>GetReservedNodeExchangeOfferings</code>
/// request exceed the value specified in MaxRecords, Amazon Redshift returns a value in the
/// marker field of the response. You can retrieve the next set of response records by
/// providing the returned marker value in the marker parameter and retrying the request.
/// </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
        /// Appends an item to `reserved_node_offerings`; use `set_reserved_node_offerings` to replace the whole list.
        pub fn reserved_node_offerings(
mut self,
input: impl Into<crate::model::ReservedNodeOffering>,
) -> Self {
let mut v = self.reserved_node_offerings.unwrap_or_default();
v.push(input.into());
self.reserved_node_offerings = Some(v);
self
}
pub fn set_reserved_node_offerings(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ReservedNodeOffering>>,
) -> Self {
self.reserved_node_offerings = input;
self
}
/// Consumes the builder and constructs a [`GetReservedNodeExchangeOfferingsOutput`](crate::output::GetReservedNodeExchangeOfferingsOutput)
pub fn build(self) -> crate::output::GetReservedNodeExchangeOfferingsOutput {
crate::output::GetReservedNodeExchangeOfferingsOutput {
marker: self.marker,
reserved_node_offerings: self.reserved_node_offerings,
}
}
}
}
impl GetReservedNodeExchangeOfferingsOutput {
/// Creates a new builder-style object to manufacture [`GetReservedNodeExchangeOfferingsOutput`](crate::output::GetReservedNodeExchangeOfferingsOutput)
pub fn builder() -> crate::output::get_reserved_node_exchange_offerings_output::Builder {
crate::output::get_reserved_node_exchange_offerings_output::Builder::default()
}
}
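
// A minimal, hand-written sketch (not generated) of the marker-based
// pagination contract the `Describe*`/`Get*` outputs share: a `Some` marker
// means more records remain and is passed back as the next request's
// `Marker`; a `None` marker ends the loop. The two synthetic pages below
// stand in for real service responses.
#[cfg(test)]
mod reserved_node_exchange_offerings_pagination_sketch {
    #[test]
    fn a_none_marker_ends_the_page_loop() {
        let pages = vec![
            crate::output::GetReservedNodeExchangeOfferingsOutput::builder()
                .marker("page-2") // hypothetical continuation token
                .build(),
            crate::output::GetReservedNodeExchangeOfferingsOutput::builder().build(),
        ];
        let mut pages_seen = 0;
        for page in &pages {
            pages_seen += 1;
            if page.marker.is_none() {
                break; // all response records have been retrieved
            }
            // A real client would send `page.marker` as the next request's `Marker`.
        }
        assert_eq!(pages_seen, 2);
    }
}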
/// <p>Temporary credentials with authorization to log on to an Amazon Redshift database.
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetClusterCredentialsOutput {
/// <p>A database user name that is authorized to log on to the database
/// <code>DbName</code> using the password <code>DbPassword</code>. If the specified
    /// DbUser exists in the database, the new user name has the same database privileges as
    /// the user named in DbUser. By default, the user is added to PUBLIC. If the
    /// <code>DbGroups</code> parameter is specified, <code>DbUser</code> is added to the
/// listed groups for any sessions created using these credentials.</p>
pub db_user: std::option::Option<std::string::String>,
/// <p>A temporary password that authorizes the user name returned by <code>DbUser</code>
/// to log on to the database <code>DbName</code>. </p>
pub db_password: std::option::Option<std::string::String>,
/// <p>The date and time the password in <code>DbPassword</code> expires.</p>
pub expiration: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for GetClusterCredentialsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetClusterCredentialsOutput");
formatter.field("db_user", &self.db_user);
formatter.field("db_password", &"*** Sensitive Data Redacted ***");
formatter.field("expiration", &self.expiration);
formatter.finish()
}
}
/// See [`GetClusterCredentialsOutput`](crate::output::GetClusterCredentialsOutput)
pub mod get_cluster_credentials_output {
/// A builder for [`GetClusterCredentialsOutput`](crate::output::GetClusterCredentialsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) db_user: std::option::Option<std::string::String>,
pub(crate) db_password: std::option::Option<std::string::String>,
pub(crate) expiration: std::option::Option<smithy_types::Instant>,
}
impl Builder {
/// <p>A database user name that is authorized to log on to the database
/// <code>DbName</code> using the password <code>DbPassword</code>. If the specified
        /// DbUser exists in the database, the new user name has the same database privileges as
        /// the user named in DbUser. By default, the user is added to PUBLIC. If the
        /// <code>DbGroups</code> parameter is specified, <code>DbUser</code> is added to the
/// listed groups for any sessions created using these credentials.</p>
pub fn db_user(mut self, input: impl Into<std::string::String>) -> Self {
self.db_user = Some(input.into());
self
}
pub fn set_db_user(mut self, input: std::option::Option<std::string::String>) -> Self {
self.db_user = input;
self
}
/// <p>A temporary password that authorizes the user name returned by <code>DbUser</code>
/// to log on to the database <code>DbName</code>. </p>
pub fn db_password(mut self, input: impl Into<std::string::String>) -> Self {
self.db_password = Some(input.into());
self
}
pub fn set_db_password(mut self, input: std::option::Option<std::string::String>) -> Self {
self.db_password = input;
self
}
/// <p>The date and time the password in <code>DbPassword</code> expires.</p>
pub fn expiration(mut self, input: smithy_types::Instant) -> Self {
self.expiration = Some(input);
self
}
pub fn set_expiration(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.expiration = input;
self
}
/// Consumes the builder and constructs a [`GetClusterCredentialsOutput`](crate::output::GetClusterCredentialsOutput)
pub fn build(self) -> crate::output::GetClusterCredentialsOutput {
crate::output::GetClusterCredentialsOutput {
db_user: self.db_user,
db_password: self.db_password,
expiration: self.expiration,
}
}
}
}
impl GetClusterCredentialsOutput {
/// Creates a new builder-style object to manufacture [`GetClusterCredentialsOutput`](crate::output::GetClusterCredentialsOutput)
pub fn builder() -> crate::output::get_cluster_credentials_output::Builder {
crate::output::get_cluster_credentials_output::Builder::default()
}
}
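
// A minimal, hand-written sketch (not generated) showing why this struct has
// a manual `Debug` impl: `db_password` is redacted, so the temporary
// credentials can be logged without leaking the secret. Values are
// hypothetical.
#[cfg(test)]
mod get_cluster_credentials_debug_redaction_sketch {
    #[test]
    fn debug_never_prints_the_password() {
        let out = crate::output::GetClusterCredentialsOutput::builder()
            .db_user("IAM:analyst") // hypothetical
            .db_password("temporary-secret") // hypothetical
            .build();
        let rendered = format!("{:?}", out);
        assert!(rendered.contains("IAM:analyst"));
        assert!(!rendered.contains("temporary-secret"));
        assert!(rendered.contains("*** Sensitive Data Redacted ***"));
    }
}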
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EnableSnapshotCopyOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for EnableSnapshotCopyOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("EnableSnapshotCopyOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`EnableSnapshotCopyOutput`](crate::output::EnableSnapshotCopyOutput)
pub mod enable_snapshot_copy_output {
/// A builder for [`EnableSnapshotCopyOutput`](crate::output::EnableSnapshotCopyOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`EnableSnapshotCopyOutput`](crate::output::EnableSnapshotCopyOutput)
pub fn build(self) -> crate::output::EnableSnapshotCopyOutput {
crate::output::EnableSnapshotCopyOutput {
cluster: self.cluster,
}
}
}
}
impl EnableSnapshotCopyOutput {
/// Creates a new builder-style object to manufacture [`EnableSnapshotCopyOutput`](crate::output::EnableSnapshotCopyOutput)
pub fn builder() -> crate::output::enable_snapshot_copy_output::Builder {
crate::output::enable_snapshot_copy_output::Builder::default()
}
}
/// <p>Describes the status of logging for a cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EnableLoggingOutput {
/// <p>
/// <code>true</code> if logging is on, <code>false</code> if logging is off.</p>
pub logging_enabled: bool,
/// <p>The name of the S3 bucket where the log files are stored.</p>
pub bucket_name: std::option::Option<std::string::String>,
/// <p>The prefix applied to the log file names.</p>
pub s3_key_prefix: std::option::Option<std::string::String>,
/// <p>The last time that logs were delivered.</p>
pub last_successful_delivery_time: std::option::Option<smithy_types::Instant>,
/// <p>The last time when logs failed to be delivered.</p>
pub last_failure_time: std::option::Option<smithy_types::Instant>,
/// <p>The message indicating that logs failed to be delivered.</p>
pub last_failure_message: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for EnableLoggingOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("EnableLoggingOutput");
formatter.field("logging_enabled", &self.logging_enabled);
formatter.field("bucket_name", &self.bucket_name);
formatter.field("s3_key_prefix", &self.s3_key_prefix);
formatter.field(
"last_successful_delivery_time",
&self.last_successful_delivery_time,
);
formatter.field("last_failure_time", &self.last_failure_time);
formatter.field("last_failure_message", &self.last_failure_message);
formatter.finish()
}
}
/// See [`EnableLoggingOutput`](crate::output::EnableLoggingOutput)
pub mod enable_logging_output {
/// A builder for [`EnableLoggingOutput`](crate::output::EnableLoggingOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) logging_enabled: std::option::Option<bool>,
pub(crate) bucket_name: std::option::Option<std::string::String>,
pub(crate) s3_key_prefix: std::option::Option<std::string::String>,
pub(crate) last_successful_delivery_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_failure_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_failure_message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>
/// <code>true</code> if logging is on, <code>false</code> if logging is off.</p>
pub fn logging_enabled(mut self, input: bool) -> Self {
self.logging_enabled = Some(input);
self
}
pub fn set_logging_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.logging_enabled = input;
self
}
/// <p>The name of the S3 bucket where the log files are stored.</p>
pub fn bucket_name(mut self, input: impl Into<std::string::String>) -> Self {
self.bucket_name = Some(input.into());
self
}
pub fn set_bucket_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bucket_name = input;
self
}
/// <p>The prefix applied to the log file names.</p>
pub fn s3_key_prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.s3_key_prefix = Some(input.into());
self
}
pub fn set_s3_key_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.s3_key_prefix = input;
self
}
/// <p>The last time that logs were delivered.</p>
pub fn last_successful_delivery_time(mut self, input: smithy_types::Instant) -> Self {
self.last_successful_delivery_time = Some(input);
self
}
pub fn set_last_successful_delivery_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_successful_delivery_time = input;
self
}
/// <p>The last time when logs failed to be delivered.</p>
pub fn last_failure_time(mut self, input: smithy_types::Instant) -> Self {
self.last_failure_time = Some(input);
self
}
pub fn set_last_failure_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_failure_time = input;
self
}
/// <p>The message indicating that logs failed to be delivered.</p>
pub fn last_failure_message(mut self, input: impl Into<std::string::String>) -> Self {
self.last_failure_message = Some(input.into());
self
}
pub fn set_last_failure_message(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.last_failure_message = input;
self
}
/// Consumes the builder and constructs a [`EnableLoggingOutput`](crate::output::EnableLoggingOutput)
pub fn build(self) -> crate::output::EnableLoggingOutput {
crate::output::EnableLoggingOutput {
logging_enabled: self.logging_enabled.unwrap_or_default(),
bucket_name: self.bucket_name,
s3_key_prefix: self.s3_key_prefix,
last_successful_delivery_time: self.last_successful_delivery_time,
last_failure_time: self.last_failure_time,
last_failure_message: self.last_failure_message,
}
}
}
}
impl EnableLoggingOutput {
/// Creates a new builder-style object to manufacture [`EnableLoggingOutput`](crate::output::EnableLoggingOutput)
pub fn builder() -> crate::output::enable_logging_output::Builder {
crate::output::enable_logging_output::Builder::default()
}
}
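
// A minimal, hand-written sketch (not generated): `logging_enabled` is a
// plain `bool` on the output rather than an `Option`, so `build()` falls back
// to `bool::default()` (`false`) when the field was never set, while the
// `Option`-typed fields stay `None`.
#[cfg(test)]
mod enable_logging_output_default_sketch {
    #[test]
    fn unset_bool_field_defaults_to_false() {
        let out = crate::output::EnableLoggingOutput::builder().build();
        assert!(!out.logging_enabled);
        assert!(out.bucket_name.is_none());
    }
}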
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DisassociateDataShareConsumerOutput {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub data_share_arn: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub producer_arn: std::option::Option<std::string::String>,
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub allow_publicly_accessible_consumers: bool,
/// <p>A value that specifies when the datashare has an association between a producer and data consumers.</p>
pub data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl std::fmt::Debug for DisassociateDataShareConsumerOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DisassociateDataShareConsumerOutput");
formatter.field("data_share_arn", &self.data_share_arn);
formatter.field("producer_arn", &self.producer_arn);
formatter.field(
"allow_publicly_accessible_consumers",
&self.allow_publicly_accessible_consumers,
);
formatter.field("data_share_associations", &self.data_share_associations);
formatter.finish()
}
}
/// See [`DisassociateDataShareConsumerOutput`](crate::output::DisassociateDataShareConsumerOutput)
pub mod disassociate_data_share_consumer_output {
/// A builder for [`DisassociateDataShareConsumerOutput`](crate::output::DisassociateDataShareConsumerOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_share_arn: std::option::Option<std::string::String>,
pub(crate) producer_arn: std::option::Option<std::string::String>,
pub(crate) allow_publicly_accessible_consumers: std::option::Option<bool>,
pub(crate) data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl Builder {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub fn data_share_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.data_share_arn = Some(input.into());
self
}
pub fn set_data_share_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.data_share_arn = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub fn producer_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.producer_arn = Some(input.into());
self
}
pub fn set_producer_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.producer_arn = input;
self
}
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub fn allow_publicly_accessible_consumers(mut self, input: bool) -> Self {
self.allow_publicly_accessible_consumers = Some(input);
self
}
pub fn set_allow_publicly_accessible_consumers(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.allow_publicly_accessible_consumers = input;
self
}
        /// Appends an item to `data_share_associations`; use `set_data_share_associations` to replace the whole list.
        pub fn data_share_associations(
mut self,
input: impl Into<crate::model::DataShareAssociation>,
) -> Self {
let mut v = self.data_share_associations.unwrap_or_default();
v.push(input.into());
self.data_share_associations = Some(v);
self
}
pub fn set_data_share_associations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
) -> Self {
self.data_share_associations = input;
self
}
/// Consumes the builder and constructs a [`DisassociateDataShareConsumerOutput`](crate::output::DisassociateDataShareConsumerOutput)
pub fn build(self) -> crate::output::DisassociateDataShareConsumerOutput {
crate::output::DisassociateDataShareConsumerOutput {
data_share_arn: self.data_share_arn,
producer_arn: self.producer_arn,
allow_publicly_accessible_consumers: self
.allow_publicly_accessible_consumers
.unwrap_or_default(),
data_share_associations: self.data_share_associations,
}
}
}
}
impl DisassociateDataShareConsumerOutput {
/// Creates a new builder-style object to manufacture [`DisassociateDataShareConsumerOutput`](crate::output::DisassociateDataShareConsumerOutput)
pub fn builder() -> crate::output::disassociate_data_share_consumer_output::Builder {
crate::output::disassociate_data_share_consumer_output::Builder::default()
}
}
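
// A minimal, hand-written sketch (not generated) of the list-appending
// builder methods: each call to `data_share_associations` pushes one item,
// while `set_data_share_associations` replaces the whole list. This assumes
// `crate::model::DataShareAssociation` exposes the same generated
// `builder()`/`build()` pattern as the output types in this module.
#[cfg(test)]
mod disassociate_data_share_consumer_appender_sketch {
    #[test]
    fn appender_accumulates_and_setter_replaces() {
        let assoc = crate::model::DataShareAssociation::builder().build();
        let out = crate::output::DisassociateDataShareConsumerOutput::builder()
            .data_share_associations(assoc.clone())
            .data_share_associations(assoc)
            .build();
        assert_eq!(out.data_share_associations.map(|v| v.len()), Some(2));
        let cleared = crate::output::DisassociateDataShareConsumerOutput::builder()
            .set_data_share_associations(None)
            .build();
        assert!(cleared.data_share_associations.is_none());
    }
}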
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DisableSnapshotCopyOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for DisableSnapshotCopyOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DisableSnapshotCopyOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`DisableSnapshotCopyOutput`](crate::output::DisableSnapshotCopyOutput)
pub mod disable_snapshot_copy_output {
/// A builder for [`DisableSnapshotCopyOutput`](crate::output::DisableSnapshotCopyOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`DisableSnapshotCopyOutput`](crate::output::DisableSnapshotCopyOutput)
pub fn build(self) -> crate::output::DisableSnapshotCopyOutput {
crate::output::DisableSnapshotCopyOutput {
cluster: self.cluster,
}
}
}
}
impl DisableSnapshotCopyOutput {
/// Creates a new builder-style object to manufacture [`DisableSnapshotCopyOutput`](crate::output::DisableSnapshotCopyOutput)
pub fn builder() -> crate::output::disable_snapshot_copy_output::Builder {
crate::output::disable_snapshot_copy_output::Builder::default()
}
}
/// <p>Describes the status of logging for a cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DisableLoggingOutput {
/// <p>
/// <code>true</code> if logging is on, <code>false</code> if logging is off.</p>
pub logging_enabled: bool,
/// <p>The name of the S3 bucket where the log files are stored.</p>
pub bucket_name: std::option::Option<std::string::String>,
/// <p>The prefix applied to the log file names.</p>
pub s3_key_prefix: std::option::Option<std::string::String>,
/// <p>The last time that logs were delivered.</p>
pub last_successful_delivery_time: std::option::Option<smithy_types::Instant>,
/// <p>The last time when logs failed to be delivered.</p>
pub last_failure_time: std::option::Option<smithy_types::Instant>,
/// <p>The message indicating that logs failed to be delivered.</p>
pub last_failure_message: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DisableLoggingOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DisableLoggingOutput");
formatter.field("logging_enabled", &self.logging_enabled);
formatter.field("bucket_name", &self.bucket_name);
formatter.field("s3_key_prefix", &self.s3_key_prefix);
formatter.field(
"last_successful_delivery_time",
&self.last_successful_delivery_time,
);
formatter.field("last_failure_time", &self.last_failure_time);
formatter.field("last_failure_message", &self.last_failure_message);
formatter.finish()
}
}
/// See [`DisableLoggingOutput`](crate::output::DisableLoggingOutput)
pub mod disable_logging_output {
/// A builder for [`DisableLoggingOutput`](crate::output::DisableLoggingOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) logging_enabled: std::option::Option<bool>,
pub(crate) bucket_name: std::option::Option<std::string::String>,
pub(crate) s3_key_prefix: std::option::Option<std::string::String>,
pub(crate) last_successful_delivery_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_failure_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_failure_message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>
/// <code>true</code> if logging is on, <code>false</code> if logging is off.</p>
pub fn logging_enabled(mut self, input: bool) -> Self {
self.logging_enabled = Some(input);
self
}
pub fn set_logging_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.logging_enabled = input;
self
}
/// <p>The name of the S3 bucket where the log files are stored.</p>
pub fn bucket_name(mut self, input: impl Into<std::string::String>) -> Self {
self.bucket_name = Some(input.into());
self
}
pub fn set_bucket_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bucket_name = input;
self
}
/// <p>The prefix applied to the log file names.</p>
pub fn s3_key_prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.s3_key_prefix = Some(input.into());
self
}
pub fn set_s3_key_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.s3_key_prefix = input;
self
}
/// <p>The last time that logs were delivered.</p>
pub fn last_successful_delivery_time(mut self, input: smithy_types::Instant) -> Self {
self.last_successful_delivery_time = Some(input);
self
}
pub fn set_last_successful_delivery_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_successful_delivery_time = input;
self
}
/// <p>The last time when logs failed to be delivered.</p>
pub fn last_failure_time(mut self, input: smithy_types::Instant) -> Self {
self.last_failure_time = Some(input);
self
}
pub fn set_last_failure_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_failure_time = input;
self
}
/// <p>The message indicating that logs failed to be delivered.</p>
pub fn last_failure_message(mut self, input: impl Into<std::string::String>) -> Self {
self.last_failure_message = Some(input.into());
self
}
pub fn set_last_failure_message(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.last_failure_message = input;
self
}
/// Consumes the builder and constructs a [`DisableLoggingOutput`](crate::output::DisableLoggingOutput)
pub fn build(self) -> crate::output::DisableLoggingOutput {
crate::output::DisableLoggingOutput {
logging_enabled: self.logging_enabled.unwrap_or_default(),
bucket_name: self.bucket_name,
s3_key_prefix: self.s3_key_prefix,
last_successful_delivery_time: self.last_successful_delivery_time,
last_failure_time: self.last_failure_time,
last_failure_message: self.last_failure_message,
}
}
}
}
impl DisableLoggingOutput {
/// Creates a new builder-style object to manufacture [`DisableLoggingOutput`](crate::output::DisableLoggingOutput)
pub fn builder() -> crate::output::disable_logging_output::Builder {
crate::output::disable_logging_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeUsageLimitsOutput {
/// <p>Contains the output from the <a>DescribeUsageLimits</a>
/// action. </p>
pub usage_limits: std::option::Option<std::vec::Vec<crate::model::UsageLimit>>,
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeUsageLimitsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeUsageLimitsOutput");
formatter.field("usage_limits", &self.usage_limits);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeUsageLimitsOutput`](crate::output::DescribeUsageLimitsOutput)
pub mod describe_usage_limits_output {
/// A builder for [`DescribeUsageLimitsOutput`](crate::output::DescribeUsageLimitsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) usage_limits: std::option::Option<std::vec::Vec<crate::model::UsageLimit>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
        /// Appends an item to `usage_limits`; use `set_usage_limits` to replace the whole list.
        pub fn usage_limits(mut self, input: impl Into<crate::model::UsageLimit>) -> Self {
let mut v = self.usage_limits.unwrap_or_default();
v.push(input.into());
self.usage_limits = Some(v);
self
}
pub fn set_usage_limits(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::UsageLimit>>,
) -> Self {
self.usage_limits = input;
self
}
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeUsageLimitsOutput`](crate::output::DescribeUsageLimitsOutput)
pub fn build(self) -> crate::output::DescribeUsageLimitsOutput {
crate::output::DescribeUsageLimitsOutput {
usage_limits: self.usage_limits,
marker: self.marker,
}
}
}
}
impl DescribeUsageLimitsOutput {
/// Creates a new builder-style object to manufacture [`DescribeUsageLimitsOutput`](crate::output::DescribeUsageLimitsOutput)
pub fn builder() -> crate::output::describe_usage_limits_output::Builder {
crate::output::describe_usage_limits_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeTagsOutput {
/// <p>A list of tags with their associated resources.</p>
pub tagged_resources: std::option::Option<std::vec::Vec<crate::model::TaggedResource>>,
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeTagsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeTagsOutput");
formatter.field("tagged_resources", &self.tagged_resources);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeTagsOutput`](crate::output::DescribeTagsOutput)
pub mod describe_tags_output {
/// A builder for [`DescribeTagsOutput`](crate::output::DescribeTagsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) tagged_resources:
std::option::Option<std::vec::Vec<crate::model::TaggedResource>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
        /// Appends an item to `tagged_resources`; use `set_tagged_resources` to replace the whole list.
        pub fn tagged_resources(mut self, input: impl Into<crate::model::TaggedResource>) -> Self {
let mut v = self.tagged_resources.unwrap_or_default();
v.push(input.into());
self.tagged_resources = Some(v);
self
}
pub fn set_tagged_resources(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::TaggedResource>>,
) -> Self {
self.tagged_resources = input;
self
}
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeTagsOutput`](crate::output::DescribeTagsOutput)
pub fn build(self) -> crate::output::DescribeTagsOutput {
crate::output::DescribeTagsOutput {
tagged_resources: self.tagged_resources,
marker: self.marker,
}
}
}
}
impl DescribeTagsOutput {
/// Creates a new builder-style object to manufacture [`DescribeTagsOutput`](crate::output::DescribeTagsOutput)
pub fn builder() -> crate::output::describe_tags_output::Builder {
crate::output::describe_tags_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeTableRestoreStatusOutput {
/// <p>A list of status details for one or more table restore requests.</p>
pub table_restore_status_details:
std::option::Option<std::vec::Vec<crate::model::TableRestoreStatus>>,
/// <p>A pagination token that can be used in a subsequent <a>DescribeTableRestoreStatus</a> request.</p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeTableRestoreStatusOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeTableRestoreStatusOutput");
formatter.field(
"table_restore_status_details",
&self.table_restore_status_details,
);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeTableRestoreStatusOutput`](crate::output::DescribeTableRestoreStatusOutput)
pub mod describe_table_restore_status_output {
/// A builder for [`DescribeTableRestoreStatusOutput`](crate::output::DescribeTableRestoreStatusOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) table_restore_status_details:
std::option::Option<std::vec::Vec<crate::model::TableRestoreStatus>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
        /// Appends an item to `table_restore_status_details`; use `set_table_restore_status_details` to replace the whole list.
        pub fn table_restore_status_details(
mut self,
input: impl Into<crate::model::TableRestoreStatus>,
) -> Self {
let mut v = self.table_restore_status_details.unwrap_or_default();
v.push(input.into());
self.table_restore_status_details = Some(v);
self
}
pub fn set_table_restore_status_details(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::TableRestoreStatus>>,
) -> Self {
self.table_restore_status_details = input;
self
}
/// <p>A pagination token that can be used in a subsequent <a>DescribeTableRestoreStatus</a> request.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeTableRestoreStatusOutput`](crate::output::DescribeTableRestoreStatusOutput)
pub fn build(self) -> crate::output::DescribeTableRestoreStatusOutput {
crate::output::DescribeTableRestoreStatusOutput {
table_restore_status_details: self.table_restore_status_details,
marker: self.marker,
}
}
}
}
impl DescribeTableRestoreStatusOutput {
/// Creates a new builder-style object to manufacture [`DescribeTableRestoreStatusOutput`](crate::output::DescribeTableRestoreStatusOutput)
pub fn builder() -> crate::output::describe_table_restore_status_output::Builder {
crate::output::describe_table_restore_status_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeStorageOutput {
/// <p>The total amount of storage currently used for snapshots.</p>
pub total_backup_size_in_mega_bytes: f64,
/// <p>The total amount of storage currently provisioned.</p>
pub total_provisioned_storage_in_mega_bytes: f64,
}
impl std::fmt::Debug for DescribeStorageOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeStorageOutput");
formatter.field(
"total_backup_size_in_mega_bytes",
&self.total_backup_size_in_mega_bytes,
);
formatter.field(
"total_provisioned_storage_in_mega_bytes",
&self.total_provisioned_storage_in_mega_bytes,
);
formatter.finish()
}
}
/// See [`DescribeStorageOutput`](crate::output::DescribeStorageOutput)
pub mod describe_storage_output {
/// A builder for [`DescribeStorageOutput`](crate::output::DescribeStorageOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) total_backup_size_in_mega_bytes: std::option::Option<f64>,
pub(crate) total_provisioned_storage_in_mega_bytes: std::option::Option<f64>,
}
impl Builder {
/// <p>The total amount of storage currently used for snapshots.</p>
pub fn total_backup_size_in_mega_bytes(mut self, input: f64) -> Self {
self.total_backup_size_in_mega_bytes = Some(input);
self
}
pub fn set_total_backup_size_in_mega_bytes(
mut self,
input: std::option::Option<f64>,
) -> Self {
self.total_backup_size_in_mega_bytes = input;
self
}
/// <p>The total amount of storage currently provisioned.</p>
pub fn total_provisioned_storage_in_mega_bytes(mut self, input: f64) -> Self {
self.total_provisioned_storage_in_mega_bytes = Some(input);
self
}
pub fn set_total_provisioned_storage_in_mega_bytes(
mut self,
input: std::option::Option<f64>,
) -> Self {
self.total_provisioned_storage_in_mega_bytes = input;
self
}
/// Consumes the builder and constructs a [`DescribeStorageOutput`](crate::output::DescribeStorageOutput)
pub fn build(self) -> crate::output::DescribeStorageOutput {
crate::output::DescribeStorageOutput {
total_backup_size_in_mega_bytes: self
.total_backup_size_in_mega_bytes
.unwrap_or_default(),
total_provisioned_storage_in_mega_bytes: self
.total_provisioned_storage_in_mega_bytes
.unwrap_or_default(),
}
}
}
}
impl DescribeStorageOutput {
/// Creates a new builder-style object to manufacture [`DescribeStorageOutput`](crate::output::DescribeStorageOutput)
pub fn builder() -> crate::output::describe_storage_output::Builder {
crate::output::describe_storage_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeSnapshotSchedulesOutput {
/// <p>A list of SnapshotSchedules.</p>
pub snapshot_schedules: std::option::Option<std::vec::Vec<crate::model::SnapshotSchedule>>,
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>marker</code> parameter
/// and retrying the command. If the <code>marker</code> field is empty, all response
/// records have been retrieved for the request.</p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeSnapshotSchedulesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeSnapshotSchedulesOutput");
formatter.field("snapshot_schedules", &self.snapshot_schedules);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeSnapshotSchedulesOutput`](crate::output::DescribeSnapshotSchedulesOutput)
pub mod describe_snapshot_schedules_output {
/// A builder for [`DescribeSnapshotSchedulesOutput`](crate::output::DescribeSnapshotSchedulesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) snapshot_schedules:
std::option::Option<std::vec::Vec<crate::model::SnapshotSchedule>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
        /// Appends an item to `snapshot_schedules`; use `set_snapshot_schedules` to replace the whole list.
        pub fn snapshot_schedules(
mut self,
input: impl Into<crate::model::SnapshotSchedule>,
) -> Self {
let mut v = self.snapshot_schedules.unwrap_or_default();
v.push(input.into());
self.snapshot_schedules = Some(v);
self
}
pub fn set_snapshot_schedules(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::SnapshotSchedule>>,
) -> Self {
self.snapshot_schedules = input;
self
}
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>marker</code> parameter
/// and retrying the command. If the <code>marker</code> field is empty, all response
/// records have been retrieved for the request.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeSnapshotSchedulesOutput`](crate::output::DescribeSnapshotSchedulesOutput)
pub fn build(self) -> crate::output::DescribeSnapshotSchedulesOutput {
crate::output::DescribeSnapshotSchedulesOutput {
snapshot_schedules: self.snapshot_schedules,
marker: self.marker,
}
}
}
}
impl DescribeSnapshotSchedulesOutput {
/// Creates a new builder-style object to manufacture [`DescribeSnapshotSchedulesOutput`](crate::output::DescribeSnapshotSchedulesOutput)
pub fn builder() -> crate::output::describe_snapshot_schedules_output::Builder {
crate::output::describe_snapshot_schedules_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeSnapshotCopyGrantsOutput {
/// <p>An optional parameter that specifies the starting point to return a set of response
    /// records. When the results of a <code>DescribeSnapshotCopyGrants</code> request exceed the
/// value specified in <code>MaxRecords</code>, Amazon Web Services returns a value in the
/// <code>Marker</code> field of the response. You can retrieve the next set of response
/// records by providing the returned marker value in the <code>Marker</code> parameter and
/// retrying the request. </p>
/// <p>Constraints: You can specify either the <b>SnapshotCopyGrantName</b> parameter or the <b>Marker</b> parameter, but not both. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>The list of <code>SnapshotCopyGrant</code> objects.</p>
pub snapshot_copy_grants: std::option::Option<std::vec::Vec<crate::model::SnapshotCopyGrant>>,
}
impl std::fmt::Debug for DescribeSnapshotCopyGrantsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeSnapshotCopyGrantsOutput");
formatter.field("marker", &self.marker);
formatter.field("snapshot_copy_grants", &self.snapshot_copy_grants);
formatter.finish()
}
}
/// See [`DescribeSnapshotCopyGrantsOutput`](crate::output::DescribeSnapshotCopyGrantsOutput)
pub mod describe_snapshot_copy_grants_output {
/// A builder for [`DescribeSnapshotCopyGrantsOutput`](crate::output::DescribeSnapshotCopyGrantsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) snapshot_copy_grants:
std::option::Option<std::vec::Vec<crate::model::SnapshotCopyGrant>>,
}
impl Builder {
/// <p>An optional parameter that specifies the starting point to return a set of response
        /// records. When the results of a <code>DescribeSnapshotCopyGrants</code> request exceed the
/// value specified in <code>MaxRecords</code>, Amazon Web Services returns a value in the
/// <code>Marker</code> field of the response. You can retrieve the next set of response
/// records by providing the returned marker value in the <code>Marker</code> parameter and
/// retrying the request. </p>
/// <p>Constraints: You can specify either the <b>SnapshotCopyGrantName</b> parameter or the <b>Marker</b> parameter, but not both. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
        /// Appends an item to `snapshot_copy_grants`; use `set_snapshot_copy_grants` to replace the whole list.
        pub fn snapshot_copy_grants(
mut self,
input: impl Into<crate::model::SnapshotCopyGrant>,
) -> Self {
let mut v = self.snapshot_copy_grants.unwrap_or_default();
v.push(input.into());
self.snapshot_copy_grants = Some(v);
self
}
pub fn set_snapshot_copy_grants(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::SnapshotCopyGrant>>,
) -> Self {
self.snapshot_copy_grants = input;
self
}
/// Consumes the builder and constructs a [`DescribeSnapshotCopyGrantsOutput`](crate::output::DescribeSnapshotCopyGrantsOutput)
pub fn build(self) -> crate::output::DescribeSnapshotCopyGrantsOutput {
crate::output::DescribeSnapshotCopyGrantsOutput {
marker: self.marker,
snapshot_copy_grants: self.snapshot_copy_grants,
}
}
}
}
impl DescribeSnapshotCopyGrantsOutput {
/// Creates a new builder-style object to manufacture [`DescribeSnapshotCopyGrantsOutput`](crate::output::DescribeSnapshotCopyGrantsOutput)
pub fn builder() -> crate::output::describe_snapshot_copy_grants_output::Builder {
crate::output::describe_snapshot_copy_grants_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeScheduledActionsOutput {
/// <p>An optional parameter that specifies the starting point to return a set of response
/// records. When the results of a <a>DescribeScheduledActions</a> request
/// exceed the value specified in <code>MaxRecords</code>, Amazon Web Services returns a value in the
/// <code>Marker</code> field of the response. You can retrieve the next set of response
/// records by providing the returned marker value in the <code>Marker</code> parameter and
/// retrying the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>List of retrieved scheduled actions. </p>
pub scheduled_actions: std::option::Option<std::vec::Vec<crate::model::ScheduledAction>>,
}
impl std::fmt::Debug for DescribeScheduledActionsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeScheduledActionsOutput");
formatter.field("marker", &self.marker);
formatter.field("scheduled_actions", &self.scheduled_actions);
formatter.finish()
}
}
/// See [`DescribeScheduledActionsOutput`](crate::output::DescribeScheduledActionsOutput)
pub mod describe_scheduled_actions_output {
/// A builder for [`DescribeScheduledActionsOutput`](crate::output::DescribeScheduledActionsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) scheduled_actions:
std::option::Option<std::vec::Vec<crate::model::ScheduledAction>>,
}
impl Builder {
/// <p>An optional parameter that specifies the starting point to return a set of response
/// records. When the results of a <a>DescribeScheduledActions</a> request
/// exceed the value specified in <code>MaxRecords</code>, Amazon Web Services returns a value in the
/// <code>Marker</code> field of the response. You can retrieve the next set of response
/// records by providing the returned marker value in the <code>Marker</code> parameter and
/// retrying the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
        /// Appends an item to `scheduled_actions`; use `set_scheduled_actions` to replace the whole list.
        pub fn scheduled_actions(
mut self,
input: impl Into<crate::model::ScheduledAction>,
) -> Self {
let mut v = self.scheduled_actions.unwrap_or_default();
v.push(input.into());
self.scheduled_actions = Some(v);
self
}
pub fn set_scheduled_actions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ScheduledAction>>,
) -> Self {
self.scheduled_actions = input;
self
}
/// Consumes the builder and constructs a [`DescribeScheduledActionsOutput`](crate::output::DescribeScheduledActionsOutput)
pub fn build(self) -> crate::output::DescribeScheduledActionsOutput {
crate::output::DescribeScheduledActionsOutput {
marker: self.marker,
scheduled_actions: self.scheduled_actions,
}
}
}
}
impl DescribeScheduledActionsOutput {
/// Creates a new builder-style object to manufacture [`DescribeScheduledActionsOutput`](crate::output::DescribeScheduledActionsOutput)
pub fn builder() -> crate::output::describe_scheduled_actions_output::Builder {
crate::output::describe_scheduled_actions_output::Builder::default()
}
}
/// <p>Describes the result of a cluster resize operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeResizeOutput {
/// <p>The node type that the cluster will have after the resize operation is
/// complete.</p>
pub target_node_type: std::option::Option<std::string::String>,
/// <p>The number of nodes that the cluster will have after the resize operation is
/// complete.</p>
pub target_number_of_nodes: std::option::Option<i32>,
/// <p>The cluster type after the resize operation is complete.</p>
/// <p>Valid Values: <code>multi-node</code> | <code>single-node</code>
/// </p>
pub target_cluster_type: std::option::Option<std::string::String>,
/// <p>The status of the resize operation.</p>
/// <p>Valid Values: <code>NONE</code> | <code>IN_PROGRESS</code> | <code>FAILED</code> |
/// <code>SUCCEEDED</code> | <code>CANCELLING</code>
/// </p>
pub status: std::option::Option<std::string::String>,
    /// <p>The names of tables that have been completely imported.</p>
/// <p>Valid Values: List of table names.</p>
pub import_tables_completed: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The names of tables that are being currently imported.</p>
/// <p>Valid Values: List of table names.</p>
pub import_tables_in_progress: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The names of tables that have not yet been imported.</p>
    /// <p>Valid Values: List of table names.</p>
pub import_tables_not_started: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The average rate of the resize operation over the last few minutes, measured in
/// megabytes per second. After the resize operation completes, this value shows the average
/// rate of the entire resize operation.</p>
pub avg_resize_rate_in_mega_bytes_per_second: std::option::Option<f64>,
/// <p>The estimated total amount of data, in megabytes, on the cluster before the resize
/// operation began.</p>
pub total_resize_data_in_mega_bytes: std::option::Option<i64>,
/// <p>While the resize operation is in progress, this value shows the current amount of
/// data, in megabytes, that has been processed so far. When the resize operation is
/// complete, this value shows the total amount of data, in megabytes, on the cluster, which
/// may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data
/// before resize).</p>
pub progress_in_mega_bytes: std::option::Option<i64>,
/// <p>The amount of seconds that have elapsed since the resize operation began. After the
/// resize operation completes, this value shows the total actual time, in seconds, for the
/// resize operation.</p>
pub elapsed_time_in_seconds: std::option::Option<i64>,
/// <p>The estimated time remaining, in seconds, until the resize operation is complete.
/// This value is calculated based on the average resize rate and the estimated amount of
/// data remaining to be processed. Once the resize operation is complete, this value will
/// be 0.</p>
pub estimated_time_to_completion_in_seconds: std::option::Option<i64>,
/// <p>An enum with possible values of <code>ClassicResize</code> and
/// <code>ElasticResize</code>. These values describe the type of resize operation being
/// performed. </p>
pub resize_type: std::option::Option<std::string::String>,
/// <p>An optional string to provide additional details about the resize action.</p>
pub message: std::option::Option<std::string::String>,
/// <p>The type of encryption for the cluster after the resize is complete.</p>
/// <p>Possible values are <code>KMS</code> and <code>None</code>. </p>
pub target_encryption_type: std::option::Option<std::string::String>,
/// <p>The percent of data transferred from source cluster to target cluster.</p>
pub data_transfer_progress_percent: std::option::Option<f64>,
}
impl std::fmt::Debug for DescribeResizeOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeResizeOutput");
formatter.field("target_node_type", &self.target_node_type);
formatter.field("target_number_of_nodes", &self.target_number_of_nodes);
formatter.field("target_cluster_type", &self.target_cluster_type);
formatter.field("status", &self.status);
formatter.field("import_tables_completed", &self.import_tables_completed);
formatter.field("import_tables_in_progress", &self.import_tables_in_progress);
formatter.field("import_tables_not_started", &self.import_tables_not_started);
formatter.field(
"avg_resize_rate_in_mega_bytes_per_second",
&self.avg_resize_rate_in_mega_bytes_per_second,
);
formatter.field(
"total_resize_data_in_mega_bytes",
&self.total_resize_data_in_mega_bytes,
);
formatter.field("progress_in_mega_bytes", &self.progress_in_mega_bytes);
formatter.field("elapsed_time_in_seconds", &self.elapsed_time_in_seconds);
formatter.field(
"estimated_time_to_completion_in_seconds",
&self.estimated_time_to_completion_in_seconds,
);
formatter.field("resize_type", &self.resize_type);
formatter.field("message", &self.message);
formatter.field("target_encryption_type", &self.target_encryption_type);
formatter.field(
"data_transfer_progress_percent",
&self.data_transfer_progress_percent,
);
formatter.finish()
}
}
/// See [`DescribeResizeOutput`](crate::output::DescribeResizeOutput)
pub mod describe_resize_output {
/// A builder for [`DescribeResizeOutput`](crate::output::DescribeResizeOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) target_node_type: std::option::Option<std::string::String>,
pub(crate) target_number_of_nodes: std::option::Option<i32>,
pub(crate) target_cluster_type: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<std::string::String>,
pub(crate) import_tables_completed: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) import_tables_in_progress:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) import_tables_not_started:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) avg_resize_rate_in_mega_bytes_per_second: std::option::Option<f64>,
pub(crate) total_resize_data_in_mega_bytes: std::option::Option<i64>,
pub(crate) progress_in_mega_bytes: std::option::Option<i64>,
pub(crate) elapsed_time_in_seconds: std::option::Option<i64>,
pub(crate) estimated_time_to_completion_in_seconds: std::option::Option<i64>,
pub(crate) resize_type: std::option::Option<std::string::String>,
pub(crate) message: std::option::Option<std::string::String>,
pub(crate) target_encryption_type: std::option::Option<std::string::String>,
pub(crate) data_transfer_progress_percent: std::option::Option<f64>,
}
impl Builder {
/// <p>The node type that the cluster will have after the resize operation is
/// complete.</p>
pub fn target_node_type(mut self, input: impl Into<std::string::String>) -> Self {
self.target_node_type = Some(input.into());
self
}
pub fn set_target_node_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.target_node_type = input;
self
}
/// <p>The number of nodes that the cluster will have after the resize operation is
/// complete.</p>
pub fn target_number_of_nodes(mut self, input: i32) -> Self {
self.target_number_of_nodes = Some(input);
self
}
pub fn set_target_number_of_nodes(mut self, input: std::option::Option<i32>) -> Self {
self.target_number_of_nodes = input;
self
}
/// <p>The cluster type after the resize operation is complete.</p>
/// <p>Valid Values: <code>multi-node</code> | <code>single-node</code>
/// </p>
pub fn target_cluster_type(mut self, input: impl Into<std::string::String>) -> Self {
self.target_cluster_type = Some(input.into());
self
}
pub fn set_target_cluster_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.target_cluster_type = input;
self
}
/// <p>The status of the resize operation.</p>
/// <p>Valid Values: <code>NONE</code> | <code>IN_PROGRESS</code> | <code>FAILED</code> |
/// <code>SUCCEEDED</code> | <code>CANCELLING</code>
/// </p>
pub fn status(mut self, input: impl Into<std::string::String>) -> Self {
self.status = Some(input.into());
self
}
pub fn set_status(mut self, input: std::option::Option<std::string::String>) -> Self {
self.status = input;
self
}
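        /// Appends an item to `import_tables_completed`.
        /// To replace the whole collection, use `set_import_tables_completed`.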
pub fn import_tables_completed(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.import_tables_completed.unwrap_or_default();
v.push(input.into());
self.import_tables_completed = Some(v);
self
}
pub fn set_import_tables_completed(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.import_tables_completed = input;
self
}
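        /// Appends an item to `import_tables_in_progress`.
        /// To replace the whole collection, use `set_import_tables_in_progress`.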
pub fn import_tables_in_progress(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.import_tables_in_progress.unwrap_or_default();
v.push(input.into());
self.import_tables_in_progress = Some(v);
self
}
pub fn set_import_tables_in_progress(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.import_tables_in_progress = input;
self
}
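        /// Appends an item to `import_tables_not_started`.
        /// To replace the whole collection, use `set_import_tables_not_started`.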
pub fn import_tables_not_started(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.import_tables_not_started.unwrap_or_default();
v.push(input.into());
self.import_tables_not_started = Some(v);
self
}
pub fn set_import_tables_not_started(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.import_tables_not_started = input;
self
}
/// <p>The average rate of the resize operation over the last few minutes, measured in
/// megabytes per second. After the resize operation completes, this value shows the average
/// rate of the entire resize operation.</p>
pub fn avg_resize_rate_in_mega_bytes_per_second(mut self, input: f64) -> Self {
self.avg_resize_rate_in_mega_bytes_per_second = Some(input);
self
}
pub fn set_avg_resize_rate_in_mega_bytes_per_second(
mut self,
input: std::option::Option<f64>,
) -> Self {
self.avg_resize_rate_in_mega_bytes_per_second = input;
self
}
/// <p>The estimated total amount of data, in megabytes, on the cluster before the resize
/// operation began.</p>
pub fn total_resize_data_in_mega_bytes(mut self, input: i64) -> Self {
self.total_resize_data_in_mega_bytes = Some(input);
self
}
pub fn set_total_resize_data_in_mega_bytes(
mut self,
input: std::option::Option<i64>,
) -> Self {
self.total_resize_data_in_mega_bytes = input;
self
}
        /// <p>While the resize operation is in progress, this value shows the current amount of
        /// data, in megabytes, that has been processed so far. When the resize operation is
        /// complete, this value shows the total amount of data, in megabytes, on the cluster, which
        /// may be more or less than <code>TotalResizeDataInMegaBytes</code> (the estimated total
        /// amount of data before resize).</p>
pub fn progress_in_mega_bytes(mut self, input: i64) -> Self {
self.progress_in_mega_bytes = Some(input);
self
}
pub fn set_progress_in_mega_bytes(mut self, input: std::option::Option<i64>) -> Self {
self.progress_in_mega_bytes = input;
self
}
        /// <p>The number of seconds that have elapsed since the resize operation began. After the
        /// resize operation completes, this value shows the total actual time, in seconds, for the
        /// resize operation.</p>
pub fn elapsed_time_in_seconds(mut self, input: i64) -> Self {
self.elapsed_time_in_seconds = Some(input);
self
}
pub fn set_elapsed_time_in_seconds(mut self, input: std::option::Option<i64>) -> Self {
self.elapsed_time_in_seconds = input;
self
}
/// <p>The estimated time remaining, in seconds, until the resize operation is complete.
/// This value is calculated based on the average resize rate and the estimated amount of
/// data remaining to be processed. Once the resize operation is complete, this value will
/// be 0.</p>
pub fn estimated_time_to_completion_in_seconds(mut self, input: i64) -> Self {
self.estimated_time_to_completion_in_seconds = Some(input);
self
}
pub fn set_estimated_time_to_completion_in_seconds(
mut self,
input: std::option::Option<i64>,
) -> Self {
self.estimated_time_to_completion_in_seconds = input;
self
}
/// <p>An enum with possible values of <code>ClassicResize</code> and
/// <code>ElasticResize</code>. These values describe the type of resize operation being
/// performed. </p>
pub fn resize_type(mut self, input: impl Into<std::string::String>) -> Self {
self.resize_type = Some(input.into());
self
}
pub fn set_resize_type(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resize_type = input;
self
}
/// <p>An optional string to provide additional details about the resize action.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// <p>The type of encryption for the cluster after the resize is complete.</p>
/// <p>Possible values are <code>KMS</code> and <code>None</code>. </p>
pub fn target_encryption_type(mut self, input: impl Into<std::string::String>) -> Self {
self.target_encryption_type = Some(input.into());
self
}
pub fn set_target_encryption_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.target_encryption_type = input;
self
}
/// <p>The percent of data transferred from source cluster to target cluster.</p>
pub fn data_transfer_progress_percent(mut self, input: f64) -> Self {
self.data_transfer_progress_percent = Some(input);
self
}
pub fn set_data_transfer_progress_percent(
mut self,
input: std::option::Option<f64>,
) -> Self {
self.data_transfer_progress_percent = input;
self
}
/// Consumes the builder and constructs a [`DescribeResizeOutput`](crate::output::DescribeResizeOutput)
pub fn build(self) -> crate::output::DescribeResizeOutput {
crate::output::DescribeResizeOutput {
target_node_type: self.target_node_type,
target_number_of_nodes: self.target_number_of_nodes,
target_cluster_type: self.target_cluster_type,
status: self.status,
import_tables_completed: self.import_tables_completed,
import_tables_in_progress: self.import_tables_in_progress,
import_tables_not_started: self.import_tables_not_started,
avg_resize_rate_in_mega_bytes_per_second: self
.avg_resize_rate_in_mega_bytes_per_second,
total_resize_data_in_mega_bytes: self.total_resize_data_in_mega_bytes,
progress_in_mega_bytes: self.progress_in_mega_bytes,
elapsed_time_in_seconds: self.elapsed_time_in_seconds,
estimated_time_to_completion_in_seconds: self
.estimated_time_to_completion_in_seconds,
resize_type: self.resize_type,
message: self.message,
target_encryption_type: self.target_encryption_type,
data_transfer_progress_percent: self.data_transfer_progress_percent,
}
}
}
}
impl DescribeResizeOutput {
/// Creates a new builder-style object to manufacture [`DescribeResizeOutput`](crate::output::DescribeResizeOutput)
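    ///
    /// A minimal usage sketch (assuming this crate is published as
    /// `aws_sdk_redshift`; the field values below are hypothetical):
    ///
    /// ```no_run
    /// use aws_sdk_redshift::output::DescribeResizeOutput;
    ///
    /// let output = DescribeResizeOutput::builder()
    ///     .status("IN_PROGRESS")
    ///     .progress_in_mega_bytes(512)
    ///     .build();
    /// assert_eq!(output.status.as_deref(), Some("IN_PROGRESS"));
    /// ```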
pub fn builder() -> crate::output::describe_resize_output::Builder {
crate::output::describe_resize_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeReservedNodes</a> action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeReservedNodesOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>The list of <code>ReservedNode</code> objects.</p>
pub reserved_nodes: std::option::Option<std::vec::Vec<crate::model::ReservedNode>>,
}
impl std::fmt::Debug for DescribeReservedNodesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeReservedNodesOutput");
formatter.field("marker", &self.marker);
formatter.field("reserved_nodes", &self.reserved_nodes);
formatter.finish()
}
}
/// See [`DescribeReservedNodesOutput`](crate::output::DescribeReservedNodesOutput)
pub mod describe_reserved_nodes_output {
/// A builder for [`DescribeReservedNodesOutput`](crate::output::DescribeReservedNodesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) reserved_nodes: std::option::Option<std::vec::Vec<crate::model::ReservedNode>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
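        /// Appends an item to `reserved_nodes`.
        /// To replace the whole collection, use `set_reserved_nodes`.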
pub fn reserved_nodes(mut self, input: impl Into<crate::model::ReservedNode>) -> Self {
let mut v = self.reserved_nodes.unwrap_or_default();
v.push(input.into());
self.reserved_nodes = Some(v);
self
}
pub fn set_reserved_nodes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ReservedNode>>,
) -> Self {
self.reserved_nodes = input;
self
}
/// Consumes the builder and constructs a [`DescribeReservedNodesOutput`](crate::output::DescribeReservedNodesOutput)
pub fn build(self) -> crate::output::DescribeReservedNodesOutput {
crate::output::DescribeReservedNodesOutput {
marker: self.marker,
reserved_nodes: self.reserved_nodes,
}
}
}
}
impl DescribeReservedNodesOutput {
/// Creates a new builder-style object to manufacture [`DescribeReservedNodesOutput`](crate::output::DescribeReservedNodesOutput)
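    ///
    /// A sketch of consuming one page of results (the crate path
    /// `aws_sdk_redshift` is an assumption):
    ///
    /// ```no_run
    /// # fn handle(output: aws_sdk_redshift::output::DescribeReservedNodesOutput) {
    /// for node in output.reserved_nodes.unwrap_or_default() {
    ///     // each `node` is a `ReservedNode` model value
    ///     let _ = node;
    /// }
    /// if output.marker.is_some() {
    ///     // a returned marker means more records remain; resend the
    ///     // request with this marker to fetch the next page
    /// }
    /// # }
    /// ```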
pub fn builder() -> crate::output::describe_reserved_nodes_output::Builder {
crate::output::describe_reserved_nodes_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeReservedNodeOfferings</a> action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeReservedNodeOfferingsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <code>ReservedNodeOffering</code> objects.</p>
pub reserved_node_offerings:
std::option::Option<std::vec::Vec<crate::model::ReservedNodeOffering>>,
}
impl std::fmt::Debug for DescribeReservedNodeOfferingsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeReservedNodeOfferingsOutput");
formatter.field("marker", &self.marker);
formatter.field("reserved_node_offerings", &self.reserved_node_offerings);
formatter.finish()
}
}
/// See [`DescribeReservedNodeOfferingsOutput`](crate::output::DescribeReservedNodeOfferingsOutput)
pub mod describe_reserved_node_offerings_output {
/// A builder for [`DescribeReservedNodeOfferingsOutput`](crate::output::DescribeReservedNodeOfferingsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) reserved_node_offerings:
std::option::Option<std::vec::Vec<crate::model::ReservedNodeOffering>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
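        /// Appends an item to `reserved_node_offerings`.
        /// To replace the whole collection, use `set_reserved_node_offerings`.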
pub fn reserved_node_offerings(
mut self,
input: impl Into<crate::model::ReservedNodeOffering>,
) -> Self {
let mut v = self.reserved_node_offerings.unwrap_or_default();
v.push(input.into());
self.reserved_node_offerings = Some(v);
self
}
pub fn set_reserved_node_offerings(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ReservedNodeOffering>>,
) -> Self {
self.reserved_node_offerings = input;
self
}
/// Consumes the builder and constructs a [`DescribeReservedNodeOfferingsOutput`](crate::output::DescribeReservedNodeOfferingsOutput)
pub fn build(self) -> crate::output::DescribeReservedNodeOfferingsOutput {
crate::output::DescribeReservedNodeOfferingsOutput {
marker: self.marker,
reserved_node_offerings: self.reserved_node_offerings,
}
}
}
}
impl DescribeReservedNodeOfferingsOutput {
/// Creates a new builder-style object to manufacture [`DescribeReservedNodeOfferingsOutput`](crate::output::DescribeReservedNodeOfferingsOutput)
pub fn builder() -> crate::output::describe_reserved_node_offerings_output::Builder {
crate::output::describe_reserved_node_offerings_output::Builder::default()
}
}
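/// <p>Contains the output from the <a>DescribePartners</a> action.</p>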
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribePartnersOutput {
/// <p>A list of partner integrations.</p>
pub partner_integration_info_list:
std::option::Option<std::vec::Vec<crate::model::PartnerIntegrationInfo>>,
}
impl std::fmt::Debug for DescribePartnersOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribePartnersOutput");
formatter.field(
"partner_integration_info_list",
&self.partner_integration_info_list,
);
formatter.finish()
}
}
/// See [`DescribePartnersOutput`](crate::output::DescribePartnersOutput)
pub mod describe_partners_output {
/// A builder for [`DescribePartnersOutput`](crate::output::DescribePartnersOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) partner_integration_info_list:
std::option::Option<std::vec::Vec<crate::model::PartnerIntegrationInfo>>,
}
impl Builder {
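        /// Appends an item to `partner_integration_info_list`.
        /// To replace the whole collection, use `set_partner_integration_info_list`.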
pub fn partner_integration_info_list(
mut self,
input: impl Into<crate::model::PartnerIntegrationInfo>,
) -> Self {
let mut v = self.partner_integration_info_list.unwrap_or_default();
v.push(input.into());
self.partner_integration_info_list = Some(v);
self
}
pub fn set_partner_integration_info_list(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PartnerIntegrationInfo>>,
) -> Self {
self.partner_integration_info_list = input;
self
}
/// Consumes the builder and constructs a [`DescribePartnersOutput`](crate::output::DescribePartnersOutput)
pub fn build(self) -> crate::output::DescribePartnersOutput {
crate::output::DescribePartnersOutput {
partner_integration_info_list: self.partner_integration_info_list,
}
}
}
}
impl DescribePartnersOutput {
/// Creates a new builder-style object to manufacture [`DescribePartnersOutput`](crate::output::DescribePartnersOutput)
pub fn builder() -> crate::output::describe_partners_output::Builder {
crate::output::describe_partners_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeOrderableClusterOptions</a>
/// action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeOrderableClusterOptionsOutput {
    /// <p>A list of <code>OrderableClusterOption</code> structures containing information about
    /// orderable options for the cluster.</p>
pub orderable_cluster_options:
std::option::Option<std::vec::Vec<crate::model::OrderableClusterOption>>,
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeOrderableClusterOptionsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeOrderableClusterOptionsOutput");
formatter.field("orderable_cluster_options", &self.orderable_cluster_options);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeOrderableClusterOptionsOutput`](crate::output::DescribeOrderableClusterOptionsOutput)
pub mod describe_orderable_cluster_options_output {
/// A builder for [`DescribeOrderableClusterOptionsOutput`](crate::output::DescribeOrderableClusterOptionsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) orderable_cluster_options:
std::option::Option<std::vec::Vec<crate::model::OrderableClusterOption>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
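        /// Appends an item to `orderable_cluster_options`.
        /// To replace the whole collection, use `set_orderable_cluster_options`.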
pub fn orderable_cluster_options(
mut self,
input: impl Into<crate::model::OrderableClusterOption>,
) -> Self {
let mut v = self.orderable_cluster_options.unwrap_or_default();
v.push(input.into());
self.orderable_cluster_options = Some(v);
self
}
pub fn set_orderable_cluster_options(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::OrderableClusterOption>>,
) -> Self {
self.orderable_cluster_options = input;
self
}
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeOrderableClusterOptionsOutput`](crate::output::DescribeOrderableClusterOptionsOutput)
pub fn build(self) -> crate::output::DescribeOrderableClusterOptionsOutput {
crate::output::DescribeOrderableClusterOptionsOutput {
orderable_cluster_options: self.orderable_cluster_options,
marker: self.marker,
}
}
}
}
impl DescribeOrderableClusterOptionsOutput {
/// Creates a new builder-style object to manufacture [`DescribeOrderableClusterOptionsOutput`](crate::output::DescribeOrderableClusterOptionsOutput)
pub fn builder() -> crate::output::describe_orderable_cluster_options_output::Builder {
crate::output::describe_orderable_cluster_options_output::Builder::default()
}
}
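/// <p>Contains the output from the <a>DescribeNodeConfigurationOptions</a> action.</p>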
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeNodeConfigurationOptionsOutput {
/// <p>A list of valid node configurations.</p>
pub node_configuration_option_list:
std::option::Option<std::vec::Vec<crate::model::NodeConfigurationOption>>,
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeNodeConfigurationOptionsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeNodeConfigurationOptionsOutput");
formatter.field(
"node_configuration_option_list",
&self.node_configuration_option_list,
);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeNodeConfigurationOptionsOutput`](crate::output::DescribeNodeConfigurationOptionsOutput)
pub mod describe_node_configuration_options_output {
/// A builder for [`DescribeNodeConfigurationOptionsOutput`](crate::output::DescribeNodeConfigurationOptionsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) node_configuration_option_list:
std::option::Option<std::vec::Vec<crate::model::NodeConfigurationOption>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
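        /// Appends an item to `node_configuration_option_list`.
        /// To replace the whole collection, use `set_node_configuration_option_list`.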
pub fn node_configuration_option_list(
mut self,
input: impl Into<crate::model::NodeConfigurationOption>,
) -> Self {
let mut v = self.node_configuration_option_list.unwrap_or_default();
v.push(input.into());
self.node_configuration_option_list = Some(v);
self
}
pub fn set_node_configuration_option_list(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::NodeConfigurationOption>>,
) -> Self {
self.node_configuration_option_list = input;
self
}
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeNodeConfigurationOptionsOutput`](crate::output::DescribeNodeConfigurationOptionsOutput)
pub fn build(self) -> crate::output::DescribeNodeConfigurationOptionsOutput {
crate::output::DescribeNodeConfigurationOptionsOutput {
node_configuration_option_list: self.node_configuration_option_list,
marker: self.marker,
}
}
}
}
impl DescribeNodeConfigurationOptionsOutput {
/// Creates a new builder-style object to manufacture [`DescribeNodeConfigurationOptionsOutput`](crate::output::DescribeNodeConfigurationOptionsOutput)
pub fn builder() -> crate::output::describe_node_configuration_options_output::Builder {
crate::output::describe_node_configuration_options_output::Builder::default()
}
}
/// <p>Describes the status of logging for a cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeLoggingStatusOutput {
/// <p>
/// <code>true</code> if logging is on, <code>false</code> if logging is off.</p>
pub logging_enabled: bool,
/// <p>The name of the S3 bucket where the log files are stored.</p>
pub bucket_name: std::option::Option<std::string::String>,
/// <p>The prefix applied to the log file names.</p>
pub s3_key_prefix: std::option::Option<std::string::String>,
/// <p>The last time that logs were delivered.</p>
pub last_successful_delivery_time: std::option::Option<smithy_types::Instant>,
/// <p>The last time when logs failed to be delivered.</p>
pub last_failure_time: std::option::Option<smithy_types::Instant>,
/// <p>The message indicating that logs failed to be delivered.</p>
pub last_failure_message: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeLoggingStatusOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeLoggingStatusOutput");
formatter.field("logging_enabled", &self.logging_enabled);
formatter.field("bucket_name", &self.bucket_name);
formatter.field("s3_key_prefix", &self.s3_key_prefix);
formatter.field(
"last_successful_delivery_time",
&self.last_successful_delivery_time,
);
formatter.field("last_failure_time", &self.last_failure_time);
formatter.field("last_failure_message", &self.last_failure_message);
formatter.finish()
}
}
/// See [`DescribeLoggingStatusOutput`](crate::output::DescribeLoggingStatusOutput)
pub mod describe_logging_status_output {
/// A builder for [`DescribeLoggingStatusOutput`](crate::output::DescribeLoggingStatusOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) logging_enabled: std::option::Option<bool>,
pub(crate) bucket_name: std::option::Option<std::string::String>,
pub(crate) s3_key_prefix: std::option::Option<std::string::String>,
pub(crate) last_successful_delivery_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_failure_time: std::option::Option<smithy_types::Instant>,
pub(crate) last_failure_message: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>
/// <code>true</code> if logging is on, <code>false</code> if logging is off.</p>
pub fn logging_enabled(mut self, input: bool) -> Self {
self.logging_enabled = Some(input);
self
}
pub fn set_logging_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.logging_enabled = input;
self
}
/// <p>The name of the S3 bucket where the log files are stored.</p>
pub fn bucket_name(mut self, input: impl Into<std::string::String>) -> Self {
self.bucket_name = Some(input.into());
self
}
pub fn set_bucket_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.bucket_name = input;
self
}
/// <p>The prefix applied to the log file names.</p>
pub fn s3_key_prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.s3_key_prefix = Some(input.into());
self
}
pub fn set_s3_key_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.s3_key_prefix = input;
self
}
/// <p>The last time that logs were delivered.</p>
pub fn last_successful_delivery_time(mut self, input: smithy_types::Instant) -> Self {
self.last_successful_delivery_time = Some(input);
self
}
pub fn set_last_successful_delivery_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_successful_delivery_time = input;
self
}
/// <p>The last time when logs failed to be delivered.</p>
pub fn last_failure_time(mut self, input: smithy_types::Instant) -> Self {
self.last_failure_time = Some(input);
self
}
pub fn set_last_failure_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.last_failure_time = input;
self
}
/// <p>The message indicating that logs failed to be delivered.</p>
pub fn last_failure_message(mut self, input: impl Into<std::string::String>) -> Self {
self.last_failure_message = Some(input.into());
self
}
pub fn set_last_failure_message(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.last_failure_message = input;
self
}
/// Consumes the builder and constructs a [`DescribeLoggingStatusOutput`](crate::output::DescribeLoggingStatusOutput)
pub fn build(self) -> crate::output::DescribeLoggingStatusOutput {
crate::output::DescribeLoggingStatusOutput {
logging_enabled: self.logging_enabled.unwrap_or_default(),
bucket_name: self.bucket_name,
s3_key_prefix: self.s3_key_prefix,
last_successful_delivery_time: self.last_successful_delivery_time,
last_failure_time: self.last_failure_time,
last_failure_message: self.last_failure_message,
}
}
}
}
impl DescribeLoggingStatusOutput {
/// Creates a new builder-style object to manufacture [`DescribeLoggingStatusOutput`](crate::output::DescribeLoggingStatusOutput)
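    ///
    /// A sketch of inspecting the logging status (the crate path
    /// `aws_sdk_redshift` is an assumption):
    ///
    /// ```no_run
    /// # fn report(status: aws_sdk_redshift::output::DescribeLoggingStatusOutput) {
    /// if status.logging_enabled {
    ///     let bucket = status.bucket_name.as_deref().unwrap_or("<unknown>");
    ///     println!("audit logs are delivered to s3://{}", bucket);
    /// } else if let Some(msg) = status.last_failure_message {
    ///     eprintln!("log delivery failed: {}", msg);
    /// }
    /// # }
    /// ```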
pub fn builder() -> crate::output::describe_logging_status_output::Builder {
crate::output::describe_logging_status_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeHsmConfigurations</a> action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeHsmConfigurationsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <code>HsmConfiguration</code> objects.</p>
pub hsm_configurations: std::option::Option<std::vec::Vec<crate::model::HsmConfiguration>>,
}
impl std::fmt::Debug for DescribeHsmConfigurationsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeHsmConfigurationsOutput");
formatter.field("marker", &self.marker);
formatter.field("hsm_configurations", &self.hsm_configurations);
formatter.finish()
}
}
/// See [`DescribeHsmConfigurationsOutput`](crate::output::DescribeHsmConfigurationsOutput)
pub mod describe_hsm_configurations_output {
/// A builder for [`DescribeHsmConfigurationsOutput`](crate::output::DescribeHsmConfigurationsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) hsm_configurations:
std::option::Option<std::vec::Vec<crate::model::HsmConfiguration>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
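        /// Appends an item to `hsm_configurations`.
        /// To replace the whole collection, use `set_hsm_configurations`.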
pub fn hsm_configurations(
mut self,
input: impl Into<crate::model::HsmConfiguration>,
) -> Self {
let mut v = self.hsm_configurations.unwrap_or_default();
v.push(input.into());
self.hsm_configurations = Some(v);
self
}
pub fn set_hsm_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::HsmConfiguration>>,
) -> Self {
self.hsm_configurations = input;
self
}
/// Consumes the builder and constructs a [`DescribeHsmConfigurationsOutput`](crate::output::DescribeHsmConfigurationsOutput)
pub fn build(self) -> crate::output::DescribeHsmConfigurationsOutput {
crate::output::DescribeHsmConfigurationsOutput {
marker: self.marker,
hsm_configurations: self.hsm_configurations,
}
}
}
}
impl DescribeHsmConfigurationsOutput {
/// Creates a new builder-style object to manufacture [`DescribeHsmConfigurationsOutput`](crate::output::DescribeHsmConfigurationsOutput)
pub fn builder() -> crate::output::describe_hsm_configurations_output::Builder {
crate::output::describe_hsm_configurations_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeHsmClientCertificates</a> action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeHsmClientCertificatesOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of the identifiers for one or more HSM client certificates used by Amazon Redshift
/// clusters to store and retrieve database encryption keys in an HSM.</p>
pub hsm_client_certificates:
std::option::Option<std::vec::Vec<crate::model::HsmClientCertificate>>,
}
impl std::fmt::Debug for DescribeHsmClientCertificatesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeHsmClientCertificatesOutput");
formatter.field("marker", &self.marker);
formatter.field("hsm_client_certificates", &self.hsm_client_certificates);
formatter.finish()
}
}
/// See [`DescribeHsmClientCertificatesOutput`](crate::output::DescribeHsmClientCertificatesOutput)
pub mod describe_hsm_client_certificates_output {
/// A builder for [`DescribeHsmClientCertificatesOutput`](crate::output::DescribeHsmClientCertificatesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) hsm_client_certificates:
std::option::Option<std::vec::Vec<crate::model::HsmClientCertificate>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
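        /// Appends an item to `hsm_client_certificates`.
        /// To replace the whole collection, use `set_hsm_client_certificates`.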
pub fn hsm_client_certificates(
mut self,
input: impl Into<crate::model::HsmClientCertificate>,
) -> Self {
let mut v = self.hsm_client_certificates.unwrap_or_default();
v.push(input.into());
self.hsm_client_certificates = Some(v);
self
}
pub fn set_hsm_client_certificates(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::HsmClientCertificate>>,
) -> Self {
self.hsm_client_certificates = input;
self
}
/// Consumes the builder and constructs a [`DescribeHsmClientCertificatesOutput`](crate::output::DescribeHsmClientCertificatesOutput)
pub fn build(self) -> crate::output::DescribeHsmClientCertificatesOutput {
crate::output::DescribeHsmClientCertificatesOutput {
marker: self.marker,
hsm_client_certificates: self.hsm_client_certificates,
}
}
}
}
impl DescribeHsmClientCertificatesOutput {
/// Creates a new builder-style object to manufacture [`DescribeHsmClientCertificatesOutput`](crate::output::DescribeHsmClientCertificatesOutput)
pub fn builder() -> crate::output::describe_hsm_client_certificates_output::Builder {
crate::output::describe_hsm_client_certificates_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeEventSubscriptions</a> action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeEventSubscriptionsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of event subscriptions.</p>
pub event_subscriptions_list:
std::option::Option<std::vec::Vec<crate::model::EventSubscription>>,
}
impl std::fmt::Debug for DescribeEventSubscriptionsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeEventSubscriptionsOutput");
formatter.field("marker", &self.marker);
formatter.field("event_subscriptions_list", &self.event_subscriptions_list);
formatter.finish()
}
}
/// See [`DescribeEventSubscriptionsOutput`](crate::output::DescribeEventSubscriptionsOutput)
pub mod describe_event_subscriptions_output {
/// A builder for [`DescribeEventSubscriptionsOutput`](crate::output::DescribeEventSubscriptionsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) event_subscriptions_list:
std::option::Option<std::vec::Vec<crate::model::EventSubscription>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
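        /// Appends an item to `event_subscriptions_list`.
        /// To replace the whole collection, use `set_event_subscriptions_list`.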
pub fn event_subscriptions_list(
mut self,
input: impl Into<crate::model::EventSubscription>,
) -> Self {
let mut v = self.event_subscriptions_list.unwrap_or_default();
v.push(input.into());
self.event_subscriptions_list = Some(v);
self
}
pub fn set_event_subscriptions_list(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EventSubscription>>,
) -> Self {
self.event_subscriptions_list = input;
self
}
/// Consumes the builder and constructs a [`DescribeEventSubscriptionsOutput`](crate::output::DescribeEventSubscriptionsOutput)
pub fn build(self) -> crate::output::DescribeEventSubscriptionsOutput {
crate::output::DescribeEventSubscriptionsOutput {
marker: self.marker,
event_subscriptions_list: self.event_subscriptions_list,
}
}
}
}
impl DescribeEventSubscriptionsOutput {
/// Creates a new builder-style object to manufacture [`DescribeEventSubscriptionsOutput`](crate::output::DescribeEventSubscriptionsOutput)
pub fn builder() -> crate::output::describe_event_subscriptions_output::Builder {
crate::output::describe_event_subscriptions_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeEvents</a> action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeEventsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <code>Event</code> instances. </p>
pub events: std::option::Option<std::vec::Vec<crate::model::Event>>,
}
impl std::fmt::Debug for DescribeEventsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeEventsOutput");
formatter.field("marker", &self.marker);
formatter.field("events", &self.events);
formatter.finish()
}
}
/// See [`DescribeEventsOutput`](crate::output::DescribeEventsOutput)
pub mod describe_events_output {
/// A builder for [`DescribeEventsOutput`](crate::output::DescribeEventsOutput)
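    ///
    /// Calling `events` repeatedly appends to the list, while `set_events`
    /// replaces the whole collection. A sketch (the crate path and the
    /// `Event::builder()` convention are assumptions):
    ///
    /// ```no_run
    /// use aws_sdk_redshift::{model::Event, output::describe_events_output::Builder};
    ///
    /// let output = Builder::default()
    ///     .events(Event::builder().build())
    ///     .events(Event::builder().build())
    ///     .build();
    /// assert_eq!(output.events.map(|v| v.len()), Some(2));
    /// ```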
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) events: std::option::Option<std::vec::Vec<crate::model::Event>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
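        /// Appends an item to `events`.
        /// To replace the whole collection, use `set_events`.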
pub fn events(mut self, input: impl Into<crate::model::Event>) -> Self {
let mut v = self.events.unwrap_or_default();
v.push(input.into());
self.events = Some(v);
self
}
pub fn set_events(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Event>>,
) -> Self {
self.events = input;
self
}
/// Consumes the builder and constructs a [`DescribeEventsOutput`](crate::output::DescribeEventsOutput)
pub fn build(self) -> crate::output::DescribeEventsOutput {
crate::output::DescribeEventsOutput {
marker: self.marker,
events: self.events,
}
}
}
}
impl DescribeEventsOutput {
/// Creates a new builder-style object to manufacture [`DescribeEventsOutput`](crate::output::DescribeEventsOutput)
pub fn builder() -> crate::output::describe_events_output::Builder {
crate::output::describe_events_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeEventCategories</a> action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeEventCategoriesOutput {
    /// <p>A list of event category descriptions.</p>
pub event_categories_map_list:
std::option::Option<std::vec::Vec<crate::model::EventCategoriesMap>>,
}
impl std::fmt::Debug for DescribeEventCategoriesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeEventCategoriesOutput");
formatter.field("event_categories_map_list", &self.event_categories_map_list);
formatter.finish()
}
}
/// See [`DescribeEventCategoriesOutput`](crate::output::DescribeEventCategoriesOutput)
pub mod describe_event_categories_output {
/// A builder for [`DescribeEventCategoriesOutput`](crate::output::DescribeEventCategoriesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) event_categories_map_list:
std::option::Option<std::vec::Vec<crate::model::EventCategoriesMap>>,
}
impl Builder {
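        /// Appends an item to `event_categories_map_list`.
        /// To replace the whole collection, use `set_event_categories_map_list`.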
pub fn event_categories_map_list(
mut self,
input: impl Into<crate::model::EventCategoriesMap>,
) -> Self {
let mut v = self.event_categories_map_list.unwrap_or_default();
v.push(input.into());
self.event_categories_map_list = Some(v);
self
}
pub fn set_event_categories_map_list(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EventCategoriesMap>>,
) -> Self {
self.event_categories_map_list = input;
self
}
/// Consumes the builder and constructs a [`DescribeEventCategoriesOutput`](crate::output::DescribeEventCategoriesOutput)
pub fn build(self) -> crate::output::DescribeEventCategoriesOutput {
crate::output::DescribeEventCategoriesOutput {
event_categories_map_list: self.event_categories_map_list,
}
}
}
}
impl DescribeEventCategoriesOutput {
/// Creates a new builder-style object to manufacture [`DescribeEventCategoriesOutput`](crate::output::DescribeEventCategoriesOutput)
pub fn builder() -> crate::output::describe_event_categories_output::Builder {
crate::output::describe_event_categories_output::Builder::default()
}
}
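/// <p>Contains the output from the <a>DescribeEndpointAuthorization</a> action.</p>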
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeEndpointAuthorizationOutput {
/// <p>The authorizations to an endpoint.</p>
pub endpoint_authorization_list:
std::option::Option<std::vec::Vec<crate::model::EndpointAuthorization>>,
/// <p>An optional pagination token provided by a previous
/// <code>DescribeEndpointAuthorization</code> request. If this parameter is specified, the
/// response includes only records beyond the marker, up to the value specified by the
/// <code>MaxRecords</code> parameter.</p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeEndpointAuthorizationOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeEndpointAuthorizationOutput");
formatter.field(
"endpoint_authorization_list",
&self.endpoint_authorization_list,
);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeEndpointAuthorizationOutput`](crate::output::DescribeEndpointAuthorizationOutput)
pub mod describe_endpoint_authorization_output {
/// A builder for [`DescribeEndpointAuthorizationOutput`](crate::output::DescribeEndpointAuthorizationOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) endpoint_authorization_list:
std::option::Option<std::vec::Vec<crate::model::EndpointAuthorization>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
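        /// Appends an item to `endpoint_authorization_list`.
        /// To replace the whole collection, use `set_endpoint_authorization_list`.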
pub fn endpoint_authorization_list(
mut self,
input: impl Into<crate::model::EndpointAuthorization>,
) -> Self {
let mut v = self.endpoint_authorization_list.unwrap_or_default();
v.push(input.into());
self.endpoint_authorization_list = Some(v);
self
}
pub fn set_endpoint_authorization_list(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EndpointAuthorization>>,
) -> Self {
self.endpoint_authorization_list = input;
self
}
/// <p>An optional pagination token provided by a previous
/// <code>DescribeEndpointAuthorization</code> request. If this parameter is specified, the
/// response includes only records beyond the marker, up to the value specified by the
/// <code>MaxRecords</code> parameter.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeEndpointAuthorizationOutput`](crate::output::DescribeEndpointAuthorizationOutput)
pub fn build(self) -> crate::output::DescribeEndpointAuthorizationOutput {
crate::output::DescribeEndpointAuthorizationOutput {
endpoint_authorization_list: self.endpoint_authorization_list,
marker: self.marker,
}
}
}
}
impl DescribeEndpointAuthorizationOutput {
/// Creates a new builder-style object to manufacture [`DescribeEndpointAuthorizationOutput`](crate::output::DescribeEndpointAuthorizationOutput)
pub fn builder() -> crate::output::describe_endpoint_authorization_output::Builder {
crate::output::describe_endpoint_authorization_output::Builder::default()
}
}
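/// <p>Contains the output from the <a>DescribeEndpointAccess</a> action.</p>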
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeEndpointAccessOutput {
/// <p>The list of endpoints with access to the cluster.</p>
pub endpoint_access_list: std::option::Option<std::vec::Vec<crate::model::EndpointAccess>>,
/// <p>An optional pagination token provided by a previous
/// <code>DescribeEndpointAccess</code> request. If this parameter is specified, the
/// response includes only records beyond the marker, up to the value specified by the
/// <code>MaxRecords</code> parameter.</p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeEndpointAccessOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeEndpointAccessOutput");
formatter.field("endpoint_access_list", &self.endpoint_access_list);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeEndpointAccessOutput`](crate::output::DescribeEndpointAccessOutput)
pub mod describe_endpoint_access_output {
/// A builder for [`DescribeEndpointAccessOutput`](crate::output::DescribeEndpointAccessOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) endpoint_access_list:
std::option::Option<std::vec::Vec<crate::model::EndpointAccess>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
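        /// Appends an item to `endpoint_access_list`.
        /// To replace the whole collection, use `set_endpoint_access_list`.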
pub fn endpoint_access_list(
mut self,
input: impl Into<crate::model::EndpointAccess>,
) -> Self {
let mut v = self.endpoint_access_list.unwrap_or_default();
v.push(input.into());
self.endpoint_access_list = Some(v);
self
}
pub fn set_endpoint_access_list(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::EndpointAccess>>,
) -> Self {
self.endpoint_access_list = input;
self
}
/// <p>An optional pagination token provided by a previous
/// <code>DescribeEndpointAccess</code> request. If this parameter is specified, the
/// response includes only records beyond the marker, up to the value specified by the
/// <code>MaxRecords</code> parameter.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeEndpointAccessOutput`](crate::output::DescribeEndpointAccessOutput)
pub fn build(self) -> crate::output::DescribeEndpointAccessOutput {
crate::output::DescribeEndpointAccessOutput {
endpoint_access_list: self.endpoint_access_list,
marker: self.marker,
}
}
}
}
impl DescribeEndpointAccessOutput {
/// Creates a new builder-style object to manufacture [`DescribeEndpointAccessOutput`](crate::output::DescribeEndpointAccessOutput)
pub fn builder() -> crate::output::describe_endpoint_access_output::Builder {
crate::output::describe_endpoint_access_output::Builder::default()
}
}
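/// <p>Contains the output from the <a>DescribeDefaultClusterParameters</a> action.</p>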
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeDefaultClusterParametersOutput {
/// <p>Describes the default cluster parameters for a parameter group family.</p>
pub default_cluster_parameters: std::option::Option<crate::model::DefaultClusterParameters>,
}
impl std::fmt::Debug for DescribeDefaultClusterParametersOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeDefaultClusterParametersOutput");
formatter.field(
"default_cluster_parameters",
&self.default_cluster_parameters,
);
formatter.finish()
}
}
/// See [`DescribeDefaultClusterParametersOutput`](crate::output::DescribeDefaultClusterParametersOutput)
pub mod describe_default_cluster_parameters_output {
/// A builder for [`DescribeDefaultClusterParametersOutput`](crate::output::DescribeDefaultClusterParametersOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) default_cluster_parameters:
std::option::Option<crate::model::DefaultClusterParameters>,
}
impl Builder {
/// <p>Describes the default cluster parameters for a parameter group family.</p>
pub fn default_cluster_parameters(
mut self,
input: crate::model::DefaultClusterParameters,
) -> Self {
self.default_cluster_parameters = Some(input);
self
}
pub fn set_default_cluster_parameters(
mut self,
input: std::option::Option<crate::model::DefaultClusterParameters>,
) -> Self {
self.default_cluster_parameters = input;
self
}
/// Consumes the builder and constructs a [`DescribeDefaultClusterParametersOutput`](crate::output::DescribeDefaultClusterParametersOutput)
pub fn build(self) -> crate::output::DescribeDefaultClusterParametersOutput {
crate::output::DescribeDefaultClusterParametersOutput {
default_cluster_parameters: self.default_cluster_parameters,
}
}
}
}
impl DescribeDefaultClusterParametersOutput {
/// Creates a new builder-style object to manufacture [`DescribeDefaultClusterParametersOutput`](crate::output::DescribeDefaultClusterParametersOutput)
pub fn builder() -> crate::output::describe_default_cluster_parameters_output::Builder {
crate::output::describe_default_cluster_parameters_output::Builder::default()
}
}
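/// <p>Contains the output from the <a>DescribeDataSharesForProducer</a> action.</p>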
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeDataSharesForProducerOutput {
    /// <p>A list of the datashares available for producers.</p>
pub data_shares: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
/// <p>An optional parameter that specifies the starting point to return a set of response
/// records. When the results of a <a>DescribeDataSharesForProducer</a> request
/// exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the
/// <code>Marker</code> field of the response. You can retrieve the next set of response
/// records by providing the returned marker value in the <code>Marker</code> parameter and
/// retrying the request. </p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeDataSharesForProducerOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeDataSharesForProducerOutput");
formatter.field("data_shares", &self.data_shares);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeDataSharesForProducerOutput`](crate::output::DescribeDataSharesForProducerOutput)
pub mod describe_data_shares_for_producer_output {
/// A builder for [`DescribeDataSharesForProducerOutput`](crate::output::DescribeDataSharesForProducerOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_shares: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
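        /// Appends an item to `data_shares`.
        /// To replace the whole collection, use `set_data_shares`.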
pub fn data_shares(mut self, input: impl Into<crate::model::DataShare>) -> Self {
let mut v = self.data_shares.unwrap_or_default();
v.push(input.into());
self.data_shares = Some(v);
self
}
pub fn set_data_shares(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
) -> Self {
self.data_shares = input;
self
}
/// <p>An optional parameter that specifies the starting point to return a set of response
/// records. When the results of a <a>DescribeDataSharesForProducer</a> request
/// exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the
/// <code>Marker</code> field of the response. You can retrieve the next set of response
/// records by providing the returned marker value in the <code>Marker</code> parameter and
/// retrying the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeDataSharesForProducerOutput`](crate::output::DescribeDataSharesForProducerOutput)
pub fn build(self) -> crate::output::DescribeDataSharesForProducerOutput {
crate::output::DescribeDataSharesForProducerOutput {
data_shares: self.data_shares,
marker: self.marker,
}
}
}
}
impl DescribeDataSharesForProducerOutput {
/// Creates a new builder-style object to manufacture [`DescribeDataSharesForProducerOutput`](crate::output::DescribeDataSharesForProducerOutput)
pub fn builder() -> crate::output::describe_data_shares_for_producer_output::Builder {
crate::output::describe_data_shares_for_producer_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeDataSharesForConsumerOutput {
/// <p>Shows the results of datashares available for consumers.</p>
pub data_shares: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
/// <p>An optional parameter that specifies the starting point to return a set of response
/// records. When the results of a <a>DescribeDataSharesForConsumer</a> request
/// exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the
/// <code>Marker</code> field of the response. You can retrieve the next set of response
/// records by providing the returned marker value in the <code>Marker</code> parameter and
/// retrying the request. </p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeDataSharesForConsumerOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeDataSharesForConsumerOutput");
formatter.field("data_shares", &self.data_shares);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeDataSharesForConsumerOutput`](crate::output::DescribeDataSharesForConsumerOutput)
pub mod describe_data_shares_for_consumer_output {
/// A builder for [`DescribeDataSharesForConsumerOutput`](crate::output::DescribeDataSharesForConsumerOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_shares: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
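        /// Appends an item to `data_shares`.
        ///
        /// To override the contents of this collection use [`set_data_shares`](Self::set_data_shares).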
pub fn data_shares(mut self, input: impl Into<crate::model::DataShare>) -> Self {
let mut v = self.data_shares.unwrap_or_default();
v.push(input.into());
self.data_shares = Some(v);
self
}
pub fn set_data_shares(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
) -> Self {
self.data_shares = input;
self
}
/// <p>An optional parameter that specifies the starting point to return a set of response
/// records. When the results of a <a>DescribeDataSharesForConsumer</a> request
/// exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the
/// <code>Marker</code> field of the response. You can retrieve the next set of response
/// records by providing the returned marker value in the <code>Marker</code> parameter and
/// retrying the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeDataSharesForConsumerOutput`](crate::output::DescribeDataSharesForConsumerOutput)
pub fn build(self) -> crate::output::DescribeDataSharesForConsumerOutput {
crate::output::DescribeDataSharesForConsumerOutput {
data_shares: self.data_shares,
marker: self.marker,
}
}
}
}
impl DescribeDataSharesForConsumerOutput {
/// Creates a new builder-style object to manufacture [`DescribeDataSharesForConsumerOutput`](crate::output::DescribeDataSharesForConsumerOutput)
pub fn builder() -> crate::output::describe_data_shares_for_consumer_output::Builder {
crate::output::describe_data_shares_for_consumer_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeDataSharesOutput {
/// <p>The results returned from describing datashares.</p>
pub data_shares: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
/// <p>An optional parameter that specifies the starting point to return a set of response records. When the results of a <a>DescribeDataShares</a> request exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the <code>Marker</code> field of the response. You can retrieve the next set of response records by providing the returned marker value in the <code>Marker</code> parameter and retrying the request. </p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeDataSharesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeDataSharesOutput");
formatter.field("data_shares", &self.data_shares);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeDataSharesOutput`](crate::output::DescribeDataSharesOutput)
pub mod describe_data_shares_output {
/// A builder for [`DescribeDataSharesOutput`](crate::output::DescribeDataSharesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_shares: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
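        /// Appends an item to `data_shares`.
        ///
        /// To override the contents of this collection use [`set_data_shares`](Self::set_data_shares).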
pub fn data_shares(mut self, input: impl Into<crate::model::DataShare>) -> Self {
let mut v = self.data_shares.unwrap_or_default();
v.push(input.into());
self.data_shares = Some(v);
self
}
pub fn set_data_shares(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DataShare>>,
) -> Self {
self.data_shares = input;
self
}
/// <p>An optional parameter that specifies the starting point to return a set of response records. When the results of a <a>DescribeDataShares</a> request exceed the value specified in <code>MaxRecords</code>, AWS returns a value in the <code>Marker</code> field of the response. You can retrieve the next set of response records by providing the returned marker value in the <code>Marker</code> parameter and retrying the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeDataSharesOutput`](crate::output::DescribeDataSharesOutput)
pub fn build(self) -> crate::output::DescribeDataSharesOutput {
crate::output::DescribeDataSharesOutput {
data_shares: self.data_shares,
marker: self.marker,
}
}
}
}
impl DescribeDataSharesOutput {
/// Creates a new builder-style object to manufacture [`DescribeDataSharesOutput`](crate::output::DescribeDataSharesOutput)
pub fn builder() -> crate::output::describe_data_shares_output::Builder {
crate::output::describe_data_shares_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeClusterVersions</a> action.
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClusterVersionsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <code>Version</code> elements. </p>
pub cluster_versions: std::option::Option<std::vec::Vec<crate::model::ClusterVersion>>,
}
impl std::fmt::Debug for DescribeClusterVersionsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClusterVersionsOutput");
formatter.field("marker", &self.marker);
formatter.field("cluster_versions", &self.cluster_versions);
formatter.finish()
}
}
/// See [`DescribeClusterVersionsOutput`](crate::output::DescribeClusterVersionsOutput)
pub mod describe_cluster_versions_output {
/// A builder for [`DescribeClusterVersionsOutput`](crate::output::DescribeClusterVersionsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) cluster_versions:
std::option::Option<std::vec::Vec<crate::model::ClusterVersion>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
        pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
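        /// Appends an item to `cluster_versions`.
        ///
        /// To override the contents of this collection use [`set_cluster_versions`](Self::set_cluster_versions).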
pub fn cluster_versions(mut self, input: impl Into<crate::model::ClusterVersion>) -> Self {
let mut v = self.cluster_versions.unwrap_or_default();
v.push(input.into());
self.cluster_versions = Some(v);
self
}
pub fn set_cluster_versions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ClusterVersion>>,
) -> Self {
self.cluster_versions = input;
self
}
/// Consumes the builder and constructs a [`DescribeClusterVersionsOutput`](crate::output::DescribeClusterVersionsOutput)
pub fn build(self) -> crate::output::DescribeClusterVersionsOutput {
crate::output::DescribeClusterVersionsOutput {
marker: self.marker,
cluster_versions: self.cluster_versions,
}
}
}
}
impl DescribeClusterVersionsOutput {
/// Creates a new builder-style object to manufacture [`DescribeClusterVersionsOutput`](crate::output::DescribeClusterVersionsOutput)
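    ///
    /// A hedged pagination sketch: `fetch_page` below stands in for whatever
    /// client call yields this output type and is not defined in this crate,
    /// so the snippet is marked `ignore`:
    ///
    /// ```ignore
    /// let mut marker: Option<String> = None;
    /// loop {
    ///     // hypothetical helper returning a DescribeClusterVersionsOutput
    ///     let page = fetch_page(marker.take());
    ///     for version in page.cluster_versions.unwrap_or_default() {
    ///         // use `version`
    ///     }
    ///     match page.marker {
    ///         Some(m) => marker = Some(m), // more records remain
    ///         None => break,               // all records retrieved
    ///     }
    /// }
    /// ```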
pub fn builder() -> crate::output::describe_cluster_versions_output::Builder {
crate::output::describe_cluster_versions_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClusterTracksOutput {
/// <p>A list of maintenance tracks output by the <code>DescribeClusterTracks</code>
/// operation. </p>
pub maintenance_tracks: std::option::Option<std::vec::Vec<crate::model::MaintenanceTrack>>,
/// <p>The starting point to return a set of response tracklist records. You can retrieve the
/// next set of response records by providing the returned marker value in the
/// <code>Marker</code> parameter and retrying the request.</p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeClusterTracksOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClusterTracksOutput");
formatter.field("maintenance_tracks", &self.maintenance_tracks);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeClusterTracksOutput`](crate::output::DescribeClusterTracksOutput)
pub mod describe_cluster_tracks_output {
/// A builder for [`DescribeClusterTracksOutput`](crate::output::DescribeClusterTracksOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) maintenance_tracks:
std::option::Option<std::vec::Vec<crate::model::MaintenanceTrack>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
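        /// Appends an item to `maintenance_tracks`.
        ///
        /// To override the contents of this collection use [`set_maintenance_tracks`](Self::set_maintenance_tracks).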
pub fn maintenance_tracks(
mut self,
input: impl Into<crate::model::MaintenanceTrack>,
) -> Self {
let mut v = self.maintenance_tracks.unwrap_or_default();
v.push(input.into());
self.maintenance_tracks = Some(v);
self
}
pub fn set_maintenance_tracks(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MaintenanceTrack>>,
) -> Self {
self.maintenance_tracks = input;
self
}
/// <p>The starting point to return a set of response tracklist records. You can retrieve the
/// next set of response records by providing the returned marker value in the
/// <code>Marker</code> parameter and retrying the request.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeClusterTracksOutput`](crate::output::DescribeClusterTracksOutput)
pub fn build(self) -> crate::output::DescribeClusterTracksOutput {
crate::output::DescribeClusterTracksOutput {
maintenance_tracks: self.maintenance_tracks,
marker: self.marker,
}
}
}
}
impl DescribeClusterTracksOutput {
/// Creates a new builder-style object to manufacture [`DescribeClusterTracksOutput`](crate::output::DescribeClusterTracksOutput)
pub fn builder() -> crate::output::describe_cluster_tracks_output::Builder {
crate::output::describe_cluster_tracks_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeClusterSubnetGroups</a> action.
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClusterSubnetGroupsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <a>ClusterSubnetGroup</a> instances. </p>
pub cluster_subnet_groups: std::option::Option<std::vec::Vec<crate::model::ClusterSubnetGroup>>,
}
impl std::fmt::Debug for DescribeClusterSubnetGroupsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClusterSubnetGroupsOutput");
formatter.field("marker", &self.marker);
formatter.field("cluster_subnet_groups", &self.cluster_subnet_groups);
formatter.finish()
}
}
/// See [`DescribeClusterSubnetGroupsOutput`](crate::output::DescribeClusterSubnetGroupsOutput)
pub mod describe_cluster_subnet_groups_output {
/// A builder for [`DescribeClusterSubnetGroupsOutput`](crate::output::DescribeClusterSubnetGroupsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) cluster_subnet_groups:
std::option::Option<std::vec::Vec<crate::model::ClusterSubnetGroup>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
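        /// Appends an item to `cluster_subnet_groups`.
        ///
        /// To override the contents of this collection use [`set_cluster_subnet_groups`](Self::set_cluster_subnet_groups).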
pub fn cluster_subnet_groups(
mut self,
input: impl Into<crate::model::ClusterSubnetGroup>,
) -> Self {
let mut v = self.cluster_subnet_groups.unwrap_or_default();
v.push(input.into());
self.cluster_subnet_groups = Some(v);
self
}
pub fn set_cluster_subnet_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ClusterSubnetGroup>>,
) -> Self {
self.cluster_subnet_groups = input;
self
}
/// Consumes the builder and constructs a [`DescribeClusterSubnetGroupsOutput`](crate::output::DescribeClusterSubnetGroupsOutput)
pub fn build(self) -> crate::output::DescribeClusterSubnetGroupsOutput {
crate::output::DescribeClusterSubnetGroupsOutput {
marker: self.marker,
cluster_subnet_groups: self.cluster_subnet_groups,
}
}
}
}
impl DescribeClusterSubnetGroupsOutput {
/// Creates a new builder-style object to manufacture [`DescribeClusterSubnetGroupsOutput`](crate::output::DescribeClusterSubnetGroupsOutput)
pub fn builder() -> crate::output::describe_cluster_subnet_groups_output::Builder {
crate::output::describe_cluster_subnet_groups_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeClusterSnapshots</a> action.
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClusterSnapshotsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <a>Snapshot</a> instances. </p>
pub snapshots: std::option::Option<std::vec::Vec<crate::model::Snapshot>>,
}
impl std::fmt::Debug for DescribeClusterSnapshotsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClusterSnapshotsOutput");
formatter.field("marker", &self.marker);
formatter.field("snapshots", &self.snapshots);
formatter.finish()
}
}
/// See [`DescribeClusterSnapshotsOutput`](crate::output::DescribeClusterSnapshotsOutput)
pub mod describe_cluster_snapshots_output {
/// A builder for [`DescribeClusterSnapshotsOutput`](crate::output::DescribeClusterSnapshotsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) snapshots: std::option::Option<std::vec::Vec<crate::model::Snapshot>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
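        /// Appends an item to `snapshots`.
        ///
        /// To override the contents of this collection use [`set_snapshots`](Self::set_snapshots).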
pub fn snapshots(mut self, input: impl Into<crate::model::Snapshot>) -> Self {
let mut v = self.snapshots.unwrap_or_default();
v.push(input.into());
self.snapshots = Some(v);
self
}
pub fn set_snapshots(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Snapshot>>,
) -> Self {
self.snapshots = input;
self
}
/// Consumes the builder and constructs a [`DescribeClusterSnapshotsOutput`](crate::output::DescribeClusterSnapshotsOutput)
pub fn build(self) -> crate::output::DescribeClusterSnapshotsOutput {
crate::output::DescribeClusterSnapshotsOutput {
marker: self.marker,
snapshots: self.snapshots,
}
}
}
}
impl DescribeClusterSnapshotsOutput {
/// Creates a new builder-style object to manufacture [`DescribeClusterSnapshotsOutput`](crate::output::DescribeClusterSnapshotsOutput)
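    ///
    /// Note the two ways the builder populates `snapshots`: `snapshots(..)`
    /// appends a single element, while `set_snapshots(..)` replaces the whole
    /// `Option<Vec<_>>`. A sketch (crate paths elided, hence `ignore`):
    ///
    /// ```ignore
    /// let output = crate::output::DescribeClusterSnapshotsOutput::builder()
    ///     .set_snapshots(Some(Vec::new())) // replaces the collection wholesale
    ///     .marker("token")
    ///     .build();
    /// assert_eq!(output.snapshots, Some(Vec::new()));
    /// ```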
pub fn builder() -> crate::output::describe_cluster_snapshots_output::Builder {
crate::output::describe_cluster_snapshots_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeClusterSecurityGroups</a> action.
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClusterSecurityGroupsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <a>ClusterSecurityGroup</a> instances. </p>
pub cluster_security_groups:
std::option::Option<std::vec::Vec<crate::model::ClusterSecurityGroup>>,
}
impl std::fmt::Debug for DescribeClusterSecurityGroupsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClusterSecurityGroupsOutput");
formatter.field("marker", &self.marker);
formatter.field("cluster_security_groups", &self.cluster_security_groups);
formatter.finish()
}
}
/// See [`DescribeClusterSecurityGroupsOutput`](crate::output::DescribeClusterSecurityGroupsOutput)
pub mod describe_cluster_security_groups_output {
/// A builder for [`DescribeClusterSecurityGroupsOutput`](crate::output::DescribeClusterSecurityGroupsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) cluster_security_groups:
std::option::Option<std::vec::Vec<crate::model::ClusterSecurityGroup>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
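        /// Appends an item to `cluster_security_groups`.
        ///
        /// To override the contents of this collection use [`set_cluster_security_groups`](Self::set_cluster_security_groups).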
pub fn cluster_security_groups(
mut self,
input: impl Into<crate::model::ClusterSecurityGroup>,
) -> Self {
let mut v = self.cluster_security_groups.unwrap_or_default();
v.push(input.into());
self.cluster_security_groups = Some(v);
self
}
pub fn set_cluster_security_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ClusterSecurityGroup>>,
) -> Self {
self.cluster_security_groups = input;
self
}
/// Consumes the builder and constructs a [`DescribeClusterSecurityGroupsOutput`](crate::output::DescribeClusterSecurityGroupsOutput)
pub fn build(self) -> crate::output::DescribeClusterSecurityGroupsOutput {
crate::output::DescribeClusterSecurityGroupsOutput {
marker: self.marker,
cluster_security_groups: self.cluster_security_groups,
}
}
}
}
impl DescribeClusterSecurityGroupsOutput {
/// Creates a new builder-style object to manufacture [`DescribeClusterSecurityGroupsOutput`](crate::output::DescribeClusterSecurityGroupsOutput)
pub fn builder() -> crate::output::describe_cluster_security_groups_output::Builder {
crate::output::describe_cluster_security_groups_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeClusters</a> action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClustersOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <code>Cluster</code> objects, where each object describes one cluster.
/// </p>
pub clusters: std::option::Option<std::vec::Vec<crate::model::Cluster>>,
}
impl std::fmt::Debug for DescribeClustersOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClustersOutput");
formatter.field("marker", &self.marker);
formatter.field("clusters", &self.clusters);
formatter.finish()
}
}
/// See [`DescribeClustersOutput`](crate::output::DescribeClustersOutput)
pub mod describe_clusters_output {
/// A builder for [`DescribeClustersOutput`](crate::output::DescribeClustersOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) clusters: std::option::Option<std::vec::Vec<crate::model::Cluster>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
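        /// Appends an item to `clusters`.
        ///
        /// To override the contents of this collection use [`set_clusters`](Self::set_clusters).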
pub fn clusters(mut self, input: impl Into<crate::model::Cluster>) -> Self {
let mut v = self.clusters.unwrap_or_default();
v.push(input.into());
self.clusters = Some(v);
self
}
pub fn set_clusters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Cluster>>,
) -> Self {
self.clusters = input;
self
}
/// Consumes the builder and constructs a [`DescribeClustersOutput`](crate::output::DescribeClustersOutput)
pub fn build(self) -> crate::output::DescribeClustersOutput {
crate::output::DescribeClustersOutput {
marker: self.marker,
clusters: self.clusters,
}
}
}
}
impl DescribeClustersOutput {
/// Creates a new builder-style object to manufacture [`DescribeClustersOutput`](crate::output::DescribeClustersOutput)
pub fn builder() -> crate::output::describe_clusters_output::Builder {
crate::output::describe_clusters_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeClusterParameters</a> action.
/// </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClusterParametersOutput {
/// <p>A list of <a>Parameter</a> instances. Each instance lists the parameters
/// of one cluster parameter group. </p>
pub parameters: std::option::Option<std::vec::Vec<crate::model::Parameter>>,
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeClusterParametersOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClusterParametersOutput");
formatter.field("parameters", &self.parameters);
formatter.field("marker", &self.marker);
formatter.finish()
}
}
/// See [`DescribeClusterParametersOutput`](crate::output::DescribeClusterParametersOutput)
pub mod describe_cluster_parameters_output {
/// A builder for [`DescribeClusterParametersOutput`](crate::output::DescribeClusterParametersOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) parameters: std::option::Option<std::vec::Vec<crate::model::Parameter>>,
pub(crate) marker: std::option::Option<std::string::String>,
}
impl Builder {
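        /// Appends an item to `parameters`.
        ///
        /// To override the contents of this collection use [`set_parameters`](Self::set_parameters).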
pub fn parameters(mut self, input: impl Into<crate::model::Parameter>) -> Self {
let mut v = self.parameters.unwrap_or_default();
v.push(input.into());
self.parameters = Some(v);
self
}
pub fn set_parameters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Parameter>>,
) -> Self {
self.parameters = input;
self
}
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
/// Consumes the builder and constructs a [`DescribeClusterParametersOutput`](crate::output::DescribeClusterParametersOutput)
pub fn build(self) -> crate::output::DescribeClusterParametersOutput {
crate::output::DescribeClusterParametersOutput {
parameters: self.parameters,
marker: self.marker,
}
}
}
}
impl DescribeClusterParametersOutput {
/// Creates a new builder-style object to manufacture [`DescribeClusterParametersOutput`](crate::output::DescribeClusterParametersOutput)
pub fn builder() -> crate::output::describe_cluster_parameters_output::Builder {
crate::output::describe_cluster_parameters_output::Builder::default()
}
}
/// <p>Contains the output from the <a>DescribeClusterParameterGroups</a>
/// action. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClusterParameterGroupsOutput {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of <a>ClusterParameterGroup</a> instances. Each instance
/// describes one cluster parameter group. </p>
pub parameter_groups: std::option::Option<std::vec::Vec<crate::model::ClusterParameterGroup>>,
}
impl std::fmt::Debug for DescribeClusterParameterGroupsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClusterParameterGroupsOutput");
formatter.field("marker", &self.marker);
formatter.field("parameter_groups", &self.parameter_groups);
formatter.finish()
}
}
/// See [`DescribeClusterParameterGroupsOutput`](crate::output::DescribeClusterParameterGroupsOutput)
pub mod describe_cluster_parameter_groups_output {
/// A builder for [`DescribeClusterParameterGroupsOutput`](crate::output::DescribeClusterParameterGroupsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) parameter_groups:
std::option::Option<std::vec::Vec<crate::model::ClusterParameterGroup>>,
}
impl Builder {
/// <p>A value that indicates the starting point for the next set of response records in a
/// subsequent request. If a value is returned in a response, you can retrieve the next set
/// of records by providing this returned marker value in the <code>Marker</code> parameter
/// and retrying the command. If the <code>Marker</code> field is empty, all response
/// records have been retrieved for the request. </p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
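        /// Appends an item to `parameter_groups`.
        ///
        /// To override the contents of this collection use [`set_parameter_groups`](Self::set_parameter_groups).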
pub fn parameter_groups(
mut self,
input: impl Into<crate::model::ClusterParameterGroup>,
) -> Self {
let mut v = self.parameter_groups.unwrap_or_default();
v.push(input.into());
self.parameter_groups = Some(v);
self
}
pub fn set_parameter_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ClusterParameterGroup>>,
) -> Self {
self.parameter_groups = input;
self
}
/// Consumes the builder and constructs a [`DescribeClusterParameterGroupsOutput`](crate::output::DescribeClusterParameterGroupsOutput)
pub fn build(self) -> crate::output::DescribeClusterParameterGroupsOutput {
crate::output::DescribeClusterParameterGroupsOutput {
marker: self.marker,
parameter_groups: self.parameter_groups,
}
}
}
}
impl DescribeClusterParameterGroupsOutput {
/// Creates a new builder-style object to manufacture [`DescribeClusterParameterGroupsOutput`](crate::output::DescribeClusterParameterGroupsOutput)
pub fn builder() -> crate::output::describe_cluster_parameter_groups_output::Builder {
crate::output::describe_cluster_parameter_groups_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeClusterDbRevisionsOutput {
/// <p>A string representing the starting point for the next set of revisions. If a value is
/// returned in a response, you can retrieve the next set of revisions by providing the
/// value in the <code>marker</code> parameter and retrying the command. If the
/// <code>marker</code> field is empty, all revisions have already been returned.</p>
pub marker: std::option::Option<std::string::String>,
/// <p>A list of revisions.</p>
pub cluster_db_revisions: std::option::Option<std::vec::Vec<crate::model::ClusterDbRevision>>,
}
impl std::fmt::Debug for DescribeClusterDbRevisionsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeClusterDbRevisionsOutput");
formatter.field("marker", &self.marker);
formatter.field("cluster_db_revisions", &self.cluster_db_revisions);
formatter.finish()
}
}
/// See [`DescribeClusterDbRevisionsOutput`](crate::output::DescribeClusterDbRevisionsOutput)
pub mod describe_cluster_db_revisions_output {
/// A builder for [`DescribeClusterDbRevisionsOutput`](crate::output::DescribeClusterDbRevisionsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) marker: std::option::Option<std::string::String>,
pub(crate) cluster_db_revisions:
std::option::Option<std::vec::Vec<crate::model::ClusterDbRevision>>,
}
impl Builder {
/// <p>A string representing the starting point for the next set of revisions. If a value is
/// returned in a response, you can retrieve the next set of revisions by providing the
/// value in the <code>marker</code> parameter and retrying the command. If the
/// <code>marker</code> field is empty, all revisions have already been returned.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.marker = Some(input.into());
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.marker = input;
self
}
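        /// Appends an item to `cluster_db_revisions`.
        ///
        /// To override the contents of this collection use [`set_cluster_db_revisions`](Self::set_cluster_db_revisions).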
pub fn cluster_db_revisions(
mut self,
input: impl Into<crate::model::ClusterDbRevision>,
) -> Self {
let mut v = self.cluster_db_revisions.unwrap_or_default();
v.push(input.into());
self.cluster_db_revisions = Some(v);
self
}
pub fn set_cluster_db_revisions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ClusterDbRevision>>,
) -> Self {
self.cluster_db_revisions = input;
self
}
/// Consumes the builder and constructs a [`DescribeClusterDbRevisionsOutput`](crate::output::DescribeClusterDbRevisionsOutput)
pub fn build(self) -> crate::output::DescribeClusterDbRevisionsOutput {
crate::output::DescribeClusterDbRevisionsOutput {
marker: self.marker,
cluster_db_revisions: self.cluster_db_revisions,
}
}
}
}
impl DescribeClusterDbRevisionsOutput {
/// Creates a new builder-style object to manufacture [`DescribeClusterDbRevisionsOutput`](crate::output::DescribeClusterDbRevisionsOutput)
pub fn builder() -> crate::output::describe_cluster_db_revisions_output::Builder {
crate::output::describe_cluster_db_revisions_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAuthenticationProfilesOutput {
/// <p>The list of authentication profiles.</p>
pub authentication_profiles:
std::option::Option<std::vec::Vec<crate::model::AuthenticationProfile>>,
}
impl std::fmt::Debug for DescribeAuthenticationProfilesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAuthenticationProfilesOutput");
formatter.field("authentication_profiles", &self.authentication_profiles);
formatter.finish()
}
}
/// See [`DescribeAuthenticationProfilesOutput`](crate::output::DescribeAuthenticationProfilesOutput)
pub mod describe_authentication_profiles_output {
/// A builder for [`DescribeAuthenticationProfilesOutput`](crate::output::DescribeAuthenticationProfilesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) authentication_profiles:
std::option::Option<std::vec::Vec<crate::model::AuthenticationProfile>>,
}
impl Builder {
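        /// Appends an item to `authentication_profiles`.
        ///
        /// To override the contents of this collection use [`set_authentication_profiles`](Self::set_authentication_profiles).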
pub fn authentication_profiles(
mut self,
input: impl Into<crate::model::AuthenticationProfile>,
) -> Self {
let mut v = self.authentication_profiles.unwrap_or_default();
v.push(input.into());
self.authentication_profiles = Some(v);
self
}
pub fn set_authentication_profiles(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AuthenticationProfile>>,
) -> Self {
self.authentication_profiles = input;
self
}
/// Consumes the builder and constructs a [`DescribeAuthenticationProfilesOutput`](crate::output::DescribeAuthenticationProfilesOutput)
pub fn build(self) -> crate::output::DescribeAuthenticationProfilesOutput {
crate::output::DescribeAuthenticationProfilesOutput {
authentication_profiles: self.authentication_profiles,
}
}
}
}
impl DescribeAuthenticationProfilesOutput {
/// Creates a new builder-style object to manufacture [`DescribeAuthenticationProfilesOutput`](crate::output::DescribeAuthenticationProfilesOutput)
pub fn builder() -> crate::output::describe_authentication_profiles_output::Builder {
crate::output::describe_authentication_profiles_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAccountAttributesOutput {
/// <p>A list of attributes assigned to an account.</p>
pub account_attributes: std::option::Option<std::vec::Vec<crate::model::AccountAttribute>>,
}
impl std::fmt::Debug for DescribeAccountAttributesOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAccountAttributesOutput");
formatter.field("account_attributes", &self.account_attributes);
formatter.finish()
}
}
/// See [`DescribeAccountAttributesOutput`](crate::output::DescribeAccountAttributesOutput)
pub mod describe_account_attributes_output {
/// A builder for [`DescribeAccountAttributesOutput`](crate::output::DescribeAccountAttributesOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) account_attributes:
std::option::Option<std::vec::Vec<crate::model::AccountAttribute>>,
}
impl Builder {
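        /// Appends an item to `account_attributes`.
        ///
        /// To override the contents of this collection use [`set_account_attributes`](Self::set_account_attributes).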
pub fn account_attributes(
mut self,
input: impl Into<crate::model::AccountAttribute>,
) -> Self {
let mut v = self.account_attributes.unwrap_or_default();
v.push(input.into());
self.account_attributes = Some(v);
self
}
pub fn set_account_attributes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AccountAttribute>>,
) -> Self {
self.account_attributes = input;
self
}
/// Consumes the builder and constructs a [`DescribeAccountAttributesOutput`](crate::output::DescribeAccountAttributesOutput)
pub fn build(self) -> crate::output::DescribeAccountAttributesOutput {
crate::output::DescribeAccountAttributesOutput {
account_attributes: self.account_attributes,
}
}
}
}
impl DescribeAccountAttributesOutput {
/// Creates a new builder-style object to manufacture [`DescribeAccountAttributesOutput`](crate::output::DescribeAccountAttributesOutput)
pub fn builder() -> crate::output::describe_account_attributes_output::Builder {
crate::output::describe_account_attributes_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteUsageLimitOutput {}
impl std::fmt::Debug for DeleteUsageLimitOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteUsageLimitOutput");
formatter.finish()
}
}
/// See [`DeleteUsageLimitOutput`](crate::output::DeleteUsageLimitOutput)
pub mod delete_usage_limit_output {
/// A builder for [`DeleteUsageLimitOutput`](crate::output::DeleteUsageLimitOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteUsageLimitOutput`](crate::output::DeleteUsageLimitOutput)
pub fn build(self) -> crate::output::DeleteUsageLimitOutput {
crate::output::DeleteUsageLimitOutput {}
}
}
}
impl DeleteUsageLimitOutput {
/// Creates a new builder-style object to manufacture [`DeleteUsageLimitOutput`](crate::output::DeleteUsageLimitOutput)
pub fn builder() -> crate::output::delete_usage_limit_output::Builder {
crate::output::delete_usage_limit_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteTagsOutput {}
impl std::fmt::Debug for DeleteTagsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteTagsOutput");
formatter.finish()
}
}
/// See [`DeleteTagsOutput`](crate::output::DeleteTagsOutput)
pub mod delete_tags_output {
/// A builder for [`DeleteTagsOutput`](crate::output::DeleteTagsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteTagsOutput`](crate::output::DeleteTagsOutput)
pub fn build(self) -> crate::output::DeleteTagsOutput {
crate::output::DeleteTagsOutput {}
}
}
}
impl DeleteTagsOutput {
/// Creates a new builder-style object to manufacture [`DeleteTagsOutput`](crate::output::DeleteTagsOutput)
pub fn builder() -> crate::output::delete_tags_output::Builder {
crate::output::delete_tags_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteSnapshotScheduleOutput {}
impl std::fmt::Debug for DeleteSnapshotScheduleOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteSnapshotScheduleOutput");
formatter.finish()
}
}
/// See [`DeleteSnapshotScheduleOutput`](crate::output::DeleteSnapshotScheduleOutput)
pub mod delete_snapshot_schedule_output {
/// A builder for [`DeleteSnapshotScheduleOutput`](crate::output::DeleteSnapshotScheduleOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteSnapshotScheduleOutput`](crate::output::DeleteSnapshotScheduleOutput)
pub fn build(self) -> crate::output::DeleteSnapshotScheduleOutput {
crate::output::DeleteSnapshotScheduleOutput {}
}
}
}
impl DeleteSnapshotScheduleOutput {
/// Creates a new builder-style object to manufacture [`DeleteSnapshotScheduleOutput`](crate::output::DeleteSnapshotScheduleOutput)
pub fn builder() -> crate::output::delete_snapshot_schedule_output::Builder {
crate::output::delete_snapshot_schedule_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteSnapshotCopyGrantOutput {}
impl std::fmt::Debug for DeleteSnapshotCopyGrantOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteSnapshotCopyGrantOutput");
formatter.finish()
}
}
/// See [`DeleteSnapshotCopyGrantOutput`](crate::output::DeleteSnapshotCopyGrantOutput)
pub mod delete_snapshot_copy_grant_output {
/// A builder for [`DeleteSnapshotCopyGrantOutput`](crate::output::DeleteSnapshotCopyGrantOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteSnapshotCopyGrantOutput`](crate::output::DeleteSnapshotCopyGrantOutput)
pub fn build(self) -> crate::output::DeleteSnapshotCopyGrantOutput {
crate::output::DeleteSnapshotCopyGrantOutput {}
}
}
}
impl DeleteSnapshotCopyGrantOutput {
/// Creates a new builder-style object to manufacture [`DeleteSnapshotCopyGrantOutput`](crate::output::DeleteSnapshotCopyGrantOutput)
pub fn builder() -> crate::output::delete_snapshot_copy_grant_output::Builder {
crate::output::delete_snapshot_copy_grant_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteScheduledActionOutput {}
impl std::fmt::Debug for DeleteScheduledActionOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteScheduledActionOutput");
formatter.finish()
}
}
/// See [`DeleteScheduledActionOutput`](crate::output::DeleteScheduledActionOutput)
pub mod delete_scheduled_action_output {
/// A builder for [`DeleteScheduledActionOutput`](crate::output::DeleteScheduledActionOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteScheduledActionOutput`](crate::output::DeleteScheduledActionOutput)
pub fn build(self) -> crate::output::DeleteScheduledActionOutput {
crate::output::DeleteScheduledActionOutput {}
}
}
}
impl DeleteScheduledActionOutput {
/// Creates a new builder-style object to manufacture [`DeleteScheduledActionOutput`](crate::output::DeleteScheduledActionOutput)
pub fn builder() -> crate::output::delete_scheduled_action_output::Builder {
crate::output::delete_scheduled_action_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeletePartnerOutput {
/// <p>The name of the database that receives data from the partner.</p>
pub database_name: std::option::Option<std::string::String>,
/// <p>The name of the partner that is authorized to send data.</p>
pub partner_name: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DeletePartnerOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeletePartnerOutput");
formatter.field("database_name", &self.database_name);
formatter.field("partner_name", &self.partner_name);
formatter.finish()
}
}
/// See [`DeletePartnerOutput`](crate::output::DeletePartnerOutput)
pub mod delete_partner_output {
/// A builder for [`DeletePartnerOutput`](crate::output::DeletePartnerOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) database_name: std::option::Option<std::string::String>,
pub(crate) partner_name: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the database that receives data from the partner.</p>
pub fn database_name(mut self, input: impl Into<std::string::String>) -> Self {
self.database_name = Some(input.into());
self
}
pub fn set_database_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.database_name = input;
self
}
/// <p>The name of the partner that is authorized to send data.</p>
pub fn partner_name(mut self, input: impl Into<std::string::String>) -> Self {
self.partner_name = Some(input.into());
self
}
pub fn set_partner_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.partner_name = input;
self
}
/// Consumes the builder and constructs a [`DeletePartnerOutput`](crate::output::DeletePartnerOutput)
pub fn build(self) -> crate::output::DeletePartnerOutput {
crate::output::DeletePartnerOutput {
database_name: self.database_name,
partner_name: self.partner_name,
}
}
}
}
impl DeletePartnerOutput {
/// Creates a new builder-style object to manufacture [`DeletePartnerOutput`](crate::output::DeletePartnerOutput)
pub fn builder() -> crate::output::delete_partner_output::Builder {
crate::output::delete_partner_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteHsmConfigurationOutput {}
impl std::fmt::Debug for DeleteHsmConfigurationOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteHsmConfigurationOutput");
formatter.finish()
}
}
/// See [`DeleteHsmConfigurationOutput`](crate::output::DeleteHsmConfigurationOutput)
pub mod delete_hsm_configuration_output {
/// A builder for [`DeleteHsmConfigurationOutput`](crate::output::DeleteHsmConfigurationOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteHsmConfigurationOutput`](crate::output::DeleteHsmConfigurationOutput)
pub fn build(self) -> crate::output::DeleteHsmConfigurationOutput {
crate::output::DeleteHsmConfigurationOutput {}
}
}
}
impl DeleteHsmConfigurationOutput {
/// Creates a new builder-style object to manufacture [`DeleteHsmConfigurationOutput`](crate::output::DeleteHsmConfigurationOutput)
pub fn builder() -> crate::output::delete_hsm_configuration_output::Builder {
crate::output::delete_hsm_configuration_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteHsmClientCertificateOutput {}
impl std::fmt::Debug for DeleteHsmClientCertificateOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteHsmClientCertificateOutput");
formatter.finish()
}
}
/// See [`DeleteHsmClientCertificateOutput`](crate::output::DeleteHsmClientCertificateOutput)
pub mod delete_hsm_client_certificate_output {
/// A builder for [`DeleteHsmClientCertificateOutput`](crate::output::DeleteHsmClientCertificateOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteHsmClientCertificateOutput`](crate::output::DeleteHsmClientCertificateOutput)
pub fn build(self) -> crate::output::DeleteHsmClientCertificateOutput {
crate::output::DeleteHsmClientCertificateOutput {}
}
}
}
impl DeleteHsmClientCertificateOutput {
/// Creates a new builder-style object to manufacture [`DeleteHsmClientCertificateOutput`](crate::output::DeleteHsmClientCertificateOutput)
pub fn builder() -> crate::output::delete_hsm_client_certificate_output::Builder {
crate::output::delete_hsm_client_certificate_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteEventSubscriptionOutput {}
impl std::fmt::Debug for DeleteEventSubscriptionOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteEventSubscriptionOutput");
formatter.finish()
}
}
/// See [`DeleteEventSubscriptionOutput`](crate::output::DeleteEventSubscriptionOutput)
pub mod delete_event_subscription_output {
/// A builder for [`DeleteEventSubscriptionOutput`](crate::output::DeleteEventSubscriptionOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteEventSubscriptionOutput`](crate::output::DeleteEventSubscriptionOutput)
pub fn build(self) -> crate::output::DeleteEventSubscriptionOutput {
crate::output::DeleteEventSubscriptionOutput {}
}
}
}
impl DeleteEventSubscriptionOutput {
/// Creates a new builder-style object to manufacture [`DeleteEventSubscriptionOutput`](crate::output::DeleteEventSubscriptionOutput)
pub fn builder() -> crate::output::delete_event_subscription_output::Builder {
crate::output::delete_event_subscription_output::Builder::default()
}
}
/// <p>Describes a Redshift-managed VPC endpoint.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteEndpointAccessOutput {
/// <p>The cluster identifier of the cluster associated with the endpoint.</p>
pub cluster_identifier: std::option::Option<std::string::String>,
/// <p>The Amazon Web Services account ID of the owner of the cluster.</p>
pub resource_owner: std::option::Option<std::string::String>,
/// <p>The subnet group name where Amazon Redshift chooses to deploy the endpoint.</p>
pub subnet_group_name: std::option::Option<std::string::String>,
/// <p>The status of the endpoint.</p>
pub endpoint_status: std::option::Option<std::string::String>,
/// <p>The name of the endpoint.</p>
pub endpoint_name: std::option::Option<std::string::String>,
/// <p>The time (UTC) that the endpoint was created.</p>
pub endpoint_create_time: std::option::Option<smithy_types::Instant>,
/// <p>The port number on which the cluster accepts incoming connections.</p>
pub port: i32,
/// <p>The DNS address of the endpoint.</p>
pub address: std::option::Option<std::string::String>,
/// <p>The security groups associated with the endpoint.</p>
pub vpc_security_groups:
std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
/// <p>The connection endpoint for connecting to an Amazon Redshift cluster through the proxy.</p>
pub vpc_endpoint: std::option::Option<crate::model::VpcEndpoint>,
}
impl std::fmt::Debug for DeleteEndpointAccessOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteEndpointAccessOutput");
formatter.field("cluster_identifier", &self.cluster_identifier);
formatter.field("resource_owner", &self.resource_owner);
formatter.field("subnet_group_name", &self.subnet_group_name);
formatter.field("endpoint_status", &self.endpoint_status);
formatter.field("endpoint_name", &self.endpoint_name);
formatter.field("endpoint_create_time", &self.endpoint_create_time);
formatter.field("port", &self.port);
formatter.field("address", &self.address);
formatter.field("vpc_security_groups", &self.vpc_security_groups);
formatter.field("vpc_endpoint", &self.vpc_endpoint);
formatter.finish()
}
}
/// See [`DeleteEndpointAccessOutput`](crate::output::DeleteEndpointAccessOutput)
pub mod delete_endpoint_access_output {
/// A builder for [`DeleteEndpointAccessOutput`](crate::output::DeleteEndpointAccessOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_identifier: std::option::Option<std::string::String>,
pub(crate) resource_owner: std::option::Option<std::string::String>,
pub(crate) subnet_group_name: std::option::Option<std::string::String>,
pub(crate) endpoint_status: std::option::Option<std::string::String>,
pub(crate) endpoint_name: std::option::Option<std::string::String>,
pub(crate) endpoint_create_time: std::option::Option<smithy_types::Instant>,
pub(crate) port: std::option::Option<i32>,
pub(crate) address: std::option::Option<std::string::String>,
pub(crate) vpc_security_groups:
std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
pub(crate) vpc_endpoint: std::option::Option<crate::model::VpcEndpoint>,
}
impl Builder {
/// <p>The cluster identifier of the cluster associated with the endpoint.</p>
pub fn cluster_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_identifier = Some(input.into());
self
}
pub fn set_cluster_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_identifier = input;
self
}
/// <p>The Amazon Web Services account ID of the owner of the cluster.</p>
pub fn resource_owner(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_owner = Some(input.into());
self
}
pub fn set_resource_owner(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.resource_owner = input;
self
}
/// <p>The subnet group name where Amazon Redshift chooses to deploy the endpoint.</p>
pub fn subnet_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.subnet_group_name = Some(input.into());
self
}
pub fn set_subnet_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.subnet_group_name = input;
self
}
/// <p>The status of the endpoint.</p>
pub fn endpoint_status(mut self, input: impl Into<std::string::String>) -> Self {
self.endpoint_status = Some(input.into());
self
}
pub fn set_endpoint_status(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.endpoint_status = input;
self
}
/// <p>The name of the endpoint.</p>
pub fn endpoint_name(mut self, input: impl Into<std::string::String>) -> Self {
self.endpoint_name = Some(input.into());
self
}
pub fn set_endpoint_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.endpoint_name = input;
self
}
/// <p>The time (UTC) that the endpoint was created.</p>
pub fn endpoint_create_time(mut self, input: smithy_types::Instant) -> Self {
self.endpoint_create_time = Some(input);
self
}
pub fn set_endpoint_create_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.endpoint_create_time = input;
self
}
/// <p>The port number on which the cluster accepts incoming connections.</p>
pub fn port(mut self, input: i32) -> Self {
self.port = Some(input);
self
}
pub fn set_port(mut self, input: std::option::Option<i32>) -> Self {
self.port = input;
self
}
/// <p>The DNS address of the endpoint.</p>
pub fn address(mut self, input: impl Into<std::string::String>) -> Self {
self.address = Some(input.into());
self
}
pub fn set_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.address = input;
self
}
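        /// Appends an item to `vpc_security_groups`.
        ///
        /// To override the contents of this collection use [`set_vpc_security_groups`](Self::set_vpc_security_groups).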
pub fn vpc_security_groups(
mut self,
input: impl Into<crate::model::VpcSecurityGroupMembership>,
) -> Self {
let mut v = self.vpc_security_groups.unwrap_or_default();
v.push(input.into());
self.vpc_security_groups = Some(v);
self
}
pub fn set_vpc_security_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
) -> Self {
self.vpc_security_groups = input;
self
}
/// <p>The connection endpoint for connecting to an Amazon Redshift cluster through the proxy.</p>
pub fn vpc_endpoint(mut self, input: crate::model::VpcEndpoint) -> Self {
self.vpc_endpoint = Some(input);
self
}
pub fn set_vpc_endpoint(
mut self,
input: std::option::Option<crate::model::VpcEndpoint>,
) -> Self {
self.vpc_endpoint = input;
self
}
/// Consumes the builder and constructs a [`DeleteEndpointAccessOutput`](crate::output::DeleteEndpointAccessOutput)
pub fn build(self) -> crate::output::DeleteEndpointAccessOutput {
crate::output::DeleteEndpointAccessOutput {
cluster_identifier: self.cluster_identifier,
resource_owner: self.resource_owner,
subnet_group_name: self.subnet_group_name,
endpoint_status: self.endpoint_status,
endpoint_name: self.endpoint_name,
endpoint_create_time: self.endpoint_create_time,
port: self.port.unwrap_or_default(),
address: self.address,
vpc_security_groups: self.vpc_security_groups,
vpc_endpoint: self.vpc_endpoint,
}
}
}
}
impl DeleteEndpointAccessOutput {
/// Creates a new builder-style object to manufacture [`DeleteEndpointAccessOutput`](crate::output::DeleteEndpointAccessOutput)
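    ///
    /// A minimal usage sketch, marked `ignore` since the values below are
    /// illustrative rather than taken from a real endpoint:
    ///
    /// ```ignore
    /// let output = crate::output::DeleteEndpointAccessOutput::builder()
    ///     .cluster_identifier("my-cluster")
    ///     .endpoint_name("my-endpoint")
    ///     .port(5439)
    ///     .build();
    /// // Fields never set stay `None`; the non-optional `port` would fall back
    /// // to `i32::default()` (0) if it were never set.
    /// assert_eq!(output.port, 5439);
    /// ```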
pub fn builder() -> crate::output::delete_endpoint_access_output::Builder {
crate::output::delete_endpoint_access_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteClusterSubnetGroupOutput {}
impl std::fmt::Debug for DeleteClusterSubnetGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteClusterSubnetGroupOutput");
formatter.finish()
}
}
/// See [`DeleteClusterSubnetGroupOutput`](crate::output::DeleteClusterSubnetGroupOutput)
pub mod delete_cluster_subnet_group_output {
/// A builder for [`DeleteClusterSubnetGroupOutput`](crate::output::DeleteClusterSubnetGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteClusterSubnetGroupOutput`](crate::output::DeleteClusterSubnetGroupOutput)
pub fn build(self) -> crate::output::DeleteClusterSubnetGroupOutput {
crate::output::DeleteClusterSubnetGroupOutput {}
}
}
}
impl DeleteClusterSubnetGroupOutput {
/// Creates a new builder-style object to manufacture [`DeleteClusterSubnetGroupOutput`](crate::output::DeleteClusterSubnetGroupOutput)
pub fn builder() -> crate::output::delete_cluster_subnet_group_output::Builder {
crate::output::delete_cluster_subnet_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteClusterSnapshotOutput {
/// <p>Describes a snapshot.</p>
pub snapshot: std::option::Option<crate::model::Snapshot>,
}
impl std::fmt::Debug for DeleteClusterSnapshotOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteClusterSnapshotOutput");
formatter.field("snapshot", &self.snapshot);
formatter.finish()
}
}
/// See [`DeleteClusterSnapshotOutput`](crate::output::DeleteClusterSnapshotOutput)
pub mod delete_cluster_snapshot_output {
/// A builder for [`DeleteClusterSnapshotOutput`](crate::output::DeleteClusterSnapshotOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) snapshot: std::option::Option<crate::model::Snapshot>,
}
impl Builder {
/// <p>Describes a snapshot.</p>
pub fn snapshot(mut self, input: crate::model::Snapshot) -> Self {
self.snapshot = Some(input);
self
}
pub fn set_snapshot(mut self, input: std::option::Option<crate::model::Snapshot>) -> Self {
self.snapshot = input;
self
}
/// Consumes the builder and constructs a [`DeleteClusterSnapshotOutput`](crate::output::DeleteClusterSnapshotOutput)
pub fn build(self) -> crate::output::DeleteClusterSnapshotOutput {
crate::output::DeleteClusterSnapshotOutput {
snapshot: self.snapshot,
}
}
}
}
impl DeleteClusterSnapshotOutput {
/// Creates a new builder-style object to manufacture [`DeleteClusterSnapshotOutput`](crate::output::DeleteClusterSnapshotOutput)
pub fn builder() -> crate::output::delete_cluster_snapshot_output::Builder {
crate::output::delete_cluster_snapshot_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteClusterSecurityGroupOutput {}
impl std::fmt::Debug for DeleteClusterSecurityGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteClusterSecurityGroupOutput");
formatter.finish()
}
}
/// See [`DeleteClusterSecurityGroupOutput`](crate::output::DeleteClusterSecurityGroupOutput)
pub mod delete_cluster_security_group_output {
/// A builder for [`DeleteClusterSecurityGroupOutput`](crate::output::DeleteClusterSecurityGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteClusterSecurityGroupOutput`](crate::output::DeleteClusterSecurityGroupOutput)
pub fn build(self) -> crate::output::DeleteClusterSecurityGroupOutput {
crate::output::DeleteClusterSecurityGroupOutput {}
}
}
}
impl DeleteClusterSecurityGroupOutput {
/// Creates a new builder-style object to manufacture [`DeleteClusterSecurityGroupOutput`](crate::output::DeleteClusterSecurityGroupOutput)
pub fn builder() -> crate::output::delete_cluster_security_group_output::Builder {
crate::output::delete_cluster_security_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteClusterParameterGroupOutput {}
impl std::fmt::Debug for DeleteClusterParameterGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteClusterParameterGroupOutput");
formatter.finish()
}
}
/// See [`DeleteClusterParameterGroupOutput`](crate::output::DeleteClusterParameterGroupOutput)
pub mod delete_cluster_parameter_group_output {
/// A builder for [`DeleteClusterParameterGroupOutput`](crate::output::DeleteClusterParameterGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`DeleteClusterParameterGroupOutput`](crate::output::DeleteClusterParameterGroupOutput)
pub fn build(self) -> crate::output::DeleteClusterParameterGroupOutput {
crate::output::DeleteClusterParameterGroupOutput {}
}
}
}
impl DeleteClusterParameterGroupOutput {
/// Creates a new builder-style object to manufacture [`DeleteClusterParameterGroupOutput`](crate::output::DeleteClusterParameterGroupOutput)
pub fn builder() -> crate::output::delete_cluster_parameter_group_output::Builder {
crate::output::delete_cluster_parameter_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteClusterOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for DeleteClusterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteClusterOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`DeleteClusterOutput`](crate::output::DeleteClusterOutput)
pub mod delete_cluster_output {
/// A builder for [`DeleteClusterOutput`](crate::output::DeleteClusterOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`DeleteClusterOutput`](crate::output::DeleteClusterOutput)
pub fn build(self) -> crate::output::DeleteClusterOutput {
crate::output::DeleteClusterOutput {
cluster: self.cluster,
}
}
}
}
impl DeleteClusterOutput {
/// Creates a new builder-style object to manufacture [`DeleteClusterOutput`](crate::output::DeleteClusterOutput)
pub fn builder() -> crate::output::delete_cluster_output::Builder {
crate::output::delete_cluster_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteAuthenticationProfileOutput {
/// <p>The name of the authentication profile that was deleted.</p>
pub authentication_profile_name: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DeleteAuthenticationProfileOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteAuthenticationProfileOutput");
formatter.field(
"authentication_profile_name",
&self.authentication_profile_name,
);
formatter.finish()
}
}
/// See [`DeleteAuthenticationProfileOutput`](crate::output::DeleteAuthenticationProfileOutput)
pub mod delete_authentication_profile_output {
/// A builder for [`DeleteAuthenticationProfileOutput`](crate::output::DeleteAuthenticationProfileOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) authentication_profile_name: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the authentication profile that was deleted.</p>
pub fn authentication_profile_name(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.authentication_profile_name = Some(input.into());
self
}
pub fn set_authentication_profile_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.authentication_profile_name = input;
self
}
/// Consumes the builder and constructs a [`DeleteAuthenticationProfileOutput`](crate::output::DeleteAuthenticationProfileOutput)
pub fn build(self) -> crate::output::DeleteAuthenticationProfileOutput {
crate::output::DeleteAuthenticationProfileOutput {
authentication_profile_name: self.authentication_profile_name,
}
}
}
}
impl DeleteAuthenticationProfileOutput {
/// Creates a new builder-style object to manufacture [`DeleteAuthenticationProfileOutput`](crate::output::DeleteAuthenticationProfileOutput)
pub fn builder() -> crate::output::delete_authentication_profile_output::Builder {
crate::output::delete_authentication_profile_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeauthorizeDataShareOutput {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub data_share_arn: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub producer_arn: std::option::Option<std::string::String>,
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub allow_publicly_accessible_consumers: bool,
/// <p>A value that specifies when the datashare has an association between a producer and data consumers.</p>
pub data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl std::fmt::Debug for DeauthorizeDataShareOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeauthorizeDataShareOutput");
formatter.field("data_share_arn", &self.data_share_arn);
formatter.field("producer_arn", &self.producer_arn);
formatter.field(
"allow_publicly_accessible_consumers",
&self.allow_publicly_accessible_consumers,
);
formatter.field("data_share_associations", &self.data_share_associations);
formatter.finish()
}
}
/// See [`DeauthorizeDataShareOutput`](crate::output::DeauthorizeDataShareOutput)
pub mod deauthorize_data_share_output {
/// A builder for [`DeauthorizeDataShareOutput`](crate::output::DeauthorizeDataShareOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_share_arn: std::option::Option<std::string::String>,
pub(crate) producer_arn: std::option::Option<std::string::String>,
pub(crate) allow_publicly_accessible_consumers: std::option::Option<bool>,
pub(crate) data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl Builder {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub fn data_share_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.data_share_arn = Some(input.into());
self
}
pub fn set_data_share_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.data_share_arn = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub fn producer_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.producer_arn = Some(input.into());
self
}
pub fn set_producer_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.producer_arn = input;
self
}
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub fn allow_publicly_accessible_consumers(mut self, input: bool) -> Self {
self.allow_publicly_accessible_consumers = Some(input);
self
}
pub fn set_allow_publicly_accessible_consumers(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.allow_publicly_accessible_consumers = input;
self
}
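        /// Appends an item to `data_share_associations`.
        ///
        /// To override the contents of this collection use [`set_data_share_associations`](Self::set_data_share_associations).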
pub fn data_share_associations(
mut self,
input: impl Into<crate::model::DataShareAssociation>,
) -> Self {
let mut v = self.data_share_associations.unwrap_or_default();
v.push(input.into());
self.data_share_associations = Some(v);
self
}
pub fn set_data_share_associations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
) -> Self {
self.data_share_associations = input;
self
}
/// Consumes the builder and constructs a [`DeauthorizeDataShareOutput`](crate::output::DeauthorizeDataShareOutput)
pub fn build(self) -> crate::output::DeauthorizeDataShareOutput {
crate::output::DeauthorizeDataShareOutput {
data_share_arn: self.data_share_arn,
producer_arn: self.producer_arn,
allow_publicly_accessible_consumers: self
.allow_publicly_accessible_consumers
.unwrap_or_default(),
data_share_associations: self.data_share_associations,
}
}
}
}
impl DeauthorizeDataShareOutput {
/// Creates a new builder-style object to manufacture [`DeauthorizeDataShareOutput`](crate::output::DeauthorizeDataShareOutput)
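    ///
    /// A sketch of the boolean default: `allow_publicly_accessible_consumers`
    /// falls back to `false` via `unwrap_or_default()` in `build()` (the ARN
    /// below is illustrative and truncated):
    ///
    /// ```ignore
    /// let out = crate::output::DeauthorizeDataShareOutput::builder()
    ///     .data_share_arn("arn:aws:redshift:...")
    ///     .build();
    /// assert!(!out.allow_publicly_accessible_consumers);
    /// ```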
pub fn builder() -> crate::output::deauthorize_data_share_output::Builder {
crate::output::deauthorize_data_share_output::Builder::default()
}
}
/// <p>Describes a usage limit object for a cluster. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateUsageLimitOutput {
/// <p>The identifier of the usage limit.</p>
pub usage_limit_id: std::option::Option<std::string::String>,
/// <p>The identifier of the cluster with a usage limit.</p>
pub cluster_identifier: std::option::Option<std::string::String>,
/// <p>The Amazon Redshift feature to which the limit applies.</p>
pub feature_type: std::option::Option<crate::model::UsageLimitFeatureType>,
/// <p>The type of limit. Depending on the feature type, this can be based on a time duration or data size.</p>
pub limit_type: std::option::Option<crate::model::UsageLimitLimitType>,
/// <p>The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB).</p>
pub amount: i64,
/// <p>The time period that the amount applies to. A <code>weekly</code> period begins on Sunday. The default is <code>monthly</code>. </p>
pub period: std::option::Option<crate::model::UsageLimitPeriod>,
/// <p>The action that Amazon Redshift takes when the limit is reached. Possible values are: </p>
/// <ul>
/// <li>
/// <p>
/// <b>log</b> - To log an event in a system table. The default is log.</p>
/// </li>
/// <li>
/// <p>
/// <b>emit-metric</b> - To emit CloudWatch metrics.</p>
/// </li>
/// <li>
/// <p>
/// <b>disable</b> - To disable the feature until the next usage period begins.</p>
/// </li>
/// </ul>
pub breach_action: std::option::Option<crate::model::UsageLimitBreachAction>,
/// <p>A list of tag instances.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl std::fmt::Debug for CreateUsageLimitOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateUsageLimitOutput");
formatter.field("usage_limit_id", &self.usage_limit_id);
formatter.field("cluster_identifier", &self.cluster_identifier);
formatter.field("feature_type", &self.feature_type);
formatter.field("limit_type", &self.limit_type);
formatter.field("amount", &self.amount);
formatter.field("period", &self.period);
formatter.field("breach_action", &self.breach_action);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
/// See [`CreateUsageLimitOutput`](crate::output::CreateUsageLimitOutput)
pub mod create_usage_limit_output {
/// A builder for [`CreateUsageLimitOutput`](crate::output::CreateUsageLimitOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) usage_limit_id: std::option::Option<std::string::String>,
pub(crate) cluster_identifier: std::option::Option<std::string::String>,
pub(crate) feature_type: std::option::Option<crate::model::UsageLimitFeatureType>,
pub(crate) limit_type: std::option::Option<crate::model::UsageLimitLimitType>,
pub(crate) amount: std::option::Option<i64>,
pub(crate) period: std::option::Option<crate::model::UsageLimitPeriod>,
pub(crate) breach_action: std::option::Option<crate::model::UsageLimitBreachAction>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>The identifier of the usage limit.</p>
pub fn usage_limit_id(mut self, input: impl Into<std::string::String>) -> Self {
self.usage_limit_id = Some(input.into());
self
}
pub fn set_usage_limit_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.usage_limit_id = input;
self
}
/// <p>The identifier of the cluster with a usage limit.</p>
pub fn cluster_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_identifier = Some(input.into());
self
}
pub fn set_cluster_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_identifier = input;
self
}
/// <p>The Amazon Redshift feature to which the limit applies.</p>
pub fn feature_type(mut self, input: crate::model::UsageLimitFeatureType) -> Self {
self.feature_type = Some(input);
self
}
pub fn set_feature_type(
mut self,
input: std::option::Option<crate::model::UsageLimitFeatureType>,
) -> Self {
self.feature_type = input;
self
}
/// <p>The type of limit. Depending on the feature type, this can be based on a time duration or data size.</p>
pub fn limit_type(mut self, input: crate::model::UsageLimitLimitType) -> Self {
self.limit_type = Some(input);
self
}
pub fn set_limit_type(
mut self,
input: std::option::Option<crate::model::UsageLimitLimitType>,
) -> Self {
self.limit_type = input;
self
}
/// <p>The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB).</p>
pub fn amount(mut self, input: i64) -> Self {
self.amount = Some(input);
self
}
pub fn set_amount(mut self, input: std::option::Option<i64>) -> Self {
self.amount = input;
self
}
/// <p>The time period that the amount applies to. A <code>weekly</code> period begins on Sunday. The default is <code>monthly</code>. </p>
pub fn period(mut self, input: crate::model::UsageLimitPeriod) -> Self {
self.period = Some(input);
self
}
pub fn set_period(
mut self,
input: std::option::Option<crate::model::UsageLimitPeriod>,
) -> Self {
self.period = input;
self
}
/// <p>The action that Amazon Redshift takes when the limit is reached. Possible values are: </p>
/// <ul>
/// <li>
/// <p>
/// <b>log</b> - To log an event in a system table. The default is log.</p>
/// </li>
/// <li>
/// <p>
/// <b>emit-metric</b> - To emit CloudWatch metrics.</p>
/// </li>
/// <li>
/// <p>
/// <b>disable</b> - To disable the feature until the next usage period begins.</p>
/// </li>
/// </ul>
pub fn breach_action(mut self, input: crate::model::UsageLimitBreachAction) -> Self {
self.breach_action = Some(input);
self
}
pub fn set_breach_action(
mut self,
input: std::option::Option<crate::model::UsageLimitBreachAction>,
) -> Self {
self.breach_action = input;
self
}
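        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).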
pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`CreateUsageLimitOutput`](crate::output::CreateUsageLimitOutput)
pub fn build(self) -> crate::output::CreateUsageLimitOutput {
crate::output::CreateUsageLimitOutput {
usage_limit_id: self.usage_limit_id,
cluster_identifier: self.cluster_identifier,
feature_type: self.feature_type,
limit_type: self.limit_type,
amount: self.amount.unwrap_or_default(),
period: self.period,
breach_action: self.breach_action,
tags: self.tags,
}
}
}
}
impl CreateUsageLimitOutput {
/// Creates a new builder-style object to manufacture [`CreateUsageLimitOutput`](crate::output::CreateUsageLimitOutput)
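    ///
    /// A sketch of the scalar default: `amount` is not optional on the output,
    /// so `build()` substitutes `i64::default()` (0) when it was never set
    /// (the identifier and amount below are illustrative):
    ///
    /// ```ignore
    /// let out = crate::output::CreateUsageLimitOutput::builder()
    ///     .usage_limit_id("limit-123")
    ///     .amount(2048) // minutes or terabytes, depending on `limit_type`
    ///     .build();
    /// assert_eq!(out.amount, 2048);
    ///
    /// let unset = crate::output::CreateUsageLimitOutput::builder().build();
    /// assert_eq!(unset.amount, 0);
    /// ```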
pub fn builder() -> crate::output::create_usage_limit_output::Builder {
crate::output::create_usage_limit_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateTagsOutput {}
impl std::fmt::Debug for CreateTagsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateTagsOutput");
formatter.finish()
}
}
/// See [`CreateTagsOutput`](crate::output::CreateTagsOutput)
pub mod create_tags_output {
/// A builder for [`CreateTagsOutput`](crate::output::CreateTagsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {}
impl Builder {
/// Consumes the builder and constructs a [`CreateTagsOutput`](crate::output::CreateTagsOutput)
pub fn build(self) -> crate::output::CreateTagsOutput {
crate::output::CreateTagsOutput {}
}
}
}
impl CreateTagsOutput {
/// Creates a new builder-style object to manufacture [`CreateTagsOutput`](crate::output::CreateTagsOutput)
pub fn builder() -> crate::output::create_tags_output::Builder {
crate::output::create_tags_output::Builder::default()
}
}
/// <p>Describes a snapshot schedule. You can set a regular interval for creating
/// snapshots of a cluster. You can also schedule snapshots for specific dates. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateSnapshotScheduleOutput {
/// <p>A list of ScheduleDefinitions.</p>
pub schedule_definitions: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>A unique identifier for the schedule.</p>
pub schedule_identifier: std::option::Option<std::string::String>,
/// <p>The description of the schedule.</p>
pub schedule_description: std::option::Option<std::string::String>,
/// <p>An optional set of tags describing the schedule.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>A list of times when the snapshot schedule will be invoked.</p>
pub next_invocations: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
/// <p>The number of clusters associated with the schedule.</p>
pub associated_cluster_count: std::option::Option<i32>,
/// <p>A list of clusters associated with the schedule. A maximum of 100 clusters is returned.</p>
pub associated_clusters:
std::option::Option<std::vec::Vec<crate::model::ClusterAssociatedToSchedule>>,
}
impl std::fmt::Debug for CreateSnapshotScheduleOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateSnapshotScheduleOutput");
formatter.field("schedule_definitions", &self.schedule_definitions);
formatter.field("schedule_identifier", &self.schedule_identifier);
formatter.field("schedule_description", &self.schedule_description);
formatter.field("tags", &self.tags);
formatter.field("next_invocations", &self.next_invocations);
formatter.field("associated_cluster_count", &self.associated_cluster_count);
formatter.field("associated_clusters", &self.associated_clusters);
formatter.finish()
}
}
/// See [`CreateSnapshotScheduleOutput`](crate::output::CreateSnapshotScheduleOutput)
pub mod create_snapshot_schedule_output {
/// A builder for [`CreateSnapshotScheduleOutput`](crate::output::CreateSnapshotScheduleOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) schedule_definitions: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) schedule_identifier: std::option::Option<std::string::String>,
pub(crate) schedule_description: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
pub(crate) next_invocations: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
pub(crate) associated_cluster_count: std::option::Option<i32>,
pub(crate) associated_clusters:
std::option::Option<std::vec::Vec<crate::model::ClusterAssociatedToSchedule>>,
}
impl Builder {
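        /// Appends an item to `schedule_definitions`.
        ///
        /// To override the contents of this collection use [`set_schedule_definitions`](Self::set_schedule_definitions).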
pub fn schedule_definitions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.schedule_definitions.unwrap_or_default();
v.push(input.into());
self.schedule_definitions = Some(v);
self
}
pub fn set_schedule_definitions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.schedule_definitions = input;
self
}
/// <p>A unique identifier for the schedule.</p>
pub fn schedule_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.schedule_identifier = Some(input.into());
self
}
pub fn set_schedule_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.schedule_identifier = input;
self
}
/// <p>The description of the schedule.</p>
pub fn schedule_description(mut self, input: impl Into<std::string::String>) -> Self {
self.schedule_description = Some(input.into());
self
}
pub fn set_schedule_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.schedule_description = input;
self
}
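        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).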
pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
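        /// Appends an item to `next_invocations`.
        ///
        /// To override the contents of this collection use [`set_next_invocations`](Self::set_next_invocations).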
pub fn next_invocations(mut self, input: impl Into<smithy_types::Instant>) -> Self {
let mut v = self.next_invocations.unwrap_or_default();
v.push(input.into());
self.next_invocations = Some(v);
self
}
pub fn set_next_invocations(
mut self,
input: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
) -> Self {
self.next_invocations = input;
self
}
/// <p>The number of clusters associated with the schedule.</p>
pub fn associated_cluster_count(mut self, input: i32) -> Self {
self.associated_cluster_count = Some(input);
self
}
pub fn set_associated_cluster_count(mut self, input: std::option::Option<i32>) -> Self {
self.associated_cluster_count = input;
self
}
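        /// Appends an item to `associated_clusters`.
        ///
        /// To override the contents of this collection use [`set_associated_clusters`](Self::set_associated_clusters).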
pub fn associated_clusters(
mut self,
input: impl Into<crate::model::ClusterAssociatedToSchedule>,
) -> Self {
let mut v = self.associated_clusters.unwrap_or_default();
v.push(input.into());
self.associated_clusters = Some(v);
self
}
pub fn set_associated_clusters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ClusterAssociatedToSchedule>>,
) -> Self {
self.associated_clusters = input;
self
}
/// Consumes the builder and constructs a [`CreateSnapshotScheduleOutput`](crate::output::CreateSnapshotScheduleOutput)
pub fn build(self) -> crate::output::CreateSnapshotScheduleOutput {
crate::output::CreateSnapshotScheduleOutput {
schedule_definitions: self.schedule_definitions,
schedule_identifier: self.schedule_identifier,
schedule_description: self.schedule_description,
tags: self.tags,
next_invocations: self.next_invocations,
associated_cluster_count: self.associated_cluster_count,
associated_clusters: self.associated_clusters,
}
}
}
}
impl CreateSnapshotScheduleOutput {
/// Creates a new builder-style object to manufacture [`CreateSnapshotScheduleOutput`](crate::output::CreateSnapshotScheduleOutput)
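    ///
    /// A sketch of the append-style collection setters: each call to
    /// `schedule_definitions` pushes one element (the definitions below are
    /// illustrative schedule expressions):
    ///
    /// ```ignore
    /// let out = crate::output::CreateSnapshotScheduleOutput::builder()
    ///     .schedule_definitions("rate(12 hours)")
    ///     .schedule_definitions("cron(30 8 * * ? *)")
    ///     .build();
    /// assert_eq!(out.schedule_definitions.unwrap().len(), 2);
    /// ```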
pub fn builder() -> crate::output::create_snapshot_schedule_output::Builder {
crate::output::create_snapshot_schedule_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateSnapshotCopyGrantOutput {
/// <p>The snapshot copy grant that grants Amazon Redshift permission to encrypt copied
/// snapshots with the specified customer master key (CMK) from Amazon Web Services KMS in the destination
/// region.</p>
/// <p>
/// For more information about managing snapshot copy grants, go to
/// <a href="https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html">Amazon Redshift Database Encryption</a>
/// in the <i>Amazon Redshift Cluster Management Guide</i>.
/// </p>
pub snapshot_copy_grant: std::option::Option<crate::model::SnapshotCopyGrant>,
}
impl std::fmt::Debug for CreateSnapshotCopyGrantOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateSnapshotCopyGrantOutput");
formatter.field("snapshot_copy_grant", &self.snapshot_copy_grant);
formatter.finish()
}
}
/// See [`CreateSnapshotCopyGrantOutput`](crate::output::CreateSnapshotCopyGrantOutput)
pub mod create_snapshot_copy_grant_output {
/// A builder for [`CreateSnapshotCopyGrantOutput`](crate::output::CreateSnapshotCopyGrantOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) snapshot_copy_grant: std::option::Option<crate::model::SnapshotCopyGrant>,
}
impl Builder {
/// <p>The snapshot copy grant that grants Amazon Redshift permission to encrypt copied
/// snapshots with the specified customer master key (CMK) from Amazon Web Services KMS in the destination
/// region.</p>
/// <p>
/// For more information about managing snapshot copy grants, go to
/// <a href="https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-db-encryption.html">Amazon Redshift Database Encryption</a>
/// in the <i>Amazon Redshift Cluster Management Guide</i>.
/// </p>
pub fn snapshot_copy_grant(mut self, input: crate::model::SnapshotCopyGrant) -> Self {
self.snapshot_copy_grant = Some(input);
self
}
pub fn set_snapshot_copy_grant(
mut self,
input: std::option::Option<crate::model::SnapshotCopyGrant>,
) -> Self {
self.snapshot_copy_grant = input;
self
}
/// Consumes the builder and constructs a [`CreateSnapshotCopyGrantOutput`](crate::output::CreateSnapshotCopyGrantOutput)
pub fn build(self) -> crate::output::CreateSnapshotCopyGrantOutput {
crate::output::CreateSnapshotCopyGrantOutput {
snapshot_copy_grant: self.snapshot_copy_grant,
}
}
}
}
impl CreateSnapshotCopyGrantOutput {
/// Creates a new builder-style object to manufacture [`CreateSnapshotCopyGrantOutput`](crate::output::CreateSnapshotCopyGrantOutput)
pub fn builder() -> crate::output::create_snapshot_copy_grant_output::Builder {
crate::output::create_snapshot_copy_grant_output::Builder::default()
}
}
/// <p>Describes a scheduled action. You can use a scheduled action to trigger some Amazon Redshift API operations on a schedule.
/// For information about which API operations can be scheduled, see <a>ScheduledActionType</a>. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateScheduledActionOutput {
/// <p>The name of the scheduled action. </p>
pub scheduled_action_name: std::option::Option<std::string::String>,
/// <p>A JSON format string of the Amazon Redshift API operation with input parameters. </p>
/// <p>"<code>{\"ResizeCluster\":{\"NodeType\":\"ds2.8xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}</code>". </p>
pub target_action: std::option::Option<crate::model::ScheduledActionType>,
/// <p>The schedule for a one-time (at format) or recurring (cron format) scheduled action.
/// Schedule invocations must be separated by at least one hour.</p>
/// <p>Format of at expressions is "<code>at(yyyy-mm-ddThh:mm:ss)</code>". For example, "<code>at(2016-03-04T17:27:00)</code>".</p>
/// <p>Format of cron expressions is "<code>cron(Minutes Hours Day-of-month Month Day-of-week Year)</code>".
/// For example, "<code>cron(0 10 ? * MON *)</code>". For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions">Cron Expressions</a>
/// in the <i>Amazon CloudWatch Events User Guide</i>.</p>
pub schedule: std::option::Option<std::string::String>,
/// <p>The IAM role to assume to run the scheduled action.
/// This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action.
/// This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf.
/// For more information about the IAM role to use with the Amazon Redshift scheduler, see
/// <a href="https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html">Using Identity-Based Policies for Amazon Redshift</a>
/// in the <i>Amazon Redshift Cluster Management Guide</i>.
/// </p>
pub iam_role: std::option::Option<std::string::String>,
/// <p>The description of the scheduled action. </p>
pub scheduled_action_description: std::option::Option<std::string::String>,
/// <p>The state of the scheduled action. For example, <code>DISABLED</code>. </p>
pub state: std::option::Option<crate::model::ScheduledActionState>,
/// <p>List of times when the scheduled action will run. </p>
pub next_invocations: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
/// <p>The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger. </p>
pub start_time: std::option::Option<smithy_types::Instant>,
/// <p>The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger. </p>
pub end_time: std::option::Option<smithy_types::Instant>,
}
impl std::fmt::Debug for CreateScheduledActionOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateScheduledActionOutput");
formatter.field("scheduled_action_name", &self.scheduled_action_name);
formatter.field("target_action", &self.target_action);
formatter.field("schedule", &self.schedule);
formatter.field("iam_role", &self.iam_role);
formatter.field(
"scheduled_action_description",
&self.scheduled_action_description,
);
formatter.field("state", &self.state);
formatter.field("next_invocations", &self.next_invocations);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.finish()
}
}
/// See [`CreateScheduledActionOutput`](crate::output::CreateScheduledActionOutput)
pub mod create_scheduled_action_output {
/// A builder for [`CreateScheduledActionOutput`](crate::output::CreateScheduledActionOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) scheduled_action_name: std::option::Option<std::string::String>,
pub(crate) target_action: std::option::Option<crate::model::ScheduledActionType>,
pub(crate) schedule: std::option::Option<std::string::String>,
pub(crate) iam_role: std::option::Option<std::string::String>,
pub(crate) scheduled_action_description: std::option::Option<std::string::String>,
pub(crate) state: std::option::Option<crate::model::ScheduledActionState>,
pub(crate) next_invocations: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
pub(crate) start_time: std::option::Option<smithy_types::Instant>,
pub(crate) end_time: std::option::Option<smithy_types::Instant>,
}
impl Builder {
/// <p>The name of the scheduled action. </p>
pub fn scheduled_action_name(mut self, input: impl Into<std::string::String>) -> Self {
self.scheduled_action_name = Some(input.into());
self
}
pub fn set_scheduled_action_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.scheduled_action_name = input;
self
}
/// <p>A JSON format string of the Amazon Redshift API operation with input parameters. </p>
/// <p>"<code>{\"ResizeCluster\":{\"NodeType\":\"ds2.8xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}</code>". </p>
pub fn target_action(mut self, input: crate::model::ScheduledActionType) -> Self {
self.target_action = Some(input);
self
}
pub fn set_target_action(
mut self,
input: std::option::Option<crate::model::ScheduledActionType>,
) -> Self {
self.target_action = input;
self
}
/// <p>The schedule for a one-time (at format) or recurring (cron format) scheduled action.
/// Schedule invocations must be separated by at least one hour.</p>
/// <p>Format of at expressions is "<code>at(yyyy-mm-ddThh:mm:ss)</code>". For example, "<code>at(2016-03-04T17:27:00)</code>".</p>
/// <p>Format of cron expressions is "<code>cron(Minutes Hours Day-of-month Month Day-of-week Year)</code>".
/// For example, "<code>cron(0 10 ? * MON *)</code>". For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions">Cron Expressions</a>
/// in the <i>Amazon CloudWatch Events User Guide</i>.</p>
pub fn schedule(mut self, input: impl Into<std::string::String>) -> Self {
self.schedule = Some(input.into());
self
}
pub fn set_schedule(mut self, input: std::option::Option<std::string::String>) -> Self {
self.schedule = input;
self
}
/// <p>The IAM role to assume to run the scheduled action.
/// This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action.
/// This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf.
/// For more information about the IAM role to use with the Amazon Redshift scheduler, see
/// <a href="https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html">Using Identity-Based Policies for Amazon Redshift</a>
/// in the <i>Amazon Redshift Cluster Management Guide</i>.
/// </p>
pub fn iam_role(mut self, input: impl Into<std::string::String>) -> Self {
self.iam_role = Some(input.into());
self
}
pub fn set_iam_role(mut self, input: std::option::Option<std::string::String>) -> Self {
self.iam_role = input;
self
}
/// <p>The description of the scheduled action. </p>
pub fn scheduled_action_description(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.scheduled_action_description = Some(input.into());
self
}
pub fn set_scheduled_action_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.scheduled_action_description = input;
self
}
/// <p>The state of the scheduled action. For example, <code>DISABLED</code>. </p>
pub fn state(mut self, input: crate::model::ScheduledActionState) -> Self {
self.state = Some(input);
self
}
pub fn set_state(
mut self,
input: std::option::Option<crate::model::ScheduledActionState>,
) -> Self {
self.state = input;
self
}
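        /// Appends an item to `next_invocations`.
        ///
        /// To override the contents of this collection use [`set_next_invocations`](Self::set_next_invocations).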
pub fn next_invocations(mut self, input: impl Into<smithy_types::Instant>) -> Self {
let mut v = self.next_invocations.unwrap_or_default();
v.push(input.into());
self.next_invocations = Some(v);
self
}
pub fn set_next_invocations(
mut self,
input: std::option::Option<std::vec::Vec<smithy_types::Instant>>,
) -> Self {
self.next_invocations = input;
self
}
/// <p>The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger. </p>
pub fn start_time(mut self, input: smithy_types::Instant) -> Self {
self.start_time = Some(input);
self
}
pub fn set_start_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.start_time = input;
self
}
/// <p>The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger. </p>
pub fn end_time(mut self, input: smithy_types::Instant) -> Self {
self.end_time = Some(input);
self
}
pub fn set_end_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.end_time = input;
self
}
/// Consumes the builder and constructs a [`CreateScheduledActionOutput`](crate::output::CreateScheduledActionOutput)
pub fn build(self) -> crate::output::CreateScheduledActionOutput {
crate::output::CreateScheduledActionOutput {
scheduled_action_name: self.scheduled_action_name,
target_action: self.target_action,
schedule: self.schedule,
iam_role: self.iam_role,
scheduled_action_description: self.scheduled_action_description,
state: self.state,
next_invocations: self.next_invocations,
start_time: self.start_time,
end_time: self.end_time,
}
}
}
}
impl CreateScheduledActionOutput {
/// Creates a new builder-style object to manufacture [`CreateScheduledActionOutput`](crate::output::CreateScheduledActionOutput)
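    ///
    /// A sketch reusing the schedule formats documented on the builder's
    /// `schedule` setter (the name and expression below are illustrative):
    ///
    /// ```ignore
    /// let out = crate::output::CreateScheduledActionOutput::builder()
    ///     .scheduled_action_name("nightly-resize")
    ///     .schedule("cron(0 10 ? * MON *)") // recurring form; one-time uses at(...)
    ///     .build();
    /// assert!(out.next_invocations.is_none()); // the appender was never called
    /// ```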
pub fn builder() -> crate::output::create_scheduled_action_output::Builder {
crate::output::create_scheduled_action_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateHsmConfigurationOutput {
/// <p>Returns information about an HSM configuration, which is an object that describes
/// to Amazon Redshift clusters the information they require to connect to an HSM where they can
/// store database encryption keys.</p>
pub hsm_configuration: std::option::Option<crate::model::HsmConfiguration>,
}
impl std::fmt::Debug for CreateHsmConfigurationOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateHsmConfigurationOutput");
formatter.field("hsm_configuration", &self.hsm_configuration);
formatter.finish()
}
}
/// See [`CreateHsmConfigurationOutput`](crate::output::CreateHsmConfigurationOutput)
pub mod create_hsm_configuration_output {
/// A builder for [`CreateHsmConfigurationOutput`](crate::output::CreateHsmConfigurationOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) hsm_configuration: std::option::Option<crate::model::HsmConfiguration>,
}
impl Builder {
/// <p>Returns information about an HSM configuration, which is an object that describes
/// to Amazon Redshift clusters the information they require to connect to an HSM where they can
/// store database encryption keys.</p>
pub fn hsm_configuration(mut self, input: crate::model::HsmConfiguration) -> Self {
self.hsm_configuration = Some(input);
self
}
pub fn set_hsm_configuration(
mut self,
input: std::option::Option<crate::model::HsmConfiguration>,
) -> Self {
self.hsm_configuration = input;
self
}
/// Consumes the builder and constructs a [`CreateHsmConfigurationOutput`](crate::output::CreateHsmConfigurationOutput)
pub fn build(self) -> crate::output::CreateHsmConfigurationOutput {
crate::output::CreateHsmConfigurationOutput {
hsm_configuration: self.hsm_configuration,
}
}
}
}
impl CreateHsmConfigurationOutput {
/// Creates a new builder-style object to manufacture [`CreateHsmConfigurationOutput`](crate::output::CreateHsmConfigurationOutput)
pub fn builder() -> crate::output::create_hsm_configuration_output::Builder {
crate::output::create_hsm_configuration_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateHsmClientCertificateOutput {
/// <p>Returns information about an HSM client certificate. The certificate is stored in a
    /// secure Hardware Security Module (HSM), and used by the Amazon Redshift cluster to encrypt data
/// files.</p>
pub hsm_client_certificate: std::option::Option<crate::model::HsmClientCertificate>,
}
impl std::fmt::Debug for CreateHsmClientCertificateOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateHsmClientCertificateOutput");
formatter.field("hsm_client_certificate", &self.hsm_client_certificate);
formatter.finish()
}
}
/// See [`CreateHsmClientCertificateOutput`](crate::output::CreateHsmClientCertificateOutput)
pub mod create_hsm_client_certificate_output {
/// A builder for [`CreateHsmClientCertificateOutput`](crate::output::CreateHsmClientCertificateOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) hsm_client_certificate: std::option::Option<crate::model::HsmClientCertificate>,
}
impl Builder {
/// <p>Returns information about an HSM client certificate. The certificate is stored in a
        /// secure Hardware Security Module (HSM), and used by the Amazon Redshift cluster to encrypt data
/// files.</p>
pub fn hsm_client_certificate(mut self, input: crate::model::HsmClientCertificate) -> Self {
self.hsm_client_certificate = Some(input);
self
}
pub fn set_hsm_client_certificate(
mut self,
input: std::option::Option<crate::model::HsmClientCertificate>,
) -> Self {
self.hsm_client_certificate = input;
self
}
/// Consumes the builder and constructs a [`CreateHsmClientCertificateOutput`](crate::output::CreateHsmClientCertificateOutput)
pub fn build(self) -> crate::output::CreateHsmClientCertificateOutput {
crate::output::CreateHsmClientCertificateOutput {
hsm_client_certificate: self.hsm_client_certificate,
}
}
}
}
impl CreateHsmClientCertificateOutput {
/// Creates a new builder-style object to manufacture [`CreateHsmClientCertificateOutput`](crate::output::CreateHsmClientCertificateOutput)
pub fn builder() -> crate::output::create_hsm_client_certificate_output::Builder {
crate::output::create_hsm_client_certificate_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateEventSubscriptionOutput {
/// <p>Describes event subscriptions.</p>
pub event_subscription: std::option::Option<crate::model::EventSubscription>,
}
impl std::fmt::Debug for CreateEventSubscriptionOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateEventSubscriptionOutput");
formatter.field("event_subscription", &self.event_subscription);
formatter.finish()
}
}
/// See [`CreateEventSubscriptionOutput`](crate::output::CreateEventSubscriptionOutput)
pub mod create_event_subscription_output {
/// A builder for [`CreateEventSubscriptionOutput`](crate::output::CreateEventSubscriptionOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) event_subscription: std::option::Option<crate::model::EventSubscription>,
}
impl Builder {
/// <p>Describes event subscriptions.</p>
pub fn event_subscription(mut self, input: crate::model::EventSubscription) -> Self {
self.event_subscription = Some(input);
self
}
pub fn set_event_subscription(
mut self,
input: std::option::Option<crate::model::EventSubscription>,
) -> Self {
self.event_subscription = input;
self
}
/// Consumes the builder and constructs a [`CreateEventSubscriptionOutput`](crate::output::CreateEventSubscriptionOutput)
pub fn build(self) -> crate::output::CreateEventSubscriptionOutput {
crate::output::CreateEventSubscriptionOutput {
event_subscription: self.event_subscription,
}
}
}
}
impl CreateEventSubscriptionOutput {
/// Creates a new builder-style object to manufacture [`CreateEventSubscriptionOutput`](crate::output::CreateEventSubscriptionOutput)
pub fn builder() -> crate::output::create_event_subscription_output::Builder {
crate::output::create_event_subscription_output::Builder::default()
}
}
/// <p>Describes a Redshift-managed VPC endpoint.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateEndpointAccessOutput {
/// <p>The cluster identifier of the cluster associated with the endpoint.</p>
pub cluster_identifier: std::option::Option<std::string::String>,
/// <p>The Amazon Web Services account ID of the owner of the cluster.</p>
pub resource_owner: std::option::Option<std::string::String>,
/// <p>The subnet group name where Amazon Redshift chooses to deploy the endpoint.</p>
pub subnet_group_name: std::option::Option<std::string::String>,
/// <p>The status of the endpoint.</p>
pub endpoint_status: std::option::Option<std::string::String>,
/// <p>The name of the endpoint.</p>
pub endpoint_name: std::option::Option<std::string::String>,
/// <p>The time (UTC) that the endpoint was created.</p>
pub endpoint_create_time: std::option::Option<smithy_types::Instant>,
/// <p>The port number on which the cluster accepts incoming connections.</p>
pub port: i32,
/// <p>The DNS address of the endpoint.</p>
pub address: std::option::Option<std::string::String>,
/// <p>The security groups associated with the endpoint.</p>
pub vpc_security_groups:
std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
/// <p>The connection endpoint for connecting to an Amazon Redshift cluster through the proxy.</p>
pub vpc_endpoint: std::option::Option<crate::model::VpcEndpoint>,
}
impl std::fmt::Debug for CreateEndpointAccessOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateEndpointAccessOutput");
formatter.field("cluster_identifier", &self.cluster_identifier);
formatter.field("resource_owner", &self.resource_owner);
formatter.field("subnet_group_name", &self.subnet_group_name);
formatter.field("endpoint_status", &self.endpoint_status);
formatter.field("endpoint_name", &self.endpoint_name);
formatter.field("endpoint_create_time", &self.endpoint_create_time);
formatter.field("port", &self.port);
formatter.field("address", &self.address);
formatter.field("vpc_security_groups", &self.vpc_security_groups);
formatter.field("vpc_endpoint", &self.vpc_endpoint);
formatter.finish()
}
}
/// See [`CreateEndpointAccessOutput`](crate::output::CreateEndpointAccessOutput)
pub mod create_endpoint_access_output {
/// A builder for [`CreateEndpointAccessOutput`](crate::output::CreateEndpointAccessOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_identifier: std::option::Option<std::string::String>,
pub(crate) resource_owner: std::option::Option<std::string::String>,
pub(crate) subnet_group_name: std::option::Option<std::string::String>,
pub(crate) endpoint_status: std::option::Option<std::string::String>,
pub(crate) endpoint_name: std::option::Option<std::string::String>,
pub(crate) endpoint_create_time: std::option::Option<smithy_types::Instant>,
pub(crate) port: std::option::Option<i32>,
pub(crate) address: std::option::Option<std::string::String>,
pub(crate) vpc_security_groups:
std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
pub(crate) vpc_endpoint: std::option::Option<crate::model::VpcEndpoint>,
}
impl Builder {
/// <p>The cluster identifier of the cluster associated with the endpoint.</p>
pub fn cluster_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_identifier = Some(input.into());
self
}
pub fn set_cluster_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_identifier = input;
self
}
/// <p>The Amazon Web Services account ID of the owner of the cluster.</p>
pub fn resource_owner(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_owner = Some(input.into());
self
}
pub fn set_resource_owner(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.resource_owner = input;
self
}
/// <p>The subnet group name where Amazon Redshift chooses to deploy the endpoint.</p>
pub fn subnet_group_name(mut self, input: impl Into<std::string::String>) -> Self {
self.subnet_group_name = Some(input.into());
self
}
pub fn set_subnet_group_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.subnet_group_name = input;
self
}
/// <p>The status of the endpoint.</p>
pub fn endpoint_status(mut self, input: impl Into<std::string::String>) -> Self {
self.endpoint_status = Some(input.into());
self
}
pub fn set_endpoint_status(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.endpoint_status = input;
self
}
/// <p>The name of the endpoint.</p>
pub fn endpoint_name(mut self, input: impl Into<std::string::String>) -> Self {
self.endpoint_name = Some(input.into());
self
}
pub fn set_endpoint_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.endpoint_name = input;
self
}
/// <p>The time (UTC) that the endpoint was created.</p>
pub fn endpoint_create_time(mut self, input: smithy_types::Instant) -> Self {
self.endpoint_create_time = Some(input);
self
}
pub fn set_endpoint_create_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.endpoint_create_time = input;
self
}
/// <p>The port number on which the cluster accepts incoming connections.</p>
pub fn port(mut self, input: i32) -> Self {
self.port = Some(input);
self
}
pub fn set_port(mut self, input: std::option::Option<i32>) -> Self {
self.port = input;
self
}
/// <p>The DNS address of the endpoint.</p>
pub fn address(mut self, input: impl Into<std::string::String>) -> Self {
self.address = Some(input.into());
self
}
pub fn set_address(mut self, input: std::option::Option<std::string::String>) -> Self {
self.address = input;
self
}
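        /// Appends an item to `vpc_security_groups`.
        ///
        /// To override the contents of this collection use [`set_vpc_security_groups`](Self::set_vpc_security_groups).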
pub fn vpc_security_groups(
mut self,
input: impl Into<crate::model::VpcSecurityGroupMembership>,
) -> Self {
let mut v = self.vpc_security_groups.unwrap_or_default();
v.push(input.into());
self.vpc_security_groups = Some(v);
self
}
pub fn set_vpc_security_groups(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::VpcSecurityGroupMembership>>,
) -> Self {
self.vpc_security_groups = input;
self
}
/// <p>The connection endpoint for connecting to an Amazon Redshift cluster through the proxy.</p>
pub fn vpc_endpoint(mut self, input: crate::model::VpcEndpoint) -> Self {
self.vpc_endpoint = Some(input);
self
}
pub fn set_vpc_endpoint(
mut self,
input: std::option::Option<crate::model::VpcEndpoint>,
) -> Self {
self.vpc_endpoint = input;
self
}
/// Consumes the builder and constructs a [`CreateEndpointAccessOutput`](crate::output::CreateEndpointAccessOutput)
pub fn build(self) -> crate::output::CreateEndpointAccessOutput {
crate::output::CreateEndpointAccessOutput {
cluster_identifier: self.cluster_identifier,
resource_owner: self.resource_owner,
subnet_group_name: self.subnet_group_name,
endpoint_status: self.endpoint_status,
endpoint_name: self.endpoint_name,
endpoint_create_time: self.endpoint_create_time,
port: self.port.unwrap_or_default(),
address: self.address,
vpc_security_groups: self.vpc_security_groups,
vpc_endpoint: self.vpc_endpoint,
}
}
}
}
impl CreateEndpointAccessOutput {
/// Creates a new builder-style object to manufacture [`CreateEndpointAccessOutput`](crate::output::CreateEndpointAccessOutput)
pub fn builder() -> crate::output::create_endpoint_access_output::Builder {
crate::output::create_endpoint_access_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateClusterSubnetGroupOutput {
/// <p>Describes a subnet group.</p>
pub cluster_subnet_group: std::option::Option<crate::model::ClusterSubnetGroup>,
}
impl std::fmt::Debug for CreateClusterSubnetGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateClusterSubnetGroupOutput");
formatter.field("cluster_subnet_group", &self.cluster_subnet_group);
formatter.finish()
}
}
/// See [`CreateClusterSubnetGroupOutput`](crate::output::CreateClusterSubnetGroupOutput)
pub mod create_cluster_subnet_group_output {
/// A builder for [`CreateClusterSubnetGroupOutput`](crate::output::CreateClusterSubnetGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_subnet_group: std::option::Option<crate::model::ClusterSubnetGroup>,
}
impl Builder {
/// <p>Describes a subnet group.</p>
pub fn cluster_subnet_group(mut self, input: crate::model::ClusterSubnetGroup) -> Self {
self.cluster_subnet_group = Some(input);
self
}
pub fn set_cluster_subnet_group(
mut self,
input: std::option::Option<crate::model::ClusterSubnetGroup>,
) -> Self {
self.cluster_subnet_group = input;
self
}
/// Consumes the builder and constructs a [`CreateClusterSubnetGroupOutput`](crate::output::CreateClusterSubnetGroupOutput)
pub fn build(self) -> crate::output::CreateClusterSubnetGroupOutput {
crate::output::CreateClusterSubnetGroupOutput {
cluster_subnet_group: self.cluster_subnet_group,
}
}
}
}
impl CreateClusterSubnetGroupOutput {
/// Creates a new builder-style object to manufacture [`CreateClusterSubnetGroupOutput`](crate::output::CreateClusterSubnetGroupOutput)
pub fn builder() -> crate::output::create_cluster_subnet_group_output::Builder {
crate::output::create_cluster_subnet_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateClusterSnapshotOutput {
/// <p>Describes a snapshot.</p>
pub snapshot: std::option::Option<crate::model::Snapshot>,
}
impl std::fmt::Debug for CreateClusterSnapshotOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateClusterSnapshotOutput");
formatter.field("snapshot", &self.snapshot);
formatter.finish()
}
}
/// See [`CreateClusterSnapshotOutput`](crate::output::CreateClusterSnapshotOutput)
pub mod create_cluster_snapshot_output {
/// A builder for [`CreateClusterSnapshotOutput`](crate::output::CreateClusterSnapshotOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) snapshot: std::option::Option<crate::model::Snapshot>,
}
impl Builder {
/// <p>Describes a snapshot.</p>
pub fn snapshot(mut self, input: crate::model::Snapshot) -> Self {
self.snapshot = Some(input);
self
}
pub fn set_snapshot(mut self, input: std::option::Option<crate::model::Snapshot>) -> Self {
self.snapshot = input;
self
}
/// Consumes the builder and constructs a [`CreateClusterSnapshotOutput`](crate::output::CreateClusterSnapshotOutput)
pub fn build(self) -> crate::output::CreateClusterSnapshotOutput {
crate::output::CreateClusterSnapshotOutput {
snapshot: self.snapshot,
}
}
}
}
impl CreateClusterSnapshotOutput {
/// Creates a new builder-style object to manufacture [`CreateClusterSnapshotOutput`](crate::output::CreateClusterSnapshotOutput)
pub fn builder() -> crate::output::create_cluster_snapshot_output::Builder {
crate::output::create_cluster_snapshot_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateClusterSecurityGroupOutput {
/// <p>Describes a security group.</p>
pub cluster_security_group: std::option::Option<crate::model::ClusterSecurityGroup>,
}
impl std::fmt::Debug for CreateClusterSecurityGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateClusterSecurityGroupOutput");
formatter.field("cluster_security_group", &self.cluster_security_group);
formatter.finish()
}
}
/// See [`CreateClusterSecurityGroupOutput`](crate::output::CreateClusterSecurityGroupOutput)
pub mod create_cluster_security_group_output {
/// A builder for [`CreateClusterSecurityGroupOutput`](crate::output::CreateClusterSecurityGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_security_group: std::option::Option<crate::model::ClusterSecurityGroup>,
}
impl Builder {
/// <p>Describes a security group.</p>
pub fn cluster_security_group(mut self, input: crate::model::ClusterSecurityGroup) -> Self {
self.cluster_security_group = Some(input);
self
}
pub fn set_cluster_security_group(
mut self,
input: std::option::Option<crate::model::ClusterSecurityGroup>,
) -> Self {
self.cluster_security_group = input;
self
}
/// Consumes the builder and constructs a [`CreateClusterSecurityGroupOutput`](crate::output::CreateClusterSecurityGroupOutput)
pub fn build(self) -> crate::output::CreateClusterSecurityGroupOutput {
crate::output::CreateClusterSecurityGroupOutput {
cluster_security_group: self.cluster_security_group,
}
}
}
}
impl CreateClusterSecurityGroupOutput {
/// Creates a new builder-style object to manufacture [`CreateClusterSecurityGroupOutput`](crate::output::CreateClusterSecurityGroupOutput)
pub fn builder() -> crate::output::create_cluster_security_group_output::Builder {
crate::output::create_cluster_security_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateClusterParameterGroupOutput {
/// <p>Describes a parameter group.</p>
pub cluster_parameter_group: std::option::Option<crate::model::ClusterParameterGroup>,
}
impl std::fmt::Debug for CreateClusterParameterGroupOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateClusterParameterGroupOutput");
formatter.field("cluster_parameter_group", &self.cluster_parameter_group);
formatter.finish()
}
}
/// See [`CreateClusterParameterGroupOutput`](crate::output::CreateClusterParameterGroupOutput)
pub mod create_cluster_parameter_group_output {
/// A builder for [`CreateClusterParameterGroupOutput`](crate::output::CreateClusterParameterGroupOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_parameter_group:
std::option::Option<crate::model::ClusterParameterGroup>,
}
impl Builder {
/// <p>Describes a parameter group.</p>
pub fn cluster_parameter_group(
mut self,
input: crate::model::ClusterParameterGroup,
) -> Self {
self.cluster_parameter_group = Some(input);
self
}
pub fn set_cluster_parameter_group(
mut self,
input: std::option::Option<crate::model::ClusterParameterGroup>,
) -> Self {
self.cluster_parameter_group = input;
self
}
/// Consumes the builder and constructs a [`CreateClusterParameterGroupOutput`](crate::output::CreateClusterParameterGroupOutput)
pub fn build(self) -> crate::output::CreateClusterParameterGroupOutput {
crate::output::CreateClusterParameterGroupOutput {
cluster_parameter_group: self.cluster_parameter_group,
}
}
}
}
impl CreateClusterParameterGroupOutput {
/// Creates a new builder-style object to manufacture [`CreateClusterParameterGroupOutput`](crate::output::CreateClusterParameterGroupOutput)
pub fn builder() -> crate::output::create_cluster_parameter_group_output::Builder {
crate::output::create_cluster_parameter_group_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateClusterOutput {
/// <p>Describes a cluster.</p>
pub cluster: std::option::Option<crate::model::Cluster>,
}
impl std::fmt::Debug for CreateClusterOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateClusterOutput");
formatter.field("cluster", &self.cluster);
formatter.finish()
}
}
/// See [`CreateClusterOutput`](crate::output::CreateClusterOutput)
pub mod create_cluster_output {
/// A builder for [`CreateClusterOutput`](crate::output::CreateClusterOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster: std::option::Option<crate::model::Cluster>,
}
impl Builder {
/// <p>Describes a cluster.</p>
pub fn cluster(mut self, input: crate::model::Cluster) -> Self {
self.cluster = Some(input);
self
}
pub fn set_cluster(mut self, input: std::option::Option<crate::model::Cluster>) -> Self {
self.cluster = input;
self
}
/// Consumes the builder and constructs a [`CreateClusterOutput`](crate::output::CreateClusterOutput)
pub fn build(self) -> crate::output::CreateClusterOutput {
crate::output::CreateClusterOutput {
cluster: self.cluster,
}
}
}
}
impl CreateClusterOutput {
/// Creates a new builder-style object to manufacture [`CreateClusterOutput`](crate::output::CreateClusterOutput)
pub fn builder() -> crate::output::create_cluster_output::Builder {
crate::output::create_cluster_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateAuthenticationProfileOutput {
/// <p>The name of the authentication profile that was created.</p>
pub authentication_profile_name: std::option::Option<std::string::String>,
/// <p>The content of the authentication profile in JSON format.</p>
pub authentication_profile_content: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for CreateAuthenticationProfileOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateAuthenticationProfileOutput");
formatter.field(
"authentication_profile_name",
&self.authentication_profile_name,
);
formatter.field(
"authentication_profile_content",
&self.authentication_profile_content,
);
formatter.finish()
}
}
/// See [`CreateAuthenticationProfileOutput`](crate::output::CreateAuthenticationProfileOutput)
pub mod create_authentication_profile_output {
/// A builder for [`CreateAuthenticationProfileOutput`](crate::output::CreateAuthenticationProfileOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) authentication_profile_name: std::option::Option<std::string::String>,
pub(crate) authentication_profile_content: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the authentication profile that was created.</p>
pub fn authentication_profile_name(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.authentication_profile_name = Some(input.into());
self
}
pub fn set_authentication_profile_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.authentication_profile_name = input;
self
}
/// <p>The content of the authentication profile in JSON format.</p>
pub fn authentication_profile_content(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.authentication_profile_content = Some(input.into());
self
}
pub fn set_authentication_profile_content(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.authentication_profile_content = input;
self
}
/// Consumes the builder and constructs a [`CreateAuthenticationProfileOutput`](crate::output::CreateAuthenticationProfileOutput)
pub fn build(self) -> crate::output::CreateAuthenticationProfileOutput {
crate::output::CreateAuthenticationProfileOutput {
authentication_profile_name: self.authentication_profile_name,
authentication_profile_content: self.authentication_profile_content,
}
}
}
}
impl CreateAuthenticationProfileOutput {
/// Creates a new builder-style object to manufacture [`CreateAuthenticationProfileOutput`](crate::output::CreateAuthenticationProfileOutput)
pub fn builder() -> crate::output::create_authentication_profile_output::Builder {
crate::output::create_authentication_profile_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CopyClusterSnapshotOutput {
/// <p>Describes a snapshot.</p>
pub snapshot: std::option::Option<crate::model::Snapshot>,
}
impl std::fmt::Debug for CopyClusterSnapshotOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CopyClusterSnapshotOutput");
formatter.field("snapshot", &self.snapshot);
formatter.finish()
}
}
/// See [`CopyClusterSnapshotOutput`](crate::output::CopyClusterSnapshotOutput)
pub mod copy_cluster_snapshot_output {
/// A builder for [`CopyClusterSnapshotOutput`](crate::output::CopyClusterSnapshotOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) snapshot: std::option::Option<crate::model::Snapshot>,
}
impl Builder {
/// <p>Describes a snapshot.</p>
pub fn snapshot(mut self, input: crate::model::Snapshot) -> Self {
self.snapshot = Some(input);
self
}
pub fn set_snapshot(mut self, input: std::option::Option<crate::model::Snapshot>) -> Self {
self.snapshot = input;
self
}
/// Consumes the builder and constructs a [`CopyClusterSnapshotOutput`](crate::output::CopyClusterSnapshotOutput)
pub fn build(self) -> crate::output::CopyClusterSnapshotOutput {
crate::output::CopyClusterSnapshotOutput {
snapshot: self.snapshot,
}
}
}
}
impl CopyClusterSnapshotOutput {
/// Creates a new builder-style object to manufacture [`CopyClusterSnapshotOutput`](crate::output::CopyClusterSnapshotOutput)
pub fn builder() -> crate::output::copy_cluster_snapshot_output::Builder {
crate::output::copy_cluster_snapshot_output::Builder::default()
}
}
/// <p>Describes the result of a cluster resize operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CancelResizeOutput {
/// <p>The node type that the cluster will have after the resize operation is
/// complete.</p>
pub target_node_type: std::option::Option<std::string::String>,
/// <p>The number of nodes that the cluster will have after the resize operation is
/// complete.</p>
pub target_number_of_nodes: std::option::Option<i32>,
/// <p>The cluster type after the resize operation is complete.</p>
/// <p>Valid Values: <code>multi-node</code> | <code>single-node</code>
/// </p>
pub target_cluster_type: std::option::Option<std::string::String>,
/// <p>The status of the resize operation.</p>
/// <p>Valid Values: <code>NONE</code> | <code>IN_PROGRESS</code> | <code>FAILED</code> |
/// <code>SUCCEEDED</code> | <code>CANCELLING</code>
/// </p>
pub status: std::option::Option<std::string::String>,
    /// <p>The names of tables that have been completely imported.</p>
/// <p>Valid Values: List of table names.</p>
pub import_tables_completed: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The names of tables that are being currently imported.</p>
/// <p>Valid Values: List of table names.</p>
pub import_tables_in_progress: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The names of tables that have not yet been imported.</p>
    /// <p>Valid Values: List of table names.</p>
pub import_tables_not_started: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The average rate of the resize operation over the last few minutes, measured in
/// megabytes per second. After the resize operation completes, this value shows the average
/// rate of the entire resize operation.</p>
pub avg_resize_rate_in_mega_bytes_per_second: std::option::Option<f64>,
/// <p>The estimated total amount of data, in megabytes, on the cluster before the resize
/// operation began.</p>
pub total_resize_data_in_mega_bytes: std::option::Option<i64>,
/// <p>While the resize operation is in progress, this value shows the current amount of
/// data, in megabytes, that has been processed so far. When the resize operation is
/// complete, this value shows the total amount of data, in megabytes, on the cluster, which
/// may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data
/// before resize).</p>
pub progress_in_mega_bytes: std::option::Option<i64>,
    /// <p>The number of seconds that have elapsed since the resize operation began. After the
/// resize operation completes, this value shows the total actual time, in seconds, for the
/// resize operation.</p>
pub elapsed_time_in_seconds: std::option::Option<i64>,
/// <p>The estimated time remaining, in seconds, until the resize operation is complete.
/// This value is calculated based on the average resize rate and the estimated amount of
/// data remaining to be processed. Once the resize operation is complete, this value will
/// be 0.</p>
pub estimated_time_to_completion_in_seconds: std::option::Option<i64>,
/// <p>An enum with possible values of <code>ClassicResize</code> and
/// <code>ElasticResize</code>. These values describe the type of resize operation being
/// performed. </p>
pub resize_type: std::option::Option<std::string::String>,
/// <p>An optional string to provide additional details about the resize action.</p>
pub message: std::option::Option<std::string::String>,
/// <p>The type of encryption for the cluster after the resize is complete.</p>
/// <p>Possible values are <code>KMS</code> and <code>None</code>. </p>
pub target_encryption_type: std::option::Option<std::string::String>,
/// <p>The percent of data transferred from source cluster to target cluster.</p>
pub data_transfer_progress_percent: std::option::Option<f64>,
}
impl std::fmt::Debug for CancelResizeOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CancelResizeOutput");
formatter.field("target_node_type", &self.target_node_type);
formatter.field("target_number_of_nodes", &self.target_number_of_nodes);
formatter.field("target_cluster_type", &self.target_cluster_type);
formatter.field("status", &self.status);
formatter.field("import_tables_completed", &self.import_tables_completed);
formatter.field("import_tables_in_progress", &self.import_tables_in_progress);
formatter.field("import_tables_not_started", &self.import_tables_not_started);
formatter.field(
"avg_resize_rate_in_mega_bytes_per_second",
&self.avg_resize_rate_in_mega_bytes_per_second,
);
formatter.field(
"total_resize_data_in_mega_bytes",
&self.total_resize_data_in_mega_bytes,
);
formatter.field("progress_in_mega_bytes", &self.progress_in_mega_bytes);
formatter.field("elapsed_time_in_seconds", &self.elapsed_time_in_seconds);
formatter.field(
"estimated_time_to_completion_in_seconds",
&self.estimated_time_to_completion_in_seconds,
);
formatter.field("resize_type", &self.resize_type);
formatter.field("message", &self.message);
formatter.field("target_encryption_type", &self.target_encryption_type);
formatter.field(
"data_transfer_progress_percent",
&self.data_transfer_progress_percent,
);
formatter.finish()
}
}
/// See [`CancelResizeOutput`](crate::output::CancelResizeOutput)
pub mod cancel_resize_output {
/// A builder for [`CancelResizeOutput`](crate::output::CancelResizeOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) target_node_type: std::option::Option<std::string::String>,
pub(crate) target_number_of_nodes: std::option::Option<i32>,
pub(crate) target_cluster_type: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<std::string::String>,
pub(crate) import_tables_completed: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) import_tables_in_progress:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) import_tables_not_started:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) avg_resize_rate_in_mega_bytes_per_second: std::option::Option<f64>,
pub(crate) total_resize_data_in_mega_bytes: std::option::Option<i64>,
pub(crate) progress_in_mega_bytes: std::option::Option<i64>,
pub(crate) elapsed_time_in_seconds: std::option::Option<i64>,
pub(crate) estimated_time_to_completion_in_seconds: std::option::Option<i64>,
pub(crate) resize_type: std::option::Option<std::string::String>,
pub(crate) message: std::option::Option<std::string::String>,
pub(crate) target_encryption_type: std::option::Option<std::string::String>,
pub(crate) data_transfer_progress_percent: std::option::Option<f64>,
}
impl Builder {
/// <p>The node type that the cluster will have after the resize operation is
/// complete.</p>
pub fn target_node_type(mut self, input: impl Into<std::string::String>) -> Self {
self.target_node_type = Some(input.into());
self
}
pub fn set_target_node_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.target_node_type = input;
self
}
/// <p>The number of nodes that the cluster will have after the resize operation is
/// complete.</p>
pub fn target_number_of_nodes(mut self, input: i32) -> Self {
self.target_number_of_nodes = Some(input);
self
}
pub fn set_target_number_of_nodes(mut self, input: std::option::Option<i32>) -> Self {
self.target_number_of_nodes = input;
self
}
/// <p>The cluster type after the resize operation is complete.</p>
/// <p>Valid Values: <code>multi-node</code> | <code>single-node</code>
/// </p>
pub fn target_cluster_type(mut self, input: impl Into<std::string::String>) -> Self {
self.target_cluster_type = Some(input.into());
self
}
pub fn set_target_cluster_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.target_cluster_type = input;
self
}
/// <p>The status of the resize operation.</p>
/// <p>Valid Values: <code>NONE</code> | <code>IN_PROGRESS</code> | <code>FAILED</code> |
/// <code>SUCCEEDED</code> | <code>CANCELLING</code>
/// </p>
pub fn status(mut self, input: impl Into<std::string::String>) -> Self {
self.status = Some(input.into());
self
}
pub fn set_status(mut self, input: std::option::Option<std::string::String>) -> Self {
self.status = input;
self
}
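        /// Appends an item to `import_tables_completed`.
        ///
        /// To override the contents of this collection use [`set_import_tables_completed`](Self::set_import_tables_completed).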
pub fn import_tables_completed(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.import_tables_completed.unwrap_or_default();
v.push(input.into());
self.import_tables_completed = Some(v);
self
}
pub fn set_import_tables_completed(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.import_tables_completed = input;
self
}
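        /// Appends an item to `import_tables_in_progress`.
        ///
        /// To override the contents of this collection use [`set_import_tables_in_progress`](Self::set_import_tables_in_progress).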
pub fn import_tables_in_progress(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.import_tables_in_progress.unwrap_or_default();
v.push(input.into());
self.import_tables_in_progress = Some(v);
self
}
pub fn set_import_tables_in_progress(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.import_tables_in_progress = input;
self
}
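        /// Appends an item to `import_tables_not_started`.
        ///
        /// To override the contents of this collection use [`set_import_tables_not_started`](Self::set_import_tables_not_started).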
pub fn import_tables_not_started(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.import_tables_not_started.unwrap_or_default();
v.push(input.into());
self.import_tables_not_started = Some(v);
self
}
pub fn set_import_tables_not_started(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.import_tables_not_started = input;
self
}
/// <p>The average rate of the resize operation over the last few minutes, measured in
/// megabytes per second. After the resize operation completes, this value shows the average
/// rate of the entire resize operation.</p>
pub fn avg_resize_rate_in_mega_bytes_per_second(mut self, input: f64) -> Self {
self.avg_resize_rate_in_mega_bytes_per_second = Some(input);
self
}
pub fn set_avg_resize_rate_in_mega_bytes_per_second(
mut self,
input: std::option::Option<f64>,
) -> Self {
self.avg_resize_rate_in_mega_bytes_per_second = input;
self
}
/// <p>The estimated total amount of data, in megabytes, on the cluster before the resize
/// operation began.</p>
pub fn total_resize_data_in_mega_bytes(mut self, input: i64) -> Self {
self.total_resize_data_in_mega_bytes = Some(input);
self
}
pub fn set_total_resize_data_in_mega_bytes(
mut self,
input: std::option::Option<i64>,
) -> Self {
self.total_resize_data_in_mega_bytes = input;
self
}
/// <p>While the resize operation is in progress, this value shows the current amount of
/// data, in megabytes, that has been processed so far. When the resize operation is
/// complete, this value shows the total amount of data, in megabytes, on the cluster, which
/// may be more or less than TotalResizeDataInMegaBytes (the estimated total amount of data
/// before resize).</p>
pub fn progress_in_mega_bytes(mut self, input: i64) -> Self {
self.progress_in_mega_bytes = Some(input);
self
}
pub fn set_progress_in_mega_bytes(mut self, input: std::option::Option<i64>) -> Self {
self.progress_in_mega_bytes = input;
self
}
        /// <p>The number of seconds that have elapsed since the resize operation began. After the
/// resize operation completes, this value shows the total actual time, in seconds, for the
/// resize operation.</p>
pub fn elapsed_time_in_seconds(mut self, input: i64) -> Self {
self.elapsed_time_in_seconds = Some(input);
self
}
pub fn set_elapsed_time_in_seconds(mut self, input: std::option::Option<i64>) -> Self {
self.elapsed_time_in_seconds = input;
self
}
/// <p>The estimated time remaining, in seconds, until the resize operation is complete.
/// This value is calculated based on the average resize rate and the estimated amount of
/// data remaining to be processed. Once the resize operation is complete, this value will
/// be 0.</p>
pub fn estimated_time_to_completion_in_seconds(mut self, input: i64) -> Self {
self.estimated_time_to_completion_in_seconds = Some(input);
self
}
pub fn set_estimated_time_to_completion_in_seconds(
mut self,
input: std::option::Option<i64>,
) -> Self {
self.estimated_time_to_completion_in_seconds = input;
self
}
/// <p>An enum with possible values of <code>ClassicResize</code> and
/// <code>ElasticResize</code>. These values describe the type of resize operation being
/// performed. </p>
pub fn resize_type(mut self, input: impl Into<std::string::String>) -> Self {
self.resize_type = Some(input.into());
self
}
pub fn set_resize_type(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resize_type = input;
self
}
/// <p>An optional string to provide additional details about the resize action.</p>
pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
self.message = Some(input.into());
self
}
pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
self.message = input;
self
}
/// <p>The type of encryption for the cluster after the resize is complete.</p>
/// <p>Possible values are <code>KMS</code> and <code>None</code>. </p>
pub fn target_encryption_type(mut self, input: impl Into<std::string::String>) -> Self {
self.target_encryption_type = Some(input.into());
self
}
pub fn set_target_encryption_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.target_encryption_type = input;
self
}
/// <p>The percent of data transferred from source cluster to target cluster.</p>
pub fn data_transfer_progress_percent(mut self, input: f64) -> Self {
self.data_transfer_progress_percent = Some(input);
self
}
pub fn set_data_transfer_progress_percent(
mut self,
input: std::option::Option<f64>,
) -> Self {
self.data_transfer_progress_percent = input;
self
}
/// Consumes the builder and constructs a [`CancelResizeOutput`](crate::output::CancelResizeOutput)
pub fn build(self) -> crate::output::CancelResizeOutput {
crate::output::CancelResizeOutput {
target_node_type: self.target_node_type,
target_number_of_nodes: self.target_number_of_nodes,
target_cluster_type: self.target_cluster_type,
status: self.status,
import_tables_completed: self.import_tables_completed,
import_tables_in_progress: self.import_tables_in_progress,
import_tables_not_started: self.import_tables_not_started,
avg_resize_rate_in_mega_bytes_per_second: self
.avg_resize_rate_in_mega_bytes_per_second,
total_resize_data_in_mega_bytes: self.total_resize_data_in_mega_bytes,
progress_in_mega_bytes: self.progress_in_mega_bytes,
elapsed_time_in_seconds: self.elapsed_time_in_seconds,
estimated_time_to_completion_in_seconds: self
.estimated_time_to_completion_in_seconds,
resize_type: self.resize_type,
message: self.message,
target_encryption_type: self.target_encryption_type,
data_transfer_progress_percent: self.data_transfer_progress_percent,
}
}
}
}
impl CancelResizeOutput {
/// Creates a new builder-style object to manufacture [`CancelResizeOutput`](crate::output::CancelResizeOutput)
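    ///
    /// A minimal usage sketch (the field values below are hypothetical):
    ///
    /// ```ignore
    /// let output = crate::output::CancelResizeOutput::builder()
    ///     .status("CANCELLING")
    ///     .resize_type("ClassicResize")
    ///     .build();
    /// assert_eq!(output.status.as_deref(), Some("CANCELLING"));
    /// ```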
pub fn builder() -> crate::output::cancel_resize_output::Builder {
crate::output::cancel_resize_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct BatchModifyClusterSnapshotsOutput {
/// <p>A list of the snapshots that were modified.</p>
pub resources: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>A list of any errors returned.</p>
pub errors: std::option::Option<std::vec::Vec<crate::model::SnapshotErrorMessage>>,
}
impl std::fmt::Debug for BatchModifyClusterSnapshotsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("BatchModifyClusterSnapshotsOutput");
formatter.field("resources", &self.resources);
formatter.field("errors", &self.errors);
formatter.finish()
}
}
/// See [`BatchModifyClusterSnapshotsOutput`](crate::output::BatchModifyClusterSnapshotsOutput)
pub mod batch_modify_cluster_snapshots_output {
/// A builder for [`BatchModifyClusterSnapshotsOutput`](crate::output::BatchModifyClusterSnapshotsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resources: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) errors: std::option::Option<std::vec::Vec<crate::model::SnapshotErrorMessage>>,
}
impl Builder {
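        /// Appends an item to `resources`.
        ///
        /// To override the contents of this collection use [`set_resources`](Self::set_resources).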
pub fn resources(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.resources.unwrap_or_default();
v.push(input.into());
self.resources = Some(v);
self
}
pub fn set_resources(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.resources = input;
self
}
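        /// Appends an item to `errors`.
        ///
        /// To override the contents of this collection use [`set_errors`](Self::set_errors).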
pub fn errors(mut self, input: impl Into<crate::model::SnapshotErrorMessage>) -> Self {
let mut v = self.errors.unwrap_or_default();
v.push(input.into());
self.errors = Some(v);
self
}
pub fn set_errors(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::SnapshotErrorMessage>>,
) -> Self {
self.errors = input;
self
}
/// Consumes the builder and constructs a [`BatchModifyClusterSnapshotsOutput`](crate::output::BatchModifyClusterSnapshotsOutput)
pub fn build(self) -> crate::output::BatchModifyClusterSnapshotsOutput {
crate::output::BatchModifyClusterSnapshotsOutput {
resources: self.resources,
errors: self.errors,
}
}
}
}
impl BatchModifyClusterSnapshotsOutput {
/// Creates a new builder-style object to manufacture [`BatchModifyClusterSnapshotsOutput`](crate::output::BatchModifyClusterSnapshotsOutput)
pub fn builder() -> crate::output::batch_modify_cluster_snapshots_output::Builder {
crate::output::batch_modify_cluster_snapshots_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct BatchDeleteClusterSnapshotsOutput {
/// <p>A list of the snapshot identifiers that were deleted. </p>
pub resources: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>A list of any errors returned.</p>
pub errors: std::option::Option<std::vec::Vec<crate::model::SnapshotErrorMessage>>,
}
impl std::fmt::Debug for BatchDeleteClusterSnapshotsOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("BatchDeleteClusterSnapshotsOutput");
formatter.field("resources", &self.resources);
formatter.field("errors", &self.errors);
formatter.finish()
}
}
/// See [`BatchDeleteClusterSnapshotsOutput`](crate::output::BatchDeleteClusterSnapshotsOutput)
pub mod batch_delete_cluster_snapshots_output {
/// A builder for [`BatchDeleteClusterSnapshotsOutput`](crate::output::BatchDeleteClusterSnapshotsOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resources: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) errors: std::option::Option<std::vec::Vec<crate::model::SnapshotErrorMessage>>,
}
impl Builder {
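        /// Appends an item to `resources`.
        ///
        /// To override the contents of this collection use [`set_resources`](Self::set_resources).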
pub fn resources(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.resources.unwrap_or_default();
v.push(input.into());
self.resources = Some(v);
self
}
pub fn set_resources(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.resources = input;
self
}
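        /// Appends an item to `errors`.
        ///
        /// To override the contents of this collection use [`set_errors`](Self::set_errors).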
pub fn errors(mut self, input: impl Into<crate::model::SnapshotErrorMessage>) -> Self {
let mut v = self.errors.unwrap_or_default();
v.push(input.into());
self.errors = Some(v);
self
}
pub fn set_errors(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::SnapshotErrorMessage>>,
) -> Self {
self.errors = input;
self
}
/// Consumes the builder and constructs a [`BatchDeleteClusterSnapshotsOutput`](crate::output::BatchDeleteClusterSnapshotsOutput)
pub fn build(self) -> crate::output::BatchDeleteClusterSnapshotsOutput {
crate::output::BatchDeleteClusterSnapshotsOutput {
resources: self.resources,
errors: self.errors,
}
}
}
}
impl BatchDeleteClusterSnapshotsOutput {
/// Creates a new builder-style object to manufacture [`BatchDeleteClusterSnapshotsOutput`](crate::output::BatchDeleteClusterSnapshotsOutput)
pub fn builder() -> crate::output::batch_delete_cluster_snapshots_output::Builder {
crate::output::batch_delete_cluster_snapshots_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AuthorizeSnapshotAccessOutput {
/// <p>Describes a snapshot.</p>
pub snapshot: std::option::Option<crate::model::Snapshot>,
}
impl std::fmt::Debug for AuthorizeSnapshotAccessOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AuthorizeSnapshotAccessOutput");
formatter.field("snapshot", &self.snapshot);
formatter.finish()
}
}
/// See [`AuthorizeSnapshotAccessOutput`](crate::output::AuthorizeSnapshotAccessOutput)
pub mod authorize_snapshot_access_output {
/// A builder for [`AuthorizeSnapshotAccessOutput`](crate::output::AuthorizeSnapshotAccessOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) snapshot: std::option::Option<crate::model::Snapshot>,
}
impl Builder {
/// <p>Describes a snapshot.</p>
pub fn snapshot(mut self, input: crate::model::Snapshot) -> Self {
self.snapshot = Some(input);
self
}
pub fn set_snapshot(mut self, input: std::option::Option<crate::model::Snapshot>) -> Self {
self.snapshot = input;
self
}
/// Consumes the builder and constructs a [`AuthorizeSnapshotAccessOutput`](crate::output::AuthorizeSnapshotAccessOutput)
pub fn build(self) -> crate::output::AuthorizeSnapshotAccessOutput {
crate::output::AuthorizeSnapshotAccessOutput {
snapshot: self.snapshot,
}
}
}
}
impl AuthorizeSnapshotAccessOutput {
/// Creates a new builder-style object to manufacture [`AuthorizeSnapshotAccessOutput`](crate::output::AuthorizeSnapshotAccessOutput)
pub fn builder() -> crate::output::authorize_snapshot_access_output::Builder {
crate::output::authorize_snapshot_access_output::Builder::default()
}
}
/// <p>Describes an endpoint authorization for authorizing Redshift-managed VPC endpoint access to a cluster across Amazon Web Services accounts.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AuthorizeEndpointAccessOutput {
/// <p>The Amazon Web Services account ID of the cluster owner.</p>
pub grantor: std::option::Option<std::string::String>,
/// <p>The Amazon Web Services account ID of the grantee of the cluster.</p>
pub grantee: std::option::Option<std::string::String>,
/// <p>The cluster identifier.</p>
pub cluster_identifier: std::option::Option<std::string::String>,
/// <p>The time (UTC) when the authorization was created.</p>
pub authorize_time: std::option::Option<smithy_types::Instant>,
/// <p>The status of the cluster.</p>
pub cluster_status: std::option::Option<std::string::String>,
/// <p>The status of the authorization action.</p>
pub status: std::option::Option<crate::model::AuthorizationStatus>,
/// <p>Indicates whether all VPCs in the grantee account are allowed access to the cluster.</p>
pub allowed_all_vp_cs: bool,
/// <p>The VPCs allowed access to the cluster.</p>
pub allowed_vp_cs: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The number of Redshift-managed VPC endpoints created for the authorization.</p>
pub endpoint_count: i32,
}
impl std::fmt::Debug for AuthorizeEndpointAccessOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AuthorizeEndpointAccessOutput");
formatter.field("grantor", &self.grantor);
formatter.field("grantee", &self.grantee);
formatter.field("cluster_identifier", &self.cluster_identifier);
formatter.field("authorize_time", &self.authorize_time);
formatter.field("cluster_status", &self.cluster_status);
formatter.field("status", &self.status);
formatter.field("allowed_all_vp_cs", &self.allowed_all_vp_cs);
formatter.field("allowed_vp_cs", &self.allowed_vp_cs);
formatter.field("endpoint_count", &self.endpoint_count);
formatter.finish()
}
}
/// See [`AuthorizeEndpointAccessOutput`](crate::output::AuthorizeEndpointAccessOutput)
pub mod authorize_endpoint_access_output {
/// A builder for [`AuthorizeEndpointAccessOutput`](crate::output::AuthorizeEndpointAccessOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) grantor: std::option::Option<std::string::String>,
pub(crate) grantee: std::option::Option<std::string::String>,
pub(crate) cluster_identifier: std::option::Option<std::string::String>,
pub(crate) authorize_time: std::option::Option<smithy_types::Instant>,
pub(crate) cluster_status: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::AuthorizationStatus>,
pub(crate) allowed_all_vp_cs: std::option::Option<bool>,
pub(crate) allowed_vp_cs: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) endpoint_count: std::option::Option<i32>,
}
impl Builder {
/// <p>The Amazon Web Services account ID of the cluster owner.</p>
pub fn grantor(mut self, input: impl Into<std::string::String>) -> Self {
self.grantor = Some(input.into());
self
}
pub fn set_grantor(mut self, input: std::option::Option<std::string::String>) -> Self {
self.grantor = input;
self
}
/// <p>The Amazon Web Services account ID of the grantee of the cluster.</p>
pub fn grantee(mut self, input: impl Into<std::string::String>) -> Self {
self.grantee = Some(input.into());
self
}
pub fn set_grantee(mut self, input: std::option::Option<std::string::String>) -> Self {
self.grantee = input;
self
}
/// <p>The cluster identifier.</p>
pub fn cluster_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_identifier = Some(input.into());
self
}
pub fn set_cluster_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_identifier = input;
self
}
/// <p>The time (UTC) when the authorization was created.</p>
pub fn authorize_time(mut self, input: smithy_types::Instant) -> Self {
self.authorize_time = Some(input);
self
}
pub fn set_authorize_time(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.authorize_time = input;
self
}
/// <p>The status of the cluster.</p>
pub fn cluster_status(mut self, input: impl Into<std::string::String>) -> Self {
self.cluster_status = Some(input.into());
self
}
pub fn set_cluster_status(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.cluster_status = input;
self
}
/// <p>The status of the authorization action.</p>
pub fn status(mut self, input: crate::model::AuthorizationStatus) -> Self {
self.status = Some(input);
self
}
pub fn set_status(
mut self,
input: std::option::Option<crate::model::AuthorizationStatus>,
) -> Self {
self.status = input;
self
}
/// <p>Indicates whether all VPCs in the grantee account are allowed access to the cluster.</p>
pub fn allowed_all_vp_cs(mut self, input: bool) -> Self {
self.allowed_all_vp_cs = Some(input);
self
}
pub fn set_allowed_all_vp_cs(mut self, input: std::option::Option<bool>) -> Self {
self.allowed_all_vp_cs = input;
self
}
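        /// Appends an item to `allowed_vp_cs`.
        ///
        /// To override the contents of this collection use [`set_allowed_vp_cs`](Self::set_allowed_vp_cs).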
pub fn allowed_vp_cs(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.allowed_vp_cs.unwrap_or_default();
v.push(input.into());
self.allowed_vp_cs = Some(v);
self
}
pub fn set_allowed_vp_cs(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.allowed_vp_cs = input;
self
}
/// <p>The number of Redshift-managed VPC endpoints created for the authorization.</p>
pub fn endpoint_count(mut self, input: i32) -> Self {
self.endpoint_count = Some(input);
self
}
pub fn set_endpoint_count(mut self, input: std::option::Option<i32>) -> Self {
self.endpoint_count = input;
self
}
/// Consumes the builder and constructs a [`AuthorizeEndpointAccessOutput`](crate::output::AuthorizeEndpointAccessOutput)
pub fn build(self) -> crate::output::AuthorizeEndpointAccessOutput {
crate::output::AuthorizeEndpointAccessOutput {
grantor: self.grantor,
grantee: self.grantee,
cluster_identifier: self.cluster_identifier,
authorize_time: self.authorize_time,
cluster_status: self.cluster_status,
status: self.status,
allowed_all_vp_cs: self.allowed_all_vp_cs.unwrap_or_default(),
allowed_vp_cs: self.allowed_vp_cs,
endpoint_count: self.endpoint_count.unwrap_or_default(),
}
}
}
}
impl AuthorizeEndpointAccessOutput {
/// Creates a new builder-style object to manufacture [`AuthorizeEndpointAccessOutput`](crate::output::AuthorizeEndpointAccessOutput)
pub fn builder() -> crate::output::authorize_endpoint_access_output::Builder {
crate::output::authorize_endpoint_access_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AuthorizeDataShareOutput {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub data_share_arn: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub producer_arn: std::option::Option<std::string::String>,
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub allow_publicly_accessible_consumers: bool,
/// <p>A value that specifies when the datashare has an association between a producer and data consumers.</p>
pub data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl std::fmt::Debug for AuthorizeDataShareOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AuthorizeDataShareOutput");
formatter.field("data_share_arn", &self.data_share_arn);
formatter.field("producer_arn", &self.producer_arn);
formatter.field(
"allow_publicly_accessible_consumers",
&self.allow_publicly_accessible_consumers,
);
formatter.field("data_share_associations", &self.data_share_associations);
formatter.finish()
}
}
/// See [`AuthorizeDataShareOutput`](crate::output::AuthorizeDataShareOutput)
pub mod authorize_data_share_output {
/// A builder for [`AuthorizeDataShareOutput`](crate::output::AuthorizeDataShareOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_share_arn: std::option::Option<std::string::String>,
pub(crate) producer_arn: std::option::Option<std::string::String>,
pub(crate) allow_publicly_accessible_consumers: std::option::Option<bool>,
pub(crate) data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl Builder {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub fn data_share_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.data_share_arn = Some(input.into());
self
}
pub fn set_data_share_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.data_share_arn = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub fn producer_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.producer_arn = Some(input.into());
self
}
pub fn set_producer_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.producer_arn = input;
self
}
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub fn allow_publicly_accessible_consumers(mut self, input: bool) -> Self {
self.allow_publicly_accessible_consumers = Some(input);
self
}
pub fn set_allow_publicly_accessible_consumers(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.allow_publicly_accessible_consumers = input;
self
}
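        /// Appends an item to `data_share_associations`.
        ///
        /// To override the contents of this collection use [`set_data_share_associations`](Self::set_data_share_associations).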
pub fn data_share_associations(
mut self,
input: impl Into<crate::model::DataShareAssociation>,
) -> Self {
let mut v = self.data_share_associations.unwrap_or_default();
v.push(input.into());
self.data_share_associations = Some(v);
self
}
pub fn set_data_share_associations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
) -> Self {
self.data_share_associations = input;
self
}
/// Consumes the builder and constructs a [`AuthorizeDataShareOutput`](crate::output::AuthorizeDataShareOutput)
pub fn build(self) -> crate::output::AuthorizeDataShareOutput {
crate::output::AuthorizeDataShareOutput {
data_share_arn: self.data_share_arn,
producer_arn: self.producer_arn,
allow_publicly_accessible_consumers: self
.allow_publicly_accessible_consumers
.unwrap_or_default(),
data_share_associations: self.data_share_associations,
}
}
}
}
impl AuthorizeDataShareOutput {
/// Creates a new builder-style object to manufacture [`AuthorizeDataShareOutput`](crate::output::AuthorizeDataShareOutput)
pub fn builder() -> crate::output::authorize_data_share_output::Builder {
crate::output::authorize_data_share_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AuthorizeClusterSecurityGroupIngressOutput {
/// <p>Describes a security group.</p>
pub cluster_security_group: std::option::Option<crate::model::ClusterSecurityGroup>,
}
impl std::fmt::Debug for AuthorizeClusterSecurityGroupIngressOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AuthorizeClusterSecurityGroupIngressOutput");
formatter.field("cluster_security_group", &self.cluster_security_group);
formatter.finish()
}
}
/// See [`AuthorizeClusterSecurityGroupIngressOutput`](crate::output::AuthorizeClusterSecurityGroupIngressOutput)
pub mod authorize_cluster_security_group_ingress_output {
/// A builder for [`AuthorizeClusterSecurityGroupIngressOutput`](crate::output::AuthorizeClusterSecurityGroupIngressOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) cluster_security_group: std::option::Option<crate::model::ClusterSecurityGroup>,
}
impl Builder {
/// <p>Describes a security group.</p>
pub fn cluster_security_group(mut self, input: crate::model::ClusterSecurityGroup) -> Self {
self.cluster_security_group = Some(input);
self
}
pub fn set_cluster_security_group(
mut self,
input: std::option::Option<crate::model::ClusterSecurityGroup>,
) -> Self {
self.cluster_security_group = input;
self
}
/// Consumes the builder and constructs a [`AuthorizeClusterSecurityGroupIngressOutput`](crate::output::AuthorizeClusterSecurityGroupIngressOutput)
pub fn build(self) -> crate::output::AuthorizeClusterSecurityGroupIngressOutput {
crate::output::AuthorizeClusterSecurityGroupIngressOutput {
cluster_security_group: self.cluster_security_group,
}
}
}
}
impl AuthorizeClusterSecurityGroupIngressOutput {
/// Creates a new builder-style object to manufacture [`AuthorizeClusterSecurityGroupIngressOutput`](crate::output::AuthorizeClusterSecurityGroupIngressOutput)
pub fn builder() -> crate::output::authorize_cluster_security_group_ingress_output::Builder {
crate::output::authorize_cluster_security_group_ingress_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AssociateDataShareConsumerOutput {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub data_share_arn: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub producer_arn: std::option::Option<std::string::String>,
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub allow_publicly_accessible_consumers: bool,
/// <p>A value that specifies when the datashare has an association between a producer and data consumers.</p>
pub data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl std::fmt::Debug for AssociateDataShareConsumerOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AssociateDataShareConsumerOutput");
formatter.field("data_share_arn", &self.data_share_arn);
formatter.field("producer_arn", &self.producer_arn);
formatter.field(
"allow_publicly_accessible_consumers",
&self.allow_publicly_accessible_consumers,
);
formatter.field("data_share_associations", &self.data_share_associations);
formatter.finish()
}
}
/// See [`AssociateDataShareConsumerOutput`](crate::output::AssociateDataShareConsumerOutput)
pub mod associate_data_share_consumer_output {
/// A builder for [`AssociateDataShareConsumerOutput`](crate::output::AssociateDataShareConsumerOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) data_share_arn: std::option::Option<std::string::String>,
pub(crate) producer_arn: std::option::Option<std::string::String>,
pub(crate) allow_publicly_accessible_consumers: std::option::Option<bool>,
pub(crate) data_share_associations:
std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
}
impl Builder {
/// <p>An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the <code>arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}</code> format.</p>
pub fn data_share_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.data_share_arn = Some(input.into());
self
}
pub fn set_data_share_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.data_share_arn = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the producer.</p>
pub fn producer_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.producer_arn = Some(input.into());
self
}
pub fn set_producer_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.producer_arn = input;
self
}
/// <p>A value that specifies whether the datashare can be shared to a publicly accessible cluster.</p>
pub fn allow_publicly_accessible_consumers(mut self, input: bool) -> Self {
self.allow_publicly_accessible_consumers = Some(input);
self
}
pub fn set_allow_publicly_accessible_consumers(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.allow_publicly_accessible_consumers = input;
self
}
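        /// Appends an item to `data_share_associations`.
        ///
        /// To override the contents of this collection use [`set_data_share_associations`](Self::set_data_share_associations).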
pub fn data_share_associations(
mut self,
input: impl Into<crate::model::DataShareAssociation>,
) -> Self {
let mut v = self.data_share_associations.unwrap_or_default();
v.push(input.into());
self.data_share_associations = Some(v);
self
}
pub fn set_data_share_associations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DataShareAssociation>>,
) -> Self {
self.data_share_associations = input;
self
}
/// Consumes the builder and constructs a [`AssociateDataShareConsumerOutput`](crate::output::AssociateDataShareConsumerOutput)
pub fn build(self) -> crate::output::AssociateDataShareConsumerOutput {
crate::output::AssociateDataShareConsumerOutput {
data_share_arn: self.data_share_arn,
producer_arn: self.producer_arn,
allow_publicly_accessible_consumers: self
.allow_publicly_accessible_consumers
.unwrap_or_default(),
data_share_associations: self.data_share_associations,
}
}
}
}
impl AssociateDataShareConsumerOutput {
/// Creates a new builder-style object to manufacture [`AssociateDataShareConsumerOutput`](crate::output::AssociateDataShareConsumerOutput)
pub fn builder() -> crate::output::associate_data_share_consumer_output::Builder {
crate::output::associate_data_share_consumer_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AddPartnerOutput {
/// <p>The name of the database that receives data from the partner.</p>
pub database_name: std::option::Option<std::string::String>,
/// <p>The name of the partner that is authorized to send data.</p>
pub partner_name: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for AddPartnerOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AddPartnerOutput");
formatter.field("database_name", &self.database_name);
formatter.field("partner_name", &self.partner_name);
formatter.finish()
}
}
/// See [`AddPartnerOutput`](crate::output::AddPartnerOutput)
pub mod add_partner_output {
/// A builder for [`AddPartnerOutput`](crate::output::AddPartnerOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) database_name: std::option::Option<std::string::String>,
pub(crate) partner_name: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the database that receives data from the partner.</p>
pub fn database_name(mut self, input: impl Into<std::string::String>) -> Self {
self.database_name = Some(input.into());
self
}
pub fn set_database_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.database_name = input;
self
}
/// <p>The name of the partner that is authorized to send data.</p>
pub fn partner_name(mut self, input: impl Into<std::string::String>) -> Self {
self.partner_name = Some(input.into());
self
}
pub fn set_partner_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.partner_name = input;
self
}
/// Consumes the builder and constructs a [`AddPartnerOutput`](crate::output::AddPartnerOutput)
pub fn build(self) -> crate::output::AddPartnerOutput {
crate::output::AddPartnerOutput {
database_name: self.database_name,
partner_name: self.partner_name,
}
}
}
}
impl AddPartnerOutput {
/// Creates a new builder-style object to manufacture [`AddPartnerOutput`](crate::output::AddPartnerOutput)
pub fn builder() -> crate::output::add_partner_output::Builder {
crate::output::add_partner_output::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AcceptReservedNodeExchangeOutput {
/// <p></p>
pub exchanged_reserved_node: std::option::Option<crate::model::ReservedNode>,
}
impl std::fmt::Debug for AcceptReservedNodeExchangeOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("AcceptReservedNodeExchangeOutput");
formatter.field("exchanged_reserved_node", &self.exchanged_reserved_node);
formatter.finish()
}
}
/// See [`AcceptReservedNodeExchangeOutput`](crate::output::AcceptReservedNodeExchangeOutput)
pub mod accept_reserved_node_exchange_output {
/// A builder for [`AcceptReservedNodeExchangeOutput`](crate::output::AcceptReservedNodeExchangeOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) exchanged_reserved_node: std::option::Option<crate::model::ReservedNode>,
}
impl Builder {
/// <p></p>
pub fn exchanged_reserved_node(mut self, input: crate::model::ReservedNode) -> Self {
self.exchanged_reserved_node = Some(input);
self
}
pub fn set_exchanged_reserved_node(
mut self,
input: std::option::Option<crate::model::ReservedNode>,
) -> Self {
self.exchanged_reserved_node = input;
self
}
/// Consumes the builder and constructs a [`AcceptReservedNodeExchangeOutput`](crate::output::AcceptReservedNodeExchangeOutput)
pub fn build(self) -> crate::output::AcceptReservedNodeExchangeOutput {
crate::output::AcceptReservedNodeExchangeOutput {
exchanged_reserved_node: self.exchanged_reserved_node,
}
}
}
}
impl AcceptReservedNodeExchangeOutput {
/// Creates a new builder-style object to manufacture [`AcceptReservedNodeExchangeOutput`](crate::output::AcceptReservedNodeExchangeOutput)
pub fn builder() -> crate::output::accept_reserved_node_exchange_output::Builder {
crate::output::accept_reserved_node_exchange_output::Builder::default()
}
}
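// A minimal usage sketch (not part of the generated SDK): every output type in
// this module follows the same builder pattern, so a round-trip looks like the
// test below. The field values are invented for illustration.
#[cfg(test)]
mod builder_usage_sketch {
    #[test]
    fn add_partner_output_round_trip() {
        let output = crate::output::AddPartnerOutput::builder()
            .database_name("dev")
            .partner_name("partner-a")
            .build();
        assert_eq!(output.database_name.as_deref(), Some("dev"));
        assert_eq!(output.partner_name.as_deref(), Some("partner-a"));
    }
}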
|
set_marker
|
nbody.py
|
# Authored by Tiantian Liu, Taichi Graphics.
import math
import taichi as ti
ti.init(arch=ti.cpu)
# global control
paused = ti.field(ti.i32, ())
# gravitational constant 6.67408e-11, using 1 for simplicity
G = 1
# number of planets
N = 3000
# unit mass
m = 1
# galaxy size
galaxy_size = 0.4
# planet radius (for rendering)
planet_radius = 2
# init vel
init_vel = 120
# time-step size
h = 1e-4
# substepping
substepping = 10
# center of the screen
center = ti.Vector.field(2, ti.f32, ())
# pos, vel and force of the planets
# Nx2 vectors
pos = ti.Vector.field(2, ti.f32, N)
vel = ti.Vector.field(2, ti.f32, N)
force = ti.Vector.field(2, ti.f32, N)
@ti.kernel
def initialize():
center[None] = [0.5, 0.5]
for i in range(N):
theta = ti.random() * 2 * math.pi
r = (ti.sqrt(ti.random()) * 0.6 + 0.4) * galaxy_size
offset = r * ti.Vector([ti.cos(theta), ti.sin(theta)])
pos[i] = center[None] + offset
vel[i] = [-offset.y, offset.x]
vel[i] *= init_vel
@ti.kernel
def compute_force():
# clear force
|
@ti.kernel
def update():
dt = h / substepping
for i in range(N):
        # symplectic Euler
vel[i] += dt * force[i] / m
pos[i] += dt * vel[i]
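# Note on the integrator: semi-implicit (symplectic) Euler updates the
# velocity first and then advances the position with the *new* velocity:
#     v_{t+1} = v_t + dt * f_t / m
#     x_{t+1} = x_t + dt * v_{t+1}
# Unlike explicit Euler, it keeps the orbital energy bounded over long runs,
# which is why it is the standard choice for this kind of demo.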
gui = ti.GUI('N-body problem', (800, 800))
initialize()
while gui.running:
for e in gui.get_events(ti.GUI.PRESS):
if e.key in [ti.GUI.ESCAPE, ti.GUI.EXIT]:
exit()
elif e.key == 'r':
initialize()
elif e.key == ti.GUI.SPACE:
paused[None] = not paused[None]
if not paused[None]:
for i in range(substepping):
compute_force()
update()
gui.circles(pos.to_numpy(), color=0xffffff, radius=planet_radius)
gui.show()
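# Each rendered frame advances the simulation by h in total: the inner loop
# takes `substepping` steps of size dt = h / substepping, trading extra force
# evaluations for a more stable integration.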
|
for i in range(N):
force[i] = [0.0, 0.0]
# compute gravitational force
for i in range(N):
p = pos[i]
for j in range(N):
if i != j: # double the computation for a better memory footprint and load balance
diff = p - pos[j]
r = diff.norm(1e-5)
# gravitational force -(GMm / r^2) * (diff/r) for i
f = -G * m * m * (1.0 / r)**3 * diff
# assign to each particle
force[i] += f
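    # A halved-work variant (sketch, not used above) would iterate j over
    # range(i) only and also apply the reaction force[j] -= f (Newton's third
    # law). In Taichi that turns force[j] into an atomic add contended across
    # threads, which tends to parallelize worse; the double loop above trades
    # redundant arithmetic for contention-free, balanced parallel work.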
|
errorInterceptor.service.js
|
/**
* @ngdoc service
* @name refigureShared.services:errorInterceptor
* @description
* HttpProvider Error interceptor
*/
(function (angular) {
'use strict';
|
.module('refigureShared')
.factory('errorInterceptor', errorInterceptor);
    errorInterceptor.$inject = [
        '$q',
        '$log',
        '$injector',
        'utils'
    ];
    function errorInterceptor($q, $log, $injector, utils) {
var exports = {
response: response,
responseError: responseError
};
return exports;
////////////////////////////
function _error(res, defErr) {
var error = utils.get(res, ['data', 'error'], defErr),
message = utils.get(res, ['data', 'message'], 'Network error occurred.');
if (error) {
utils.set(res, ['data', 'error'], error);
utils.set(res, ['data', 'message'], message);
}
return {
error: error,
message: message
};
}
function response(res) {
var error = _error(res, 0);
if (error.error) {
if (intercept(res, error)) {
showError(error);
}
return $q.reject(res);
}
return res;
}
function responseError(res) {
var error = _error(res, -1);
if (intercept(res, error)) {
showError(error);
}
return $q.reject(res);
}
        function intercept(res, error) {
            var noIntercept = utils.get(res, ['config', 'noIntercept']);
if (noIntercept) {
return false;
}
return !validError(error);
}
function validError(error) {
return false;
}
function showError(error) {
var modalDialog = $injector.get('modalDialog');
return modalDialog.error(error.message);
}
}
})(window.angular);
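// Registration sketch (assumed to live in the app's config phase, not in this
// file): the factory above only takes effect once it is pushed onto
// $httpProvider's interceptor list, e.g.
//
//     angular.module('refigureShared').config(['$httpProvider',
//         function ($httpProvider) {
//             $httpProvider.interceptors.push('errorInterceptor');
//         }
//     ]);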
|
angular
|
download_data.py
|
"""Script to download and cache all data."""
import os
from typing import List
import openml
from automl import openml_utils
BENCHMARK_TASKS = {"adult": 7592, "nomao": 9977, "phoneme": 9952}
FOLD_COL = "fold"
def
|
(task_ids: List[int]):
"""Downloads the given task_ids from OpenML and dumps them as OpenMLTasks."""
tasks = openml.tasks.get_tasks(
task_ids, download_data=True, download_qualities=False
)
for task in tasks:
dataset = task.get_dataset()
df, _, categorical, columns = dataset.get_data()
label_col = dataset.default_target_attribute
feature_cols = [col for col in columns if col != label_col]
numerical_cols = [col for ind, col in zip(categorical, feature_cols) if not ind]
categorical_cols = [col for ind, col in zip(categorical, feature_cols) if ind]
df[FOLD_COL] = -1
splits = task.download_split().split[0] # We assume one repetition.
for split, idxs in splits.items():
idxs = idxs[0].test
df.loc[idxs, FOLD_COL] = split
out_path = openml_utils.task_path(task.task_id)
os.makedirs(os.path.dirname(out_path), exist_ok=True)
task = openml_utils.OpenMLTask(
df, feature_cols, numerical_cols, categorical_cols, label_col, FOLD_COL
)
task.dump(out_path)
if __name__ == "__main__":
download_openml_tasks(list(BENCHMARK_TASKS.values()))
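# Reading a cached task back (sketch): `openml_utils.task_path` above is also
# how a consumer would locate the dump. This assumes OpenMLTask exposes a
# `load` counterpart to the `dump` used above, which is not shown in this file.
#
#     task = openml_utils.OpenMLTask.load(openml_utils.task_path(7592))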
|
download_openml_tasks
|
hamming.py
|
#!/usr/bin/env python3
"""
Author : cory
Date : 2020-03-03
Purpose: Rock the Casbah
"""
import argparse
import os
import sys
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('file',
                        metavar='FILE',
                        type=argparse.FileType('r'),
                        help='An input file')
    parser.add_argument('-m',
                        '--min',
                        help='Minimum distance to report',
                        metavar='int',
                        type=int,
                        default=0)
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
for line in args.file:
sp = line.split()
        comp = list(zip(sp[0], sp[1]))
        dis = sum(1 for a, b in comp if a != b) + abs(len(sp[0]) - len(sp[1]))
if dis >= args.min:
|
# --------------------------------------------------
if __name__ == '__main__':
main()
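# Example run (hypothetical input file of word pairs, one pair per line;
# alignment approximate):
#
#     $ printf 'abc abd\n' > pairs.txt
#     $ ./hamming.py pairs.txt
#            1:abc                 abd
#
# 'abc' and 'abd' have equal length and differ in one position, so dis == 1.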
|
print(f'{dis:8}:{sp[0]:19} {sp[1]:20}')
|
test_utilities.rs
|
//! Test functions used both for testing and fuzzing.
use super::*;
use itertools::Itertools;
use log::warn;
use num_traits::Zero;
use rand::Rng;
use rayon::iter::ParallelIterator;
use std::collections::HashSet;
use std::fs;
use std::path::Path;
// where to save the test files
#[cfg(any(target_os = "linux", target_os = "macos"))]
static DEFAULT_PATH: &str = "/tmp/";
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
static DEFAULT_PATH: &str = "";
const NONEXISTENT: &str = "Cthulhu is a fictional cosmic entity created by writer H. P. Lovecraft and first introduced in the short story The Call of Cthulhu,[2] published in the American pulp magazine Weird Tales in 1928. Considered a Great Old One within the pantheon of Lovecraftian cosmic entities, the creature has since been featured in numerous popular culture references. Lovecraft depicts it as a gigantic entity worshipped by cultists, in shape like an octopus, a dragon, and a caricature of human form. Its name was given to the Lovecraft-inspired universe where it and its fellow entities existed, the Cthulhu Mythos.";
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
/// Computes a random string of the chosen length
pub fn random_string(len: usize) -> String {
let mut rng = rand::thread_rng();
(0..len)
.map(|_| {
let idx = rng.gen_range(0, CHARSET.len());
CHARSET[idx] as char
})
.collect()
}
/// Computes a random path.
pub fn random_path(path: Option<&str>) -> String
|
#[allow(clippy::redundant_clone)]
/// Load the Strings Protein Protein Interaction graph with given parametrization.
/// This is our default graph we use on tests.
pub fn load_ppi(
load_nodes: bool,
load_edge_types: bool,
load_weights: bool,
directed: bool,
verbose: bool,
parallel: bool,
) -> Graph {
let graph_name = "STRING PPI".to_owned();
let nodes_reader = if load_nodes {
Some(
NodeFileReader::new(Some("tests/data/ppi/nodes.tsv".to_string()))
.unwrap()
.set_verbose(Some(false))
.set_rows_to_skip(Some(0))
.unwrap()
.set_header(Some(true))
.unwrap()
.set_max_rows_number(Some(100000))
.unwrap()
.set_default_node_type(Some("default".to_string()))
.set_ignore_duplicates(Some(true))
.unwrap()
.set_separator(Some('\t'))
.unwrap()
.set_nodes_column(Some("id".to_string()))
.unwrap()
.set_node_types_column_number(Some(1))
.unwrap()
.set_nodes_column_number(Some(0))
.unwrap()
.set_node_types_column(Some("category".to_string()))
.unwrap()
.set_csv_is_correct(Some(true))
.unwrap()
.set_nodes_number(Some(37163))
.set_parallel(Some(parallel))
.unwrap()
.clone(),
)
} else {
None
};
let edges_reader = EdgeFileReader::new("tests/data/ppi/edges.tsv".to_string())
.unwrap()
.set_verbose(Some(verbose))
.set_ignore_duplicates(Some(true))
.set_header(Some(true))
.unwrap()
.set_max_rows_number(Some(100000))
.unwrap()
.set_rows_to_skip(Some(0))
.unwrap()
.set_separator(None::<char>)
.unwrap()
.set_sources_column(Some("subject".to_string()))
.unwrap()
.set_destinations_column(Some("object".to_string()))
.unwrap()
.set_parallel(Some(parallel))
.set_weights_column(if load_weights {
Some("weight".to_string())
} else {
None
})
.unwrap()
.set_edge_types_column(if load_edge_types {
Some("edge_label".to_string())
} else {
None
})
.unwrap()
.set_csv_is_correct(Some(true))
.set_default_edge_type(if load_edge_types {
Some("Kebab".to_string())
} else {
None
})
.set_default_weight(if load_weights { Some(5.0) } else { None })
.unwrap()
.clone();
let ppi = Graph::from_file_readers(
Some(edges_reader),
nodes_reader,
None,
None,
true,
true,
directed,
graph_name.clone(),
)
.unwrap();
assert_eq!(ppi.has_node_types(), load_nodes);
assert_eq!(
ppi.has_edge_types(),
load_edge_types,
concat!(
"Both the `has_edge_types` method and the `load_edge_types`\n",
"flag shoud have the same value but were:\n",
"* has_edge_types: {}\n",
"* load_edge_types: {}\n",
),
ppi.has_edge_types(),
load_edge_types,
);
assert_eq!(ppi.has_edge_weights(), load_weights);
ppi
}
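// Typical call site (sketch, mirroring the signature above): load the
// undirected STRING PPI graph with node and edge types, without weights,
// non-verbose and sequential.
// let graph = load_ppi(true, true, false, false, false, false);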
#[allow(clippy::redundant_clone)]
/// This is our default graph we use on tests with node types.
pub fn load_cora() -> Graph {
let graph_name = "Cora".to_owned();
let edges_reader = EdgeFileReader::new("tests/data/cora/edges.tsv")
.unwrap()
.set_separator(Some('\t'))
.unwrap()
.set_verbose(Some(false))
.set_sources_column(Some("subject"))
.unwrap()
.set_destinations_column(Some("object"))
.unwrap()
.set_edge_types_column(Some("edge_type"))
.unwrap();
let nodes_reader = NodeFileReader::new(Some("tests/data/cora/nodes.tsv".to_owned()))
.unwrap()
.set_separator(Some('\t'))
.unwrap()
.set_nodes_column(Some("id"))
.unwrap()
.set_verbose(Some(false))
.set_node_types_column(Some("node_type"))
.unwrap();
Graph::from_file_readers(
Some(edges_reader),
Some(nodes_reader),
None,
None,
true,
true,
false,
graph_name.clone(),
)
.unwrap()
}
/// Return WalksParameters to execute a first order walk.
pub fn first_order_walker(graph: &Graph) -> Result<WalksParameters> {
Ok(WalksParameters::new(8)?
.set_iterations(Some(1))?
.set_random_state(Some(43))
.set_dense_node_mapping(Some(graph.get_dense_nodes_mapping())))
}
/// Return WalksParameters to execute a second order walk.
pub fn second_order_walker(
graph: &Graph,
return_weight: WeightT,
explore_weight: WeightT,
) -> Result<WalksParameters> {
Ok(WalksParameters::new(8)?
.set_iterations(Some(1))?
.set_return_weight(Some(return_weight))?
.set_explore_weight(Some(explore_weight))?
.set_max_neighbours(Some(3))?
.set_change_edge_type_weight(Some(2.0))?
.set_change_node_type_weight(Some(2.0))?
.set_dense_node_mapping(Some(graph.get_dense_nodes_mapping()))
.set_random_state(Some(43)))
}
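// Usage sketch (mirroring how these parameters are consumed later in this
// file): build the parameters once, then collect the parallel walks.
// let walks = graph
//     .par_iter_complete_walks(&first_order_walker(&graph)?)
//     .map(|iter| iter.collect::<Vec<Vec<NodeT>>>())?;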
fn validate_vocabularies(graph: &Graph) {
if let Some(ets) = &*graph.edge_types {
assert_eq!(!ets.ids.is_empty(), graph.has_edge_types(),
"We expected that if the graph has edge types then it cannot be empty. The report of the graph is:\n{:?}",
graph.textual_report()
);
}
if let Some(nts) = &*graph.node_types {
assert_eq!(!nts.ids.is_empty(), graph.has_node_types());
}
if let Some(ws) = &*graph.weights {
assert_eq!(
!ws.is_empty(), graph.has_edge_weights(),
concat!(
"We expect the edge weights vector to NOT be empty if the graph says it has weights.\n",
"The graph report is:\n{:?}"
),
graph.textual_report()
);
}
}
/// Executes the default test suite for holdouts.
pub fn default_holdout_test_suite(graph: &Graph, train: &Graph, test: &Graph) -> Result<()> {
for g in &[graph, train, test] {
validate_vocabularies(g);
}
test_graph_properties(train, None)?;
test_graph_properties(test, None)?;
assert!(
!train.overlaps(&test)?,
"Training graph overlaps with test graph!"
);
assert!(
!test.overlaps(&train)?,
"Test graph overlaps with training graph!"
);
    assert!(graph.contains(&train)?, "Graph does not contain training graph.");
assert!(graph.contains(&test)?, "Graph does not contain test graph.");
let summed = (train | test)?;
validate_vocabularies(&summed);
assert!(
summed.contains(&graph)?,
"Composed train and test graph do not contained original graph."
);
let subtracted = (graph - test)?;
validate_vocabularies(&subtracted);
assert!(
subtracted.contains(&train)?,
"Main graph subtracted test does not contain training graph."
);
    assert!(
        !subtracted.overlaps(&test)?,
        "Main graph subtracted test overlaps with the test graph."
    );
let xorred = (graph ^ test)?;
validate_vocabularies(&xorred);
assert!(
xorred.contains(&train)?,
"Main graph xorred test does not contain training graph."
);
    assert!(
        !xorred.overlaps(&test)?,
        "Main graph xorred test overlaps with the test graph."
    );
let anded = (graph & test)?;
validate_vocabularies(&anded);
assert!(
anded.contains(&test)?,
"Main graph anded test does not contain training graph."
);
Ok(())
}
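// To summarize the suite above: for a (train, test) holdout of `graph` we
// require that train and test are edge-disjoint, that `graph` contains both,
// and that the set algebra behaves as expected, namely
//   (train | test) contains graph,
//   (graph - test) contains train and does not overlap test,
//   (graph ^ test) contains train and does not overlap test,
//   (graph & test) contains test.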
/// Test that the spanning arborescence algorithm from bader is working correctly.
pub fn test_spanning_arborescence_bader(graph: &Graph, verbose: Option<bool>) {
let kruskal_tree = graph.spanning_arborescence_kruskal(verbose).0;
let random_kruskal_tree = graph
.random_spanning_arborescence_kruskal(Some(42), None, verbose)
.0;
if !graph.directed {
let spanning_arborescence_bader: Vec<(NodeT, NodeT)> =
graph.spanning_arborescence(verbose).unwrap().1.collect();
assert_eq!(
spanning_arborescence_bader.len(), kruskal_tree.len(),
"The number of extracted edges forming the spanning arborescence computed by the bader's algorithm does not match the one computed by kruskal. The graph report is:\n{:?}\nThe bader's tree is:\n{:?}\nThe kruskal's tree is:\n{:?}",
graph.textual_report(), spanning_arborescence_bader, kruskal_tree,
);
} else {
assert!(graph.spanning_arborescence(verbose).is_err());
}
assert_eq!(random_kruskal_tree.len() as usize, kruskal_tree.len());
}
pub fn test_graph_properties(graph: &Graph, verbose: Option<bool>) -> Result<()> {
// Testing that vocabularies are properly loaded
validate_vocabularies(graph);
    // Collect the set of connected nodes, INCLUDING singletons with selfloops.
let not_singleton_nodes = graph
.get_edge_node_ids(true)
.into_iter()
.flatten()
.unique()
.collect::<HashSet<NodeT>>();
// Collect the set of singleton nodes, i.e. nodes not in the previous set.
let singleton_nodes = graph
.iter_node_ids()
.filter(|node_id| !not_singleton_nodes.contains(node_id))
.collect::<HashSet<NodeT>>();
if graph.has_nodes() && !graph.has_edges() {
assert!(
graph.has_singleton_nodes(),
concat!(
"This graph has nodes (nodes number: {}) but ",
"has no edges (edges number: {}), therefore it ",
"should have singletons, but this does not seem ",
"to be the case (singletons number {}).\n",
"The graph report is {:?}."
),
graph.get_nodes_number(),
graph.get_edges_number(),
graph.get_singleton_nodes_number(),
graph.textual_report()
);
}
// Check properties relative to singletons.
assert_eq!(
!singleton_nodes.is_empty(),
graph.has_singleton_nodes(),
concat!(
"If singleton nodes are detected, the has_singleton_nodes ",
"method of the graph should return true.\n",
"The singleton nodes detected are: {:?}.\n",
"The has_singleton_nodes method returned: {:?}."
),
singleton_nodes,
graph.has_singleton_nodes()
);
assert_eq!(
singleton_nodes.len(),
graph.get_singleton_nodes_number() as usize,
concat!(
"The computed number of singleton nodes in this graph ",
"is {}, but the number of singletons that have been computed ",
"during the execution of the constructor are {}.\n",
"The report of this graph is: {:?}."
),
singleton_nodes.len(),
graph.get_singleton_nodes_number() as usize,
graph.textual_report()
);
assert!(unsafe {
singleton_nodes
.iter()
.all(|node_id| graph.is_unchecked_singleton_from_node_id(*node_id))
});
assert!(unsafe {
singleton_nodes
.iter()
.all(|node_id| graph.get_unchecked_node_degree_from_node_id(*node_id) == 0)
});
// For now we limit this test to undirected graphs
// to avoid the complexity of computing the indegree.
if !graph.is_directed() {
let singleton_nodes_with_selfloops = graph
.iter_node_ids()
.filter(|node_id| unsafe {
graph.get_unchecked_node_degree_from_node_id(*node_id) > 0
&& graph
.iter_unchecked_neighbour_node_ids_from_source_node_id(*node_id)
.all(|dst| dst == *node_id)
})
.collect::<HashSet<NodeT>>();
assert_eq!(
!singleton_nodes_with_selfloops.is_empty(),
graph.has_singleton_nodes_with_selfloops(),
concat!(
"Singleton nodes with selfloops were found within ",
"the provided, but the graph would not seem to ",
"contain any.\n",
"The graph edge list is:\n{:?}."
),
graph.get_edge_node_ids(true)
);
assert_eq!(
singleton_nodes_with_selfloops.len(),
graph.get_singleton_nodes_with_selfloops_number() as usize
);
assert!(
singleton_nodes_with_selfloops.iter().all(|node_id| unsafe {
graph.is_unchecked_singleton_with_selfloops_from_node_id(*node_id)
}),
concat!(
"The singleton with self-loops are defined as the set of nodes that ",
"exclusively have self-loop edges.\n",
"We have found the following list of singleton nodes with selfloops: {:?}\n"
),
singleton_nodes_with_selfloops
);
}
// If the graph is undirected, all the edges must have their symmetrical one
if !graph.is_directed() {
graph
.iter_edge_node_ids(true)
.for_each(|(_, src_node_id, dst_node_id)| {
assert!(
graph.has_edge_from_node_ids(dst_node_id, src_node_id),
concat!(
"In an undirected graph, for every edge there must ",
"have its own symmetric edge.\n",
"In the provided graph instance, for the edge from ",
"the source node ID {} to the destination node ID {} ",
"the symmetric edge does not exist.\n",
"This error is likely caused by some mis-parametrization ",
"in a method that is expected to produce a simmetric graph.",
),
src_node_id,
dst_node_id
);
});
}
assert_eq!(
graph.iter_node_degrees().is_sorted(),
graph.has_nodes_sorted_by_increasing_outbound_node_degree(),
concat!(
"The cached value for the method ",
"has_nodes_sorted_by_increasing_outbound_node_degree ",
"does not match the computed method.\n",
"The degrees of this graph are:\n {:?}\n",
"The reported order was: {}"
),
graph.get_node_degrees(),
graph.has_nodes_sorted_by_increasing_outbound_node_degree(),
);
let mut degrees = graph.get_node_degrees();
degrees.reverse();
assert_eq!(
degrees.is_sorted(),
graph.has_nodes_sorted_by_decreasing_outbound_node_degree(),
concat!(
"The cached value for the method ",
"has_nodes_sorted_by_decreasing_outbound_node_degree ",
"does not match the computed method."
)
);
if graph.has_nodes() && (graph.has_singleton_nodes() || graph.has_trap_nodes()) {
assert!(
graph.get_minimum_node_degree().unwrap() == 0,
concat!(
"When the graph either contains singleton nodes or trap nodes ",
"we expect for the minimum node degree to be zero, but is {}."
),
graph.get_minimum_node_degree().unwrap()
);
assert!(graph.iter_node_degrees().min().unwrap() == 0);
}
if let (Ok(min_degree), Ok(max_degree)) = (
graph.get_minimum_node_degree(),
graph.get_maximum_node_degree(),
) {
assert_eq!(
graph.has_nodes_sorted_by_decreasing_outbound_node_degree()
&& graph.has_nodes_sorted_by_increasing_outbound_node_degree(),
min_degree == max_degree,
concat!(
"When the the nodes are sorted both by decreasing and increasing node degree ",
"the minimum and maximum node degrees must be equal, and viceversa.\n",
"The computed minimum node degree is {}.\n",
"The computed maximum node degree is {}.\n",
"The result of has_nodes_sorted_by_decreasing_outbound_node_degree is {}.\n",
"The result of has_nodes_sorted_by_increasing_outbound_node_degree is {}.\n",
"The node degrees are:\n{:?}."
),
min_degree,
max_degree,
graph.has_nodes_sorted_by_decreasing_outbound_node_degree(),
graph.has_nodes_sorted_by_increasing_outbound_node_degree(),
graph.get_node_degrees()
);
}
// Test that the weights do not contain zeros.
if graph.has_edge_weights() {
for w in graph.iter_edge_weights().unwrap() {
assert!(!w.is_zero(), "The graph cannot contain a zero weight.");
assert!(
!w.is_infinite(),
"The graph cannot contain an infinite weight."
);
assert!(!w.is_nan(), "The graph cannot contain a NaN weight.");
}
// If the graph is undirected, the edge weights must be symmetrical
if !graph.is_directed() {
graph
.iter_edge_node_ids(false)
.for_each(|(_, src_node_id, dst_node_id)| unsafe {
assert!(
(graph.get_unchecked_edge_weight_from_node_ids(src_node_id, dst_node_id)
- graph
.get_unchecked_edge_weight_from_node_ids(dst_node_id, src_node_id))
.abs()
< WeightT::EPSILON * 10.0,
concat!(
"In an undirected graph, we expect for the edge weights to be symmetrical ",
"but in the provided graph there has been found a case where the edge ",
"from {} to {} has weight {}, while the edge from {} to {} has ",
"weight {}, creating an asymetrical case."
),
src_node_id,
dst_node_id,
graph.get_unchecked_edge_weight_from_node_ids(src_node_id, dst_node_id),
dst_node_id,
src_node_id,
graph.get_unchecked_edge_weight_from_node_ids(dst_node_id, src_node_id),
);
});
}
}
// Testing that the degrees computation is correct
assert_eq!(
graph.get_maximum_node_degree()?,
graph.iter_node_degrees().max().unwrap(),
"The cached maximum degree does not match the one computed from the node degrees."
);
if !graph.is_directed() && !graph.has_singleton_nodes() {
assert!(graph.get_minimum_node_degree()? > 0);
assert!(graph.iter_node_degrees().min().unwrap() > 0);
}
if !graph.is_directed() && graph.get_minimum_node_degree()? == 0 {
assert!(graph.has_singleton_nodes());
}
if !graph.has_disconnected_nodes() && !graph.has_trap_nodes() {
assert!(graph.get_minimum_node_degree()? > 0);
assert!(
graph.iter_node_degrees().min().unwrap() > 0,
concat!(
"Since the graph does not contain disconnected nodes nor it ",
"contains trap nodes, the minimum outbound node degree must be ",
"greater than zero.\n\n",
"The graph edges are: {:?}"
),
graph.get_edge_node_ids(true)
);
}
if graph.has_node_types() {
assert!(graph.has_nodes());
assert_eq!(
graph.get_known_node_types_number().unwrap(),
graph
.iter_node_ids_and_node_type_ids()
.map(|(_, node_type)| node_type.is_some() as NodeT)
.sum::<NodeT>()
);
assert_eq!(
graph.get_unknown_node_types_number().unwrap(),
graph
.iter_node_ids_and_node_type_ids()
.map(|(_, node_type)| node_type.is_none() as NodeT)
.sum::<NodeT>()
);
if graph.has_unknown_node_types().unwrap() {
assert!(graph
.iter_node_ids_and_node_type_ids()
.any(|(_, node_type)| node_type.is_none()));
assert!(graph.get_node_ids_with_unknown_node_types().unwrap().len() > 0);
}
if graph.has_known_node_types().unwrap() {
assert!(graph
.iter_node_ids_and_node_type_ids()
.any(|(_, node_type)| node_type.is_some()));
assert!(graph.get_node_ids_with_known_node_types().unwrap().len() > 0);
}
}
if graph.has_edge_types() {
assert!(graph.has_edges());
assert_eq!(
graph.get_known_edge_types_number().unwrap(),
graph
.iter_edge_node_ids_and_edge_type_id(true)
.map(|(_, _, _, edge_type)| edge_type.is_some() as EdgeT)
.sum::<EdgeT>()
);
assert_eq!(
graph.get_unknown_edge_types_number().unwrap(),
graph
.iter_edge_node_ids_and_edge_type_id(true)
.map(|(_, _, _, edge_type)| edge_type.is_none() as EdgeT)
.sum::<EdgeT>()
);
if graph.has_unknown_edge_types().unwrap() {
assert!(graph
.iter_edge_node_ids_and_edge_type_id(true)
.any(|(_, _, _, edge_type)| edge_type.is_none()));
assert!(graph.get_edge_ids_with_unknown_edge_types().unwrap().len() > 0);
}
if graph.has_known_edge_types().unwrap() {
assert!(
graph
.iter_edge_node_ids_and_edge_type_id(true)
.any(|(_, _, _, edge_type)| edge_type.is_some()),
concat!(
"We expected for the graph to contain at least one edge ",
"with a known edge type, but apparently it does not contain ",
"any. The graph contains {} edges and would have seemed to contain ",
"{} edges with known edge types."
),
graph.get_number_of_directed_edges(),
graph.get_known_edge_types_number().unwrap()
);
assert!(graph.get_edge_ids_with_known_edge_types().unwrap().len() > 0);
}
}
assert_eq!(
graph.get_minimum_node_degree()?,
graph.iter_node_degrees().min().unwrap(),
concat!(
"The cached minimum degree does not match the one computed from the node degrees.\n",
"The outbound node degrees are: {:?}"
),
graph.get_node_degrees()
);
if graph.has_edge_weights() {
assert!(
((graph.get_weighted_maximum_node_degree().clone())?
- graph
.iter_weighted_node_degrees()?
.max_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap())
.abs()
< f64::EPSILON,
concat!(
"The cached weighted maximum degree ({}) ",
"does not match the one computed from the node degrees ({}), ",
"where the node degrees list is {:?}.\n",
"Additionally the number of weighted singleton nodes is {:?}."
),
(graph.get_weighted_maximum_node_degree().clone())?,
graph
.iter_weighted_node_degrees()?
.max_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap(),
graph.get_weighted_node_degrees(),
graph.get_weighted_singleton_nodes_number()
);
assert!(
((graph.get_weighted_minimum_node_degree().clone())?
- graph
.iter_weighted_node_degrees()?
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap())
.abs()
< f64::EPSILON,
"The cached weighted minimum degree ({:?}) does not match the one computed from the node degrees ({:?}).",
(graph.get_weighted_minimum_node_degree().clone())?,
graph
.iter_weighted_node_degrees()?
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap()
);
}
for singleton_node_id in graph.iter_singleton_node_ids() {
assert!(unsafe { graph.get_unchecked_node_degree_from_node_id(singleton_node_id) } == 0);
assert!(unsafe { graph.is_unchecked_singleton_from_node_id(singleton_node_id) });
}
if !graph.is_directed() {
for node_id in graph.iter_node_ids() {
unsafe {
assert_eq!(
graph.is_unchecked_singleton_from_node_id(node_id),
graph.get_unchecked_node_degree_from_node_id(node_id) == 0
)
};
}
}
// Test get_edge_id_from_node_names_and_edge_type_name()
assert!(
graph
.get_edge_id_from_node_names_and_edge_type_name(NONEXISTENT, NONEXISTENT, None)
.is_err(),
"Graph contains non-existing edge."
);
// Test has_node_from_name
assert!(
!(graph.has_node_name_and_node_type_name(NONEXISTENT, None)),
"The graph seems to have a non-existing node."
);
assert!(
!(graph.has_node_name(NONEXISTENT)),
"The graph seems to have a non-existing node."
);
// Singletons with selfloops can only exist if the graph has nodes AND selfloops
if graph.has_singleton_nodes() {
assert!(graph.has_nodes());
}
if graph.has_singleton_nodes_with_selfloops() {
assert!(graph.has_nodes());
assert!(graph.has_selfloops());
}
// Test translate_edge|node_types()
assert!(
graph
.get_edge_type_ids_from_edge_type_names(vec![Some(NONEXISTENT.to_string())])
.is_err(),
"The graph seems to have a non-existing edge type."
);
assert!(
graph
.get_node_type_ids_from_node_type_names(vec![Some(NONEXISTENT.to_string())])
.is_err(),
"The graph seems to have a non-existing node type."
);
assert_eq!(
graph.get_connected_nodes_number()
+ graph.get_singleton_nodes_number()
+ graph.get_singleton_nodes_with_selfloops_number(),
graph.get_nodes_number(),
"Sum of singleton and non singleton nodes number does not match."
);
assert_eq!(
graph.get_disconnected_nodes_number(), graph.get_singleton_nodes_number() + graph.get_singleton_nodes_with_selfloops_number(),
"Sum of singleton and singleton with selfloops does not match the number of disconnected nodes."
);
warn!("Running connected components tests.");
let (_components_number, smallest, biggest) = graph.get_connected_components_number(None);
assert!(
biggest >= smallest,
"smallest: {} biggest: {}",
smallest,
biggest
);
if smallest == 1 {
assert!(
graph.has_singleton_nodes() || graph.has_singleton_nodes_with_selfloops(),
"When the smallest component is one the graph must have singletons! Graph report: \n{:?}",
graph.textual_report()
);
}
assert_eq!(
!graph.has_nodes(),
smallest == 0,
"When the smallest component is zero the graph must be empty! Graph report: \n{:?}",
graph.textual_report()
);
// Get one edge from the graph if there are any presents
if let Some(edge) = graph.iter_unique_edge_node_ids(true).next() {
let src_string = unsafe { graph.get_unchecked_node_name_from_node_id(edge.0) };
let dst_string = unsafe { graph.get_unchecked_node_name_from_node_id(edge.1) };
let edge_id = graph.get_edge_id_from_node_names(&src_string, &dst_string)?;
if graph.has_edge_types() {
let edge_type = graph.get_edge_type_name_from_edge_id(edge_id)?;
let clone_edge_type = edge_type.clone();
assert!(
graph.has_edge_from_node_names_and_edge_type_name(&src_string, &dst_string, clone_edge_type.as_deref()),
"I was expecting for the edge ({}, {}, {:?}) to exist, but it seems to not exist in graph {:?}",
src_string,
dst_string,
edge_type,
graph.textual_report()
);
} else {
assert!(
graph.has_edge_from_node_names(&src_string, &dst_string),
"I was expecting for the edge ({}, {}) without type to exist, but it seems to not exist in graph {:?}",
src_string,
dst_string,
graph.textual_report()
);
}
assert!(graph.has_node_name(&src_string) && graph.has_node_name(&dst_string));
if graph.has_node_types() {
assert!(
graph.has_node_name_and_node_type_name(
&src_string,
graph.get_node_type_names_from_node_name(&src_string)?
) && graph.has_node_name_and_node_type_name(
&dst_string,
graph.get_node_type_names_from_node_name(&dst_string)?
),
concat!(
"The nodes {:?} and {:?} with node types are not present in the graph.\n",
"The node types are {:?} and {:?}.\n",
"The first node existance is {}\n",
"The second node existance is {}\n",
"The graph report is {:?}"
),
src_string,
dst_string,
graph.get_node_type_names_from_node_name(&src_string),
graph.get_node_type_names_from_node_name(&dst_string),
graph.has_node_name_and_node_type_name(
&src_string,
graph.get_node_type_names_from_node_name(&src_string)?
),
graph.has_node_name_and_node_type_name(
&dst_string,
graph.get_node_type_names_from_node_name(&dst_string)?
),
graph.textual_report()
);
}
assert_eq!(
graph.get_edge_id_from_node_names(&src_string, &dst_string)?,
graph.get_edge_id_from_node_ids(edge.0, edge.1).unwrap(),
"Check of given edge ID does not match."
);
}
// Test the generation of the textual report, this includes the connected components algorithm.
graph.report();
graph.textual_report();
graph.overlap_textual_report(&graph, verbose)?;
// Compute degrees metrics
for src in 0..5 {
for dst in 0..5 {
let _ = graph.get_preferential_attachment_from_node_ids(src, dst, true);
let _ = graph.get_preferential_attachment_from_node_ids(src, dst, false);
let _ = graph.get_jaccard_coefficient_from_node_ids(src, dst);
let _ = graph.get_adamic_adar_index_from_node_ids(src, dst);
let _ = graph.get_resource_allocation_index_from_node_ids(src, dst);
if graph.has_edge_weights() {
let _ = graph.get_weighted_preferential_attachment_from_node_ids(src, dst, true);
let _ = graph.get_weighted_preferential_attachment_from_node_ids(src, dst, false);
let _ = graph.get_weighted_resource_allocation_index_from_node_ids(src, dst);
}
}
}
assert_eq!(
graph.has_node_types(),
graph.get_node_type_ids_from_node_id(0).is_ok()
);
assert!(
graph.get_node_type_ids_from_node_id(graph.get_nodes_number() + 1).is_err(),
"Given graph does not raise an exception when a node's node type greater than the number of available nodes is requested."
);
assert_eq!(
graph.has_edge_types(),
graph.get_edge_type_id_from_edge_id(0).is_ok()
);
assert!(
graph.get_edge_type_id_from_edge_id(graph.get_number_of_directed_edges() + 1).is_err(),
"Given graph does not raise an exception when a edge's edge type greater than the number of available edges is requested."
);
// Evaluate get_node_type
assert_eq!(
graph.get_node_type_ids_from_node_id(0).is_ok(),
graph.has_node_types()
);
// Evaluate get_edge_type
assert_eq!(
graph.get_edge_type_id_from_edge_id(0).is_ok(),
graph.has_edge_types()
);
// Evaluate get_edge_type_counts
assert_eq!(
graph.get_edge_type_id_counts_hashmap().is_ok(),
graph.has_edge_types()
);
// Evaluate get_edge_type_id_counts_hashmap
assert_eq!(
graph.get_edge_type_id_counts_hashmap().is_ok(),
graph.has_edge_types()
);
graph.strongly_connected_components();
// Checking that the connected components are a dense range.
let (_, connected_components, total_connected_components, _, _) =
graph.random_spanning_arborescence_kruskal(Some(42), None, verbose);
let actual_components_number = connected_components.iter().unique().count() as NodeT;
assert_eq!(
actual_components_number, total_connected_components,
concat!(
"The measured number of connected components ({}) ",
"does not match the computed number of connected components ({}).\n",
"That is, the components are not a dense set.\n",
"This is likely caused by a problem with the remapping of the ",
"components."
),
actual_components_number, total_connected_components,
);
let max_component_id = connected_components.iter().max();
if let Some(mci) = max_component_id {
assert_eq!(
*mci as usize,
total_connected_components as usize - 1,
"We expected the connected components to be a dense set.\n The obtained components are: \n{:?}\n The graph report is:\n{:?}",
connected_components,
graph.textual_report()
);
}
if !graph.is_directed() {
// Checking that the connected components are a dense range.
let (connected_components, total_connected_components, _, _) =
graph.get_connected_components(verbose).unwrap();
let actual_components_number = connected_components.iter().unique().count() as NodeT;
assert_eq!(
actual_components_number,
total_connected_components,
"The measured number of connected components ({}) does not match the computed number of connected components ({}).",
actual_components_number,
total_connected_components
);
let max_component_id = connected_components.iter().max();
if let Some(mci) = max_component_id {
assert_eq!(
*mci as usize,
total_connected_components as usize - 1,
"We expected the connected components to be a dense set.\n The obtained components are: \n{:?}\n The graph report is:\n{:?}",
connected_components,
graph.textual_report()
);
}
}
let _total_memory_used = graph.memory_stats().total();
Ok(())
}
pub fn test_node_centralities(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
if graph.has_edges() {
let node_degree_centralities = graph.get_degree_centrality().unwrap();
assert_eq!(
node_degree_centralities.len(),
graph.get_nodes_number() as usize
);
assert!(
node_degree_centralities
.iter()
.cloned()
.all(|value| value <= 1.0 && value >= 0.0),
"All node degrees centralities are expected to be within 0 and 1, but are {:?}.",
node_degree_centralities
);
}
if graph.has_edge_weights() && !graph.has_negative_edge_weights().unwrap() {
let node_degree_centralities = graph.get_weighted_degree_centrality().unwrap();
assert_eq!(
node_degree_centralities.len(),
graph.get_nodes_number() as usize
);
assert!(
node_degree_centralities
.iter()
.cloned()
.all(|value| value <= 1.0 && value >= 0.0),
concat!(
"All weighted node degrees centralities ",
"are expected to be within 0 and 1, ",
"but are {:?} and the node degrees are {:?}, with the ",
"minimum weighted node degree being {} and ",
"maximum weighted node degree being {}.",
),
node_degree_centralities,
graph.get_weighted_node_degrees(),
graph.get_weighted_minimum_node_degree().clone().unwrap(),
graph.get_weighted_maximum_node_degree().clone().unwrap(),
);
}
let node_betweenness_centralities = graph.get_betweenness_centrality(None, verbose);
assert_eq!(
node_betweenness_centralities.len(),
graph.get_nodes_number() as usize
);
node_betweenness_centralities
.into_iter()
.enumerate()
.for_each(|(node_id, value)| {
if unsafe { graph.is_unchecked_singleton_from_node_id(node_id as NodeT) } {
assert!(value.abs() < f32::EPSILON);
}
});
Ok(())
}
pub fn test_vertex_cover(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
let vertex_cover = graph.approximated_vertex_cover_set();
graph
.par_iter_edge_node_ids(true)
.for_each(|(_, src_node_id, dst_node_id)| {
assert!(
vertex_cover.contains(&src_node_id) || vertex_cover.contains(&dst_node_id),
concat!(
"We expected for either the node {} or {} to be in the vertex cover.\n",
"The vertex cover is {:?}"
),
src_node_id,
dst_node_id,
vertex_cover
);
});
Ok(())
}
pub fn test_bfs(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
    // We avoid running this test on very large graphs to avoid slowing down the test suite
if graph.get_nodes_number() > 100 {
return Ok(());
}
// If the graph is empty the other tests on BFS make little sense
if !graph.has_nodes() {
assert!(graph
.get_breadth_first_search_from_node_ids(0, None, None, None)
.is_err());
return Ok(());
}
    // BFS on an undirected graph gives symmetric results.
if !graph.is_directed() {
let components_ids = graph.get_node_connected_component_ids(verbose);
for maximal_depth in [None, Some(1), Some(2), Some(3)] {
graph.iter_node_ids().for_each(|src_node_id| {
graph.iter_node_ids().for_each(|dst_node_id| unsafe {
                // Check that the obtained results are symmetric
let src_to_dst = graph.get_unchecked_shortest_path_node_ids_from_node_ids(
src_node_id,
dst_node_id,
maximal_depth,
);
let dst_to_src = graph.get_unchecked_shortest_path_node_ids_from_node_ids(
dst_node_id,
src_node_id,
maximal_depth,
);
if src_node_id == dst_node_id {
assert!(src_to_dst.is_err());
assert!(dst_to_src.is_err());
return;
}
if components_ids[src_node_id as usize] != components_ids[dst_node_id as usize]
{
assert!(src_to_dst.is_err());
assert!(dst_to_src.is_err());
return;
}
if let (Ok(src_to_dst), Ok(dst_to_src)) = (src_to_dst, dst_to_src) {
// Check that the two paths have the same length
assert_eq!(src_to_dst.len(), dst_to_src.len());
// Test that the k-paths return a compatible result
let kpaths = graph.get_unchecked_k_shortest_path_node_ids_from_node_ids(
src_node_id,
dst_node_id,
5,
);
let min_length = kpaths.into_iter().map(|path| path.len()).min().unwrap();
assert_eq!(min_length, src_to_dst.len());
}
});
});
}
}
Ok(())
}
pub fn test_dijkstra(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
    // We avoid running this test on very large graphs to avoid slowing down the test suite
if graph.get_nodes_number() > 100 {
return Ok(());
}
// If the graph is empty the other tests on Dijkstra make little sense
if !graph.has_nodes() {
assert!(graph
.get_dijkstra_from_node_ids(0, None, None, None, None, None)
.is_err());
return Ok(());
}
// Dijkstra on unweighted graphs does not make sense
// Dijkstra on weighted graphs with negative weights does not make sense
if !graph.has_edge_weights() || graph.has_negative_edge_weights().unwrap() {
assert!(graph
.get_dijkstra_from_node_names(
unsafe { graph.get_unchecked_node_name_from_node_id(0).as_ref() },
None,
None,
None,
None,
None
)
.is_err());
return Ok(());
}
    // Dijkstra on an undirected graph gives symmetric results.
if !graph.is_directed() {
for use_edge_weights_as_probabilities in [true, false] {
if use_edge_weights_as_probabilities
&& !graph.has_edge_weights_representing_probabilities().unwrap()
{
continue;
}
graph.iter_node_ids().for_each(|src_node_id| {
graph.iter_node_ids().for_each(|dst_node_id| unsafe {
                    // Check that the obtained results are symmetric
let (src_to_dst_distance, src_to_dst) = graph
.get_unchecked_weighted_shortest_path_node_ids_from_node_ids(
src_node_id,
dst_node_id,
Some(use_edge_weights_as_probabilities),
None,
);
let (dst_to_src_distance, dst_to_src) = graph
.get_unchecked_weighted_shortest_path_node_ids_from_node_ids(
dst_node_id,
src_node_id,
Some(use_edge_weights_as_probabilities),
None,
);
let src_to_dst_distance = src_to_dst_distance as WeightT;
let dst_to_src_distance = dst_to_src_distance as WeightT;
assert!(
// We need both checks because both distances
                    // may be infinite, and therefore the epsilon check
// may not be enough.
src_to_dst_distance.is_infinite() && dst_to_src_distance.is_infinite()
|| (src_to_dst_distance - dst_to_src_distance).abs()
< WeightT::EPSILON * 10.0,
concat!(
"The path from source to destination has distance {} ",
"while the distance from destination to source has ",
"destination {}. The path from source to destination ",
"is {:?}, while the path from destination to source ",
"is {:?}. The two paths should be symmetric and with ",
"the same distance.\nThe graph report is:\n{:?}"
),
src_to_dst_distance,
dst_to_src_distance,
src_to_dst,
dst_to_src,
graph.textual_report()
);
});
});
}
}
Ok(())
}
pub fn test_polygons(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
assert_eq!(
graph
.get_number_of_triangles_per_node(Some(false), None, verbose)
.into_iter()
.map(|triangles_number| triangles_number as EdgeT)
.sum::<EdgeT>(),
graph.get_number_of_triangles(Some(false), None, verbose)
);
Ok(())
}
pub fn test_transitivity(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
    // We skip this test for graphs with more than 1000 nodes to avoid
    // slowing down the test suite too much.
if graph.get_nodes_number() > 1000 {
return Ok(());
}
if !graph.has_edge_weights() && !graph.has_edge_types() {
// We define the 0-th iteration of transitive closure as the graph itself
assert_eq!(
graph.clone(),
graph.get_transitive_closure(Some(0), verbose)
);
        // We define the first iteration of transitive closure of a graph with selfloops as the graph itself
let graph_with_selfloops = graph.add_selfloops(None, None).unwrap();
assert_eq!(
graph_with_selfloops,
graph_with_selfloops.get_transitive_closure(Some(1), verbose),
concat!(
"We expected the original graph to equal to the graph obtained after ",
"a single iteration of transitive closure, but they are different.\n",
"The report of the first graph is: \n {}\n",
"The report of the second graph is: \n {}\n",
),
graph_with_selfloops.textual_report(),
graph_with_selfloops
.get_transitive_closure(Some(1), verbose)
.textual_report(),
);
// Doing multiple iterations should be equal to doing the same iteration multiple times
let four_iterations = graph_with_selfloops.get_transitive_closure(Some(4), verbose);
let two_times_two = graph_with_selfloops
.get_transitive_closure(Some(2), verbose)
.get_transitive_closure(Some(2), verbose);
assert_eq!(
four_iterations,
two_times_two,
concat!(
"We expected the graph after 4 transitive closures to be ",
"equal to the graph after two times two transitive closures.\n",
"The to_dot of the first graph is: \n {}\n",
"The to_dot of the second graph is: \n {}\n",
),
four_iterations.to_dot(),
two_times_two.to_dot()
);
}
let mut transitive_closure = graph.get_transitive_closure(None, verbose);
let connected_components = graph.get_node_connected_component_ids(verbose);
if !graph.is_directed() {
for (src_node_id, src_component_id) in connected_components.iter().cloned().enumerate() {
if unsafe { graph.is_unchecked_singleton_from_node_id(src_node_id as NodeT) } {
continue;
}
for (dst_node_id, dst_component_id) in connected_components.iter().cloned().enumerate()
{
assert_eq!(
transitive_closure
.has_edge_from_node_ids(src_node_id as NodeT, dst_node_id as NodeT),
src_component_id == dst_component_id,
concat!(
"In an undirected graph, the transitive closure of the graph should ",
"contain an edge between all nodes in the same component, but ",
"the node {} and {} have as component IDs {} and {} respectively, ",
"and the test has edge has returned {}."
),
src_node_id,
dst_node_id,
src_component_id,
dst_component_id,
transitive_closure
.has_edge_from_node_ids(src_node_id as NodeT, dst_node_id as NodeT)
);
}
}
}
test_graph_properties(&mut transitive_closure, verbose)?;
Ok(())
}
pub fn test_all_paths(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
    // We skip this test for graphs with more than 1000 nodes to avoid
    // slowing down the test suite too much.
if graph.get_nodes_number() > 1000 {
return Ok(());
}
for iteration in [None, Some(0), Some(1), Some(2)] {
let mut unweighted_all_paths = graph.get_all_shortest_paths(iteration, verbose);
test_graph_properties(&mut unweighted_all_paths, verbose)?;
}
if !graph.has_edge_weights() || graph.has_negative_edge_weights().unwrap() {
assert!(graph
.get_weighted_all_shortest_paths(None, None, verbose)
.is_err());
return Ok(());
}
for iteration in [None, Some(0), Some(1), Some(2)] {
let mut weighted_all_paths = graph
.get_weighted_all_shortest_paths(iteration, None, verbose)
.unwrap();
test_graph_properties(&mut weighted_all_paths, verbose)?;
}
Ok(())
}
pub fn test_selfloops(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
assert!(!graph.remove_selfloops().has_selfloops());
assert_eq!(
graph.add_selfloops(None, Some(1.0)).is_ok(),
graph.has_edge_weights()
);
let mut graph_with_selfloops = graph
.add_selfloops(
None,
if graph.has_edge_weights() {
Some(1.0)
} else {
None
},
)
.unwrap();
for node_id in graph.iter_node_ids() {
assert!(graph_with_selfloops.has_selfloop_from_node_id(node_id));
}
test_graph_properties(&mut graph_with_selfloops, verbose)?;
Ok(())
}
pub fn test_sorting(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
let sorted_increasing = graph.sort_by_increasing_outbound_node_degree();
// The sorted graph is now sorted.
assert!(sorted_increasing.has_nodes_sorted_by_increasing_outbound_node_degree());
// The sorted graph has the same node types as the original graph
if graph.has_node_types() {
for node_name in sorted_increasing.iter_node_names() {
assert_eq!(
graph.get_node_type_ids_from_node_name(node_name.as_str()),
sorted_increasing.get_node_type_ids_from_node_name(node_name.as_str()),
concat!(
"We expected the graph unsorted and sorted by increasing ",
"node degree to have the same node types, but we have found ",
"a node, namely `{}`, to have a different node type in the two ",
"versions of this graph."
),
node_name
);
}
}
let sorted_decreasing = graph.sort_by_decreasing_outbound_node_degree();
// The sorted graph is now sorted.
assert!(sorted_decreasing.has_nodes_sorted_by_decreasing_outbound_node_degree());
// The sorted graph has the same node types as the original graph
if graph.has_node_types() {
for node_name in sorted_decreasing.iter_node_names() {
assert_eq!(
graph.get_node_type_ids_from_node_name(node_name.as_str()),
sorted_decreasing.get_node_type_ids_from_node_name(node_name.as_str()),
concat!(
"We expected the graph unsorted and sorted by decreasing ",
"node degree to have the same node types, but we have found ",
"a node, namely `{}`, to have a different node type in the two ",
"versions of this graph."
),
node_name
);
}
}
let sorted_lexicographical = graph.sort_by_node_lexicographic_order();
// The sorted graph is now sorted.
assert!(sorted_lexicographical.has_nodes_sorted_by_lexicographic_order());
// The sorted graph has the same node types as the original graph
if graph.has_node_types() {
for node_name in sorted_lexicographical.iter_node_names() {
assert_eq!(
graph.get_node_type_ids_from_node_name(node_name.as_str()),
sorted_lexicographical.get_node_type_ids_from_node_name(node_name.as_str()),
concat!(
"We expected the graph unsorted and sorted by lexicographical ",
"node degree to have the same node types, but we have found ",
"a node, namely `{}`, to have a different node type in the two ",
"versions of this graph."
),
node_name
);
}
}
Ok(())
}
pub fn test_random_walks(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
// Testing principal random walk algorithms
let walker = first_order_walker(&graph)?;
assert_eq!(walker.clone(), walker);
let walker2 = second_order_walker(&graph, 2.0, 2.0)?;
assert_eq!(walker2.clone(), walker2);
if !graph.directed {
warn!("Executing random walks tests.");
for mode in 0..2 {
if mode == 1 {
graph.enable(None, None, None, None)?;
if let Some(cumulative_node_degrees) = &*graph.cumulative_node_degrees {
assert_eq!(
cumulative_node_degrees.len(),
graph.get_nodes_number() as usize,
"Length of cumulative_node_degrees does not match number of nodes in the graph."
);
}
if let Some(destinations) = &*graph.destinations {
assert_eq!(
destinations.len(),
graph.get_number_of_directed_edges() as usize,
"Length of destinations does not match number of edges in the graph."
);
}
}
assert_eq!(
graph
.par_iter_random_walks(1, &walker)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
graph
.par_iter_random_walks(1, &walker)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
"Walks of first order are not reproducible!"
);
assert_eq!(
graph
.par_iter_random_walks(1, &second_order_walker(&graph, 2.0, 2.0)?)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
graph
.par_iter_random_walks(1, &second_order_walker(&graph, 2.0, 2.0)?)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
"Walks of second order are not reproducible!"
);
assert_eq!(
graph
.par_iter_complete_walks(&walker)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
graph
.par_iter_complete_walks(&walker)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
"Complete first order walks are not reproducible!"
);
assert_eq!(
graph
.par_iter_complete_walks(&second_order_walker(&graph, 2.0, 2.0)?)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
graph
.par_iter_complete_walks(&second_order_walker(&graph, 2.0, 2.0)?)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
"Complete second order walks are not reproducible!"
);
assert_eq!(
graph
.par_iter_complete_walks(&second_order_walker(&graph, 2.0, 1.0)?)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
graph
.par_iter_complete_walks(&second_order_walker(&graph, 2.0, 1.0)?)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
"Complete second order walks are not reproducible!"
);
assert_eq!(
graph
.par_iter_complete_walks(&second_order_walker(&graph, 1.0, 2.0)?)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
graph
.par_iter_complete_walks(&second_order_walker(&graph, 1.0, 2.0)?)
.map(|iter| iter.collect::<Vec<Vec<NodeT>>>()),
"Complete second order walks are not reproducible!"
);
}
} else {
assert!(graph.par_iter_complete_walks(&walker).is_err());
}
Ok(())
}
pub fn test_edge_holdouts(graph: &Graph, verbose: Option<bool>) -> Result<()> {
if !graph.has_edge_types() {
assert!(graph
.connected_holdout(0.8, None, Some(vec![None]), Some(false), None, None, None)
.is_err());
}
for include_all_edge_types in &[false, true] {
let (train, test) = graph.random_holdout(
0.6,
None,
Some(*include_all_edge_types),
None,
None,
verbose,
)?;
default_holdout_test_suite(graph, &train, &test)?;
let (train, test) = graph.connected_holdout(
0.8,
None,
None,
Some(*include_all_edge_types),
None,
None,
verbose,
)?;
assert_eq!(graph.get_nodes_number(), train.get_nodes_number());
assert_eq!(graph.get_nodes_number(), test.get_nodes_number());
let (original_total, original_min_comp, original_max_comp) =
graph.get_connected_components_number(verbose);
let (train_total, train_min_comp, train_max_comp) =
train.get_connected_components_number(verbose);
if original_total == 1 {
assert_eq!(
original_min_comp, original_max_comp,
concat!(
"When the number of components is only one, ",
"the minimum component size should be equal ",
"to the maximum component size.\n",
"The minimum component size was: {}.\n",
"The maximum component size was: {}.\n",
),
original_min_comp, original_max_comp
);
assert_eq!(
original_min_comp,
graph.get_nodes_number(),
concat!(
"When the number of components is only one, ",
"the minimum component size should be equal ",
"to the number of nodes of the graph.\n",
"The minimum component size was: {}.\n",
"The number of nodes of the graph was: {}.\n",
),
original_min_comp,
graph.get_nodes_number(),
);
}
if original_total == 2 {
assert!(original_min_comp <= original_max_comp);
assert_eq!(
original_min_comp + original_max_comp,
graph.get_nodes_number(),
concat!(
"When a graph contains two connected components, ",
"summing the two connected components should give ",
"the number of nodes in the graph.\n",
"The graph is {}."
),
if graph.is_directed() {
"directed"
} else {
"undirected"
}
);
}
if train_total == 1 {
assert!(train_min_comp == train_max_comp);
assert_eq!(train_min_comp, graph.get_nodes_number());
}
if train_total == 2 {
assert!(train_min_comp <= train_max_comp);
assert_eq!(train_min_comp + train_max_comp, train.get_nodes_number());
}
assert_eq!(
train_total, original_total,
concat!(
"In a connected holdout the training graph must have the ",
"same number of connected components as in the original ",
"graph, but here the training graph has {} components ",
"while the original graph has {} components."
),
train_total, original_total
);
assert_eq!(
train_min_comp, original_min_comp,
concat!(
"In a connected holdout the training graph must have the ",
"same number of connected components as in the original ",
"graph, but here the minimum connected component size ",
"of the training graph has size {} while the corresponding one ",
"from the original graph has size {}."
),
train_min_comp, original_min_comp
);
assert_eq!(
train_max_comp, original_max_comp,
concat!(
"In a connected holdout the training graph must have the ",
"same number of connected components as in the original ",
"graph, but here the maximum connected component size ",
"of the training graph has size {} while the corresponding one ",
"from the original graph has size {}."
),
train_max_comp, original_max_comp
);
default_holdout_test_suite(graph, &train, &test)?;
}
Ok(())
}
pub fn test_remove_components(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
if graph.get_connected_components_number(verbose).0 > 1 {
let without_selfloops = graph.remove_selfloops();
assert_eq!(
graph.get_connected_components_number(verbose),
without_selfloops.get_connected_components_number(verbose),
concat!(
"We expected the graph to have the same components once we remove the selfloops.\n",
"The report of the original graph is {:?}\n",
"The report of the filtered graph is {:?}\n",
"The edge node ids of the original graph are {:?}\n",
"The edge node ids of the filtered graph are {:?}\n"
),
graph.textual_report(),
without_selfloops.textual_report(),
graph.get_edge_node_ids(true),
without_selfloops.get_edge_node_ids(true),
);
let single_component = graph.remove_components(None, None, None, None, Some(1), verbose);
assert!(
single_component.is_ok(),
concat!(
"Removing all the components except the first one returned an error.\n",
"The error is:\n{:?}\nand the graph report is:\n{:?}"
),
single_component,
graph.textual_report()
);
let single_component_number = single_component
.unwrap()
.get_connected_components_number(verbose)
.0;
assert_eq!(
single_component_number,
1,
concat!(
"Removing all the components except the first one returned a graph ",
"with {} components, which is not one.\nThe report of the graph is:{:?}\n"
),
single_component_number,
graph.textual_report()
);
let test = graph.remove_components(
Some(vec![graph.nodes.unchecked_translate(0)]),
None,
None,
None,
None,
verbose,
)?;
let without_selfloops = test.remove_selfloops();
assert_eq!(
without_selfloops.get_connected_components_number(verbose).0,
1,
concat!(
"Expected number of components (1) is not matched!\n",
"The report of the original graph is {:?}\n",
"The report of the graph with only one component is {:?}\n",
"The report of the graph without selfloops is {:?}\n",
),
graph.textual_report(),
test.textual_report(),
without_selfloops.textual_report()
);
if let Ok(node_type_name) = graph.get_node_type_name_from_node_type_id(0) {
assert!(graph
.remove_components(
None,
Some(vec![Some(node_type_name)]),
None,
None,
None,
verbose
)
.is_ok());
}
if graph.has_unknown_node_types()? {
let without_unknowns =
graph.remove_components(None, Some(vec![None]), None, None, None, verbose);
assert!(
without_unknowns.is_ok(),
"Could not remove components without node type None.\nThe error is {:?}\nThe graph report is {:?}",
without_unknowns, graph.textual_report()
);
}
if let Ok(edge_type_name) = graph.get_edge_type_name_from_edge_type_id(0) {
assert!(graph
.remove_components(
None,
None,
Some(vec![Some(edge_type_name)]),
None,
None,
verbose
)
.is_ok());
}
if graph.has_unknown_edge_types()? {
assert!(graph
.remove_components(None, None, Some(vec![None]), None, None, verbose)
.is_ok());
}
} else {
assert!(
graph
.remove_components(None, None, None, None, None, verbose)
.is_ok(),
"We expect it to be possible, now, to create empty graphs."
);
}
Ok(())
}
pub fn test_kfold(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
let k = 3;
for i in 0..k {
let (train, test) = graph.get_edge_prediction_kfold(k, i, None, None, None)?;
assert!(
test.get_edges_number() <= (graph.get_edges_number() / k as EdgeT) + 1,
concat!(
"Check that test kfolds respect size bound has failed!\n",
"The value of k is {}.\n",
"The report of the original graph is:\n{:?}\n",
"The report of the train graph is:\n{:?}\n",
"The report of the test graph is:\n{:?}\n",
"We expect that the test graph has at most {} edges but it has {}.\n",
"The holdout index is {}.\n",
),
k,
graph.textual_report(),
train.textual_report(),
test.textual_report(),
(graph.get_edges_number() / k as EdgeT) + 1,
test.get_edges_number(),
i
);
default_holdout_test_suite(graph, &train, &test)?;
}
if let Ok(edge_t) = graph.get_edge_type_name_from_edge_type_id(0) {
for i in 0..k {
let (train, test) = graph.get_edge_prediction_kfold(
k,
i,
Some(vec![Some(edge_t.clone())]),
None,
None,
)?;
default_holdout_test_suite(graph, &train, &test)?;
}
}
Ok(())
}
pub fn test_negative_edges_generation(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
let number_of_edges = graph.get_edges_number().min(10) as usize;
let positives = graph.sample_positive_graph(
number_of_edges,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)?;
assert_eq!(positives.get_edges_number() as usize, number_of_edges);
assert!(positives.overlaps(graph)?);
assert!(graph.contains(&positives)?);
for only_from_same_component in &[true, false] {
// If the graph is very sparse, this takes a lot of time
// and makes the test suite very slow.
if *only_from_same_component && graph.get_number_of_directed_edges() < 100 {
continue;
}
let negatives = graph.sample_negative_graph(
graph.get_edges_number(),
None,
Some(*only_from_same_component),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)?;
assert_eq!(
graph.get_edges_number(),
negatives.get_edges_number(),
"We expect the graph and its negative graph to have the same number of edges but we got {} and {}.",
graph.get_edges_number(),
negatives.get_edges_number()
);
validate_vocabularies(&negatives);
if !graph.has_edge_types() {
assert!(!graph.overlaps(&negatives)?);
assert!(!negatives.overlaps(&graph)?);
}
// Testing holdouts executed on negative edges.
let (neg_train, neg_test) =
negatives.random_holdout(0.8, None, None, None, None, verbose)?;
neg_test.get_trap_nodes_number();
default_holdout_test_suite(&negatives, &neg_train, &neg_test)?;
}
Ok(())
}
pub fn test_subgraph_generation(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
let expected_nodes = graph.get_connected_nodes_number() / 10;
let subgraph = graph.get_random_subgraph(expected_nodes, None, verbose)?;
assert!(subgraph.overlaps(&graph)?);
assert!(subgraph.get_connected_nodes_number() <= expected_nodes + 1);
Ok(())
}
pub fn test_dump_graph(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
let node_file = random_path(None);
let nodes_writer = NodeFileWriter::new(node_file.clone())
.set_verbose(verbose)
.set_separator(Some('\t'))?
.set_header(Some(true))
.set_node_types_column_number(Some(4))
.set_nodes_column_number(Some(6))
.set_node_types_column(Some("node_types"))
.set_nodes_column(Some("node_column".to_string()));
nodes_writer.dump_graph(&graph)?;
fs::remove_file(node_file).unwrap();
let edges_file = random_path(None);
let edges_writer = EdgeFileWriter::new(edges_file.clone())
.set_verbose(verbose)
.set_separator(Some('\t'))?
.set_header(Some(true))
.set_edge_types_column(Some("edge_types".to_owned()))
.set_destinations_column_number(Some(3))
.set_weights_column(Some("weight".to_string()))
.set_weights_column_number(Some(2))
.set_sources_column(Some("The land of sushi".to_string()))
.set_sources_column_number(Some(0))
.set_destinations_column(Some("The land of pizza".to_string()))
.set_destinations_column_number(Some(1));
edges_writer.dump_graph(&graph)?;
fs::remove_file(edges_file).unwrap();
Ok(())
}
pub fn test_embiggen_preprocessing(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
let walker = first_order_walker(&graph)?;
if !graph.directed {
let window_size = 3;
let batch_size = 256;
let data = graph
.node2vec(&walker, batch_size, window_size)?
.collect::<Vec<_>>();
assert_eq!(
data.len(),
batch_size as usize
* walker.iterations as usize
* (walker.single_walk_parameters.walk_length as usize - window_size * 2)
);
for (context, _) in data.iter() {
assert_eq!(context.len(), window_size * 2);
}
}
if graph.has_edges() {
graph
.link_prediction_degrees(
0,
256,
Some(true),
Some(0.3),
Some(false),
Some(10),
false,
None,
None,
None,
)
.unwrap()
.collect::<Vec<_>>();
graph
.par_iter_attributed_edge_prediction_mini_batch(
0, 256, false, false, false, None, None, None, None, None, None,
)
.unwrap()
.collect::<Vec<_>>();
}
Ok(())
}
pub fn test_edgelist_generation(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
let _clique = graph.get_clique_edge_names(
None,
None,
Some(false),
None,
// limit to compute the clique for at most the first 3 nodes
// because it's really expensive computationally.
Some(
graph
.get_node_names()
.iter()
.take(3)
.cloned()
.collect::<HashSet<String>>(),
),
);
warn!("Running edge lists generator tests.");
if graph.get_nodes_number() > 1 {
let _bipartite = graph.get_bipartite_edge_names(
None,
Some(
[unsafe { graph.get_unchecked_node_name_from_node_id(0) }]
.iter()
.cloned()
.collect::<HashSet<String>>(),
),
Some(
[unsafe { graph.get_unchecked_node_name_from_node_id(1) }]
.iter()
.cloned()
.collect::<HashSet<String>>(),
),
None,
None,
)?;
let _star = graph.get_star_edges(
unsafe { graph.get_unchecked_node_name_from_node_id(0) },
Some(false),
Some(
[unsafe { graph.get_unchecked_node_name_from_node_id(1) }]
.iter()
.cloned()
.collect::<HashSet<String>>(),
),
None,
)?;
let _star = graph.get_star_edge_names(
unsafe { graph.get_unchecked_node_name_from_node_id(0) },
Some(false),
Some(
[unsafe { graph.get_unchecked_node_name_from_node_id(1) }]
.iter()
.cloned()
.collect::<HashSet<String>>(),
),
None,
)?;
}
Ok(())
}
pub fn test_nodelabel_holdouts(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
for use_stratification in [true, false] {
if graph.get_known_node_types_number()? < 2
|| (use_stratification
&& (graph.has_multilabel_node_types()? || graph.has_singleton_node_types()?))
{
assert!(graph
.get_node_label_holdout_graphs(0.8, Some(use_stratification), Some(42))
.is_err());
continue;
}
let (train, test) =
graph.get_node_label_holdout_graphs(0.8, Some(use_stratification), Some(42))?;
assert!(train.has_unknown_node_types()?);
assert!(test.has_unknown_node_types()?);
let remerged = &mut (&train | &test)?;
assert_eq!(remerged.node_types, graph.node_types);
assert!(
remerged.contains(graph)?,
"The re-merged holdouts does not contain the original graph."
);
assert!(
graph.contains(remerged)?,
"The re-merged holdouts does not contain the original graph."
);
assert!(
train
.node_types
.as_ref()
.as_ref()
.map_or(false, |train_nts| {
test.node_types.as_ref().as_ref().map_or(false, |test_nts| {
train_nts.ids.iter().zip(test_nts.ids.iter()).all(
|(train_node_type, test_node_type)| {
!(train_node_type.is_some() && test_node_type.is_some())
},
)
})
}),
"The train and test node-label graphs are overlapping!"
);
}
Ok(())
}
pub fn test_edgelabel_holdouts(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
for use_stratification in [true, false].iter() {
if *use_stratification && graph.has_singleton_edge_types()?
|| graph.get_number_of_directed_edges() - graph.get_unknown_edge_types_number()? < 2
|| !graph.has_edge_types()
{
assert!(graph
.get_edge_label_holdout_graphs(0.8, Some(*use_stratification), None)
.is_err());
continue;
}
let (train, test) =
graph.get_edge_label_holdout_graphs(0.8, Some(*use_stratification), None)?;
assert!(train.has_unknown_edge_types()?);
assert!(test.has_unknown_edge_types()?);
assert!(
train
.edge_types
.as_ref()
.as_ref()
.map_or(false, |train_nts| {
test.edge_types.as_ref().as_ref().map_or(false, |test_nts| {
train_nts.ids.iter().zip(test_nts.ids.iter()).all(
|(train_edge_type, test_edge_type)| {
!(train_edge_type.is_some() && test_edge_type.is_some())
},
)
})
}),
"The train and test edge-label graphs are overlapping!"
);
}
Ok(())
}
pub fn test_graph_filter(graph: &Graph, _verbose: Option<bool>) -> Result<()> {
let unfiltered = graph
.filter_from_ids(
None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None,
)
.unwrap();
assert_eq!(&unfiltered, graph);
assert!(graph
.filter_from_names(
None,
Some(vec![NONEXISTENT]),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
.is_err());
for node_name in graph.iter_node_names().take(10) {
        // The following test should remove ONLY the node with the given name
let graph_without_given_name_result = graph.filter_from_names(
None,
Some(vec![node_name.as_str()]),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
);
assert!(
graph_without_given_name_result.is_ok(),
concat!(
"Expected the filter operation to execute successfully, but raised error {:?}.\n",
"The graph report is: {:?}."
),
graph_without_given_name_result,
graph.textual_report()
);
let graph_without_given_id = graph_without_given_name_result.unwrap();
assert_eq!(
graph_without_given_id.has_nodes(),
graph.get_nodes_number() > 1
);
assert!(!graph_without_given_id.has_node_name(node_name.as_str()));
// The following test should keep ONLY the given node name
let graph_with_given_name_result = graph.filter_from_names(
Some(vec![node_name.as_str()]),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
);
assert!(
graph_with_given_name_result.is_ok(),
concat!(
"Graph built with filter from names was expected to be okay, ",
"but produced the following error message: {:?}."
),
graph_with_given_name_result
);
let graph_with_given_node_name = graph_with_given_name_result.unwrap();
assert_eq!(
graph_with_given_node_name.has_selfloops(),
graph.has_edge_from_node_names(node_name.as_ref(), node_name.as_ref())
);
assert_eq!(
graph_with_given_node_name.has_edges(),
graph_with_given_node_name.has_selfloops()
);
assert_eq!(graph_with_given_node_name.get_nodes_number(), 1);
assert!(graph_with_given_node_name.has_node_name(node_name.as_str()));
}
for node_type_name in graph.iter_unique_node_type_names()?.take(10) {
        // The following test should remove ONLY the given node type name
let graph_without_given_node_type_name_result = graph.filter_from_names(
None,
None,
None,
None,
None,
None,
None,
None,
None,
Some(vec![Some(node_type_name.clone())]),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
);
assert!(graph_without_given_node_type_name_result.is_ok());
let graph_without_given_node_type_name = graph_without_given_node_type_name_result.unwrap();
if graph.get_node_types_number()? > 1 && !graph.has_multilabel_node_types()? {
assert!(graph_without_given_node_type_name.has_node_types());
assert!(graph_without_given_node_type_name.has_nodes());
}
assert!(!graph_without_given_node_type_name.has_node_type_name(node_type_name.as_str()));
}
Ok(())
}
pub fn test_graph_removes(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
let without_edge_types = graph.remove_edge_types()?;
validate_vocabularies(&without_edge_types);
assert!(!without_edge_types.has_edge_types());
assert_eq!(
without_edge_types.has_edge_weights(),
graph.has_edge_weights()
);
assert_eq!(without_edge_types.node_types, graph.node_types);
if !graph.is_multigraph() {
assert_eq!(
without_edge_types.get_unique_edges_number(),
graph.get_unique_edges_number(),
concat!(
"Number of unique edges does not match in graph without edge types.\n",
"The report of the original graph is \n{:?}\n",
"The report of the graph without edge types is \n{:?}",
),
graph.textual_report(),
without_edge_types.textual_report()
);
assert_eq!(
without_edge_types.get_unique_selfloops_number(),
graph.get_unique_selfloops_number(),
"Number of unique self loops does not match in graph without edge types."
);
}
assert_eq!(without_edge_types.nodes, graph.nodes);
let without_node_types = graph.remove_node_types()?;
validate_vocabularies(&without_node_types);
assert!(!without_node_types.has_node_types());
assert_eq!(
graph.is_multigraph(),
without_node_types.is_multigraph(),
"If the original graph is a multigraph, the removal of node types should not change that."
);
assert_eq!(
without_node_types.weights,
graph.weights,
concat!(
"We expected the weights not to change when removig node types.",
"\nThe report of the original graph is {:?}.",
"\nThe report of the filtered graph is {:?}."
),
graph.textual_report(),
without_node_types.textual_report()
);
assert_eq!(without_node_types.has_selfloops(), graph.has_selfloops());
assert_eq!(without_node_types.nodes, graph.nodes);
let without_weights = graph.remove_edge_weights()?;
validate_vocabularies(&without_weights);
assert!(!without_weights.has_edge_weights());
assert_eq!(without_weights.node_types, graph.node_types);
assert_eq!(without_weights.has_selfloops(), graph.has_selfloops());
assert_eq!(without_weights.nodes, graph.nodes);
Ok(())
}
pub fn test_clone_and_setters(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
let mut clone = graph.clone();
clone = clone.set_all_edge_types("TEST_SET_ALL_EDGE_TYPES")?;
assert!(!clone.is_multigraph());
clone = clone.set_all_node_types("TEST_SET_ALL_NODE_TYPES")?;
assert_eq!(
clone.get_edge_types_number().unwrap(),
1,
"Number of edge types of the graph is not 1."
);
if !graph.is_multigraph() {
assert_eq!(
unsafe{clone.get_unchecked_edge_count_from_edge_type_id(Some(0))},
graph.get_number_of_directed_edges(),
"Number of edges with the unique edge type does not match number of edges in the graph."
);
}
assert_eq!(
clone.get_node_types_number().unwrap(),
1,
"Number of node types of the graph is not 1."
);
unsafe {
assert_eq!(
clone.get_unchecked_node_count_from_node_type_id(Some(0)),
graph.get_nodes_number(),
"Number of nodes with the unique node type does not match number of nodes in the graph."
);
}
Ok(())
}
pub fn test_graph_remapping(graph: &mut Graph, _verbose: Option<bool>) -> Result<()> {
assert!(
graph.are_nodes_remappable(&graph),
"Graph always should be remappable to itself."
);
assert!(
graph.remap_from_graph(&graph).is_ok(),
"Graph always should be remappable to itself."
);
Ok(())
}
pub fn test_graph_diameter(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
    // TODO! update this when we support the graph diameter on directed graphs
let (n_of_components, _, _) = graph.get_connected_components_number(verbose);
assert_eq!(
graph.get_diameter_naive(Some(false), verbose),
graph.get_diameter(Some(false), verbose),
);
match n_of_components {
0 => {
// on an empty graph this should always fail
assert!(graph.get_diameter(Some(false), verbose).is_err());
assert!(graph.get_diameter(Some(true), verbose).is_err());
}
1 => {
            // by definition the diameter of a graph with a single component
            // cannot be infinite unless the graph is a single node without edges.
if graph.get_nodes_number() == 1 && !graph.has_edges() {
assert!(graph
.get_diameter(Some(false), verbose)
.unwrap()
.is_infinite());
assert!(graph
.get_diameter(Some(true), verbose)
.unwrap()
.is_infinite());
} else {
assert!(graph
.get_diameter(Some(false), verbose)
.unwrap()
.is_finite());
assert!(graph.get_diameter(Some(true), verbose).unwrap().is_finite());
}
}
_ => {
assert!(graph
.get_diameter(Some(false), verbose)
.unwrap()
.is_infinite());
}
}
Ok(())
}
/// Executes near-complete test of all functions for the given graph.
fn _default_test_suite(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
warn!("Starting default test suite.");
let _ = test_graph_properties(graph, verbose);
warn!("Testing SkipGram / CBOW / GloVe preprocessing.");
let _ = test_embiggen_preprocessing(graph, verbose);
warn!("Testing subgraph generation.");
let _ = test_subgraph_generation(graph, verbose);
warn!("Testing clone and setters.");
let _ = test_clone_and_setters(graph, verbose);
warn!("Testing edge-label holdouts tests.");
let _ = test_edgelabel_holdouts(graph, verbose);
warn!("Testing writing out graph to file.");
let _ = test_dump_graph(graph, verbose);
warn!("Testing generic filtering mechanism.");
let _ = test_graph_filter(graph, verbose);
warn!("Testing the spanning arborescences.");
let _ = test_spanning_arborescence_bader(graph, verbose);
warn!("Testing the graph diameter.");
let _ = test_graph_diameter(graph, verbose);
warn!("Running node-label holdouts tests.");
let _ = test_nodelabel_holdouts(graph, verbose);
warn!("Running remove components tests.");
let _ = test_remove_components(graph, verbose);
warn!("Testing removes.");
let _ = test_graph_removes(graph, verbose);
warn!("Testing negative edges generation.");
let _ = test_negative_edges_generation(graph, verbose);
warn!("Executing edge holdouts tests.");
let _ = test_edge_holdouts(graph, verbose);
warn!("Testing k-fold holdouts.");
let _ = test_kfold(graph, verbose);
warn!("Testing edge lists generation.");
let _ = test_edgelist_generation(graph, verbose);
warn!("Testing graph remapping.");
let _ = test_graph_remapping(graph, verbose);
warn!("Testing random walks.");
let _ = test_random_walks(graph, verbose);
warn!("Testing BFS.");
let _ = test_bfs(graph, verbose);
warn!("Testing dijkstra.");
let _ = test_dijkstra(graph, verbose);
warn!("Testing approximated vertex cover");
let _ = test_vertex_cover(graph, verbose);
warn!("Testing node centralities.");
let _ = test_node_centralities(graph, verbose);
warn!("Testing polygons.");
let _ = test_polygons(graph, verbose);
warn!("Testing transitivity.");
let _ = test_transitivity(graph, verbose);
warn!("Testing all paths.");
let _ = test_all_paths(graph, verbose);
warn!("Testing generation of selfloops.");
let _ = test_selfloops(graph, verbose);
warn!("Testing sorting of the graph.");
let _ = test_sorting(graph, verbose);
Ok(())
}
macro_rules! test_mut_graph {
($graph:expr, $func:ident, $verbose:expr) => {{
println!("Testing the graph transformation: {}", stringify!($func));
let mut transformed_graph = $graph.$func();
let _ = _default_test_suite(&mut transformed_graph, $verbose);
}};
($graph:expr, $func:ident, $verbose:expr, result) => {{
println!("Testing the graph transformation: {}", stringify!($func));
let mut transformed_graph = $graph.$func()?;
let _ = _default_test_suite(&mut transformed_graph, $verbose);
}};
}
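// Example expansions (illustrative): `test_mut_graph!(graph, to_transposed, verbose)`
// calls the infallible `graph.to_transposed()`, while the `result` arm appends `?`
// to unwrap fallible transformations such as
// `get_symmetric_normalized_transformed_graph`.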
/// Executes near-complete test of all functions for the given graph.
pub fn default_test_suite(graph: &mut Graph, verbose: Option<bool>) -> Result<()> {
warn!("Starting default test suite.");
let _ = _default_test_suite(graph, verbose);
warn!("Starting default test suite with speedups enabled.");
graph.enable(Some(true), Some(true), Some(true), Some(true))?;
let _ = _default_test_suite(graph, verbose);
warn!("Starting default test suite on transformed graphs.");
test_mut_graph!(graph, get_laplacian_transformed_graph, verbose);
test_mut_graph!(
graph,
get_symmetric_normalized_transformed_graph,
verbose,
result
);
test_mut_graph!(
graph,
get_symmetric_normalized_laplacian_transformed_graph,
verbose,
result
);
test_mut_graph!(
graph,
get_left_normalized_laplacian_transformed_graph,
verbose
);
test_mut_graph!(graph, to_upper_triangular, verbose);
test_mut_graph!(graph, to_lower_triangular, verbose);
test_mut_graph!(graph, to_main_diagonal, verbose);
test_mut_graph!(graph, to_anti_diagonal, verbose);
test_mut_graph!(graph, to_bidiagonal, verbose);
test_mut_graph!(graph, to_arrowhead, verbose);
test_mut_graph!(graph, to_transposed, verbose);
// We skip very heavy operations on graphs with more than 20
// nodes because it would take way too much time.
if graph.get_nodes_number() > 20 {
return Ok(());
}
test_mut_graph!(graph, to_complementary, verbose);
Ok(())
}
|
{
Path::new(path.unwrap_or(DEFAULT_PATH))
.join(random_string(64))
.to_str()
.unwrap()
.to_string()
}
|
leonardo_s_monorail.py
|
"""
Jordi explained that a recursive search may not work, as you might
first follow an extremely long path.
Thus, the process should be done level by level.
"""
import os
from collections import defaultdict
class Computer:
def __init__(self):
self.operations = {'cpy': self.copy, 'inc': self.add, 'dec': self.subs, 'jnz': self.jump}
self.registers = defaultdict(int)
self.instruction = 0
def run(self, program):
while self.instruction < len(program):
values = program[self.instruction].split(' ')
self.operations[values[0]](*(values[1:]))
self.instruction += 1
def get_val(self, v):
try:
return int(v)
except ValueError:
return self.registers[v]
def copy(self, value, register):
self.registers[register] = self.get_val(value)
def add(self, register):
|
def subs(self, register):
self.registers[register] -= 1
def jump(self, register, amount):
if self.get_val(register) != 0:
self.instruction += (int(amount) - 1)
def get(self, register=None):
if register is None:
return self.registers
else:
return self.registers[register]
if __name__ == '__main__':
dir = os.path.dirname(__file__)
file = os.path.join(dir, 'input.txt')
program = []
with open(file) as fd:
for line in fd:
program.append(line.strip())
computer = Computer()
computer.run(program)
print('Part 1:', computer.get('a'))
program = ['cpy 1 c'] + program
computer = Computer()
computer.run(program)
print('Part 2:', computer.get('a'))
|
self.registers[register] += 1
|
class.fixture.ts
|
function classDeco(): ClassDecorator {
|
return () => {};
}
function propDeco(): PropertyDecorator {
return () => {};
}
@classDeco()
class Foo {
@propDeco()
prop1: number;
static prop2: string;
private readonly prop3: string;
public prop4: string;
protected prop5: string;
@methodDeco()
public method1() {}
private async method2() {}
protected async method3() {}
static method4() {}
method5() {}
}
class Bar {}
|
return () => {};
}
function methodDeco(): MethodDecorator {
|
doc.go
|
/*
Package jwtauth manages request authentication with jwts
This module provides a simple API to manage authentication and authorization
with JWTs (JSON Web Tokens). Use this module when you have any one of the
following needs
1. You need to authenticate JWTs from various token issuers
2. You need to authorize that JWTs have sufficient claims to execute a request
3. You need fine-grained control over which endpoints get authentication and
authorization, and over what gets checked
Authentication
|
from a trusted source. This occurs via the Authenticator interface...
type Authenticator interface {
	Authenticate(token string) (*Claims, error)
}
TODO(cantosd): document configuration when done
Authorization
Authorization in the JWT world refers to verifying the claims have sufficient
permissions to execute the request. This always happens after authentication,
and it is generally assumed that jwts coming from trusted sources have had the
claims verified by the source (an issuer will not generate a jwt with admin
permissions for a customer).
Authorization is handled by the Authorizor interface...
type Authorizor interface {
Authorize(claims *Claims) error
}
It is up to the application to implement this interface. To do this, first
define what valid permissions are for any given request, then implement this
interface and make sure the request passes through it.
TODO(cantosd): Add middleware when done
*/
package jwtauth
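import "fmt"

// The sketch below is illustrative and not part of the package API: one way an
// application could satisfy the Authorizor interface documented above. The
// permission check is delegated to an application-supplied predicate, so no
// assumptions are made about the fields of Claims.
type exampleAuthorizor struct {
	allow func(*Claims) bool // application-defined permission check
}

func (a exampleAuthorizor) Authorize(claims *Claims) error {
	if a.allow != nil && a.allow(claims) {
		return nil
	}
	return fmt.Errorf("jwtauth: claims lack sufficient permissions")
}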
|
Authentication in the JWT world refers to the action of verifying a jwt comes
|
mod.rs
|
//! # IOService related functionality
//! An IOService provides access for external entities.
//! For example, the WebThing-IOService provides the WebThing API for OHX and allows clients to interact
//! with OHX via that API. The cloud-connector IOService mediates between Google Home, Alexa and OHX.
pub mod ioservice_template_registry;
pub mod ioservice_registry;
pub mod ioservice_store;
use libohxcore::acl::{Access, self};
use std::collections::BTreeMap;
use libohxcore::command::Command;
use libohxaddon::users::UserID;
pub type IOServiceInstanceID = String;
pub type FilterPropPipe = Vec<FilterProp>;
pub struct FilterProp {}
pub type FilterCommandPipe = Vec<FilterCommand>;
pub struct FilterCommand {}
pub struct
|
{
/// Filters for outgoing values
filter_property_pipe: FilterPropPipe,
/// Filters for incoming values
command_filter_pipe: FilterCommandPipe,
/// Who is able to edit this interconnection entry?
access: Access,
}
pub struct PropertyValue(serde_json::Value);
pub struct IOServiceInterconnect {
connections: BTreeMap<IOServiceInstanceID, Entry>,
command_receiver: tokio::sync::mpsc::Receiver<Command>,
command_sender: tokio::sync::mpsc::Sender<Command>,
}
pub struct IOServiceCommandPublisher {
command_sender: tokio::sync::mpsc::Sender<Command>,
}
/// Future work:
/// * on file change -> reload
/// * command_receive -> AddonRegistry.exec_command
/// * store on update/remove without reload
impl IOServiceInterconnect {
pub fn new() -> Self {
let (command_sender, command_receiver) = tokio::sync::mpsc::channel::<Command>(1);
IOServiceInterconnect { connections: Default::default(), command_receiver, command_sender }
}
pub async fn property_changed(&mut self, addon_id: &str, thing_uid: &str, prop_name: &str, context_properties: BTreeMap<String,PropertyValue>) {}
pub fn command_publisher(&self) -> IOServiceCommandPublisher {
IOServiceCommandPublisher { command_sender: self.command_sender.clone() }
}
pub fn update(&mut self, user: UserID, instance_id: &str, filter_property_pipe: FilterPropPipe, command_filter_pipe: FilterCommandPipe) -> Result<(), acl::Error> {
Ok(())
}
pub fn remove(&mut self, user: UserID, instance_id: &str) -> Result<(), acl::Error> {
Ok(())
}
}
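/// Illustrative wiring sketch (not part of the module API): create the
/// interconnect and hand out a command publisher that an IOService can use to
/// submit commands into the shared channel.
pub fn example_wiring() -> (IOServiceInterconnect, IOServiceCommandPublisher) {
    let interconnect = IOServiceInterconnect::new();
    let publisher = interconnect.command_publisher();
    (interconnect, publisher)
}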
pub struct Interconnect {
}
|
Entry
|
common.go
|
/*
* Copyright 2020-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package openflow Common Logger initialization
package openflow
import (
"github.com/opencord/voltha-lib-go/v3/pkg/log"
)
var logger log.Logger
func init() {
	// Set up this package so that its log level can be modified at run time
|
if err != nil {
panic(err)
}
}
|
var err error
logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "openflow"})
|
worker.go
|
package worker
import (
"context"
"errors"
"fmt"
logger "log"
"os/exec"
"sync"
"syscall"
"github.com/google/uuid"
"github.com/renatoaguimaraes/job-scheduler/pkg/worker/conf"
"github.com/renatoaguimaraes/job-scheduler/pkg/worker/log"
)
// Command is a job request with the program name and arguments.
type Command struct {
// Name program path/name
Name string
// Args program arguments
Args []string
}
// Job represents an arbitrary Linux process scheduled by the Worker.
type Job struct {
// ID job identifier
ID string
// Command pipeline
Cmd *exec.Cmd
// Status of the process.
Status *Status
}
// IsRunning checks whether the process is still running: a job that has not
// finished yet still has the zero-value status (ExitCode 0, Exited false).
func (j *Job) IsRunning() bool {
return j.Status.ExitCode == 0 && !j.Status.Exited
}
// Status of the process.
type Status struct {
// Process identifier
Pid int
// ExitCode of the exited process, or -1 if the process hasn't
// exited or was terminated by a signal
ExitCode int
// Exited reports whether the program has exited
Exited bool
}
// Worker defines the basic operations to manage Jobs.
type Worker interface {
// Start creates a Linux process.
// - command: command to be executed
// It returns the job ID and the execution error encountered.
Start(command Command) (jobID string, err error)
// Stop a running Job which kills a running process.
// - ID: Job identifier
// It returns the execution error encountered.
Stop(jobID string) (err error)
// Query a Job to check the current status.
// - ID: Job identifier
// It returns process status and the execution error
// encountered.
Query(jobID string) (status Status, err error)
// Streams the process output.
// - ctx: context to cancel the log stream
// - ID: Job identifier
// It returns read chan to stream process stdout/stderr and the
// execution error encountered.
Stream(ctx context.Context, jobID string) (logchan chan string, err error)
}
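// runExample is an illustrative sketch (not part of the package API) showing
// the intended call sequence against the Worker interface: start a job,
// stream its output until the log channel closes, then query its final status.
func runExample(ctx context.Context, w Worker) error {
	jobID, err := w.Start(Command{Name: "echo", Args: []string{"hello"}})
	if err != nil {
		return err
	}
	logs, err := w.Stream(ctx, jobID)
	if err != nil {
		return err
	}
	for line := range logs {
		logger.Println(line) // stdlib log package, aliased as logger in the imports
	}
	status, err := w.Query(jobID)
	if err != nil {
		return err
	}
	logger.Printf("job %s exited=%v exit code=%d", jobID, status.Exited, status.ExitCode)
	return nil
}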
// NewWorker creates a new Worker instance.
func NewWorker(config conf.Config) Worker
|
// worker implementation.
type worker struct {
// logger is responsible to handle the
// stdout and stderr of a running process
logger log.Logger
// jobs is concurrency safe map to store
// the requested jobs
jobs map[string]*Job
	// mtx to control concurrent access to jobs
mtx sync.RWMutex
}
// Start runs a single Linux command with arguments.
// If the command starts successfully, a Job identifier will be returned.
// A log file named after the Job ID will be created to capture the stdout and stderr
// of a running process.
// To get the process status, the Job request will be stored in memory,
// and a goroutine will be launched to update the job status when the process is finished.
func (w *worker) Start(command Command) (string, error) {
cmd := exec.Command(command.Name, command.Args...)
jobID := uuid.NewString()
logfile, err := w.logger.Create(jobID)
if err != nil {
return jobID, err
}
// redirect the stdout and stderr to the log file
cmd.Stdout = logfile
cmd.Stderr = logfile
if err = cmd.Start(); err != nil {
w.logger.Remove(jobID)
return jobID, err
}
// create and store the job
job := Job{ID: jobID, Cmd: cmd, Status: &Status{Pid: cmd.Process.Pid}}
w.mtx.Lock()
w.jobs[jobID] = &job
w.mtx.Unlock()
// update the job status in background
go func() {
if err := job.Cmd.Wait(); err != nil {
logger.Printf("Command execution fails, %v", err)
}
// update the job status with information about
// the exited process
status := Status{
Pid: job.Cmd.ProcessState.Pid(),
ExitCode: job.Cmd.ProcessState.ExitCode(),
Exited: job.Cmd.ProcessState.Exited(),
}
w.mtx.Lock()
job.Status = &status
w.mtx.Unlock()
}()
return jobID, nil
}
// Stop terminates a running Job gracefully by sending a SIGTERM to the process.
// If the job doesn't exist, an error will be returned.
func (w *worker) Stop(jobID string) error {
w.mtx.RLock()
defer w.mtx.RUnlock()
job, err := w.getJob(jobID)
if err != nil {
return err
}
if job.IsRunning() {
return job.Cmd.Process.Signal(syscall.SIGTERM)
}
return errors.New("the process is already finished")
}
// Query returns the process status of a specific Job.
// If the job doesn't exist, an error will be returned.
func (w *worker) Query(jobID string) (Status, error) {
w.mtx.RLock()
defer w.mtx.RUnlock()
job, err := w.getJob(jobID)
if err != nil {
return Status{}, err
}
return *job.Status, nil
}
// Stream reads from the log file, like 'tail -f' through
// a channel. If the context is canceled the channel will
// be closed and the tailing will be stopped.
func (w *worker) Stream(ctx context.Context, jobID string) (chan string, error) {
w.mtx.RLock()
job, err := w.getJob(jobID)
w.mtx.RUnlock()
if err != nil {
return nil, err
}
return w.logger.Tailf(ctx, job.ID)
}
// getJob helper to get a job given an id.
func (w *worker) getJob(jobID string) (*Job, error) {
job, ok := w.jobs[jobID]
if !ok {
return nil, fmt.Errorf("Job %v not found", jobID)
}
return job, nil
}
|
{
return &worker{
logger: log.NewLogger(config),
jobs: make(map[string]*Job),
}
}
|
client.go
|
package webservices
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"text/template"
)
const (
libraryVersion = "0.0.1"
userAgent = "go-exactglobe-webservices/" + libraryVersion
mediaType = "application/json"
charset = "utf-8"
)
var (
BaseURL = url.URL{
Scheme: "https",
Host: "dijk177740vm001:8020",
Path: "Services/Exact.Entity.REST.EG/",
}
)
// NewClient returns a new Exact Globe Client
func
|
(httpClient *http.Client, baseURL url.URL, databaseName string, databaseServerName string) *Client {
if httpClient == nil {
httpClient = http.DefaultClient
}
client := &Client{}
client.SetHTTPClient(httpClient)
client.SetDatabaseName(databaseName)
client.SetDatabaseServerName(databaseServerName)
client.SetBaseURL(baseURL)
client.SetDebug(false)
client.SetUserAgent(userAgent)
client.SetMediaType(mediaType)
client.SetCharset(charset)
return client
}
// Client manages communication with Exact Globe Client
type Client struct {
// HTTP client used to communicate with the Client.
http *http.Client
debug bool
baseURL url.URL
// credentials
databaseName string
databaseServerName string
// User agent for client
userAgent string
mediaType string
charset string
disallowUnknownFields bool
	// Optional callback invoked before every request made to the Client
beforeRequestDo BeforeRequestDoCallback
onRequestCompleted RequestCompletionCallback
}
type BeforeRequestDoCallback func(*http.Client, *http.Request, interface{})
// RequestCompletionCallback defines the type of the request callback function
type RequestCompletionCallback func(*http.Request, *http.Response)
func (c *Client) SetHTTPClient(client *http.Client) {
c.http = client
}
func (c *Client) Debug() bool {
return c.debug
}
func (c *Client) SetDebug(debug bool) {
c.debug = debug
}
func (c *Client) DatabaseName() string {
return c.databaseName
}
func (c *Client) SetDatabaseName(databaseName string) {
c.databaseName = databaseName
}
func (c *Client) DatabaseServerName() string {
return c.databaseServerName
}
func (c *Client) SetDatabaseServerName(databaseServerName string) {
c.databaseServerName = databaseServerName
}
func (c *Client) BaseURL() url.URL {
return c.baseURL
}
func (c *Client) SetBaseURL(baseURL url.URL) {
c.baseURL = baseURL
}
func (c *Client) SetMediaType(mediaType string) {
c.mediaType = mediaType
}
func (c *Client) MediaType() string {
return mediaType
}
func (c *Client) SetCharset(charset string) {
c.charset = charset
}
func (c *Client) Charset() string {
return charset
}
func (c *Client) SetUserAgent(userAgent string) {
c.userAgent = userAgent
}
func (c *Client) UserAgent() string {
return userAgent
}
func (c *Client) SetDisallowUnknownFields(disallowUnknownFields bool) {
c.disallowUnknownFields = disallowUnknownFields
}
func (c *Client) SetBeforeRequestDo(fun BeforeRequestDoCallback) {
c.beforeRequestDo = fun
}
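// Note on the template mechanics below (illustrative values): the endpoint path
// is parsed as a text/template, so a path like "Entity({{.ID}})" combined with
// a PathParams whose Params() returns {"ID": "42"} resolves to "Entity(42)".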
func (c *Client) GetEndpointURL(path string, pathParams PathParams) url.URL {
clientURL := c.BaseURL()
clientURL.Path = clientURL.Path + path
tmpl, err := template.New("endpoint_url").Parse(clientURL.Path)
if err != nil {
log.Fatal(err)
}
buf := new(bytes.Buffer)
params := pathParams.Params()
err = tmpl.Execute(buf, params)
if err != nil {
log.Fatal(err)
}
clientURL.Path = buf.String()
clientURL.RawPath = buf.String()
return clientURL
}
func (c *Client) NewRequest(ctx context.Context, method string, URL url.URL, body interface{}) (*http.Request, error) {
// convert body struct to json
buf := new(bytes.Buffer)
if body != nil {
err := json.NewEncoder(buf).Encode(body)
if err != nil {
return nil, err
}
}
// create new http request
req, err := http.NewRequest(method, URL.String(), buf)
// req.Host = URL.Hostname()
if err != nil {
return nil, err
}
// optionally pass along context
if ctx != nil {
req = req.WithContext(ctx)
}
// set other headers
req.Header.Add("DatabaseName", c.databaseName)
req.Header.Add("ServerName", c.databaseServerName)
req.Header.Add("Content-Type", fmt.Sprintf("%s; charset=%s", c.MediaType(), c.Charset()))
req.Header.Add("Accept", c.MediaType())
req.Header.Add("User-Agent", c.UserAgent())
return req, nil
}
// Do sends a Client request and returns the Client response. The Client response is json decoded and stored in the value
// pointed to by v, or returned as an error if a Client error has occurred. If v implements the io.Writer interface,
// the raw response will be written to v, without attempting to decode it.
func (c *Client) Do(req *http.Request, responseBody interface{}) (*http.Response, error) {
if c.beforeRequestDo != nil {
c.beforeRequestDo(c.http, req, responseBody)
}
	if c.debug {
dump, _ := httputil.DumpRequestOut(req, true)
log.Println(string(dump))
}
httpResp, err := c.http.Do(req)
if err != nil {
return nil, err
}
if c.onRequestCompleted != nil {
c.onRequestCompleted(req, httpResp)
}
// close body io.Reader
defer func() {
if rerr := httpResp.Body.Close(); err == nil {
err = rerr
}
}()
	if c.debug {
dump, _ := httputil.DumpResponse(httpResp, true)
log.Println(string(dump))
}
// check if the response isn't an error
err = CheckResponse(httpResp)
if err != nil {
return httpResp, err
}
// check the provided interface parameter
if httpResp == nil {
return httpResp, nil
}
if responseBody == nil {
return httpResp, err
}
// interface implements io.Writer: write Body to it
// if w, ok := response.Envelope.(io.Writer); ok {
// _, err := io.Copy(w, httpResp.Body)
// return httpResp, err
// }
// type Wrapper struct {
// D struct {
// Next string `json:"__next"`
// MetaData edm.MetaData `json:"__metadata"`
// } `json:"d"`
// }
// try to decode body into interface parameter
// w := &Wrapper{}
dec := json.NewDecoder(httpResp.Body)
if c.disallowUnknownFields {
dec.DisallowUnknownFields()
}
err = dec.Decode(responseBody)
if err != nil && err != io.EOF {
// create a simple error response
errorResponse := &ErrorResponse{Response: httpResp}
errorResponse.Errors = append(errorResponse.Errors, err)
return httpResp, errorResponse
}
// err = json.Unmarshal(w.D.Results, responseBody)
// if err != nil && err != io.EOF {
// // @TODO: fix this
// log.Fatal(err)
// }
return httpResp, nil
}
// CheckResponse checks the Client response for errors, and returns them if
// present. A response is considered an error if it has a status code outside
// the 200 range. Client error responses are expected to have either no response
// body, or a json response body that maps to ErrorResponse. Any other response
// body will be silently ignored.
func CheckResponse(r *http.Response) error {
errorResponse := &ErrorResponse{Response: r}
	// Don't check Content-Length: a created response, for example, has no body
// if r.Header.Get("Content-Length") == "0" {
// errorResponse.Errors.Message = r.Status
// return errorResponse
// }
if c := r.StatusCode; c >= 200 && c <= 299 {
return nil
}
err := checkContentType(r)
if err != nil {
errorResponse.Errors = append(errorResponse.Errors, errors.New(r.Status))
return errorResponse
}
// read data and copy it back
data, err := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewReader(data))
if err != nil {
return errorResponse
}
if len(data) == 0 {
return errorResponse
}
// convert json to struct
err = json.Unmarshal(data, errorResponse)
if err != nil {
errorResponse.Errors = append(errorResponse.Errors, err)
return errorResponse
}
return errorResponse
}
type ErrorResponse struct {
// HTTP response that caused this error
Response *http.Response `json:"-"`
Errors []error
}
type Error struct {
Code string `json:"code"`
Message Message `json:"message"`
InnerError InnerError `json:"innererror"`
}
func (e Error) Empty() bool {
return e.Code == "" && e.Message.Value == "" && e.InnerError.Message == ""
}
func (e Error) Error() string {
if e.Code != "" {
if e.InnerError.InternalException.Message != "" {
return fmt.Sprintf("%s: %s", e.Code, e.InnerError.InternalException.Message)
}
if e.InnerError.Message != "" {
return fmt.Sprintf("%s: %s", e.Code, e.InnerError.Message)
}
return fmt.Sprintf("%s: %s", e.Code, e.Message.Value)
}
	if e.InnerError.InternalException.Message != "" {
		return e.InnerError.InternalException.Message
	}
	if e.InnerError.Message != "" {
		return e.InnerError.Message
	}
	return e.Message.Value
}
type Message struct {
Lang string `json:"lang"`
Value string `json:"value"`
}
type InnerError struct {
Message string `json:"message"`
Type string `json:"type"`
Stacktrace string `json:"stacktrace"`
InternalException InternalException `json:"internalexception"`
}
type InternalException struct {
Message string `json:"message"`
Type string `json:"type"`
Stacktrace string `json:"stacktrace"`
}
func (r *ErrorResponse) UnmarshalJSON(data []byte) error {
tmp := struct {
Error Error `json:"error"`
}{}
err := json.Unmarshal(data, &tmp)
if err != nil {
return err
}
if !tmp.Error.Empty() {
r.Errors = append(r.Errors, tmp.Error)
}
return nil
}
func (r ErrorResponse) Error() string {
if len(r.Errors) > 0 {
str := []string{}
for _, err := range r.Errors {
str = append(str, err.Error())
}
return strings.Join(str, ", ")
}
	return ""
}
func checkContentType(response *http.Response) error {
header := response.Header.Get("Content-Type")
contentType := strings.Split(header, ";")[0]
if contentType != mediaType {
return fmt.Errorf("Expected Content-Type \"%s\", got \"%s\"", mediaType, contentType)
}
return nil
}
type PathParams interface {
Params() map[string]string
}
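// The following sketch is illustrative and not part of the package: a minimal
// PathParams implementation for a templated endpoint path such as
// "Account({{.Account}})", usable with GetEndpointURL above.
type examplePathParams struct {
	Account string
}

func (p examplePathParams) Params() map[string]string {
	return map[string]string{"Account": p.Account}
}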
|
NewClient
|
api_integ_test.go
|
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// +build integration
package codepipeline_test
import (
"context"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/awserr"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
"github.com/aws/aws-sdk-go-v2/internal/awstesting/integration"
"github.com/aws/aws-sdk-go-v2/service/codepipeline"
)
var _ aws.Config
var _ awserr.Error
func TestInteg_00_ListPipelines(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
cfg := integration.ConfigWithDefaultRegion("us-west-2")
svc := codepipeline.New(cfg)
params := &codepipeline.ListPipelinesInput{}
req := svc.ListPipelinesRequest(params)
req.Handlers.Validate.Remove(defaults.ValidateParametersHandler)
_, err := req.Send(ctx)
if err != nil {
t.Errorf("expect no error, got %v", err)
}
}
func TestInteg_01_GetPipeline(t *testing.T)
|
{
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
cfg := integration.ConfigWithDefaultRegion("us-west-2")
svc := codepipeline.New(cfg)
params := &codepipeline.GetPipelineInput{
Name: aws.String("fake-pipeline"),
}
req := svc.GetPipelineRequest(params)
req.Handlers.Validate.Remove(defaults.ValidateParametersHandler)
_, err := req.Send(ctx)
if err == nil {
t.Fatalf("expect request to fail")
}
aerr, ok := err.(awserr.RequestFailure)
if !ok {
t.Fatalf("expect awserr, was %T", err)
}
if len(aerr.Code()) == 0 {
t.Errorf("expect non-empty error code")
}
if v := aerr.Code(); v == aws.ErrCodeSerialization {
t.Errorf("expect API error code got serialization failure")
}
}
|
|
boolean.py
|
import re
from typing import Union
from .validator import Validator, ValidationError, StopValidation
class Boolean(Validator):
|
true_types = (True, "true", 1, "1",)
false_types = (False, "false", 0, "0")
def __init__(self, message: Union[str, None] = None, parse: bool = True) -> None:
self.parse = parse
self.message = message or 'This field must be true or false.'
def parse_data(self, value):
if value in self.true_types:
return True
elif value in self.false_types:
return False
else:
return value
def handler(self, value, field, request):
        if value not in self.true_types and value not in self.false_types:
raise ValidationError(self.message)
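# Minimal usage sketch (illustrative; assumes the surrounding validator
# framework supplies `field` and `request`):
#   Boolean().parse_data("1")      -> True
#   Boolean().parse_data("false")  -> False
#   Boolean().handler("yes", None, None)  # raises ValidationError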
|
|
app.py
|
# Copyright (c) 2015 Pixomondo
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the MIT License included in this
# distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the MIT License. All rights
# not expressly granted therein are reserved by Pixomondo.
"""
Geometry Output App for Houdini
"""
import sgtk
class GeometryOutputNode(sgtk.platform.Application):
def init_app(self):
module = self.import_module("tk_houdini_geometrynode")
self.handler = module.ToolkitGeometryNodeHandler(self)
def convert_to_geometry_nodes(self):
"""
Convert all Shotgun Geometry nodes found in the current Script to regular
Geometry nodes. Additional toolkit information will be stored in
user data named 'tk_*'
"""
self.handler.convert_sg_to_geometry_nodes()
def
|
(self):
"""
Convert all regular Geometry nodes that have previously been converted
from Shotgun Geometry nodes, back into Shotgun Geometry nodes.
"""
self.handler.convert_geometry_to_sg_nodes()
|
convert_from_geometry_nodes
|
django-import-finder.py
|
# Copyright (C) 2009, Lorenzo Berni
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import PyInstaller.compat as compat
from hookutils import logger
if not compat.getenv("DJANGO_SETTINGS_MODULE"):
compat.setenv("DJANGO_SETTINGS_MODULE", "settings")
from django.conf import settings
hiddenimports = (list(settings.AUTHENTICATION_BACKENDS) +
[settings.DEFAULT_FILE_STORAGE] +
list(settings.FILE_UPLOAD_HANDLERS) +
list(settings.INSTALLED_APPS) +
list(settings.MIDDLEWARE_CLASSES) +
list(settings.TEMPLATE_CONTEXT_PROCESSORS) +
list(settings.TEMPLATE_LOADERS) +
[settings.ROOT_URLCONF])
def find_url_callbacks(urls_module):
urlpatterns = urls_module.urlpatterns
hid_list = [urls_module.__name__]
for pattern in urlpatterns:
if isinstance(pattern, RegexURLPattern):
hid_list.append(pattern.callback.__module__)
elif isinstance(pattern, RegexURLResolver):
hid_list += find_url_callbacks(pattern.urlconf_module)
return hid_list
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
base_module_name = ".".join(compat.getenv("DJANGO_SETTINGS_MODULE", "settings").split(".")[:-1])
if base_module_name:
base_module = __import__(base_module_name, {}, {}, ["urls"])
urls = base_module.urls
else:
|
hiddenimports += find_url_callbacks(urls)
logger.debug('%r', sorted(set(hiddenimports)))
|
import urls
|
generic_scheduler_test.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
"context"
"fmt"
"math"
"reflect"
"strconv"
"strings"
"sync/atomic"
"testing"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientsetfake "k8s.io/client-go/kubernetes/fake"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
var (
errPrioritize = fmt.Errorf("priority map encounters an error")
)
const ErrReasonFake = "Nodes failed the fake predicate"
type trueFilterPlugin struct{}
// Name returns name of the plugin.
func (pl *trueFilterPlugin) Name() string {
return "TrueFilter"
}
// Filter invoked at the filter extension point.
func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
return nil
}
// NewTrueFilterPlugin initializes a trueFilterPlugin and returns it.
func NewTrueFilterPlugin(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &trueFilterPlugin{}, nil
}
type falseFilterPlugin struct{}
// Name returns name of the plugin.
func (pl *falseFilterPlugin) Name() string {
return "FalseFilter"
}
// Filter invoked at the filter extension point.
func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
}
// NewFalseFilterPlugin initializes a falseFilterPlugin and returns it.
func NewFalseFilterPlugin(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &falseFilterPlugin{}, nil
}
type matchFilterPlugin struct{}
// Name returns name of the plugin.
func (pl *matchFilterPlugin) Name() string {
return "MatchFilter"
}
// Filter invoked at the filter extension point.
func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")
}
if pod.Name == node.Name {
return nil
}
return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
}
// NewMatchFilterPlugin initializes a matchFilterPlugin and returns it.
func NewMatchFilterPlugin(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &matchFilterPlugin{}, nil
}
type noPodsFilterPlugin struct{}
// Name returns name of the plugin.
func (pl *noPodsFilterPlugin) Name() string {
return "NoPodsFilter"
}
// Filter invoked at the filter extension point.
func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
if len(nodeInfo.Pods()) == 0 {
return nil
}
return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
}
// NewNoPodsFilterPlugin initializes a noPodsFilterPlugin and returns it.
func NewNoPodsFilterPlugin(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error)
|
// fakeFilterPlugin is a test filter plugin that records how many times its Filter() function has
// been called, and returns a different 'Code' depending on its internal 'failedNodeReturnCodeMap'.
type fakeFilterPlugin struct {
numFilterCalled int32
failedNodeReturnCodeMap map[string]framework.Code
}
// Name returns name of the plugin.
func (pl *fakeFilterPlugin) Name() string {
return "FakeFilter"
}
// Filter invoked at the filter extension point.
func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
atomic.AddInt32(&pl.numFilterCalled, 1)
if returnCode, ok := pl.failedNodeReturnCodeMap[nodeInfo.Node().Name]; ok {
return framework.NewStatus(returnCode, fmt.Sprintf("injecting failure for pod %v", pod.Name))
}
return nil
}
// NewFakeFilterPlugin initializes a fakeFilterPlugin and returns it.
func NewFakeFilterPlugin(failedNodeReturnCodeMap map[string]framework.Code) framework.PluginFactory {
return func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &fakeFilterPlugin{
failedNodeReturnCodeMap: failedNodeReturnCodeMap,
}, nil
}
}
type numericMapPlugin struct{}
func newNumericMapPlugin() framework.PluginFactory {
return func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &numericMapPlugin{}, nil
}
}
func (pl *numericMapPlugin) Name() string {
return "NumericMap"
}
func (pl *numericMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeName string) (int64, *framework.Status) {
score, err := strconv.Atoi(nodeName)
if err != nil {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error converting nodename to int: %+v", nodeName))
}
return int64(score), nil
}
func (pl *numericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
return nil
}
type reverseNumericMapPlugin struct{}
func newReverseNumericMapPlugin() framework.PluginFactory {
return func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &reverseNumericMapPlugin{}, nil
}
}
func (pl *reverseNumericMapPlugin) Name() string {
return "ReverseNumericMap"
}
func (pl *reverseNumericMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeName string) (int64, *framework.Status) {
score, err := strconv.Atoi(nodeName)
if err != nil {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error converting nodename to int: %+v", nodeName))
}
return int64(score), nil
}
func (pl *reverseNumericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
return pl
}
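// NormalizeScore below reverses the raw scores: each score s becomes
// (max+min)-s, so raw scores {1, 2, 3} normalize to {3, 2, 1}.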
func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
var maxScore float64
minScore := math.MaxFloat64
for _, hostPriority := range nodeScores {
maxScore = math.Max(maxScore, float64(hostPriority.Score))
minScore = math.Min(minScore, float64(hostPriority.Score))
}
for i, hostPriority := range nodeScores {
nodeScores[i] = framework.NodeScore{
Name: hostPriority.Name,
Score: int64(maxScore + minScore - float64(hostPriority.Score)),
}
}
return nil
}
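// NormalizeScore above maps each score s to (max + min - s), which reverses the
// ordering while keeping every value inside the original [min, max] range. For
// example, raw scores {"1": 1, "2": 2, "3": 3} normalize to
// {"1": 3, "2": 2, "3": 1}, so the numerically smallest node name wins.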
type trueMapPlugin struct{}
func newTrueMapPlugin() framework.PluginFactory {
return func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &trueMapPlugin{}, nil
}
}
func (pl *trueMapPlugin) Name() string {
return "TrueMap"
}
func (pl *trueMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) (int64, *framework.Status) {
return 1, nil
}
func (pl *trueMapPlugin) ScoreExtensions() framework.ScoreExtensions {
return pl
}
func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
for _, host := range nodeScores {
if host.Name == "" {
return framework.NewStatus(framework.Error, "unexpected empty host name")
}
}
return nil
}
type falseMapPlugin struct{}
func newFalseMapPlugin() framework.PluginFactory {
return func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
return &falseMapPlugin{}, nil
}
}
func (pl *falseMapPlugin) Name() string {
return "FalseMap"
}
func (pl *falseMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) (int64, *framework.Status) {
return 0, framework.NewStatus(framework.Error, errPrioritize.Error())
}
func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions {
return nil
}
var emptySnapshot = internalcache.NewEmptySnapshot()
func makeNodeList(nodeNames []string) []*v1.Node {
result := make([]*v1.Node, 0, len(nodeNames))
for _, nodeName := range nodeNames {
result = append(result, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
}
return result
}
func TestSelectHost(t *testing.T) {
scheduler := genericScheduler{}
tests := []struct {
name string
list framework.NodeScoreList
possibleHosts sets.String
expectsErr bool
}{
{
name: "unique properly ordered scores",
list: []framework.NodeScore{
{Name: "machine1.1", Score: 1},
{Name: "machine2.1", Score: 2},
},
possibleHosts: sets.NewString("machine2.1"),
expectsErr: false,
},
{
name: "equal scores",
list: []framework.NodeScore{
{Name: "machine1.1", Score: 1},
{Name: "machine1.2", Score: 2},
{Name: "machine1.3", Score: 2},
{Name: "machine2.1", Score: 2},
},
possibleHosts: sets.NewString("machine1.2", "machine1.3", "machine2.1"),
expectsErr: false,
},
{
name: "out of order scores",
list: []framework.NodeScore{
{Name: "machine1.1", Score: 3},
{Name: "machine1.2", Score: 3},
{Name: "machine2.1", Score: 2},
{Name: "machine3.1", Score: 1},
{Name: "machine1.3", Score: 3},
},
possibleHosts: sets.NewString("machine1.1", "machine1.2", "machine1.3"),
expectsErr: false,
},
{
name: "empty priority list",
list: []framework.NodeScore{},
possibleHosts: sets.NewString(),
expectsErr: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
			// Run the selection several times to exercise selectHost's random tie-breaking.
for i := 0; i < 10; i++ {
got, err := scheduler.selectHost(test.list)
if test.expectsErr {
if err == nil {
t.Error("Unexpected non-error")
}
} else {
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !test.possibleHosts.Has(got) {
t.Errorf("got %s is not in the possible map %v", got, test.possibleHosts)
}
}
}
})
}
}
func TestGenericScheduler(t *testing.T) {
tests := []struct {
name string
registerPlugins []st.RegisterPluginFunc
nodes []string
pvcs []v1.PersistentVolumeClaim
pod *v1.Pod
pods []*v1.Pod
expectedHosts sets.String
wErr error
}{
{
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("FalseFilter", NewFalseFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
name: "test 1",
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 2,
FilteredNodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
"machine2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
},
},
},
{
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
expectedHosts: sets.NewString("machine1", "machine2"),
name: "test 2",
wErr: nil,
},
{
			// Fits on a machine where the pod name matches the machine name
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("MatchFilter", NewMatchFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine2", UID: types.UID("machine2")}},
expectedHosts: sets.NewString("machine2"),
name: "test 3",
wErr: nil,
},
{
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")}},
expectedHosts: sets.NewString("3"),
name: "test 4",
wErr: nil,
},
{
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("MatchFilter", NewMatchFilterPlugin),
st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
expectedHosts: sets.NewString("2"),
name: "test 5",
wErr: nil,
},
{
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
st.RegisterScorePlugin("ReverseNumericMap", newReverseNumericMapPlugin(), 2),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
expectedHosts: sets.NewString("1"),
name: "test 6",
wErr: nil,
},
{
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterFilterPlugin("FalseFilter", NewFalseFilterPlugin),
st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3", "2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
name: "test 7",
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 3,
FilteredNodesStatuses: framework.NodeToStatusMap{
"3": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
"2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
"1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
},
},
},
{
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("NoPodsFilter", NewNoPodsFilterPlugin),
st.RegisterFilterPlugin("MatchFilter", NewMatchFilterPlugin),
st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")},
Spec: v1.PodSpec{
NodeName: "2",
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
nodes: []string{"1", "2"},
name: "test 8",
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
NumAllNodes: 2,
FilteredNodesStatuses: framework.NodeToStatusMap{
"1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
"2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
},
},
},
{
			// Pod with an existing PVC
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pvcs: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC"}}},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "existingPVC",
},
},
},
},
},
},
expectedHosts: sets.NewString("machine1", "machine2"),
name: "existing PVC",
wErr: nil,
},
{
			// Pod with a non-existent PVC
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "unknownPVC",
},
},
},
},
},
},
name: "unknown PVC",
wErr: fmt.Errorf("persistentvolumeclaim \"unknownPVC\" not found"),
},
{
			// Pod with a PVC that is being deleted
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pvcs: []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", DeletionTimestamp: &metav1.Time{}}}},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "ignore", UID: types.UID("ignore")},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "existingPVC",
},
},
},
},
},
},
name: "deleted PVC",
wErr: fmt.Errorf("persistentvolumeclaim \"existingPVC\" is being deleted"),
},
{
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterScorePlugin("FalseMap", newFalseMapPlugin(), 1),
st.RegisterScorePlugin("TrueMap", newTrueMapPlugin(), 2),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"2", "1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
name: "test error with priority map",
wErr: fmt.Errorf("error while running score plugin for pod \"2\": %+v", errPrioritize),
},
{
name: "test even pods spread predicate - 2 nodes with maxskew=1",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterPluginAsExtensions(
podtopologyspread.Name,
1,
podtopologyspread.New,
"PreFilter",
"Filter",
),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "p", UID: types.UID("p"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "hostname",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
},
},
},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{
NodeName: "machine1",
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
},
expectedHosts: sets.NewString("machine2"),
wErr: nil,
},
{
name: "test even pods spread predicate - 3 nodes with maxskew=2",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterPluginAsExtensions(
podtopologyspread.Name,
1,
podtopologyspread.New,
"PreFilter",
"Filter",
),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "p", UID: types.UID("p"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 2,
TopologyKey: "hostname",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
},
},
},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{Name: "pod1a", UID: types.UID("pod1a"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{
NodeName: "machine1",
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod1b", UID: types.UID("pod1b"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{
NodeName: "machine1",
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: types.UID("pod2"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{
NodeName: "machine2",
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
},
expectedHosts: sets.NewString("machine2", "machine3"),
wErr: nil,
},
{
name: "test with filter plugin returning Unschedulable status",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(
"FakeFilter",
NewFakeFilterPlugin(map[string]framework.Code{"3": framework.Unschedulable}),
),
st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
expectedHosts: nil,
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
NumAllNodes: 1,
FilteredNodesStatuses: framework.NodeToStatusMap{
"3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter"),
},
},
},
{
name: "test with filter plugin returning UnschedulableAndUnresolvable status",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(
"FakeFilter",
NewFakeFilterPlugin(map[string]framework.Code{"3": framework.UnschedulableAndUnresolvable}),
),
st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
expectedHosts: nil,
wErr: &FitError{
Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
NumAllNodes: 1,
FilteredNodesStatuses: framework.NodeToStatusMap{
"3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injecting failure for pod test-filter"),
},
},
},
{
name: "test with partial failed filter plugin",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(
"FakeFilter",
NewFakeFilterPlugin(map[string]framework.Code{"1": framework.Unschedulable}),
),
st.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"1", "2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
expectedHosts: nil,
wErr: nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0)
cache := internalcache.New(time.Duration(0), wait.NeverStop)
for _, pod := range test.pods {
cache.AddPod(pod)
}
var nodes []*v1.Node
for _, name := range test.nodes {
node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}}
nodes = append(nodes, node)
cache.AddNode(node)
}
snapshot := internalcache.NewSnapshot(test.pods, nodes)
fwk, err := st.NewFramework(test.registerPlugins, framework.WithSnapshotSharedLister(snapshot))
if err != nil {
t.Fatal(err)
}
var pvcs []v1.PersistentVolumeClaim
pvcs = append(pvcs, test.pvcs...)
pvcLister := fakelisters.PersistentVolumeClaimLister(pvcs)
scheduler := NewGenericScheduler(
cache,
internalqueue.NewSchedulingQueue(nil),
snapshot,
fwk,
[]SchedulerExtender{},
nil,
pvcLister,
informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
false,
schedulerapi.DefaultPercentageOfNodesToScore,
false)
result, err := scheduler.Schedule(context.Background(), framework.NewCycleState(), test.pod)
if !reflect.DeepEqual(err, test.wErr) {
t.Errorf("Unexpected error: %v, expected: %v", err.Error(), test.wErr)
}
if test.expectedHosts != nil && !test.expectedHosts.Has(result.SuggestedHost) {
t.Errorf("Expected: %s, got: %s", test.expectedHosts, result.SuggestedHost)
}
if test.wErr == nil && len(test.nodes) != result.EvaluatedNodes {
t.Errorf("Expected EvaluatedNodes: %d, got: %d", len(test.nodes), result.EvaluatedNodes)
}
})
}
}
// makeScheduler makes a simple genericScheduler for testing.
func makeScheduler(nodes []*v1.Node, fns ...st.RegisterPluginFunc) *genericScheduler {
cache := internalcache.New(time.Duration(0), wait.NeverStop)
for _, n := range nodes {
cache.AddNode(n)
}
fwk, _ := st.NewFramework(fns)
s := NewGenericScheduler(
cache,
internalqueue.NewSchedulingQueue(nil),
emptySnapshot,
fwk,
nil, nil, nil, nil, false,
schedulerapi.DefaultPercentageOfNodesToScore, false)
cache.UpdateSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
return s.(*genericScheduler)
}
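// Note: makeScheduler deliberately ignores the error from st.NewFramework; the
// plugin sets passed by the tests below are known-valid, so a construction
// failure would surface immediately as a panic in the test itself.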
func TestFindFitAllError(t *testing.T) {
nodes := makeNodeList([]string{"3", "2", "1"})
scheduler := makeScheduler(
nodes,
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterFilterPlugin("MatchFilter", NewMatchFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
)
_, nodeToStatusMap, err := scheduler.findNodesThatFitPod(context.Background(), framework.NewCycleState(), &v1.Pod{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(nodeToStatusMap) != len(nodes) {
t.Errorf("unexpected failed status map: %v", nodeToStatusMap)
}
for _, node := range nodes {
t.Run(node.Name, func(t *testing.T) {
status, found := nodeToStatusMap[node.Name]
if !found {
t.Errorf("failed to find node %v in %v", node.Name, nodeToStatusMap)
}
reasons := status.Reasons()
if len(reasons) != 1 || reasons[0] != ErrReasonFake {
t.Errorf("unexpected failure reasons: %v", reasons)
}
})
}
}
func TestFindFitSomeError(t *testing.T) {
nodes := makeNodeList([]string{"3", "2", "1"})
scheduler := makeScheduler(
nodes,
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterFilterPlugin("MatchFilter", NewMatchFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}}
_, nodeToStatusMap, err := scheduler.findNodesThatFitPod(context.Background(), framework.NewCycleState(), pod)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(nodeToStatusMap) != len(nodes)-1 {
t.Errorf("unexpected failed status map: %v", nodeToStatusMap)
}
for _, node := range nodes {
if node.Name == pod.Name {
continue
}
t.Run(node.Name, func(t *testing.T) {
status, found := nodeToStatusMap[node.Name]
if !found {
t.Errorf("failed to find node %v in %v", node.Name, nodeToStatusMap)
}
reasons := status.Reasons()
if len(reasons) != 1 || reasons[0] != ErrReasonFake {
t.Errorf("unexpected failures: %v", reasons)
}
})
}
}
func TestFindFitPredicateCallCounts(t *testing.T) {
tests := []struct {
name string
pod *v1.Pod
expectedCount int32
}{
{
name: "nominated pods have lower priority, predicate is called once",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}, Spec: v1.PodSpec{Priority: &highPriority}},
expectedCount: 1,
},
{
name: "nominated pods have higher priority, predicate is called twice",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}, Spec: v1.PodSpec{Priority: &lowPriority}},
expectedCount: 2,
},
}
for _, test := range tests {
nodes := makeNodeList([]string{"1"})
cache := internalcache.New(time.Duration(0), wait.NeverStop)
for _, n := range nodes {
cache.AddNode(n)
}
plugin := fakeFilterPlugin{}
registerFakeFilterFunc := st.RegisterFilterPlugin(
"FakeFilter",
func(_ *runtime.Unknown, fh framework.FrameworkHandle) (framework.Plugin, error) {
return &plugin, nil
},
)
registerPlugins := []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
registerFakeFilterFunc,
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
}
fwk, err := st.NewFramework(registerPlugins)
if err != nil {
t.Fatal(err)
}
queue := internalqueue.NewSchedulingQueue(nil)
scheduler := NewGenericScheduler(
cache,
queue,
emptySnapshot,
fwk,
nil, nil, nil, nil, false,
schedulerapi.DefaultPercentageOfNodesToScore, false).(*genericScheduler)
cache.UpdateSnapshot(scheduler.nodeInfoSnapshot)
queue.UpdateNominatedPodForNode(&v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("nominated")}, Spec: v1.PodSpec{Priority: &midPriority}}, "1")
_, _, err = scheduler.findNodesThatFitPod(context.Background(), framework.NewCycleState(), test.pod)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if test.expectedCount != plugin.numFilterCalled {
t.Errorf("predicate was called %d times, expected is %d", plugin.numFilterCalled, test.expectedCount)
}
}
}
func makeNode(node string, milliCPU, memory int64) *v1.Node {
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: node},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
"pods": *resource.NewQuantity(100, resource.DecimalSI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
"pods": *resource.NewQuantity(100, resource.DecimalSI),
},
},
}
}
// The point of this test is to show that you:
// - get the same priority for a zero-request pod as for a pod with the default requests,
//   both when the zero-request pod is already on the machine and when the zero-request pod
//   is the one being scheduled.
// - don't just get the same score no matter what is scheduled.
func TestZeroRequest(t *testing.T) {
// A pod with no resources. We expect spreading to count it as having the default resources.
noResources := v1.PodSpec{
Containers: []v1.Container{
{},
},
}
noResources1 := noResources
noResources1.NodeName = "machine1"
	// A pod that requests exactly the default resources a zero-request pod is assumed to have (for spreading).
small := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(
strconv.FormatInt(schedutil.DefaultMilliCPURequest, 10) + "m"),
v1.ResourceMemory: resource.MustParse(
strconv.FormatInt(schedutil.DefaultMemoryRequest, 10)),
},
},
},
},
}
small2 := small
small2.NodeName = "machine2"
// A larger pod.
large := v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(
strconv.FormatInt(schedutil.DefaultMilliCPURequest*3, 10) + "m"),
v1.ResourceMemory: resource.MustParse(
strconv.FormatInt(schedutil.DefaultMemoryRequest*3, 10)),
},
},
},
},
}
large1 := large
large1.NodeName = "machine1"
large2 := large
large2.NodeName = "machine2"
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
name string
expectedScore int64
}{
		// The point of these next two tests is to show that you get the same priority for a
		// zero-request pod as for a pod with the default requests, both when the zero-request pod
		// is already on the machine and when the zero-request pod is the one being scheduled.
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
name: "test priority of zero-request pod with machine with zero-request pod",
pods: []*v1.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 250,
},
{
pod: &v1.Pod{Spec: small},
nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
name: "test priority of nonzero-request pod with machine with zero-request pod",
pods: []*v1.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 250,
},
// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
{
pod: &v1.Pod{Spec: large},
nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
name: "test priority of larger pod with machine with zero-request pod",
pods: []*v1.Pod{
{Spec: large1}, {Spec: noResources1},
{Spec: large2}, {Spec: small2},
},
expectedScore: 230,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0)
snapshot := internalcache.NewSnapshot(test.pods, test.nodes)
pluginRegistrations := []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterScorePlugin(noderesources.LeastAllocatedName, noderesources.NewLeastAllocated, 1),
st.RegisterScorePlugin(noderesources.BalancedAllocationName, noderesources.NewBalancedAllocation, 1),
st.RegisterScorePlugin(defaultpodtopologyspread.Name, defaultpodtopologyspread.New, 1),
st.RegisterPostFilterPlugin(defaultpodtopologyspread.Name, defaultpodtopologyspread.New),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
}
fwk, err := st.NewFramework(
pluginRegistrations,
framework.WithInformerFactory(informerFactory),
framework.WithSnapshotSharedLister(snapshot),
framework.WithClientSet(client),
)
if err != nil {
t.Fatalf("error creating framework: %+v", err)
}
scheduler := NewGenericScheduler(
nil,
nil,
emptySnapshot,
fwk,
[]SchedulerExtender{},
nil,
nil,
nil,
false,
schedulerapi.DefaultPercentageOfNodesToScore,
false).(*genericScheduler)
scheduler.nodeInfoSnapshot = snapshot
ctx := context.Background()
state := framework.NewCycleState()
_, filteredNodesStatuses, err := scheduler.findNodesThatFitPod(ctx, state, test.pod)
if err != nil {
t.Fatalf("error filtering nodes: %+v", err)
}
scheduler.framework.RunPostFilterPlugins(ctx, state, test.pod, test.nodes, filteredNodesStatuses)
list, err := scheduler.prioritizeNodes(
ctx,
state,
test.pod,
test.nodes,
)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
for _, hp := range list {
if hp.Score != test.expectedScore {
t.Errorf("expected %d for all priorities, got list %#v", test.expectedScore, list)
}
}
})
}
}
func printNodeToVictims(nodeToVictims map[*v1.Node]*extenderv1.Victims) string {
var output string
for node, victims := range nodeToVictims {
output += node.Name + ": ["
for _, pod := range victims.Pods {
output += pod.Name + ", "
}
output += "]"
}
return output
}
func checkPreemptionVictims(expected map[string]map[string]bool, nodeToPods map[*v1.Node]*extenderv1.Victims) error {
if len(expected) == len(nodeToPods) {
for k, victims := range nodeToPods {
if expPods, ok := expected[k.Name]; ok {
if len(victims.Pods) != len(expPods) {
return fmt.Errorf("unexpected number of pods. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
}
prevPriority := int32(math.MaxInt32)
for _, p := range victims.Pods {
// Check that pods are sorted by their priority.
if *p.Spec.Priority > prevPriority {
return fmt.Errorf("pod %v of node %v was not sorted by priority", p.Name, k)
}
prevPriority = *p.Spec.Priority
if _, ok := expPods[p.Name]; !ok {
return fmt.Errorf("pod %v was not expected. Expected: %v", p.Name, expPods)
}
}
} else {
return fmt.Errorf("unexpected machines. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
}
}
} else {
return fmt.Errorf("unexpected number of machines. expected: %v, got: %v", expected, printNodeToVictims(nodeToPods))
}
return nil
}
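// The expected argument to checkPreemptionVictims is keyed by node name, with
// each value the set of victim pod names on that node. A minimal sketch
// (hypothetical values):
//
//	expected := map[string]map[string]bool{
//		"machine1": {"a": true}, // pod "a" must be preempted on machine1
//		"machine2": {},          // machine2 is feasible with no victims
//	}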
var smallContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(schedutil.DefaultMilliCPURequest, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(schedutil.DefaultMemoryRequest, 10)),
},
},
},
}
var mediumContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(schedutil.DefaultMilliCPURequest*2, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(schedutil.DefaultMemoryRequest*2, 10)),
},
},
},
}
var largeContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(schedutil.DefaultMilliCPURequest*3, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(schedutil.DefaultMemoryRequest*3, 10)),
},
},
},
}
var veryLargeContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(
strconv.FormatInt(schedutil.DefaultMilliCPURequest*5, 10) + "m"),
"memory": resource.MustParse(
strconv.FormatInt(schedutil.DefaultMemoryRequest*5, 10)),
},
},
},
}
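// The container sets above request 1x, 2x, 3x, and 5x the scheduler's default
// CPU and memory requests. The preemption tests below give each node 5x the
// default memory, so a pod with veryLargeContainers fits only when every other
// pod on the node is preempted.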
var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int32(-100), int32(0), int32(100), int32(1000), int32(10000)
var startTime = metav1.Date(2019, 1, 1, 1, 1, 1, 0, time.UTC)
var startTime20190102 = metav1.Date(2019, 1, 2, 1, 1, 1, 0, time.UTC)
var startTime20190103 = metav1.Date(2019, 1, 3, 1, 1, 1, 0, time.UTC)
var startTime20190104 = metav1.Date(2019, 1, 4, 1, 1, 1, 0, time.UTC)
var startTime20190105 = metav1.Date(2019, 1, 5, 1, 1, 1, 0, time.UTC)
var startTime20190106 = metav1.Date(2019, 1, 6, 1, 1, 1, 0, time.UTC)
var startTime20190107 = metav1.Date(2019, 1, 7, 1, 1, 1, 0, time.UTC)
// TestSelectNodesForPreemption tests selectNodesForPreemption. This test assumes
// that podFitsOnNode works correctly and is tested separately.
func TestSelectNodesForPreemption(t *testing.T) {
tests := []struct {
name string
registerPlugins []st.RegisterPluginFunc
nodes []string
pod *v1.Pod
pods []*v1.Pod
filterReturnCode framework.Code
		expected                map[string]map[string]bool // Map from node name to the set of pod names that should be preempted.
expectedNumFilterCalled int32
}{
{
name: "a pod that does not fit on any machine",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("FalseFilter", NewFalseFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{},
expectedNumFilterCalled: 2,
},
{
name: "a pod that fits with no preemption",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "new", UID: types.UID("new")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {}, "machine2": {}},
expectedNumFilterCalled: 4,
},
{
name: "a pod that fits on one machine with no preemption",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("MatchFilter", NewMatchFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {}},
expectedNumFilterCalled: 3,
},
{
name: "a pod that fits on both machines when lower priority pods are preempted",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {"a": true}, "machine2": {"b": true}},
expectedNumFilterCalled: 4,
},
{
name: "a pod that would fit on the machines, but other pods running are higher priority",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &lowPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{},
expectedNumFilterCalled: 2,
},
{
name: "medium priority pod is preempted, but lower priority one stays as it is small",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "c", UID: types.UID("c")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {"b": true}, "machine2": {"c": true}},
expectedNumFilterCalled: 5,
},
{
name: "mixed priority pods are preempted",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "c", UID: types.UID("c")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", UID: types.UID("d")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &highPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "e", UID: types.UID("e")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {"b": true, "c": true}},
expectedNumFilterCalled: 5,
},
{
name: "mixed priority pods are preempted, pick later StartTime one when priorities are equal",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190107}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190106}},
{ObjectMeta: metav1.ObjectMeta{Name: "c", UID: types.UID("c")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190105}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", UID: types.UID("d")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &highPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190104}},
{ObjectMeta: metav1.ObjectMeta{Name: "e", UID: types.UID("e")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime20190103}}},
expected: map[string]map[string]bool{"machine1": {"a": true, "c": true}},
expectedNumFilterCalled: 5,
},
{
name: "pod with anti-affinity is preempted",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterFilterPlugin(interpodaffinity.Name, interpodaffinity.New),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{
Name: "machine1",
Labels: map[string]string{"pod": "preemptor"}}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a"), Labels: map[string]string{"service": "securityscan"}}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1", Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "pod",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"preemptor", "value2"},
},
},
},
TopologyKey: "hostname",
},
},
}}}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", UID: types.UID("d")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &highPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "e", UID: types.UID("e")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}}},
expected: map[string]map[string]bool{"machine1": {"a": true}, "machine2": {}},
expectedNumFilterCalled: 4,
},
{
name: "preemption to resolve even pods spread FitError",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterPluginAsExtensions(
podtopologyspread.Name,
1,
podtopologyspread.New,
"PreFilter",
"Filter",
),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node-a/zone1", "node-b/zone1", "node-x/zone2"},
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "p",
Labels: map[string]string{"foo": ""},
},
Spec: v1.PodSpec{
Priority: &highPriority,
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
{
MaxSkew: 1,
TopologyKey: "hostname",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
},
},
},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-a1", UID: types.UID("pod-a1"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-a", Priority: &midPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-a2", UID: types.UID("pod-a2"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-a", Priority: &lowPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-b1", UID: types.UID("pod-b1"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-b", Priority: &lowPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-x1", UID: types.UID("pod-x1"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-x", Priority: &highPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-x2", UID: types.UID("pod-x2"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-x", Priority: &highPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
},
expected: map[string]map[string]bool{
"node-a": {"pod-a2": true},
"node-b": {"pod-b1": true},
},
expectedNumFilterCalled: 6,
},
{
name: "get Unschedulable in the preemption phase when the filter plugins filtering the nodes",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "a", UID: types.UID("a")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", UID: types.UID("b")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}}},
filterReturnCode: framework.Unschedulable,
expected: map[string]map[string]bool{},
expectedNumFilterCalled: 2,
},
}
labelKeys := []string{"hostname", "zone", "region"}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0)
filterFailedNodeReturnCodeMap := map[string]framework.Code{}
cache := internalcache.New(time.Duration(0), wait.NeverStop)
for _, pod := range test.pods {
cache.AddPod(pod)
}
for _, name := range test.nodes {
filterFailedNodeReturnCodeMap[name] = test.filterReturnCode
cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}})
}
var nodes []*v1.Node
for _, n := range test.nodes {
node := makeNode(n, 1000*5, schedutil.DefaultMemoryRequest*5)
				// Split the node name on '/' to derive topology labels, e.g. "node-a/zone1"
				// yields {"hostname": "node-a", "zone": "zone1"}.
node.ObjectMeta.Labels = make(map[string]string)
for i, label := range strings.Split(node.Name, "/") {
node.ObjectMeta.Labels[labelKeys[i]] = label
}
node.Name = node.ObjectMeta.Labels["hostname"]
nodes = append(nodes, node)
}
// For each test, prepend a FakeFilterPlugin.
fakePlugin := fakeFilterPlugin{}
fakePlugin.failedNodeReturnCodeMap = filterFailedNodeReturnCodeMap
registerFakeFilterFunc := st.RegisterFilterPlugin(
"FakeFilter",
func(_ *runtime.Unknown, fh framework.FrameworkHandle) (framework.Plugin, error) {
return &fakePlugin, nil
},
)
registerPlugins := append([]st.RegisterPluginFunc{registerFakeFilterFunc}, test.registerPlugins...)
			// Use a real snapshot since some Filter plugins (e.g., PodAffinity) need it.
snapshot := internalcache.NewSnapshot(test.pods, nodes)
fwk, err := st.NewFramework(registerPlugins, framework.WithSnapshotSharedLister(snapshot))
if err != nil {
t.Fatal(err)
}
scheduler := NewGenericScheduler(
nil,
internalqueue.NewSchedulingQueue(nil),
snapshot,
fwk,
[]SchedulerExtender{},
nil,
nil,
informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
false,
schedulerapi.DefaultPercentageOfNodesToScore,
false)
g := scheduler.(*genericScheduler)
assignDefaultStartTime(test.pods)
state := framework.NewCycleState()
			// Some tests rely on the PreFilter plugin to compute its CycleState.
preFilterStatus := fwk.RunPreFilterPlugins(context.Background(), state, test.pod)
if !preFilterStatus.IsSuccess() {
t.Errorf("Unexpected preFilterStatus: %v", preFilterStatus)
}
nodeInfos, err := nodesToNodeInfos(nodes, snapshot)
if err != nil {
t.Fatal(err)
}
nodeToPods, err := g.selectNodesForPreemption(context.Background(), state, test.pod, nodeInfos, nil)
if err != nil {
t.Error(err)
}
if test.expectedNumFilterCalled != fakePlugin.numFilterCalled {
t.Errorf("expected fakePlugin.numFilterCalled is %d, but got %d", test.expectedNumFilterCalled, fakePlugin.numFilterCalled)
}
if err := checkPreemptionVictims(test.expected, nodeToPods); err != nil {
t.Error(err)
}
})
}
}
// TestPickOneNodeForPreemption tests pickOneNodeForPreemption.
func TestPickOneNodeForPreemption(t *testing.T) {
tests := []struct {
name string
registerPlugins []st.RegisterPluginFunc
nodes []string
pod *v1.Pod
pods []*v1.Pod
expected []string // any of the items is valid
}{
{
name: "No node needs preemption",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}}},
expected: []string{"machine1"},
},
{
name: "a pod that fits on both machines when lower priority pods are preempted",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}}},
expected: []string{"machine1", "machine2"},
},
{
name: "a pod that fits on a machine with no preemption",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}}},
expected: []string{"machine3"},
},
{
name: "machine with min highest priority pod is picked",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
},
expected: []string{"machine3"},
},
{
name: "when highest priorities are the same, minimum sum of priorities is picked",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
},
expected: []string{"machine2"},
},
{
name: "when highest priority and sum are the same, minimum number of pods is picked",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.3", UID: types.UID("m1.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.4", UID: types.UID("m1.4")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &negPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.3", UID: types.UID("m3.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
},
expected: []string{"machine2"},
},
{
			// pickOneNodeForPreemption adjusts pod priorities when computing the sum of the
			// victims' priorities. This test ensures that the logic works correctly.
name: "sum of adjusted priorities is considered",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.3", UID: types.UID("m1.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &negPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.3", UID: types.UID("m3.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
},
expected: []string{"machine2"},
},
{
name: "non-overlapping lowest high priority, sum priorities, and number of pods",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3", "machine4"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &veryHighPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.3", UID: types.UID("m1.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.3", UID: types.UID("m3.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.4", UID: types.UID("m3.4")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m4.1", UID: types.UID("m4.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine4"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m4.2", UID: types.UID("m4.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine4"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m4.3", UID: types.UID("m4.3")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine4"}, Status: v1.PodStatus{StartTime: &startTime}},
{ObjectMeta: metav1.ObjectMeta{Name: "m4.4", UID: types.UID("m4.4")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &negPriority, NodeName: "machine4"}, Status: v1.PodStatus{StartTime: &startTime}},
},
expected: []string{"machine1"},
},
{
name: "same priority, same number of victims, different start time for each machine's pod",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190103}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190103}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime20190104}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime20190104}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime20190102}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime20190102}},
},
expected: []string{"machine2"},
},
{
name: "same priority, same number of victims, different start time for all pods",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190105}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190103}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime20190106}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime20190102}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime20190104}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime20190107}},
},
expected: []string{"machine3"},
},
{
name: "different priority, same number of victims, different start time for all pods",
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"machine1", "machine2", "machine3"},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}, Spec: v1.PodSpec{Containers: veryLargeContainers, Priority: &highPriority}},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190105}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{StartTime: &startTime20190103}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime20190107}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.2", UID: types.UID("m2.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine2"}, Status: v1.PodStatus{StartTime: &startTime20190102}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &lowPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime20190104}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.2", UID: types.UID("m3.2")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{StartTime: &startTime20190106}},
},
expected: []string{"machine2"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var nodes []*v1.Node
for _, n := range test.nodes {
nodes = append(nodes, makeNode(n, schedutil.DefaultMilliCPURequest*5, schedutil.DefaultMemoryRequest*5))
}
snapshot := internalcache.NewSnapshot(test.pods, nodes)
fwk, err := st.NewFramework(test.registerPlugins, framework.WithSnapshotSharedLister(snapshot))
if err != nil {
t.Fatal(err)
}
g := &genericScheduler{
framework: fwk,
nodeInfoSnapshot: snapshot,
}
assignDefaultStartTime(test.pods)
nodeInfos, err := nodesToNodeInfos(nodes, snapshot)
if err != nil {
t.Fatal(err)
}
state := framework.NewCycleState()
candidateNodes, _ := g.selectNodesForPreemption(context.Background(), state, test.pod, nodeInfos, nil)
node := pickOneNodeForPreemption(candidateNodes)
found := false
for _, nodeName := range test.expected {
if node.Name == nodeName {
found = true
break
}
}
if !found {
t.Errorf("unexpected node: %v", node)
}
})
}
}
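// The cases above exercise pickOneNodeForPreemption's tie-breaking cascade.
// As the expectations encode, the function prefers, in order:
//  1. the node whose highest-priority victim has the lowest priority;
//  2. the node with the smallest sum of (adjusted) victim priorities;
//  3. the node with the fewest victims;
//  4. the node whose highest-priority victims have the latest start time.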
func TestNodesWherePreemptionMightHelp(t *testing.T) {
// Prepare 4 node names.
nodeNames := make([]string, 0, 4)
for i := 1; i < 5; i++ {
nodeNames = append(nodeNames, fmt.Sprintf("machine%d", i))
}
tests := []struct {
name string
nodesStatuses framework.NodeToStatusMap
expected map[string]bool // set of expected node names. Value is ignored.
}{
{
name: "No node should be attempted",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeaffinity.ErrReason),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
"machine4": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodelabel.ErrReasonPresenceViolated),
},
expected: map[string]bool{},
},
{
name: "ErrReasonAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAffinityNotMatch),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnschedulable),
},
expected: map[string]bool{"machine1": true, "machine4": true},
},
{
name: "pod with both pod affinity and anti-affinity should be tried",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAffinityNotMatch),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
},
expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
},
{
name: "ErrReasonAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrReasonAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, interpodaffinity.ErrReasonAffinityRulesNotMatch),
"machine2": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAffinityNotMatch),
},
expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
},
{
name: "Mix of failed predicates works fine",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
"machine2": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
},
expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
},
{
name: "Node condition errors should be considered unresolvable",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnknownCondition),
},
expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
},
{
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumebinding.ErrReasonNodeConflict),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumebinding.ErrReasonBindConflict),
},
expected: map[string]bool{"machine4": true},
},
{
name: "ErrTopologySpreadConstraintsNotMatch should be tried as it indicates that the pod is unschedulable due to topology spread constraints",
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
"machine3": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
},
expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
},
{
name: "UnschedulableAndUnresolvable status should be skipped but Unschedulable should be tried",
nodesStatuses: framework.NodeToStatusMap{
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
"machine3": framework.NewStatus(framework.Unschedulable, ""),
"machine4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
},
expected: map[string]bool{"machine1": true, "machine3": true},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fitErr := FitError{
FilteredNodesStatuses: test.nodesStatuses,
}
var nodeInfos []*schedulernodeinfo.NodeInfo
for _, n := range makeNodeList(nodeNames) {
ni := schedulernodeinfo.NewNodeInfo()
ni.SetNode(n)
nodeInfos = append(nodeInfos, ni)
}
nodes := nodesWherePreemptionMightHelp(nodeInfos, &fitErr)
if len(test.expected) != len(nodes) {
t.Errorf("number of nodes is not the same as expected. exptectd: %d, got: %d. Nodes: %v", len(test.expected), len(nodes), nodes)
}
for _, node := range nodes {
name := node.Node().Name
if _, found := test.expected[name]; !found {
t.Errorf("node %v is not expected.", name)
}
}
})
}
}
func TestPreempt(t *testing.T) {
defaultFailedNodeToStatusMap := framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
"machine2": framework.NewStatus(framework.Unschedulable, volumerestrictions.ErrReasonDiskConflict),
"machine3": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
}
// Prepare 3 node names.
var defaultNodeNames []string
for i := 1; i < 4; i++ {
defaultNodeNames = append(defaultNodeNames, fmt.Sprintf("machine%d", i))
}
var (
preemptLowerPriority = v1.PreemptLowerPriority
preemptNever = v1.PreemptNever
)
tests := []struct {
name string
pod *v1.Pod
pods []*v1.Pod
extenders []*FakeExtender
failedNodeToStatusMap framework.NodeToStatusMap
nodeNames []string
registerPlugins []st.RegisterPluginFunc
expectedNode string
expectedPods []string // list of preempted pods
}{
{
name: "basic preemption logic",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority,
PreemptionPolicy: &preemptLowerPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "machine1",
expectedPods: []string{"m1.1", "m1.2"},
},
{
name: "One node doesn't need any preemption",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority,
PreemptionPolicy: &preemptLowerPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "machine3",
expectedPods: []string{},
},
{
name: "preemption for topology spread constraints",
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "p",
Labels: map[string]string{"foo": ""},
},
Spec: v1.PodSpec{
Priority: &highPriority,
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
{
MaxSkew: 1,
TopologyKey: "hostname",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
},
},
},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-a1", UID: types.UID("pod-a1"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-a", Priority: &highPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-a2", UID: types.UID("pod-a2"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-a", Priority: &highPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-b1", UID: types.UID("pod-b1"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-b", Priority: &lowPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-x1", UID: types.UID("pod-x1"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-x", Priority: &highPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "pod-x2", UID: types.UID("pod-x2"), Labels: map[string]string{"foo": ""}},
Spec: v1.PodSpec{NodeName: "node-x", Priority: &highPriority},
Status: v1.PodStatus{Phase: v1.PodRunning},
},
},
failedNodeToStatusMap: framework.NodeToStatusMap{
"node-a": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
"node-b": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
"node-x": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
},
nodeNames: []string{"node-a/zone1", "node-b/zone1", "node-x/zone2"},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterPluginAsExtensions(
podtopologyspread.Name,
1,
podtopologyspread.New,
"PreFilter",
"Filter",
),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "node-b",
expectedPods: []string{"pod-b1"},
},
{
name: "Scheduler extenders allow only machine1, otherwise machine3 would have been chosen",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority,
PreemptionPolicy: &preemptLowerPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
extenders: []*FakeExtender{
{
predicates: []fitPredicate{truePredicateExtender},
},
{
predicates: []fitPredicate{machine1PredicateExtender},
},
},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "machine1",
expectedPods: []string{"m1.1", "m1.2"},
},
{
name: "Scheduler extenders do not allow any preemption",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority,
PreemptionPolicy: &preemptLowerPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
extenders: []*FakeExtender{
{
predicates: []fitPredicate{falsePredicateExtender},
},
},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "",
expectedPods: []string{},
},
{
name: "One scheduler extender allows only machine1, the other returns error but ignorable. Only machine1 would be chosen",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority,
PreemptionPolicy: &preemptLowerPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
extenders: []*FakeExtender{
{
predicates: []fitPredicate{errorPredicateExtender},
ignorable: true,
},
{
predicates: []fitPredicate{machine1PredicateExtender},
},
},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "machine1",
expectedPods: []string{"m1.1", "m1.2"},
},
{
name: "One scheduler extender allows only machine1, but it is not interested in given pod, otherwise machine1 would have been chosen",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority,
PreemptionPolicy: &preemptLowerPriority},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
extenders: []*FakeExtender{
{
predicates: []fitPredicate{machine1PredicateExtender},
unInterested: true,
},
{
predicates: []fitPredicate{truePredicateExtender},
},
},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "machine3",
expectedPods: []string{},
},
{
name: "no preempting in pod",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority,
PreemptionPolicy: &preemptNever},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "",
expectedPods: nil,
},
{
name: "PreemptionPolicy is nil",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
Containers: veryLargeContainers,
Priority: &highPriority,
PreemptionPolicy: nil},
},
pods: []*v1.Pod{
{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &highPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
{ObjectMeta: metav1.ObjectMeta{Name: "m3.1", UID: types.UID("m3.1")}, Spec: v1.PodSpec{Containers: mediumContainers, Priority: &midPriority, NodeName: "machine3"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
},
registerPlugins: []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin(noderesources.FitName, noderesources.NewFit),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
expectedNode: "machine1",
expectedPods: []string{"m1.1", "m1.2"},
},
}
labelKeys := []string{"hostname", "zone", "region"}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(client, 0)
stop := make(chan struct{})
cache := internalcache.New(time.Duration(0), stop)
for _, pod := range test.pods {
cache.AddPod(pod)
}
cachedNodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{}
nodeNames := defaultNodeNames
if len(test.nodeNames) != 0 {
nodeNames = test.nodeNames
}
var nodes []*v1.Node
for i, name := range nodeNames {
node := makeNode(name, 1000*5, schedutil.DefaultMemoryRequest*5)
// if possible, split node name by '/' to form labels in a format of
// {"hostname": node.Name[0], "zone": node.Name[1], "region": node.Name[2]}
node.ObjectMeta.Labels = make(map[string]string)
for i, label := range strings.Split(node.Name, "/") {
node.ObjectMeta.Labels[labelKeys[i]] = label
}
node.Name = node.ObjectMeta.Labels["hostname"]
cache.AddNode(node)
nodes = append(nodes, node)
nodeNames[i] = node.Name
// Set nodeInfo to extenders to mock extenders' cache for preemption.
cachedNodeInfo := schedulernodeinfo.NewNodeInfo()
cachedNodeInfo.SetNode(node)
cachedNodeInfoMap[node.Name] = cachedNodeInfo
}
var extenders []SchedulerExtender
for _, extender := range test.extenders {
// Set nodeInfoMap as extenders cached node information.
extender.cachedNodeNameToInfo = cachedNodeInfoMap
extenders = append(extenders, extender)
}
snapshot := internalcache.NewSnapshot(test.pods, nodes)
fwk, err := st.NewFramework(test.registerPlugins, framework.WithSnapshotSharedLister(snapshot))
if err != nil {
t.Fatal(err)
}
scheduler := NewGenericScheduler(
cache,
internalqueue.NewSchedulingQueue(nil),
snapshot,
fwk,
extenders,
nil,
informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
false,
schedulerapi.DefaultPercentageOfNodesToScore,
true)
state := framework.NewCycleState()
// Some tests rely on PreFilter plugin to compute its CycleState.
preFilterStatus := fwk.RunPreFilterPlugins(context.Background(), state, test.pod)
if !preFilterStatus.IsSuccess() {
t.Errorf("Unexpected preFilterStatus: %v", preFilterStatus)
}
// Call Preempt and check the expected results.
failedNodeToStatusMap := defaultFailedNodeToStatusMap
if test.failedNodeToStatusMap != nil {
failedNodeToStatusMap = test.failedNodeToStatusMap
}
node, victims, _, err := scheduler.Preempt(context.Background(), state, test.pod, error(&FitError{Pod: test.pod, FilteredNodesStatuses: failedNodeToStatusMap}))
if err != nil {
t.Errorf("unexpected error in preemption: %v", err)
}
if node != nil && node.Name != test.expectedNode {
t.Errorf("expected node: %v, got: %v", test.expectedNode, node.GetName())
}
if node == nil && len(test.expectedNode) != 0 {
t.Errorf("expected node: %v, got: nothing", test.expectedNode)
}
if len(victims) != len(test.expectedPods) {
t.Errorf("expected %v pods, got %v.", len(test.expectedPods), len(victims))
}
for _, victim := range victims {
found := false
for _, expPod := range test.expectedPods {
if expPod == victim.Name {
found = true
break
}
}
if !found {
t.Errorf("pod %v is not expected to be a victim.", victim.Name)
}
// Mark the victims for deletion and record the preemptor's nominated node name.
now := metav1.Now()
victim.DeletionTimestamp = &now
test.pod.Status.NominatedNodeName = node.Name
}
// Call preempt again and make sure it doesn't preempt any more pods.
node, victims, _, err = scheduler.Preempt(context.Background(), state, test.pod, error(&FitError{Pod: test.pod, FilteredNodesStatuses: failedNodeToStatusMap}))
if err != nil {
t.Errorf("unexpected error in preemption: %v", err)
}
if node != nil && len(victims) > 0 {
t.Errorf("didn't expect any more preemption. Node %v is selected for preemption.", node)
}
close(stop)
})
}
}
func TestNumFeasibleNodesToFind(t *testing.T) {
tests := []struct {
name string
percentageOfNodesToScore int32
numAllNodes int32
wantNumNodes int32
}{
{
name: "not set percentageOfNodesToScore and nodes number not more than 50",
numAllNodes: 10,
wantNumNodes: 10,
},
{
name: "set percentageOfNodesToScore and nodes number not more than 50",
percentageOfNodesToScore: 40,
numAllNodes: 10,
wantNumNodes: 10,
},
{
name: "not set percentageOfNodesToScore and nodes number more than 50",
numAllNodes: 1000,
wantNumNodes: 420,
},
{
name: "set percentageOfNodesToScore and nodes number more than 50",
percentageOfNodesToScore: 40,
numAllNodes: 1000,
wantNumNodes: 400,
},
{
name: "not set percentageOfNodesToScore and nodes number more than 50*125",
numAllNodes: 6000,
wantNumNodes: 300,
},
{
name: "set percentageOfNodesToScore and nodes number more than 50*125",
percentageOfNodesToScore: 40,
numAllNodes: 6000,
wantNumNodes: 2400,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := &genericScheduler{
percentageOfNodesToScore: tt.percentageOfNodesToScore,
}
if gotNumNodes := g.numFeasibleNodesToFind(tt.numAllNodes); gotNumNodes != tt.wantNumNodes {
t.Errorf("genericScheduler.numFeasibleNodesToFind() = %v, want %v", gotNumNodes, tt.wantNumNodes)
}
})
}
}
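// For reference, the expectations above encode the scheduler's adaptive
// sampling rule. A minimal sketch of that rule, assuming the upstream
// constants of a 100-node floor and a 5% minimum percentage:
//
//	func numFeasibleNodesToFind(percentage, numAllNodes int32) int32 {
//		if numAllNodes < 100 || percentage >= 100 {
//			return numAllNodes
//		}
//		adaptive := percentage
//		if adaptive <= 0 {
//			// Scan a smaller fraction of larger clusters.
//			adaptive = 50 - numAllNodes/125
//			if adaptive < 5 {
//				adaptive = 5
//			}
//		}
//		if num := numAllNodes * adaptive / 100; num >= 100 {
//			return num
//		}
//		return 100
//	}
//
// For 1000 nodes with no percentage set: 50 - 1000/125 = 42% -> 420 nodes.
// For 6000 nodes the adaptive value bottoms out at 5% -> 300 nodes.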
func assignDefaultStartTime(pods []*v1.Pod) {
now := metav1.Now()
for i := range pods {
pod := pods[i]
if pod.Status.StartTime == nil {
pod.Status.StartTime = &now
}
}
}
func TestFairEvaluationForNodes(t *testing.T) {
numAllNodes := 500
nodeNames := make([]string, 0, numAllNodes)
for i := 0; i < numAllNodes; i++ {
nodeNames = append(nodeNames, strconv.Itoa(i))
}
nodes := makeNodeList(nodeNames)
g := makeScheduler(
nodes,
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterFilterPlugin("TrueFilter", NewTrueFilterPlugin),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
)
// To make numAllNodes % nodesToFind != 0
g.percentageOfNodesToScore = 30
nodesToFind := int(g.numFeasibleNodesToFind(int32(numAllNodes)))
// Iterating over all nodes more than twice
for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
nodesThatFit, _, err := g.findNodesThatFitPod(context.Background(), framework.NewCycleState(), &v1.Pod{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(nodesThatFit) != nodesToFind {
t.Errorf("got %d nodes filtered, want %d", len(nodesThatFit), nodesToFind)
}
if g.nextStartNodeIndex != (i+1)*nodesToFind%numAllNodes {
t.Errorf("got %d lastProcessedNodeIndex, want %d", g.nextStartNodeIndex, (i+1)*nodesToFind%numAllNodes)
}
}
}
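// Worked numbers for the loop above, assuming the adaptive rule sketched
// earlier: numFeasibleNodesToFind(500) with percentageOfNodesToScore = 30
// yields 150 nodes per cycle, and since 500 % 150 = 50 != 0 the start index
// walks 150, 300, 450, 100, 250, ... wrapping around the node list instead
// of always restarting at index 0.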
func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) {
var nodeInfos []*schedulernodeinfo.NodeInfo
for _, n := range nodes {
nodeInfo, err := snapshot.NodeInfos().Get(n.Name)
if err != nil {
return nil, err
}
nodeInfos = append(nodeInfos, nodeInfo)
}
return nodeInfos, nil
}
|
{
return &noPodsFilterPlugin{}, nil
}
|
python_stub.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019 Paweł Kacperski ([email protected])
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def install_pip_and_modules(package_names):
import os
import os.path
import sys
import shutil
import subprocess
    assert (2, 7) <= sys.version_info < (3,) or sys.version_info >= (3, 4), 'Python 2.7+ or 3.4+ required'
count_installed_packages = 0
    try:
        from urllib2 import urlopen  # Python 2
    except ImportError:  # ModuleNotFoundError only exists on Python 3.6+
        from urllib.request import urlopen  # Python 3
    def download_file(url):
        print('Downloading ' + url)
        # urlopen is bound once above; probing globals()/locals() here would
        # never see the enclosing function's imports.
        remote_file = urlopen(url)
        with open(os.path.basename(url), 'wb') as local_file:
            local_file.write(remote_file.read())
def pip_install_module(module_name, as_user):
cmd = sys.executable + ' -m pip install ' + module_name
if as_user:
cmd += ' --user'
print('Executing: ' + cmd)
os.system(cmd)
def determine_install_as_user():
in_virtualenv = 'VIRTUAL_ENV' in os.environ
is_root = hasattr(os, 'geteuid') and os.geteuid() == 0
return not in_virtualenv and not is_root
    def restart():
        print('Restarting')
        os.system(sys.executable + ' ' + ' '.join(sys.argv))
        exit(0)
def get_installed_packages():
packages = {}
output_lines = subprocess.check_output([
sys.executable,
'-m',
'pip',
'list'
]).decode('utf-8').split('\n')
for iline in output_lines[2:]:
iline = iline.strip()
if not iline:
continue
parts = iline.split(' ')
            packages[parts[0]] = parts[-1]
return packages
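    # A sketch of a more robust variant (assuming pip >= 9, which added
    # --format=json) would parse machine-readable output instead of the
    # human-oriented table above:
    #
    #   import json
    #   def get_installed_packages_json():
    #       out = subprocess.check_output(
    #           [sys.executable, '-m', 'pip', 'list', '--format=json'])
    #       return {p['name']: p['version'] for p in json.loads(out)}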
install_as_user = determine_install_as_user()
# install pip
try:
import pip
except ImportError as x1:
print(x1)
download_file('https://bootstrap.pypa.io/get-pip.py')
print('Installing: pip')
cmd = sys.executable + ' get-pip.py'
if install_as_user:
cmd += ' --user'
print('Executing: ' + cmd)
os.system(cmd)
os.remove('get-pip.py')
count_installed_packages += 1
try:
import pip
except ImportError:
print('Unable to install pip')
exit(1)
installed_packages = get_installed_packages()
cwd = os.getcwd()
# check if we need Dulwich - pure Python Git implementation
need_dulwich = False
for ipackage_name2 in package_names:
if ipackage_name2.startswith('git+https://'):
need_dulwich = True
break
if need_dulwich:
        if 'dulwich' not in installed_packages:
pip_install_module('dulwich', install_as_user)
count_installed_packages += 1
installed_packages = get_installed_packages()
            if 'dulwich' not in installed_packages:
print('Unable to install dulwich')
exit(1)
restart()
# install packages
    for ipackage_name in package_names:
        imodule_pip_basename = os.path.basename(ipackage_name)
        if imodule_pip_basename not in installed_packages:
            print('Installing: {} ({})'.format(imodule_pip_basename, ipackage_name))
if ipackage_name.startswith('git+https://'):
import dulwich.porcelain
# just remove git+ and install
pkg_url = ipackage_name[4:]
pkg_basename = os.path.basename(pkg_url)
try:
shutil.rmtree(os.path.join(cwd, pkg_basename))
except OSError:
pass
dulwich.porcelain.clone(pkg_url)
pip_install_module(pkg_basename, install_as_user)
count_installed_packages += 1
try:
shutil.rmtree(os.path.join(cwd, pkg_basename))
except Exception as x5:
print(x5)
else:
pip_install_module(ipackage_name, install_as_user)
count_installed_packages += 1
installed_packages = get_installed_packages()
for ipackage_name2 in package_names:
imodule_pip_name2 = os.path.basename(ipackage_name2)
if imodule_pip_name2 not in installed_packages:
print('Unable to install ' + imodule_pip_name2)
exit(1)
if count_installed_packages > 0:
restart()
# this will install some packages
install_pip_and_modules([
'selenium',
'git+https://github.com/boppreh/mouse'
])
# packages installed
# the rest of your code goes below these lines
import selenium
import mouse
def main():
p
|
if __name__ == '__main__':
main()
|
ass
|
requests_test.go
|
package testing
import (
"fmt"
"io"
"io/ioutil"
"testing"
"github.com/huaweicloud/golangsdk/openstack/imageservice/v2/imagedata"
th "github.com/huaweicloud/golangsdk/testhelper"
fakeclient "github.com/huaweicloud/golangsdk/testhelper/client"
)
func TestUpload(t *testing.T) {
th.SetupHTTP()
defer th.TeardownHTTP()
HandlePutImageDataSuccessfully(t)
err := imagedata.Upload(
fakeclient.ServiceClient(),
"da3b75d9-3f4a-40e7-8a2c-bfab23927dea",
readSeekerOfBytes([]byte{5, 3, 7, 24})).ExtractErr()
th.AssertNoErr(t, err)
}
func
|
(t *testing.T) {
th.SetupHTTP()
defer th.TeardownHTTP()
HandleStageImageDataSuccessfully(t)
err := imagedata.Stage(
fakeclient.ServiceClient(),
"da3b75d9-3f4a-40e7-8a2c-bfab23927dea",
readSeekerOfBytes([]byte{5, 3, 7, 24})).ExtractErr()
th.AssertNoErr(t, err)
}
func readSeekerOfBytes(bs []byte) io.ReadSeeker {
return &RS{bs: bs}
}
// implements io.ReadSeeker
type RS struct {
bs []byte
offset int
}
func (rs *RS) Read(p []byte) (int, error) {
leftToRead := len(rs.bs) - rs.offset
if 0 < leftToRead {
bytesToWrite := min(leftToRead, len(p))
for i := 0; i < bytesToWrite; i++ {
p[i] = rs.bs[rs.offset]
rs.offset++
}
return bytesToWrite, nil
}
return 0, io.EOF
}
func min(a int, b int) int {
if a < b {
return a
}
return b
}
func (rs *RS) Seek(offset int64, whence int) (int64, error) {
	offsetInt := int(offset)
	switch whence {
	case io.SeekStart:
		rs.offset = offsetInt
	case io.SeekCurrent:
		rs.offset += offsetInt
	case io.SeekEnd:
		// Per the io.Seeker contract the new position is size + offset
		// (offset is usually negative when seeking from the end).
		rs.offset = len(rs.bs) + offsetInt
	default:
		return 0, fmt.Errorf("for parameter `whence`, expected value in {0,1,2} but got: %#v", whence)
	}
	return int64(rs.offset), nil
}
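// A quick sanity check of the Seek contract above (a sketch, not part of
// the fake's API): seeking one byte back from the end lands on the final
// byte.
//
//	rs := readSeekerOfBytes([]byte{1, 2, 3, 4})
//	pos, _ := rs.Seek(-1, io.SeekEnd) // pos == 3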
func TestDownload(t *testing.T) {
th.SetupHTTP()
defer th.TeardownHTTP()
HandleGetImageDataSuccessfully(t)
rdr, err := imagedata.Download(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea").Extract()
th.AssertNoErr(t, err)
defer rdr.Close()
bs, err := ioutil.ReadAll(rdr)
th.AssertNoErr(t, err)
th.AssertByteArrayEquals(t, []byte{34, 87, 0, 23, 23, 23, 56, 255, 254, 0}, bs)
}
|
TestStage
|
cic_test.py
|
import os
import numpy as np
from numpy.testing import assert_allclose
import pytest
import scipy.io
import scipy.stats
import cic
def cases():
"""
Loads all filenames of the pre-calculated test cases.
"""
case_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cases'
)
cases = []
for dir_path, _, files in os.walk(case_dir):
cases = cases + [os.path.join(dir_path, f) for f in files]
return cases
@pytest.mark.parametrize('inpath', cases())
# Run both serially and in parallel
@pytest.mark.parametrize('n_jobs', [None, -1])
def test_cic(inpath, n_jobs):
np.random.seed(323490)
# Load the case
objs = scipy.io.loadmat(inpath)
y00 = objs['y00'][:, 0]
y01 = objs['y01'][:, 0]
y10 = objs['y10'][:, 0]
y11 = objs['y11'][:, 0]
est_qte, se_qte, est_ate, se_ate = cic.calculate_cic(
y00, y01, y10, y11, n_bootstraps=499, n_draws=10000,
moments=[np.mean],
n_jobs=n_jobs,
        # The original code uses some small (in my view unnecessary)
        # numerical corrections when calculating cdf's and inverse cdf's.
        # Unless they are also used here, some test cases will always be
        # slightly off.
use_corrections=True
)
est_test = objs['est'][0, 1:10]
se_test = objs['se'][1, 1:10]
# Test quantile treatment effects
assert_allclose(est_qte, est_test)
assert_allclose(se_qte, se_test, atol=5e-2, rtol=1e-3)
# Test average treatment effect
# It is possible to get closer than an atol of 5e-3 by increasing n_draws
# above, at the cost of slower tests
assert_allclose(est_ate[0], objs['est'][0, 0], atol=5e-3)
assert_allclose(se_ate[0], objs['se'][1, 0], atol=5e-2, rtol=1e-3)
@pytest.mark.parametrize(
'inpath',
# exp8 and exp10 don't pass without use_corrections, which is only
# supported for the simple case.
[c for c in cases() if not ('exp8' in c or 'exp10' in c)])
def test_multiple_cic_from_simple_case(inpath):
np.random.seed(442342234)
# Load the case
objs = scipy.io.loadmat(inpath)
y00 = objs['y00'][:, 0]
y01 = objs['y01'][:, 0]
y10 = objs['y10'][:, 0]
y11 = objs['y11'][:, 0]
y = np.concatenate([y00, y01, y10, y11])
g = np.concatenate([np.zeros(y00.shape[0] + y01.shape[0], dtype=np.int_),
np.ones(y10.shape[0] + y11.shape[0], dtype=np.int_)])
t = np.concatenate([np.zeros(y00.shape[0], dtype=np.int_),
np.ones(y01.shape[0], dtype=np.int_),
np.zeros(y10.shape[0], dtype=np.int_),
np.ones(y11.shape[0], dtype=np.int_)])
treat = np.array([[0, 0], [0, 1]], dtype=np.bool_)
model = cic.CICModel(y, g, t, treat, n_bootstraps=499, moments=[np.mean],
n_draws=10000)
assert np.all(model.treatment_for == np.array([[1, 1]], dtype=np.int_))
est_test = objs['est'][0, 1:10]
se_test = objs['se'][1, 1:10]
assert_allclose(model.quantile_effect[0], est_test)
assert_allclose(model.quantile_se[0], se_test, atol=5e-2, rtol=1e-3)
# Test average treatment effect
# It is possible to get closer than an atol of 5e-3 by increasing n_draws
# above, at the cost of slower tests
assert_allclose(model.moment_effect[0], objs['est'][0, 0], atol=5e-3)
assert_allclose(model.moment_se[0], objs['se'][1, 0], atol=5e-2, rtol=1e-3)
def test_cic_model_no_effect():
"""
Test a 3x3 CIC model where none of the treatments have any effect.
The test is done by simulating and estimating the model many times
and checking the coverage of the confidence intervals.
"""
np.random.seed(45354354)
treat = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
    ], dtype=np.bool_)
n_trials = 250
n_obs = 1000
quantiles = np.array([0.1, .3, .5, .7, .9])
effect_in_ci = np.zeros((3, quantiles.shape[0]), dtype=np.int_)
for trial_ind in range(n_trials):
g, t, y = generate_sample(n_obs)
model = cic.CICModel(y, g, t, treat, quantiles)
effect_in_ci += (
(model.quantile_effect - 1.96 * model.quantile_se <= 0) &
(model.quantile_effect + 1.96 * model.quantile_se >= 0))
coverage = effect_in_ci / n_trials
assert_allclose(coverage, np.ones_like(coverage) * .95, rtol=5e-2)
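    # A worked check on the tolerance: with n_trials = 250, the Monte Carlo
    # standard error of an estimated 95% coverage is roughly
    # sqrt(0.95 * 0.05 / 250) ~= 0.014, so the 5% relative tolerance
    # (about +/- 0.0475 around 0.95) leaves a few standard errors of slack.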
def test_cic_model_shift_effect():
"""
Test a 3x3 CIC model where the treatments are linear shifts, but
different for different groups and times.
The test is done by simulating and estimating the model many times
and checking the coverage of the confidence intervals.
"""
np.random.seed(45354354)
treat = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
    ], dtype=np.bool_)
n_trials = 250
n_obs = 1000
quantiles = np.array([.25, .5, .75])
moments = [np.mean, np.std]
quantile_in_ci = np.zeros((3, 3, 3), dtype=np.int_)
moment_in_ci = np.zeros((3, 3, 2), dtype=np.int_)
for trial_ind in range(n_trials):
g, t, y = generate_sample(n_obs)
y[(g == 1) & (t == 2)] += 1
y[(g == 2) & (t == 1)] -= 1
y[(g == 2) & (t == 2)] -= 2
model = cic.CICModel(y, g, t, treat, quantiles, moments)
mean, se = model.treatment_quantile(1, 2)
quantile_in_ci[:, 0] += ((mean - 1.96 * se <= 1) &
(mean + 1.96 * se >= 1))
mean, se = model.treatment_quantile(2, 1)
quantile_in_ci[:, 1] += ((mean - 1.96 * se <= -1) &
(mean + 1.96 * se >= -1))
mean, se = model.treatment_quantile(2, 2)
quantile_in_ci[:, 2] += ((mean - 1.96 * se <= -2) &
(mean + 1.96 * se >= -2))
mean, se = model.treatment_moment(1, 2)
moment_in_ci[:, 0, 0] += ((mean[0] - 1.96 * se[0] <= 1) &
(mean[0] + 1.96 * se[0] >= 1))
moment_in_ci[:, 0, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
(mean[1] + 1.96 * se[1] >= 0))
mean, se = model.treatment_moment(2, 1)
moment_in_ci[:, 1, 0] += ((mean[0] - 1.96 * se[0] <= -1) &
(mean[0] + 1.96 * se[0] >= -1))
moment_in_ci[:, 1, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
(mean[1] + 1.96 * se[1] >= 0))
mean, se = model.treatment_moment(2, 2)
moment_in_ci[:, 2, 0] += ((mean[0] - 1.96 * se[0] <= -2) &
(mean[0] + 1.96 * se[0] >= -2))
moment_in_ci[:, 2, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
(mean[1] + 1.96 * se[1] >= 0))
quantile_coverage = quantile_in_ci / n_trials
assert_allclose(quantile_coverage,
np.ones_like(quantile_coverage) * .95,
rtol=5e-2)
moment_coverage = moment_in_ci / n_trials
assert_allclose(moment_coverage,
np.ones_like(moment_in_ci) * .95,
rtol=5e-2)
def test_cic_model_dispersion_effect():
|
def test_test_model_based_on_quantile_valid():
np.random.seed(3423482)
treat = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
    ], dtype=np.bool_)
n_trials = 100
n_obs = 500
quantiles = np.array([.5])
reject = 0
for trial_ind in range(n_trials):
g, t, y = generate_sample(n_obs)
# y[(g == 1) & (t == 2)] = 2 * y[(g == 1) & (t == 2)] - 3
# y[(g == 2) & (t == 1)] = np.exp(y[(g == 2) & (t == 1)])
# y[(g == 1) & (t == 2)] *= 2
# y[(g == 2) & (t == 1)] -= 3
# y[(g == 2) & (t == 2)] += 1
model = cic.CICModel(y, g, t, treat, quantiles)
test_stat, rank_dist = model.test_model_based_on_quantile(0)
crit_val = scipy.stats.chi2.ppf(.95, rank_dist)
if test_stat > crit_val:
reject += 1
reject_prob = reject / n_trials
    # Just check that the rejection probability is not too large.
    # To get reject_prob ~ 0.05, increase n_obs above, but that slows
    # down the test too much.
assert reject_prob <= 0.05
def test_combine_effects():
np.random.seed(4545543)
treat = np.array([
[0, 0, 0],
[0, 1, 1]
], dtype=np.bool)
g = np.concatenate((np.zeros(3000, dtype=np.int_), np.ones(4000, dtype=np.int_)))
t = np.concatenate((np.full(1000, 0), np.full(1000, 1), np.full(1000, 2),
np.full(1000, 0), np.full(1000, 1), np.full(2000, 2)))
y = np.random.randn(7000)
y[(g == 1) & (t == 1)] += 1
y[(g == 1) & (t == 2)] += 2
model = cic.CICModel(y, g, t, treat, np.array([.5, .6]), [np.mean], n_draws=2000)
qte_effect, _, moment_effect, _ = model.combine_effects([(1, 1), (1, 2)])
true_effect = 1 / 3 + 2 * 2 / 3
assert_allclose(qte_effect, true_effect, rtol=5e-2)
assert_allclose(moment_effect, true_effect, rtol=5e-2)
def generate_sample(n_obs):
g = np.random.choice(np.arange(3), n_obs)
t = np.random.choice(np.arange(3), n_obs)
u = np.random.randn(n_obs)
y = np.empty(n_obs)
y[t == 0] = u[t == 0]**3
y[t == 1] = u[t == 1] / 3
y[t == 2] = u[t == 2] + 1000
return g, t, y
|
"""
Test a 3x3 CIC model where treatments are multiplying the distribution
by some number, which differs by group and time.
The test is done by simulating and estimating the model many times
and checking the coverage of the confidence intervals.
"""
np.random.seed(45354354)
treat = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
    ], dtype=np.bool_)
n_trials = 250
n_obs = 2000
quantiles = np.array([.5])
moments = [np.mean, np.std]
quantile_in_ci = np.zeros((3, 3, 1), dtype=np.int_)
moment_in_ci = np.zeros((3, 3, 2), dtype=np.int_)
for trial_ind in range(n_trials):
g, t, y = generate_sample(n_obs)
y[(g == 1) & (t == 2)] *= 2
y[(g == 2) & (t == 1)] *= 3
y[(g == 2) & (t == 2)] *= .5
model = cic.CICModel(y, g, t, treat, quantiles, moments)
# Q_{aX}(p) = a Q_X(p) for a quantile function Q and a > 0.
# The median here is 1000, 2 * 1000 = 2000, hence the QTE is 1000
mean, se = model.treatment_quantile(1, 2)
quantile_in_ci[:, 0] += ((mean - 1.96 * se <= 1000) &
(mean + 1.96 * se >= 1000))
# The median here is 0, 3 * 0 = 0, hence the QTE is 0
mean, se = model.treatment_quantile(2, 1)
quantile_in_ci[:, 1] += ((mean - 1.96 * se <= 0) &
(mean + 1.96 * se >= 0))
# The median here is 1000, .5 * 1000 = 500, hence the QTE is -500
mean, se = model.treatment_quantile(2, 2)
quantile_in_ci[:, 2] += ((mean - 1.96 * se <= -500) &
(mean + 1.96 * se >= -500))
mean, se = model.treatment_moment(1, 2)
# The mean goes from 1000 to 2000 => ATE = 1000
moment_in_ci[:, 0, 0] += ((mean[0] - 1.96 * se[0] <= 1000) &
(mean[0] + 1.96 * se[0] >= 1000))
# The standard deviation goes from 1 to 2 => TE = 1
moment_in_ci[:, 0, 1] += ((mean[1] - 1.96 * se[1] <= 1) &
(mean[1] + 1.96 * se[1] >= 1))
mean, se = model.treatment_moment(2, 1)
# The mean goes from 0 to 0 => ATE = 0
moment_in_ci[:, 1, 0] += ((mean[0] - 1.96 * se[0] <= 0) &
(mean[0] + 1.96 * se[0] >= 0))
# The standard deviation goes from 1/3 to 1 => TE = 2/3
moment_in_ci[:, 1, 1] += ((mean[1] - 1.96 * se[1] <= 2 / 3) &
(mean[1] + 1.96 * se[1] >= 2 / 3))
mean, se = model.treatment_moment(2, 2)
# The mean goes from 1000 to 500 => ATE = -500
moment_in_ci[:, 2, 0] += ((mean[0] - 1.96 * se[0] <= -500) &
(mean[0] + 1.96 * se[0] >= -500))
# The standard deviation goes from 1 to .5 => TE = -.5
moment_in_ci[:, 2, 1] += ((mean[1] - 1.96 * se[1] <= -.5) &
(mean[1] + 1.96 * se[1] >= -.5))
quantile_coverage = quantile_in_ci / n_trials
assert_allclose(quantile_coverage,
np.ones_like(quantile_coverage) * .95,
rtol=5e-2)
moment_coverage = moment_in_ci / n_trials
assert_allclose(moment_coverage,
np.ones_like(moment_in_ci) * .95,
rtol=5e-2)
|
ops.rs
|
pub mod init;
pub use self::init::{init, InitOptions};
pub mod add;
pub mod commit;
pub mod new;
pub mod open;
#[cfg(test)]
pub mod testing {
|
use chrono::prelude::*;
use tempfile::tempdir;
use super::{init, InitOptions};
use crate::{
config::Config,
ops::new::{new, NewOptions},
utils::editing::test::test_string_getter,
Diary,
};
pub fn temp_path() -> PathBuf {
tempdir().unwrap().path().to_path_buf()
}
pub fn temp_diary_path() -> PathBuf {
let dir = temp_path();
dir.join("diary")
}
pub fn temp_config() -> Config {
let diary_dir = temp_diary_path();
Config::builder().diary_path(diary_dir).build()
}
pub fn new_entry(config: &Config, entry_date: &Date<Local>) {
let new_opts = NewOptions { open: false };
let diary = Diary::from_config(config).unwrap();
new(&new_opts, &diary, entry_date, test_string_getter).unwrap();
}
pub fn default_init(potential_path: &Path) {
let init_opts = InitOptions {
path: temp_path(),
prefix: None,
git_repo: false,
};
init(&init_opts, potential_path).unwrap();
}
}
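// Composition sketch (hypothetical test body; `Local.ymd` comes from the
// chrono prelude imported inside `testing`):
//
// #[test]
// fn creates_entry_in_temp_diary() {
//     let config = testing::temp_config();
//     testing::new_entry(&config, &Local.ymd(2021, 11, 16));
// }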
|
use std::path::{Path, PathBuf};
|
bind.go
|
package common
import (
"context"
"testing"
"github.com/antihax/optional"
openapi "github.com/benjvi/osb-checker/autogenerated/go-client"
. "github.com/benjvi/osb-checker/config"
. "github.com/smartystreets/goconvey/convey"
)
func TestBind(
t *testing.T,
instanceID, bindingID string,
req *openapi.ServiceBindingRequest,
async bool,
)
|
{
Convey("BINDING - request syntax", t, func() {
Convey("should return 412 PreconditionFailed if missing X-Broker-API-Version header", func() {
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, "", instanceID, bindingID, openapi.ServiceBindingRequest{},
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldNotBeNil)
So(resp.StatusCode, ShouldEqual, 412)
})
if CONF.Authentication.AuthType != TypeNoauth {
Convey("should return 401 Unauthorized if missing Authorization header", func() {
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
context.Background(), CONF.APIVersion, instanceID, bindingID,
openapi.ServiceBindingRequest{},
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldNotBeNil)
So(resp.StatusCode, ShouldEqual, 401)
})
}
if async {
Convey("should return 422 UnprocessableEntity if missing accepts_incomplete", func() {
tempBody := openapi.ServiceBindingRequest{}
deepCopy(req, &tempBody)
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, CONF.APIVersion, instanceID, bindingID, tempBody,
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(false)})
So(err, ShouldNotBeNil)
So(resp.StatusCode, ShouldEqual, 422)
})
}
Convey("should reject if missing service_id", func() {
tempBody := openapi.ServiceBindingRequest{}
deepCopy(req, &tempBody)
tempBody.ServiceId = ""
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, CONF.APIVersion, instanceID, bindingID, tempBody,
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldNotBeNil)
So(resp.StatusCode, ShouldEqual, 400)
})
Convey("should reject if missing plan_id", func() {
tempBody := openapi.ServiceBindingRequest{}
deepCopy(req, &tempBody)
tempBody.PlanId = ""
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, CONF.APIVersion, instanceID, bindingID, tempBody,
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldNotBeNil)
So(resp.StatusCode, ShouldEqual, 400)
})
Convey("should reject if service_id is invalid", func() {
tempBody := openapi.ServiceBindingRequest{}
deepCopy(req, &tempBody)
tempBody.ServiceId = "xxxx-xxxx-xxxx-xxxx"
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, CONF.APIVersion, instanceID, bindingID, tempBody,
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldNotBeNil)
So(resp.StatusCode, ShouldEqual, 400)
})
Convey("should reject if plan_id is invalid", func() {
tempBody := openapi.ServiceBindingRequest{}
deepCopy(req, &tempBody)
tempBody.PlanId = "xxxx-xxxx-xxxx-xxxx"
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, CONF.APIVersion, instanceID, bindingID, tempBody,
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldNotBeNil)
So(resp.StatusCode, ShouldEqual, 400)
})
Convey("should reject if parameters are not following schema", func() {
tempBody := openapi.ServiceBindingRequest{}
deepCopy(req, &tempBody)
tempBody.Parameters = map[string]interface{}{
"can-not": "be-good",
}
if err := testCatalogSchema(&SchemaOpts{
ServiceID: tempBody.ServiceId,
PlanID: tempBody.PlanId,
Parameters: tempBody.Parameters,
SchemaType: TypeServiceBinding,
Action: ActionCreate,
}); err == nil {
return
}
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, CONF.APIVersion, instanceID, bindingID, tempBody,
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldNotBeNil)
So(resp.StatusCode, ShouldEqual, 400)
})
})
Convey("BINDING - new", t, func() {
Convey("should accept a valid binding request", func() {
tempBody := openapi.ServiceBindingRequest{}
deepCopy(req, &tempBody)
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, CONF.APIVersion, instanceID, bindingID, tempBody,
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldBeNil)
if async {
So(resp.StatusCode, ShouldEqual, 202)
} else {
So(resp.StatusCode, ShouldEqual, 201)
}
})
})
if async {
Convey("BINDING - poll", t, func(c C) {
testPollBindingLastOperation(instanceID, bindingID)
So(pollBindingLastOperationStatus(instanceID, bindingID), ShouldBeNil)
})
}
Convey("BINDING - existed", t, func() {
Convey("should return 200 OK when binding Id with same instance Id exists with identical properties", func() {
tempBody := openapi.ServiceBindingRequest{}
deepCopy(req, &tempBody)
_, resp, err := cli.ServiceBindingsApi.ServiceBindingBinding(
authCtx, CONF.APIVersion, instanceID, bindingID, tempBody,
&openapi.ServiceBindingBindingOpts{AcceptsIncomplete: optional.NewBool(async)})
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, 200)
})
})
}
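// Example invocation (hypothetical IDs and request body; `cli`, `CONF`, and
// `deepCopy` are package-level helpers defined elsewhere):
//
//	req := &openapi.ServiceBindingRequest{ServiceId: "svc-id", PlanId: "plan-id"}
//	TestBind(t, "instance-1", "binding-1", req, false)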
|
|
tpe.py
|
"""
Graphical model (GM)-based optimization algorithm using Theano
"""
from past.utils import old_div
import logging
import time
import numpy as np
from scipy.special import erf
from . import pyll
from .pyll import scope
from .pyll.stochastic import implicit_stochastic
from .base import miscs_to_idxs_vals
from .base import miscs_update_idxs_vals
# from .base import Trials
from . import rand
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/jaberg/hyperopt"
logger = logging.getLogger(__name__)
EPS = 1e-12
# -- default linear forgetting. don't try to change it by reassigning this
#    variable: it's captured in function default args when this file is read
DEFAULT_LF = 25
adaptive_parzen_samplers = {}
# a decorator to register functions to the dict `adaptive_parzen_samplers`
def adaptive_parzen_sampler(name):
def wrapper(f):
assert name not in adaptive_parzen_samplers
adaptive_parzen_samplers[name] = f
return f
return wrapper
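# Registration sketch (hypothetical distribution name "my_dist"): decorating
# a function records it in `adaptive_parzen_samplers` so build_posterior can
# later look it up by a node's distribution name:
#
#   @adaptive_parzen_sampler("my_dist")
#   def ap_my_dist_sampler(obs, prior_weight, low, high, size=(), rng=None):
#       ...
#   assert adaptive_parzen_samplers["my_dist"] is ap_my_dist_sampler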
#
# These are some custom distributions
# that are used to represent posterior distributions.
#
# -- Categorical
@scope.define
def categorical_lpdf(sample, p):
if sample.size:
return np.log(np.asarray(p)[sample])
return np.asarray([])
@scope.define
def randint_via_categorical_lpdf(sample, p):
if sample.size:
return np.log(np.asarray(p)[sample])
return np.asarray([])
# -- Bounded Gaussian Mixture Model (BGMM)
@implicit_stochastic
@scope.define
def GMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):
"""Sample from truncated 1-D Gaussian Mixture Model"""
weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))
assert len(weights) == len(mus) == len(sigmas)
n_samples = int(np.prod(size))
# n_components = len(weights)
if low is None and high is None:
# -- draw from a standard GMM
active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)
samples = rng.normal(loc=mus[active], scale=sigmas[active])
else:
# -- draw from truncated components, handling one-sided truncation
low = float(low) if low is not None else -float("Inf")
high = float(high) if high is not None else float("Inf")
if low >= high:
raise ValueError("low >= high", (low, high))
samples = []
while len(samples) < n_samples:
active = np.argmax(rng.multinomial(1, weights))
draw = rng.normal(loc=mus[active], scale=sigmas[active])
if low <= draw < high:
samples.append(draw)
samples = np.reshape(np.asarray(samples), size)
if q is None:
return samples
return np.round(old_div(samples, q)) * q
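# Usage sketch (any object with `multinomial` and `normal` methods works as
# `rng`, e.g. a legacy np.random.RandomState):
#   rng = np.random.RandomState(0)
#   GMM1([0.5, 0.5], [0.0, 10.0], [1.0, 1.0], low=0.0, high=5.0,
#        rng=rng, size=(4,))
# rejection-samples until it has 4 draws inside [0, 5), so effectively all
# accepted draws come from the first component.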
@scope.define
def normal_cdf(x, mu, sigma):
top = x - mu
bottom = np.maximum(np.sqrt(2) * sigma, EPS)
z = old_div(top, bottom)
return 0.5 * (1 + erf(z))
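# normal_cdf above is the standard closed form
#   Phi(x; mu, sigma) = 0.5 * (1 + erf((x - mu) / (sigma * sqrt(2)))),
# with the denominator clamped to EPS so sigma == 0 stays finite.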
@scope.define
def GMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):
def print_verbose(s, x):
return print(f"GMM1_lpdf:{s}", x)
verbose = 0
samples, weights, mus, sigmas = list(
map(np.asarray, (samples, weights, mus, sigmas))
)
if samples.size == 0:
return np.asarray([])
if weights.ndim != 1:
raise TypeError("need vector of weights", weights.shape)
if mus.ndim != 1:
raise TypeError("need vector of mus", mus.shape)
if sigmas.ndim != 1:
raise TypeError("need vector of sigmas", sigmas.shape)
assert len(weights) == len(mus) == len(sigmas)
_samples = samples
samples = _samples.flatten()
if verbose:
print_verbose("samples", set(samples))
print_verbose("weights", weights)
print_verbose("mus", mus)
print_verbose("sigmas", sigmas)
print_verbose("low", low)
print_verbose("high", high)
print_verbose("q", q)
if low is None and high is None:
p_accept = 1
else:
p_accept = np.sum(
weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))
)
if q is None:
dist = samples[:, None] - mus
mahal = (old_div(dist, np.maximum(sigmas, EPS))) ** 2
# mahal shape is (n_samples, n_components)
Z = np.sqrt(2 * np.pi * sigmas ** 2)
coef = weights / Z / p_accept
rval = logsum_rows(-0.5 * mahal + np.log(coef))
else:
prob = np.zeros(samples.shape, dtype="float64")
for w, mu, sigma in zip(weights, mus, sigmas):
if high is None:
ubound = samples + old_div(q, 2.0)
else:
ubound = np.minimum(samples + old_div(q, 2.0), high)
if low is None:
lbound = samples - old_div(q, 2.0)
else:
lbound = np.maximum(samples - old_div(q, 2.0), low)
# -- two-stage addition is slightly more numerically accurate
inc_amt = w * normal_cdf(ubound, mu, sigma)
inc_amt -= w * normal_cdf(lbound, mu, sigma)
prob += inc_amt
rval = np.log(prob) - np.log(p_accept)
if verbose:
print_verbose("rval:", dict(list(zip(samples, rval))))
rval.shape = _samples.shape
return rval
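# In the truncated case the mixture density is renormalized by the mass that
# survives truncation:
#   p(x | low <= x < high) = p(x) / p_accept,
#   p_accept = sum_k w_k * (Phi(high; mu_k, s_k) - Phi(low; mu_k, s_k)),
# which is exactly the division by (or log-subtraction of) p_accept above.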
# -- Mixture of Log-Normals
@scope.define
def lognormal_cdf(x, mu, sigma):
    # wikipedia gives the cdf as
    # .5 + .5 * erf((log(x) - mu) / sqrt(2 sigma^2))
#
# the maximum is used to move negative values and 0 up to a point
# where they do not cause nan or inf, but also don't contribute much
# to the cdf.
if len(x) == 0:
return np.asarray([])
if x.min() < 0:
raise ValueError("negative arg to lognormal_cdf", x)
olderr = np.seterr(divide="ignore")
try:
top = np.log(np.maximum(x, EPS)) - mu
bottom = np.maximum(np.sqrt(2) * sigma, EPS)
z = old_div(top, bottom)
return 0.5 + 0.5 * erf(z)
finally:
np.seterr(**olderr)
@scope.define
def lognormal_lpdf(x, mu, sigma):
# formula copied from wikipedia
# http://en.wikipedia.org/wiki/Log-normal_distribution
assert np.all(sigma >= 0)
sigma = np.maximum(sigma, EPS)
Z = sigma * x * np.sqrt(2 * np.pi)
E = 0.5 * (old_div((np.log(x) - mu), sigma)) ** 2
rval = -E - np.log(Z)
return rval
@scope.define
def qlognormal_lpdf(x, mu, sigma, q):
    # quantization rounds to the nearest step multiple,
    # so the lpdf is the log of the integral of P(t) over (x - q, x]
# XXX: subtracting two numbers potentially very close together.
return np.log(lognormal_cdf(x, mu, sigma) - lognormal_cdf(x - q, mu, sigma))
@implicit_stochastic
@scope.define
def LGMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):
weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))
    n_samples = int(np.prod(size))
# n_components = len(weights)
if low is None and high is None:
active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)
assert len(active) == n_samples
samples = np.exp(rng.normal(loc=mus[active], scale=sigmas[active]))
else:
# -- draw from truncated components
# TODO: one-sided-truncation
low = float(low)
high = float(high)
if low >= high:
raise ValueError("low >= high", (low, high))
samples = []
while len(samples) < n_samples:
active = np.argmax(rng.multinomial(1, weights))
draw = rng.normal(loc=mus[active], scale=sigmas[active])
if low <= draw < high:
samples.append(np.exp(draw))
samples = np.asarray(samples)
samples = np.reshape(np.asarray(samples), size)
if q is not None:
samples = np.round(old_div(samples, q)) * q
return samples
def logsum_rows(x):
m = x.max(axis=1)
return np.log(np.exp(x - m[:, None]).sum(axis=1)) + m
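# logsum_rows is the row-wise log-sum-exp trick:
#   log(sum_j exp(x_ij)) = m_i + log(sum_j exp(x_ij - m_i)),  m_i = max_j x_ij,
# which avoids overflow when the per-component log-likelihoods are large.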
@scope.define
def LGMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):
samples, weights, mus, sigmas = list(
map(np.asarray, (samples, weights, mus, sigmas))
)
assert weights.ndim == 1
assert mus.ndim == 1
assert sigmas.ndim == 1
_samples = samples
if samples.ndim != 1:
samples = samples.flatten()
if low is None and high is None:
p_accept = 1
else:
p_accept = np.sum(
weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))
)
if q is None:
# compute the lpdf of each sample under each component
lpdfs = lognormal_lpdf(samples[:, None], mus, sigmas)
rval = logsum_rows(lpdfs + np.log(weights))
else:
# compute the lpdf of each sample under each component
prob = np.zeros(samples.shape, dtype="float64")
for w, mu, sigma in zip(weights, mus, sigmas):
if high is None:
ubound = samples + old_div(q, 2.0)
else:
ubound = np.minimum(samples + old_div(q, 2.0), np.exp(high))
if low is None:
lbound = samples - old_div(q, 2.0)
else:
lbound = np.maximum(samples - old_div(q, 2.0), np.exp(low))
lbound = np.maximum(0, lbound)
# -- two-stage addition is slightly more numerically accurate
inc_amt = w * lognormal_cdf(ubound, mu, sigma)
inc_amt -= w * lognormal_cdf(lbound, mu, sigma)
prob += inc_amt
rval = np.log(prob) - np.log(p_accept)
rval.shape = _samples.shape
return rval
#
# This is the weird heuristic ParzenWindow estimator used for continuous
# distributions in various ways.
#
@scope.define_info(o_len=3)
def adaptive_parzen_normal_orig(mus, prior_weight, prior_mu, prior_sigma):
"""
A heuristic estimator for the mu and sigma values of a GMM
TODO: try to find this heuristic in the literature, and cite it - Yoshua
mentioned the term 'elastic' I think?
    mus - vector of N observed component centers (must be 1-D)
"""
mus_orig = np.array(mus)
mus = np.array(mus)
assert str(mus.dtype) != "object"
if mus.ndim != 1:
raise TypeError("mus must be vector", mus)
if len(mus) == 0:
mus = np.asarray([prior_mu])
sigma = np.asarray([prior_sigma])
elif len(mus) == 1:
mus = np.asarray([prior_mu] + [mus[0]])
sigma = np.asarray([prior_sigma, prior_sigma * 0.5])
elif len(mus) >= 2:
order = np.argsort(mus)
mus = mus[order]
sigma = np.zeros_like(mus)
sigma[1:-1] = np.maximum(mus[1:-1] - mus[0:-2], mus[2:] - mus[1:-1])
if len(mus) > 2:
lsigma = mus[2] - mus[0]
usigma = mus[-1] - mus[-3]
else:
lsigma = mus[1] - mus[0]
usigma = mus[-1] - mus[-2]
sigma[0] = lsigma
sigma[-1] = usigma
# XXX: is sorting them necessary anymore?
# un-sort the mus and sigma
mus[order] = mus.copy()
sigma[order] = sigma.copy()
if not np.all(mus_orig == mus):
print("orig", mus_orig)
print("mus", mus)
assert np.all(mus_orig == mus)
# put the prior back in
mus = np.asarray([prior_mu] + list(mus))
sigma = np.asarray([prior_sigma] + list(sigma))
maxsigma = prior_sigma
# -- magic formula:
minsigma = old_div(prior_sigma, np.sqrt(1 + len(mus)))
sigma = np.clip(sigma, minsigma, maxsigma)
weights = np.ones(len(mus), dtype=mus.dtype)
weights[0] = prior_weight
weights = old_div(weights, weights.sum())
return weights, mus, sigma
@scope.define
def linear_forgetting_weights(N, LF):
assert N >= 0
assert LF > 0
if N == 0:
return np.asarray([])
if N < LF:
return np.ones(N)
ramp = np.linspace(old_div(1.0, N), 1.0, num=N - LF)
flat = np.ones(LF)
weights = np.concatenate([ramp, flat], axis=0)
assert weights.shape == (N,), (weights.shape, N)
return weights
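# Worked example: linear_forgetting_weights(6, 4) keeps full weight on the 4
# most recent observations and ramps down the 2 oldest:
#   ramp = linspace(1/6, 1, num=2) -> [0.1667, 1.0]
#   weights -> [0.1667, 1.0, 1.0, 1.0, 1.0, 1.0]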
# XXX: make TPE do a post-inference pass over the pyll graph and insert
# non-default LF argument
@scope.define_info(o_len=3)
def adaptive_parzen_normal(mus, prior_weight, prior_mu, prior_sigma, LF=DEFAULT_LF):
"""
    mus - vector of N observed component centers (must be 1-D)
"""
mus = np.array(mus)
assert str(mus.dtype) != "object"
if mus.ndim != 1:
raise TypeError("mus must be vector", mus)
if len(mus) == 0:
srtd_mus = np.asarray([prior_mu])
sigma = np.asarray([prior_sigma])
prior_pos = 0
elif len(mus) == 1:
if prior_mu < mus[0]:
prior_pos = 0
srtd_mus = np.asarray([prior_mu, mus[0]])
sigma = np.asarray([prior_sigma, prior_sigma * 0.5])
else:
prior_pos = 1
srtd_mus = np.asarray([mus[0], prior_mu])
sigma = np.asarray([prior_sigma * 0.5, prior_sigma])
elif len(mus) >= 2:
# create new_mus, which is sorted, and in which
# the prior has been inserted
order = np.argsort(mus)
prior_pos = np.searchsorted(mus[order], prior_mu)
srtd_mus = np.zeros(len(mus) + 1)
srtd_mus[:prior_pos] = mus[order[:prior_pos]]
srtd_mus[prior_pos] = prior_mu
srtd_mus[prior_pos + 1 :] = mus[order[prior_pos:]]
sigma = np.zeros_like(srtd_mus)
sigma[1:-1] = np.maximum(
srtd_mus[1:-1] - srtd_mus[0:-2], srtd_mus[2:] - srtd_mus[1:-1]
)
lsigma = srtd_mus[1] - srtd_mus[0]
usigma = srtd_mus[-1] - srtd_mus[-2]
sigma[0] = lsigma
sigma[-1] = usigma
if LF and LF < len(mus):
unsrtd_weights = linear_forgetting_weights(len(mus), LF)
srtd_weights = np.zeros_like(srtd_mus)
assert len(unsrtd_weights) + 1 == len(srtd_mus)
srtd_weights[:prior_pos] = unsrtd_weights[order[:prior_pos]]
srtd_weights[prior_pos] = prior_weight
srtd_weights[prior_pos + 1 :] = unsrtd_weights[order[prior_pos:]]
else:
srtd_weights = np.ones(len(srtd_mus))
srtd_weights[prior_pos] = prior_weight
# -- magic formula:
maxsigma = old_div(prior_sigma, 1.0)
minsigma = old_div(prior_sigma, min(100.0, (1.0 + len(srtd_mus))))
sigma = np.clip(sigma, minsigma, maxsigma)
sigma[prior_pos] = prior_sigma
assert prior_sigma > 0
assert maxsigma > 0
assert minsigma > 0
assert np.all(sigma > 0), (sigma.min(), minsigma, maxsigma)
srtd_weights /= srtd_weights.sum()
return srtd_weights, srtd_mus, sigma
|
#
# NOTE: These samplers are used in a fairly complicated way: they return
# pyll.Apply AST (Abstract Syntax Tree) objects. This AST is then manipulated
# and the corresponding _lpdf function is called (e.g. GMM1_lpdf).
#
# Please see the build_posterior function for details
# -- Uniform
@adaptive_parzen_sampler("uniform")
def ap_uniform_sampler(obs, prior_weight, low, high, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
obs, prior_weight, prior_mu, prior_sigma
)
return scope.GMM1(
weights, mus, sigmas, low=low, high=high, q=None, size=size, rng=rng
)
@adaptive_parzen_sampler("quniform")
def ap_quniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
obs, prior_weight, prior_mu, prior_sigma
)
return scope.GMM1(weights, mus, sigmas, low=low, high=high, q=q, size=size, rng=rng)
@adaptive_parzen_sampler("loguniform")
def ap_loguniform_sampler(obs, prior_weight, low, high, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(obs), prior_weight, prior_mu, prior_sigma
)
rval = scope.LGMM1(weights, mus, sigmas, low=low, high=high, size=size, rng=rng)
return rval
@adaptive_parzen_sampler("qloguniform")
def ap_qloguniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(
# -- map observations that were quantized to be below exp(low)
# (particularly 0) back up to exp(low) where they will
# interact in a reasonable way with the AdaptiveParzen
# thing.
scope.maximum(
obs,
scope.maximum( # -- protect against exp(low) underflow
EPS, scope.exp(low)
),
)
),
prior_weight,
prior_mu,
prior_sigma,
)
return scope.LGMM1(weights, mus, sigmas, low, high, q=q, size=size, rng=rng)
# -- Normal
@adaptive_parzen_sampler("normal")
def ap_normal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)
return scope.GMM1(weights, mus, sigmas, size=size, rng=rng)
@adaptive_parzen_sampler("qnormal")
def ap_qnormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)
return scope.GMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
@adaptive_parzen_sampler("lognormal")
def ap_loglognormal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(obs), prior_weight, mu, sigma
)
rval = scope.LGMM1(weights, mus, sigmas, size=size, rng=rng)
return rval
@adaptive_parzen_sampler("qlognormal")
def ap_qlognormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
log_obs = scope.log(scope.maximum(obs, EPS))
weights, mus, sigmas = scope.adaptive_parzen_normal(
log_obs, prior_weight, mu, sigma
)
rval = scope.LGMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
return rval
# -- Categorical
@adaptive_parzen_sampler("randint")
def ap_randint_sampler(
obs, prior_weight, low, high=None, size=(), rng=None, LF=DEFAULT_LF
):
# randint can be seen as a categorical with high - low categories
weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)
# if high is None, then low represents high and there is no offset
domain_size = low if high is None else high - low
offset = pyll.Literal(0) if high is None else low
counts = scope.bincount(obs, offset=offset, minlength=domain_size, weights=weights)
# -- add in some prior pseudocounts
pseudocounts = counts + prior_weight
random_variable = scope.randint_via_categorical(
old_div(pseudocounts, scope.sum(pseudocounts)), size=size, rng=rng
)
return random_variable
@scope.define
def tpe_cat_pseudocounts(counts, prior_weight, p, size):
if np.prod(size) == 0:
return []
if p.ndim == 2:
assert np.all(p == p[0])
p = p[0]
pseudocounts = counts + p.size * (prior_weight * p)
return old_div(pseudocounts, np.sum(pseudocounts))
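# Worked example: counts=[2., 0., 1.], prior_weight=1.0, p=[1/3, 1/3, 1/3]:
#   pseudocounts = counts + p.size * (prior_weight * p) = [3., 1., 2.]
#   returned distribution = [0.5, 0.1667, 0.3333]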
@adaptive_parzen_sampler("categorical")
def ap_categorical_sampler(obs, prior_weight, p, size=(), rng=None, LF=DEFAULT_LF):
weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)
# in order to support pchoice here, we need to find the size of p,
# but p can have p.ndim == 2, so we pass p to bincount and unpack it
# (if required) there
counts = scope.bincount(obs, p=p, weights=weights)
pseudocounts = scope.tpe_cat_pseudocounts(counts, prior_weight, p, size)
return scope.categorical(pseudocounts, size=size, rng=rng)
#
# Posterior clone performs symbolic inference on the pyll graph of priors.
#
@scope.define_info(o_len=2)
def ap_split_trials(o_idxs, o_vals, l_idxs, l_vals, gamma, gamma_cap=DEFAULT_LF):
"""Split the elements of `o_vals` (observations values) into two groups: those for
trials whose losses (`l_vals`) were above gamma, and those below gamma. Note that
only unique elements are returned, so the total number of returned elements might
be lower than `len(o_vals)`
"""
o_idxs, o_vals, l_idxs, l_vals = list(
map(np.asarray, [o_idxs, o_vals, l_idxs, l_vals])
)
# XXX if this is working, refactor this sort for efficiency
# Splitting is done this way to cope with duplicate loss values.
n_below = min(int(np.ceil(gamma * np.sqrt(len(l_vals)))), gamma_cap)
l_order = np.argsort(l_vals)
keep_idxs = set(l_idxs[l_order[:n_below]])
below = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]
keep_idxs = set(l_idxs[l_order[n_below:]])
above = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]
return np.asarray(below), np.asarray(above)
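# Worked example of the split size: with gamma=0.25 and 100 losses,
# n_below = min(ceil(0.25 * sqrt(100)), 25) = 3, so only the 3 best trials
# form the "below" (good) group and everything else lands in "above".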
@scope.define
def broadcast_best(samples, below_llik, above_llik):
if len(samples):
score = below_llik - above_llik
if len(samples) != len(score):
raise ValueError()
best = np.argmax(score)
return [samples[best]] * len(samples)
else:
return []
def build_posterior(
specs,
prior_idxs,
prior_vals,
obs_idxs,
obs_vals,
obs_loss_idxs,
obs_loss_vals,
oloss_gamma,
prior_weight,
):
"""
This method clones a posterior inference graph by iterating forward in
topological order, and replacing prior random-variables (prior_idxs, prior_vals)
with new posterior distributions (post_specs, post_idxs, post_vals) that make use
of observations (obs_idxs, obs_vals).
"""
assert all(
isinstance(arg, pyll.Apply)
for arg in [obs_loss_idxs, obs_loss_vals, oloss_gamma]
)
assert set(prior_idxs.keys()) == set(prior_vals.keys())
expr = pyll.as_apply([specs, prior_idxs, prior_vals])
nodes = pyll.dfs(expr)
# build the joint posterior distribution as the values in this memo
memo = {}
# map prior RVs to observations
obs_memo = {}
for nid in prior_vals:
# construct the leading args for each call to adaptive_parzen_sampler
# which will permit the "adaptive parzen samplers" to adapt to the
# correct samples.
obs_below, obs_above = scope.ap_split_trials(
obs_idxs[nid], obs_vals[nid], obs_loss_idxs, obs_loss_vals, oloss_gamma
)
obs_memo[prior_vals[nid]] = [obs_below, obs_above]
for node in nodes:
if node not in memo:
new_inputs = [memo[arg] for arg in node.inputs()]
if node in obs_memo:
# -- this case corresponds to an observed Random Var
# node.name is a distribution like "normal", "randint", etc.
obs_below, obs_above = obs_memo[node]
aa = [memo[a] for a in node.pos_args]
fn = adaptive_parzen_samplers[node.name]
b_args = [obs_below, prior_weight] + aa
named_args = {kw: memo[arg] for (kw, arg) in node.named_args}
b_post = fn(*b_args, **named_args)
a_args = [obs_above, prior_weight] + aa
a_post = fn(*a_args, **named_args)
# fn is a function e.g ap_uniform_sampler, ap_normal_sampler, etc
# b_post and a_post are pyll.Apply objects that are
# AST (Abstract Syntax Trees). They create the distribution,
# (e.g. using adaptive_parzen_normal), and then
# call a function to sample randomly from that distribution
# (e.g. using scope.GMM1) which return those samples.
#
# However we are only interested in using the samples from b_post.
# This code looks at the AST and grabs the function name that we used
# for sampling (e.g. scope.GMM1) and modifies it, e.g. to
# "scope.GMM1_lpdf". It then calls this function, passing in the
# samples as the first parameter.a_args
#
# The result is that we are effectively calling, for example:
# below_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_below, ...))
# above_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_above, ...))
assert a_post.name == b_post.name
fn_lpdf = getattr(scope, a_post.name + "_lpdf")
a_kwargs = {
n: a for n, a in a_post.named_args if n not in ("rng", "size")
}
b_kwargs = {
n: a for n, a in b_post.named_args if n not in ("rng", "size")
}
# calculate the log likelihood of b_post under both distributions
below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
# compute new_node based on below & above log likelihood
new_node = scope.broadcast_best(b_post, below_llik, above_llik)
elif hasattr(node, "obj"):
# -- keep same literals in the graph
new_node = node
else:
# -- this case is for all the other stuff in the graph
new_node = node.clone_from_inputs(new_inputs)
memo[node] = new_node
post_idxs = {nid: memo[idxs] for nid, idxs in prior_idxs.items()}
post_vals = {nid: memo[vals] for nid, vals in prior_vals.items()}
return post_idxs, post_vals
# TODO: is this used?
# @scope.define
# def idxs_prod(full_idxs, idxs_by_label, llik_by_label):
# """Add all of the log-likelihoods together by id.
#
# Example arguments:
# full_idxs = [0, 1, ... N-1]
# idxs_by_label = {'node_a': [1, 3], 'node_b': [3]}
# llik_by_label = {'node_a': [0.1, -3.3], node_b: [1.0]}
#
# This would return N elements: [0, 0.1, 0, -2.3, 0, 0, ... ]
# """
# assert len(set(full_idxs)) == len(full_idxs)
# full_idxs = list(full_idxs)
# rval = np.zeros(len(full_idxs))
# pos_of_tid = dict(list(zip(full_idxs, list(range(len(full_idxs))))))
# assert set(idxs_by_label.keys()) == set(llik_by_label.keys())
# for nid in idxs_by_label:
# idxs = idxs_by_label[nid]
# llik = llik_by_label[nid]
# assert np.all(np.asarray(idxs) > 1)
# assert len(set(idxs)) == len(idxs)
# assert len(idxs) == len(llik)
# for ii, ll in zip(idxs, llik):
# rval[pos_of_tid[ii]] += ll
# return rval
_default_prior_weight = 1.0
# -- suggest best of this many draws on every iteration
_default_n_EI_candidates = 120
# -- ceil(gamma * sqrt(n_trials)) trials are treated as the "good" group
_default_gamma = 0.25
_default_n_startup_jobs = 100
_default_linear_forgetting = DEFAULT_LF
def build_posterior_wrapper(domain, prior_weight, gamma):
"""
Calls build_posterior
Args:
domain (hyperopt.base.Domain): contains info about the obj function and the hp
space passed to fmin
prior_weight (float): smoothing factor for counts, to avoid having 0 prob
# TODO: consider renaming or improving documentation for suggest
gamma (float): the threshold to split between l(x) and g(x), see eq. 2 in
https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
    Returns:
        (observed, observed_loss, posterior): placeholder pyll Literals for
        the observations and their losses, plus the posterior sampling graph
    """
# -- these dummy values will be replaced in build_posterior() and never used
observed = {"idxs": pyll.Literal(), "vals": pyll.Literal()}
observed_loss = {"idxs": pyll.Literal(), "vals": pyll.Literal()}
posterior = build_posterior(
# -- vectorized clone of bandit template
domain.vh.v_expr,
# -- this dict and next represent prior dists
domain.vh.idxs_by_label(),
domain.vh.vals_by_label(),
observed["idxs"],
observed["vals"],
observed_loss["idxs"],
observed_loss["vals"],
pyll.Literal(gamma),
pyll.Literal(float(prior_weight)),
)
return observed, observed_loss, posterior
def suggest(
new_ids,
domain,
trials,
seed,
prior_weight=_default_prior_weight,
n_startup_jobs=_default_n_startup_jobs,
n_EI_candidates=_default_n_EI_candidates,
gamma=_default_gamma,
verbose=True,
):
"""
Given previous trials and the domain, suggest the best expected hp point
according to the TPE-EI algo
    Args:
        prior_weight: smoothing factor for counts, to avoid 0 probabilities
        n_startup_jobs: number of random (rand.suggest) trials before TPE is used
        n_EI_candidates: number of candidate draws scored per suggestion
        gamma: quantile parameter that splits trials into "good" and "bad" groups
        verbose: whether to log timing and trial statistics
    Returns:
        the doc for the best new trial
    """
t0 = time.time()
# use build_posterior_wrapper to create the pyll nodes
observed, observed_loss, posterior = build_posterior_wrapper(
domain, prior_weight, gamma
)
tt = time.time() - t0
if verbose:
logger.info("build_posterior_wrapper took %f seconds" % tt)
# Loop over previous trials to collect best_docs and best_docs_loss
best_docs = dict()
best_docs_loss = dict()
for doc in trials.trials:
        # get either this doc's own tid or the one it was derived from
tid = doc["misc"].get("from_tid", doc["tid"])
# associate infinite loss to new/running/failed jobs
loss = doc["result"].get("loss")
loss = float("inf") if loss is None else float(loss)
        # keep the lowest loss seen for each tid, initializing on first sight
best_docs_loss.setdefault(tid, loss)
if loss <= best_docs_loss[tid]:
best_docs_loss[tid] = loss
best_docs[tid] = doc
# -- sort docs by order of suggestion
# so that linear_forgetting removes the oldest ones
tid_docs = sorted(best_docs.items())
losses = [best_docs_loss[tid] for tid, doc in tid_docs]
tids, docs = list(zip(*tid_docs)) if tid_docs else ([], [])
if verbose:
if docs:
s = "%i/%i trials with best loss %f" % (
len(docs),
len(trials),
np.nanmin(losses),
)
else:
s = "0 trials"
logger.info("TPE using %s" % s)
if len(docs) < n_startup_jobs:
# N.B. THIS SEEDS THE RNG BASED ON THE new_id
return rand.suggest(new_ids, domain, trials, seed)
# Sample and compute log-probability.
first_new_id = new_ids[0]
if tids:
# -- the +2 coordinates with an assertion above
# to ensure that fake ids are used during sampling
# TODO: not sure what assertion this refers to...
fake_id_0 = max(max(tids), first_new_id) + 2
else:
# -- weird - we're running the TPE algo from scratch
assert n_startup_jobs <= 0
fake_id_0 = first_new_id + 2
fake_ids = list(range(fake_id_0, fake_id_0 + n_EI_candidates))
# -- this dictionary will map pyll nodes to the values
# they should take during the evaluation of the pyll program
memo = {domain.s_new_ids: fake_ids, domain.s_rng: np.random.default_rng(seed)}
memo[observed_loss["idxs"]] = tids
memo[observed_loss["vals"]] = losses
observed_idxs_dict, observed_vals_dict = miscs_to_idxs_vals(
[doc["misc"] for doc in docs], keys=list(domain.params.keys())
)
memo[observed["idxs"]] = observed_idxs_dict
memo[observed["vals"]] = observed_vals_dict
# evaluate `n_EI_candidates` pyll nodes in `posterior` using `memo`
# TODO: it seems to return idxs, vals, all the same. Is this correct?
idxs, vals = pyll.rec_eval(posterior, memo=memo, print_node_on_error=False)
# hack to add offset again for randint params
for label, param in domain.params.items():
if param.name == "randint" and len(param.pos_args) == 2:
offset = param.pos_args[0].obj
vals[label] = [val + offset for val in vals[label]]
# -- retrieve the best of the samples and form the return tuple
# specs are deprecated since build_posterior makes all the same
rval_specs = [None]
rval_results = [domain.new_result()]
rval_miscs = [{"tid": first_new_id, "cmd": domain.cmd, "workdir": domain.workdir}]
miscs_update_idxs_vals(
rval_miscs,
idxs,
vals,
idxs_map={fake_ids[0]: first_new_id},
assert_all_vals_used=False,
)
# return the doc for the best new trial
return trials.new_trial_docs([first_new_id], rval_specs, rval_results, rval_miscs)
|
#
# Adaptive Parzen Samplers
# These produce conditional estimators for various prior distributions
|
k-closest-points-to-origin.rs
|
impl Solution {
    pub fn k_closest(points: Vec<Vec<i32>>, k: i32) -> Vec<Vec<i32>> {
        let mut points = points;
        // Sort by squared distance: sqrt is monotonic, so the ordering is
        // identical, and i64 arithmetic avoids float comparisons and the
        // manual Ordering boilerplate.
        points.sort_by_key(|point| {
            let (x, y) = (i64::from(point[0]), i64::from(point[1]));
            x * x + y * y
        });
let k = k as usize;
points[0..k].to_vec()
}
|
}
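// Example (LeetCode 973): k_closest(vec![vec![3, 3], vec![5, -1], vec![-2, 4]], 2)
// returns [[3, 3], [-2, 4]], since the squared distances order as 18 < 20 < 26.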
|
|
root.test.ts
|
import { assert } from "@re-/assert"
|
import { model } from "@re-/model"
import { definitionTypeErrorTemplate } from "../../internal.js"
describe("root definition", () => {
it("bad type def type", () => {
// @ts-expect-error
assert(() => model({ bad: Symbol() })).throwsAndHasTypeError(
definitionTypeErrorTemplate
)
// @ts-expect-error
assert(() => model({ bad: () => ({}) })).throwsAndHasTypeError(
definitionTypeErrorTemplate
)
})
})
| |
actions.go
|
package actions
import (
"fmt"
"io/ioutil"
"path/filepath"
"github.com/hasura/graphql-engine/cli"
"github.com/hasura/graphql-engine/cli/internal/cliext"
cliextension "github.com/hasura/graphql-engine/cli/internal/metadataobject/actions/cli_extension"
"github.com/hasura/graphql-engine/cli/internal/metadataobject/actions/editor"
"github.com/hasura/graphql-engine/cli/internal/metadataobject/actions/types"
"github.com/hasura/graphql-engine/cli/util"
"github.com/hasura/graphql-engine/cli/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
const (
actionsFileName = "actions.yaml"
graphqlFileName = "actions.graphql"
)
type ActionConfig struct {
MetadataDir string
ActionConfig *types.ActionExecutionConfig
serverFeatureFlags *version.ServerFeatureFlags
cliExtensionConfig *cliextension.Config
ensureCliExt func() error
logger *logrus.Logger
}
func New(ec *cli.ExecutionContext, baseDir string) *ActionConfig
|
func (a *ActionConfig) Create(name string, introSchema interface{}, deriveFrom string) error {
err := a.ensureCliExt()
if err != nil {
return err
}
// Read the content of graphql file
graphqlFileContent, err := a.GetActionsGraphQLFileContent()
if err != nil {
return errors.Wrapf(err, "error in reading %s file", graphqlFileName)
}
// Read actions.yaml
oldAction, err := a.GetActionsFileContent()
if err != nil {
return errors.Wrapf(err, "error in reading %s file", actionsFileName)
}
// check if action already present
for _, currAction := range oldAction.Actions {
if currAction.Name == name {
return fmt.Errorf("action %s already exists in %s", name, graphqlFileName)
}
}
var defaultSDL string
if introSchema == nil {
defaultSDL = `type Mutation {
# Define your action as a mutation here
` + name + ` (arg1: SampleInput!): SampleOutput
}
type SampleOutput {
accessToken: String!
}
input SampleInput {
username: String!
password: String!
}
`
} else {
sdlToReq := types.SDLToRequest{
Derive: types.DerivePayload{
IntrospectionSchema: introSchema,
Operation: deriveFrom,
ActionName: name,
},
}
sdlToResp, err := a.cliExtensionConfig.ConvertMetadataToSDL(sdlToReq)
if err != nil {
return errors.Wrap(err, "error in converting metadata to sdl")
}
defaultSDL = sdlToResp.SDL.Complete
}
graphqlFileContent = defaultSDL + "\n" + graphqlFileContent
data, err := editor.CaptureInputFromEditor(editor.GetPreferredEditorFromEnvironment, graphqlFileContent, "graphql")
if err != nil {
return errors.Wrap(err, "error in getting input from editor")
}
sdlFromReq := types.SDLFromRequest{
SDL: types.SDLPayload{
Complete: string(data),
},
}
sdlFromResp, err := a.cliExtensionConfig.ConvertSDLToMetadata(sdlFromReq)
if err != nil {
return errors.Wrap(err, "error in converting sdl to metadata")
}
currentActionNames := make([]string, 0)
for actionIndex, action := range sdlFromResp.Actions {
for _, currAction := range currentActionNames {
if currAction == action.Name {
return fmt.Errorf("action %s already exists in %s", action.Name, graphqlFileName)
}
}
currentActionNames = append(currentActionNames, action.Name)
for oldActionIndex, oldActionObj := range oldAction.Actions {
if action.Name == oldActionObj.Name {
sdlFromResp.Actions[actionIndex].Permissions = oldAction.Actions[oldActionIndex].Permissions
sdlFromResp.Actions[actionIndex].Definition.Timeout = oldAction.Actions[oldActionIndex].Definition.Timeout
sdlFromResp.Actions[actionIndex].Definition.Kind = oldAction.Actions[oldActionIndex].Definition.Kind
sdlFromResp.Actions[actionIndex].Definition.Type = oldAction.Actions[oldActionIndex].Definition.Type
sdlFromResp.Actions[actionIndex].Definition.Handler = oldAction.Actions[oldActionIndex].Definition.Handler
sdlFromResp.Actions[actionIndex].Definition.ForwardClientHeaders = oldAction.Actions[oldActionIndex].Definition.ForwardClientHeaders
sdlFromResp.Actions[actionIndex].Definition.Headers = oldAction.Actions[oldActionIndex].Definition.Headers
break
}
}
// Set kind and handler for action definition
if sdlFromResp.Actions[actionIndex].Definition.Kind == "" {
sdlFromResp.Actions[actionIndex].Definition.Kind = a.ActionConfig.Kind
}
if sdlFromResp.Actions[actionIndex].Definition.Handler == "" {
sdlFromResp.Actions[actionIndex].Definition.Handler = a.ActionConfig.HandlerWebhookBaseURL + "/" + action.Name
}
}
for customTypeIndex, customType := range sdlFromResp.Types.Enums {
for oldTypeObjIndex, oldTypeObj := range oldAction.CustomTypes.Enums {
if customType.Name == oldTypeObj.Name {
sdlFromResp.Types.Enums[customTypeIndex].Description = oldAction.CustomTypes.Enums[oldTypeObjIndex].Description
sdlFromResp.Types.Enums[customTypeIndex].Relationships = oldAction.CustomTypes.Enums[oldTypeObjIndex].Relationships
break
}
}
}
for customTypeIndex, customType := range sdlFromResp.Types.InputObjects {
for oldTypeObjIndex, oldTypeObj := range oldAction.CustomTypes.InputObjects {
if customType.Name == oldTypeObj.Name {
sdlFromResp.Types.InputObjects[customTypeIndex].Description = oldAction.CustomTypes.InputObjects[oldTypeObjIndex].Description
sdlFromResp.Types.InputObjects[customTypeIndex].Relationships = oldAction.CustomTypes.InputObjects[oldTypeObjIndex].Relationships
break
}
}
}
for customTypeIndex, customType := range sdlFromResp.Types.Objects {
for oldTypeObjIndex, oldTypeObj := range oldAction.CustomTypes.Objects {
if customType.Name == oldTypeObj.Name {
sdlFromResp.Types.Objects[customTypeIndex].Description = oldAction.CustomTypes.Objects[oldTypeObjIndex].Description
sdlFromResp.Types.Objects[customTypeIndex].Relationships = oldAction.CustomTypes.Objects[oldTypeObjIndex].Relationships
break
}
}
}
for customTypeIndex, customType := range sdlFromResp.Types.Scalars {
for oldTypeObjIndex, oldTypeObj := range oldAction.CustomTypes.Scalars {
if customType.Name == oldTypeObj.Name {
sdlFromResp.Types.Scalars[customTypeIndex].Description = oldAction.CustomTypes.Scalars[oldTypeObjIndex].Description
sdlFromResp.Types.Scalars[customTypeIndex].Relationships = oldAction.CustomTypes.Scalars[oldTypeObjIndex].Relationships
break
}
}
}
var common types.Common
common.Actions = sdlFromResp.Actions
common.CustomTypes = sdlFromResp.Types
common.SetExportDefault()
// write actions.yaml
commonByt, err := yaml.Marshal(common)
if err != nil {
return errors.Wrap(err, "error in marshalling common")
}
err = ioutil.WriteFile(filepath.Join(a.MetadataDir, actionsFileName), commonByt, 0644)
if err != nil {
return errors.Wrapf(err, "error in writing %s file", actionsFileName)
}
err = ioutil.WriteFile(filepath.Join(a.MetadataDir, graphqlFileName), data, 0644)
if err != nil {
return errors.Wrapf(err, "error in writing %s file", graphqlFileName)
}
return nil
}
func (a *ActionConfig) Codegen(name string, derivePld types.DerivePayload) error {
err := a.ensureCliExt()
if err != nil {
return err
}
graphqlFileContent, err := a.GetActionsGraphQLFileContent()
if err != nil {
return errors.Wrapf(err, "error in reading %s file", graphqlFileName)
}
data := types.ActionsCodegenRequest{
ActionName: name,
SDL: types.SDLPayload{
Complete: graphqlFileContent,
},
CodegenConfig: a.ActionConfig.Codegen,
Derive: derivePld,
}
if a.ActionConfig.Codegen.URI == "" {
data.CodegenConfig.URI = a.getActionsCodegenURI(data.CodegenConfig.Framework)
}
resp, err := a.cliExtensionConfig.GetActionsCodegen(data)
if err != nil {
return errors.Wrapf(err, "error in getting codegen for action %s", data.ActionName)
}
for _, file := range resp.Files {
err = ioutil.WriteFile(filepath.Join(a.ActionConfig.Codegen.OutputDir, file.Name), []byte(file.Content), 0644)
if err != nil {
return errors.Wrap(err, "error in writing codegen file")
}
}
return nil
}
func (a *ActionConfig) Validate() error {
return nil
}
func (a *ActionConfig) CreateFiles() error {
var common types.Common
data, err := yaml.Marshal(common)
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(a.MetadataDir, actionsFileName), data, 0644)
if err != nil {
return err
}
	graphQLData := []byte(``)
	err = ioutil.WriteFile(filepath.Join(a.MetadataDir, graphqlFileName), graphQLData, 0644)
if err != nil {
return err
}
return nil
}
func (a *ActionConfig) Build(metadata *yaml.MapSlice) error {
if !a.serverFeatureFlags.HasAction {
_, err := a.GetActionsFileContent()
if err == nil {
a.logger.WithField("metadata_plugin", "actions").Warnf("Skipping building %s", actionsFileName)
}
_, err = a.GetActionsGraphQLFileContent()
if err == nil {
a.logger.WithField("metadata_plugin", "actions").Warnf("Skipping building %s", graphqlFileName)
}
return nil
}
err := a.ensureCliExt()
if err != nil {
return err
}
// Read actions.graphql
graphqlFileContent, err := a.GetActionsGraphQLFileContent()
if err != nil {
return errors.Wrapf(err, "error in reading %s file", graphqlFileName)
}
sdlFromReq := types.SDLFromRequest{
SDL: types.SDLPayload{
Complete: graphqlFileContent,
},
}
sdlFromResp, err := a.cliExtensionConfig.ConvertSDLToMetadata(sdlFromReq)
if err != nil {
return errors.Wrap(err, "error in converting sdl to metadata")
}
// Read actions.yaml
oldAction, err := a.GetActionsFileContent()
if err != nil {
return errors.Wrapf(err, "error in reading %s", actionsFileName)
}
for actionIndex, action := range oldAction.Actions {
var isFound bool
for newActionIndex, newActionObj := range sdlFromResp.Actions {
if action.Name == newActionObj.Name {
isFound = true
sdlFromResp.Actions[newActionIndex].Permissions = oldAction.Actions[actionIndex].Permissions
sdlFromResp.Actions[newActionIndex].Definition.Timeout = oldAction.Actions[actionIndex].Definition.Timeout
sdlFromResp.Actions[newActionIndex].Definition.Kind = oldAction.Actions[actionIndex].Definition.Kind
sdlFromResp.Actions[newActionIndex].Definition.Handler = oldAction.Actions[actionIndex].Definition.Handler
sdlFromResp.Actions[newActionIndex].Definition.ForwardClientHeaders = oldAction.Actions[actionIndex].Definition.ForwardClientHeaders
sdlFromResp.Actions[newActionIndex].Definition.Headers = oldAction.Actions[actionIndex].Definition.Headers
break
}
}
if !isFound {
return fmt.Errorf("action %s is not present in %s", action.Name, graphqlFileName)
}
}
for customTypeIndex, customType := range oldAction.CustomTypes.Enums {
var isFound bool
for newTypeObjIndex, newTypeObj := range sdlFromResp.Types.Enums {
if customType.Name == newTypeObj.Name {
isFound = true
sdlFromResp.Types.Enums[newTypeObjIndex].Description = oldAction.CustomTypes.Enums[customTypeIndex].Description
sdlFromResp.Types.Enums[newTypeObjIndex].Relationships = oldAction.CustomTypes.Enums[customTypeIndex].Relationships
break
}
}
if !isFound {
return fmt.Errorf("custom type %s is not present in %s", customType.Name, graphqlFileName)
}
}
for customTypeIndex, customType := range oldAction.CustomTypes.InputObjects {
var isFound bool
for newTypeObjIndex, newTypeObj := range sdlFromResp.Types.InputObjects {
if customType.Name == newTypeObj.Name {
isFound = true
sdlFromResp.Types.InputObjects[newTypeObjIndex].Description = oldAction.CustomTypes.InputObjects[customTypeIndex].Description
sdlFromResp.Types.InputObjects[newTypeObjIndex].Relationships = oldAction.CustomTypes.InputObjects[customTypeIndex].Relationships
break
}
}
if !isFound {
return fmt.Errorf("custom type %s is not present in %s", customType.Name, graphqlFileName)
}
}
for customTypeIndex, customType := range oldAction.CustomTypes.Objects {
var isFound bool
for newTypeObjIndex, newTypeObj := range sdlFromResp.Types.Objects {
if customType.Name == newTypeObj.Name {
isFound = true
sdlFromResp.Types.Objects[newTypeObjIndex].Description = oldAction.CustomTypes.Objects[customTypeIndex].Description
sdlFromResp.Types.Objects[newTypeObjIndex].Relationships = oldAction.CustomTypes.Objects[customTypeIndex].Relationships
break
}
}
if !isFound {
return fmt.Errorf("custom type %s is not present in %s", customType.Name, graphqlFileName)
}
}
for customTypeIndex, customType := range oldAction.CustomTypes.Scalars {
var isFound bool
for newTypeObjIndex, newTypeObj := range sdlFromResp.Types.Scalars {
if customType.Name == newTypeObj.Name {
isFound = true
sdlFromResp.Types.Scalars[newTypeObjIndex].Description = oldAction.CustomTypes.Scalars[customTypeIndex].Description
sdlFromResp.Types.Scalars[newTypeObjIndex].Relationships = oldAction.CustomTypes.Scalars[customTypeIndex].Relationships
break
}
}
if !isFound {
return fmt.Errorf("custom type %s is not present in %s", customType.Name, graphqlFileName)
}
}
if len(sdlFromResp.Actions) != 0 {
actionItem := yaml.MapItem{
Key: "actions",
Value: sdlFromResp.Actions,
}
*metadata = append(*metadata, actionItem)
}
customTypesLen := len(sdlFromResp.Types.Enums) + len(sdlFromResp.Types.InputObjects) + len(sdlFromResp.Types.Objects) + len(sdlFromResp.Types.Scalars)
if customTypesLen != 0 {
customTypeItem := yaml.MapItem{
Key: "custom_types",
Value: sdlFromResp.Types,
}
*metadata = append(*metadata, customTypeItem)
}
return nil
}
func (a *ActionConfig) Export(metadata yaml.MapSlice) (map[string][]byte, error) {
if !a.serverFeatureFlags.HasAction {
a.logger.Debugf("Skipping creating %s and %s", actionsFileName, graphqlFileName)
return make(map[string][]byte), nil
}
err := a.ensureCliExt()
if err != nil {
return nil, err
}
var actions yaml.MapSlice
for _, item := range metadata {
k, ok := item.Key.(string)
if !ok || (k != "actions" && k != "custom_types") {
continue
}
actions = append(actions, item)
}
ymlByt, err := yaml.Marshal(actions)
if err != nil {
return nil, errors.Wrap(err, "error in marshalling actions, custom_types from metadata")
}
var common types.Common
err = yaml.Unmarshal(ymlByt, &common)
if err != nil {
return nil, errors.Wrap(err, "error in unmarshal to common")
}
var sdlToReq types.SDLToRequest
sdlToReq.Types = common.CustomTypes
sdlToReq.Actions = common.Actions
sdlToResp, err := a.cliExtensionConfig.ConvertMetadataToSDL(sdlToReq)
if err != nil {
return nil, errors.Wrap(err, "error in converting metadata to sdl")
}
common.SetExportDefault()
commonByt, err := yaml.Marshal(common)
if err != nil {
return nil, errors.Wrap(err, "error in marshaling common")
}
return map[string][]byte{
filepath.Join(a.MetadataDir, actionsFileName): commonByt,
filepath.Join(a.MetadataDir, graphqlFileName): []byte(sdlToResp.SDL.Complete),
}, nil
}
func (a *ActionConfig) Name() string {
return "actions"
}
func (a *ActionConfig) GetActionsFileContent() (content types.Common, err error) {
commonByt, err := ioutil.ReadFile(filepath.Join(a.MetadataDir, actionsFileName))
if err != nil {
return
}
err = yaml.Unmarshal(commonByt, &content)
return
}
func (a *ActionConfig) GetActionsGraphQLFileContent() (sdl string, err error) {
commonByt, err := ioutil.ReadFile(filepath.Join(a.MetadataDir, graphqlFileName))
if err != nil {
return
}
sdl = string(commonByt)
return
}
func (a *ActionConfig) getActionsCodegenURI(framework string) string {
return fmt.Sprintf(`https://raw.githubusercontent.com/%s/master/%s/actions-codegen.js`, util.ActionsCodegenOrg, framework)
}
|
{
cfg := &ActionConfig{
MetadataDir: baseDir,
ActionConfig: ec.Config.ActionConfig,
serverFeatureFlags: ec.Version.ServerFeatureFlags,
logger: ec.Logger,
cliExtensionConfig: cliextension.NewCLIExtensionConfig(cliext.BinPath(ec), ec.Logger),
ensureCliExt: func() error {
return cliext.Setup(ec)
},
}
return cfg
}
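// Usage sketch (hypothetical *cli.ExecutionContext `ec` and metadata dir
// name; error handling elided):
//
//	cfg := New(ec, "metadata")
//	if err := cfg.Create("myAction", nil, ""); err != nil {
//		// handle error
//	}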
|
dependency.go
|
package dependency
import (
"github.com/go-playground/validator/v10"
"github.com/rs/zerolog"
"github.com/sarulabs/di"
"os"
_config "socks/internal/test_server/config"
_handlers "socks/internal/test_server/handlers"
_logger "socks/internal/test_server/logger"
_picture "socks/internal/test_server/picture"
_server "socks/internal/test_server/server"
)
func Register(builder di.Builder) {
configPathDef := di.Def{
Name: "config_path",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
path, ok := os.LookupEnv("config_path")
if !ok {
return "/etc/kneesocks/test-server-config.json", nil
}
return path, nil
},
}
validatorDef := di.Def{
Name: "validator",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
return *validator.New(), nil
},
}
configDef := di.Def{
Name: "config",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
validate := ctn.Get("validator").(validator.Validate)
configPath := ctn.Get("config_path").(string)
return _config.NewConfig(validate, configPath)
},
}
zeroLoggerDef := di.Def{
Name: "zero_logger",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
cfg := ctn.Get("config").(_config.Config)
consoleLogger := zerolog.ConsoleWriter{
Out: os.Stdout,
TimeFormat: "2006-01-02 15:04:05",
}
file, err := os.OpenFile(cfg.Log.Path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
if err != nil {
return nil, err
}
return zerolog.New(zerolog.MultiLevelWriter(consoleLogger, file)).
With().
Timestamp().
Logger().
Level(0), nil
},
}
loggerDef := di.Def{
Name: "logger",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
logger := ctn.Get("zero_logger").(zerolog.Logger)
return _logger.NewLogger(logger)
},
}
connectionHandlerDef := di.Def{
Name: "connection_handler",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get("config").(_config.Config)
logger := ctn.Get("logger").(_logger.Logger)
sender := ctn.Get("picture_sender").(_picture.Sender)
return _handlers.NewConnectHandler(config, logger, sender)
},
}
packetHandlerDef := di.Def{
Name: "packet_handler",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get("config").(_config.Config)
logger := ctn.Get("logger").(_logger.Logger)
return _handlers.NewPacketHandler(config, logger)
},
}
bindHandlerDef := di.Def{
Name: "bind_handler",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get("config").(_config.Config)
logger := ctn.Get("logger").(_logger.Logger)
picture := ctn.Get("picture_sender").(_picture.Sender)
return _handlers.NewBindHandler(config, logger, picture)
},
}
serverDef := di.Def{
Name: "server",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get("config").(_config.Config)
logger := ctn.Get("logger").(_logger.Logger)
connectionHandler := ctn.Get("connection_handler").(_handlers.ConnectHandler)
packetHandler := ctn.Get("packet_handler").(_handlers.PacketHandler)
bindHandler := ctn.Get("bind_handler").(_handlers.BindHandler)
return _server.NewServer(config, connectionHandler, packetHandler, logger, bindHandler)
},
}
pictureSenderDef := di.Def{
Name: "picture_sender",
Scope: di.App,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get("config").(_config.Config)
logger := ctn.Get("logger").(_logger.Logger)
return _picture.NewSender(logger, config)
},
}
err := builder.Add(
configPathDef,
validatorDef,
configDef,
zeroLoggerDef,
loggerDef,
connectionHandlerDef,
packetHandlerDef,
bindHandlerDef,
serverDef,
|
)
if err != nil {
panic(err)
}
}
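// Usage sketch (assumes the sarulabs/di API imported above; error handling
// elided):
//
//	builder, _ := di.NewBuilder()
//	Register(*builder)
//	ctn := builder.Build()
//	server := ctn.Get("server") // type-assert to the concrete server type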
|
pictureSenderDef,
|
c.py
|
from collections import deque
def main():
# input
R, C = map(int, input().split())
sy, sx = map(int, input().split())
gy, gx = map(int, input().split())
|
cs = [[*input()] for _ in range(R)]
# compute
sy -= 1
sx -= 1
gy -= 1
gx -= 1
deq = deque([[sy, sx]])
dist = [[-1 for _ in range(C)] for _ in range(R)]
dist[sy][sx] = 0
route = 0
while deq:
y, x = deq.popleft()
tmp_dist = dist[y][x]
for dy, dx in [[-1,0], [0,1], [1,0], [0,-1]]:
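            # assumption: the grid is bordered by walls ('#'), so the
            # unchecked neighbor indexing below never goes out of range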
if y+dy==gy and x+dx==gx:
route = tmp_dist + 1
print(route)
exit()
elif cs[y+dy][x+dx]=='.' and dist[y+dy][x+dx]==-1:
dist[y+dy][x+dx] = tmp_dist + 1
deq.append([y+dy, x+dx])
# output
if __name__ == '__main__':
main()
| |
Question2_Part_1_To_2.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Train the language model on texts from the file Pride and Prejudice. Before using it to train the language model,
you need to first sentence segment, then tokenize, then lower case each line of the file using Spacy. Append
start-of-sentence token ’<s>’ and end-of-sentence ’</s>’ token to each sentence and put each sentence in its own line.
Use only words that appear more than once in this corpus and assign UNK tokens for the rest; you may also need to
pad sentences that are shorter than 5. Train the language model and save the trained model. Generate 10 examples
of text from it, starting from ’<s>’ token and ending at ’</s>’ token.
@author: shahab Sotudian
"""
import re
import pickle
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.lm.preprocessing import pad_both_ends
from collections import Counter
import math
# Functions ###########================-------------------
'''
############################################################
#### Piazza: calculate Perplexity
net.cuda()
net.eval()
H = 0
TOTAL_PROBs = 1
with torch.no_grad():
for Test_Sentence in Test_1_Preprocessed_Pride_Text_Perplexity:
H += len(Test_Sentence)
# Calculate for each sentence
Total_prob_Sentence = 1
for i,word in enumerate(Test_Sentence):
if i == len(Test_Sentence)-1:
continue
else:
if i==0:
h = net.init_hidden(1)
h = tuple([each.data for each in h])
else:
h = h_new
x = np.array([[word2idx[word]]])
inputs = torch.from_numpy(x)
inputs = inputs.cuda()
out, h_new = net(inputs, h)
# get the token probabilities
p = F.softmax(out, dim=1).data
p = p.cpu()
p = p.numpy()
p = p.reshape(p.shape[1],)
Prob_next_Word = p[word2idx[Test_Sentence[i+1]]] # P(w4|w1,w2,w3)
Total_prob_Sentence = Prob_next_Word * Total_prob_Sentence
TOTAL_PROBs = TOTAL_PROBs * Total_prob_Sentence
Perplexity = (1/TOTAL_PROBs)**(1/float(H))
############################################################
'''
def NLP_PreProcessing(text_main):
# sentence segmenting
sentences = nltk.sent_tokenize(text_main)
# Tokenization + lower casing
Tokenized_sentences = [word_tokenize(S.lower()) for S in sentences]
# Padding
Pad_Tokenized_sentences = [list(pad_both_ends(TS, n=2)) for TS in Tokenized_sentences]
return Pad_Tokenized_sentences
def NLP_PreProcessing_single(text_main):  # assumed name: the original definition was truncated here
|
# Tokenization + lower casing
Tokenized_sentences = word_tokenize(text_main.lower())
# Padding
Pad_Tokenized_sentences = [list(pad_both_ends(Tokenized_sentences, n=2))]
return Pad_Tokenized_sentences
def Equal_seq(text, seq_len):
sequences = []
if len(text) > seq_len:
for i in range(seq_len, (len(text)+1)):
seq = text[i-seq_len:i]
sequences.append(seq)
else:
sequences = [['_PAD']*(seq_len-len(text)) + text ]
return sequences
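# Example: Equal_seq(['<s>', 'it', 'is', '</s>'], 5) left-pads the short
# sentence to one window: [['_PAD', '<s>', 'it', 'is', '</s>']]; a 6-token
# sentence instead yields two overlapping windows of length 5.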
def get_batches(arr_x, arr_y, batch_size):
# iterate through the arrays
prv = 0
for n in range(batch_size, arr_x.shape[0], batch_size):
x = arr_x[prv:n,:]
y = arr_y[prv:n,:]
prv = n
yield x, y
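# Example: with arr_x of shape (10, 5) and batch_size=4 this yields two
# batches (rows 0-3 and 4-7); the trailing incomplete batch of 2 rows is
# dropped because the loop stops before it.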
class WordLSTM(nn.Module):
def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
self.emb_layer = nn.Embedding(vocab_size, 200)
## define the LSTM
self.lstm = nn.LSTM(200, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
## define a dropout layer
self.dropout = nn.Dropout(drop_prob)
## define the fully-connected layer
self.fc = nn.Linear(n_hidden, vocab_size)
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
## pass input through embedding layer
embedded = self.emb_layer(x)
## Get the outputs and the new hidden state from the lstm
lstm_output, hidden = self.lstm(embedded, hidden)
## pass through a dropout layer
out = self.dropout(lstm_output)
#out = out.contiguous().view(-1, self.n_hidden)
out = out.reshape(-1, self.n_hidden)
## put "out" through the fully-connected layer
out = self.fc(out)
# return the final output and the hidden state
return out, hidden
def init_hidden(self, batch_size):
''' initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
# if GPU is available
if (torch.cuda.is_available()):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
# if GPU is not available
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
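# Shape sketch for WordLSTM (batch-first): x (B, T) -> emb (B, T, 200)
# -> lstm (B, T, n_hidden) -> reshape (B*T, n_hidden) -> fc (B*T, vocab_size),
# which lines up with targets.view(-1) in the training loop below.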
def train(net, epochs, batch_size, lr, clip, print_every,XX,YY):
# optimizer
opt = torch.optim.Adam(net.parameters(), lr=lr)
# loss
criterion = nn.CrossEntropyLoss()
# push model to GPU
net.cuda()
counter = 0
net.train()
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size)
for x, y in get_batches(XX, YY, batch_size):
counter+= 1
# convert numpy arrays to PyTorch arrays
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
# push tensors to GPU
inputs, targets = inputs.cuda(), targets.cuda()
# detach hidden states
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
# get the output from the model
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output, targets.view(-1))
# back-propagate error
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
            # update weights
opt.step()
if counter % print_every == 0:
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter))
def predict(net, tkn, h=None, word2idx_Inp = None, idx2word_Inp =None ):
# tensor inputs
x = np.array([[word2idx_Inp[tkn]]])
inputs = torch.from_numpy(x)
# push to GPU
inputs = inputs.cuda()
# detach hidden state from history
h = tuple([each.data for each in h])
# get the output of the model
out, h = net(inputs, h)
# get the token probabilities
p = F.softmax(out, dim=1).data
p = p.cpu()
p = p.numpy()
p = p.reshape(p.shape[1],)
# get indices of top 3 values
top_n_idx = p.argsort()[-3:][::-1]
# randomly select one of the three indices
sampled_token_index = top_n_idx[random.sample([0,1,2],1)[0]]
# return the encoded value of the predicted char and the hidden state
return idx2word_Inp[sampled_token_index], h
# function to generate text
def sample(net, size, prime="<s>",word2idx_Inp = None, idx2word_Inp =None ):
# push to GPU
net.cuda()
net.eval()
# batch size is 1
h = net.init_hidden(1)
toks = prime.split()
# predict next token
for t in prime.split():
token, h = predict(net, t, h,word2idx_Inp,idx2word_Inp)
toks.append(token)
# predict subsequent tokens
if size == '</s>':
while(token!='</s>'):
token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)
toks.append(token)
else:
for i in range(size-1):
token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)
toks.append(token)
return ' '.join(toks)
def Testing(net, batch_size,Test_X,Test_Y):
net.eval()
criterion = nn.CrossEntropyLoss()
# initialize hidden state
h = net.init_hidden(batch_size)
test_loss = 0.
with torch.no_grad():
for x, y in get_batches(Test_X, Test_Y, batch_size):
# convert numpy arrays to PyTorch arrays
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
# push tensors to GPU
inputs, targets = inputs.cuda(), targets.cuda()
# detach hidden states
h = tuple([each.data for each in h])
# get the output from the model
output, h = net(inputs, h)
test_loss += criterion(output, targets.view(-1)).item()
test_loss = test_loss / ((len(Test_X) // batch_size)+1)
print('-' * 40)
print('Test loss {:5.2f} ------ Test perplexity {:8.2f}'.format(test_loss, math.exp(test_loss)))
print('-' * 40)
class WordLSTM_with_Glove(nn.Module):
def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
self.emb_layer = nn.Embedding(vocab_size_Q6,100, padding_idx=0)
self.emb_layer.weight.data.copy_(torch.from_numpy(embedding_matrix))
self.emb_layer.weight.requires_grad = False ## freeze embeddings
'''
self.emb_layer = nn.Embedding(vocab_size_Q6,100)
self.emb_layer.weight = nn.Parameter(torch.from_numpy(embedding_matrix).float())
'''
## define the LSTM
self.lstm = nn.LSTM(100, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
## define a dropout layer
self.dropout = nn.Dropout(drop_prob)
## define the fully-connected layer
self.fc = nn.Linear(n_hidden, vocab_size_Q6)
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
## pass input through embedding layer
embedded = self.emb_layer(x)
## Get the outputs and the new hidden state from the lstm
lstm_output, hidden = self.lstm(embedded, hidden)
## pass through a dropout layer
out = self.dropout(lstm_output)
#out = out.contiguous().view(-1, self.n_hidden)
out = out.reshape(-1, self.n_hidden)
## put "out" through the fully-connected layer
out = self.fc(out)
# return the final output and the hidden state
return out, hidden
def init_hidden(self, batch_size):
''' initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
# if GPU is available
if (torch.cuda.is_available()):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
# if GPU is not available
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
# Data ###########================-------------------
with open('prideAndPrejudice.txt') as f:
Pride_Text = [line.rstrip() for line in f]
# Q2.1 ###########================-------------------
# sentence segmenting + lower casing + Tokenization + Padding using function NLP_PreProcessing
Preprocessed_Pride_Text = []
for t in range(len(Pride_Text)):
Preprocessed_Pride_Text = Preprocessed_Pride_Text + NLP_PreProcessing(Pride_Text[t])
Length_of_Sequences = 5
Pride_Text_Equal_seqs_L5 = sum([Equal_seq(i,Length_of_Sequences) for i in Preprocessed_Pride_Text], [])
del t,f
# Create Vocab
words = Counter()
for i, sentence in enumerate(Preprocessed_Pride_Text):
for word in sentence:
words.update([word])
words = {k:v for k,v in words.items() if v>1} # Removing the words that only appear once
del i,sentence,word
words = sorted(words, key=words.get, reverse=True) # Sorting the words
words = ['_PAD','_UNK'] + words
word2idx = {o:i for i,o in enumerate(words)}
idx2word = {i:o for i,o in enumerate(words)}
# Looking up the mapping dictionary and assigning the index to the respective words
Pride_Text_Equal_seqs_INDICES_L5 =[]
for i, sentence in enumerate(Pride_Text_Equal_seqs_L5):
Pride_Text_Equal_seqs_INDICES_L5.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
X = []
Y = []
for S in Pride_Text_Equal_seqs_INDICES_L5:
X.append(S[:-1])
Y.append(S[1:])
x_int_L5 = np.array(X)
y_int_L5 = np.array(Y)
vocab_size = len(word2idx)
# Train Or Load LSTM
Do_want_To_Train = 0
batch_size = 320
epochs=20
lr=0.001
if Do_want_To_Train == 1:
net1 = WordLSTM() # instantiate the model
net1.cuda() # push the model to GPU
train(net1, epochs, batch_size, lr, 1, 50,x_int_L5,y_int_L5) # train the model
torch.save(net1, 'Q2_Part_1_Network.pt')
else:
net1 = torch.load('Q2_Part_1_Network.pt')
net1.eval()
print(net1)
# Generate text
for i in range(10):
print('=======================================')
print("- Example "+str(i+1)+": ",sample(net1, size='</s>' , prime="<s>", word2idx_Inp = word2idx, idx2word_Inp =idx2word ),'\n')
del X,Y,i,S,Do_want_To_Train
print('=' * 60)
# Q2.2 ###########================-------------------
with open('test_1.txt') as f:
test_1 = [line.rstrip() for line in f]
# sentence segmenting + lower casing + Tokenization + Padding using function NLP_PreProcessing_Test
Test_1_Preprocessed_Pride_Text = []
for t in range(len(test_1)):
Test_1_Preprocessed_Pride_Text = Test_1_Preprocessed_Pride_Text + NLP_PreProcessing_Test((test_1[t])[4:-5])
Test_1_Pride_Text_Equal_seqs = sum([Equal_seq(i,Length_of_Sequences) for i in Test_1_Preprocessed_Pride_Text], [])
del t,f
# Looking up the mapping dictionary and assigning the index to the respective words
Test_1_Pride_Text_Equal_seqs_INDICES =[]
for i, sentence in enumerate(Test_1_Pride_Text_Equal_seqs):
Test_1_Pride_Text_Equal_seqs_INDICES.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
Test_1_X = []
Test_1_Y = []
for S in Test_1_Pride_Text_Equal_seqs_INDICES:
Test_1_X.append(S[:-1])
Test_1_Y.append(S[1:])
Test_1_x_int = np.array(Test_1_X)
Test_1_y_int = np.array(Test_1_Y)
del Test_1_X,Test_1_Y,S
# Calculate Perplexity
Testing(net1, batch_size ,Test_1_x_int,Test_1_y_int)
del Pride_Text,Length_of_Sequences
|
main.go
|
package main
import (
_ "embed"
"fmt"
"regexp"
"strings"
. "github.com/beanz/advent/lib-go"
)
//go:embed input.txt
var input []byte
var (
finishedMatch = regexp.MustCompile(`^(\d+)$`)
tetheredPlus = regexp.MustCompile(`^(\d+) \+ (\d+)`)
tetheredMult = regexp.MustCompile(`^(\d+) \* (\d+)`)
untetheredPlus = regexp.MustCompile(`(\d+) \+ (\d+)`)
bracketMatch = regexp.MustCompile(`\(([^()]+)\)`)
)
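// Evaluation strategy (sketch): each expression string is rewritten in place
// until a bare number remains. Innermost parentheses are reduced first; part 1
// then folds the anchored "a + b"/"a * b" at the start of the string, giving
// strict left-to-right evaluation, while part 2 folds any unanchored "a + b"
// before the anchored multiplication, making addition bind tighter.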
func ReadNInt64s(s string, expected int, msg string) []int64 {
nums := SimpleReadInt64s(s)
if len(nums) != expected {
panic(msg)
}
return nums
}
func Part1Math(s string) int64 {
if DEBUG() {
fmt.Printf("P1: %s\n", s)
}
if m := finishedMatch.FindStringSubmatch(s); m != nil {
if DEBUG() {
fmt.Printf("Just number: %s\n", s)
}
		nums := ReadNInt64s(m[0], 1, "invalid answer "+s)
return nums[0]
}
if m := bracketMatch.FindStringSubmatch(s); m != nil {
if DEBUG() {
fmt.Printf("(): %s => %s\n", s, m[1])
}
return Part1Math(strings.Replace(s,
m[0], fmt.Sprintf("%d", Part1Math(m[1])), 1))
}
if m := tetheredPlus.FindStringSubmatch(s); m != nil {
if DEBUG() {
fmt.Printf("+: %s => %s\n", s, m[0])
}
nums := ReadNInt64s(m[0], 2, "invalid + operands in "+m[0])
return Part1Math(strings.Replace(s,
m[0], fmt.Sprintf("%d", nums[0]+nums[1]), 1))
}
if m := tetheredMult.FindStringSubmatch(s); m != nil {
if DEBUG() {
fmt.Printf("*: %s => %s\n", s, m[0])
}
nums := ReadNInt64s(m[0], 2, "invalid * operands in "+m[0])
return Part1Math(strings.Replace(s,
m[0], fmt.Sprintf("%d", nums[0]*nums[1]), 1))
}
return 1
}
func Part1(lines []string) int64 {
var s int64
for _, sum := range lines {
s += Part1Math(sum)
}
return s
}
func Part2Math(s string) int64 {
if DEBUG() {
fmt.Printf("P1: %s\n", s)
}
if m := finishedMatch.FindStringSubmatch(s); m != nil {
		if DEBUG() {
			fmt.Printf("Just number: %s\n", s)
		}
		nums := ReadNInt64s(m[0], 1, "invalid answer "+s)
return nums[0]
}
if m := bracketMatch.FindStringSubmatch(s); m != nil {
if DEBUG() {
fmt.Printf("(): %s => %s\n", s, m[1])
}
return Part2Math(strings.Replace(s,
m[0], fmt.Sprintf("%d", Part2Math(m[1])), 1))
}
if m := untetheredPlus.FindStringSubmatch(s); m != nil {
if DEBUG() {
fmt.Printf("+: %s => %s\n", s, m[0])
}
nums := ReadNInt64s(m[0], 2, "invalid + operands in "+m[0])
return Part2Math(strings.Replace(s,
m[0], fmt.Sprintf("%d", nums[0]+nums[1]), 1))
}
if m := tetheredMult.FindStringSubmatch(s); m != nil {
if DEBUG() {
fmt.Printf("*: %s => %s\n", s, m[0])
}
nums := ReadNInt64s(m[0], 2, "invalid * operands in "+m[0])
return Part2Math(strings.Replace(s,
m[0], fmt.Sprintf("%d", nums[0]*nums[1]), 1))
}
return 1
}
func Part2(lines []string) int64 {
var s int64
for _, sum := range lines {
s += Part2Math(sum)
}
return s
}
func main() {
lines := InputLines(input)
p1 := Part1(lines)
if !benchmark {
fmt.Printf("Part 1: %d\n", p1)
}
p2 := Part2(lines)
if !benchmark {
fmt.Printf("Part 2: %d\n", p2)
}
}
var benchmark = false
|
sqlalchemy_execution_engine.py
|
import copy
import datetime
import logging
import traceback
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from urllib.parse import urlparse
from great_expectations._version import get_versions # isort:skip
__version__ = get_versions()["version"] # isort:skip
del get_versions # isort:skip
from great_expectations.core import IDDict
from great_expectations.core.batch import BatchMarkers, BatchSpec
from great_expectations.core.batch_spec import (
RuntimeQueryBatchSpec,
SqlAlchemyDatasourceBatchSpec,
)
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.data_context.types.base import ConcurrencyConfig
from great_expectations.exceptions import (
DatasourceKeyPairAuthBadPassphraseError,
ExecutionEngineError,
GreatExpectationsError,
InvalidBatchSpecError,
InvalidConfigError,
)
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.expectations.row_conditions import parse_condition_to_sqlalchemy
from great_expectations.util import filter_properties_dict, import_library_module
from great_expectations.validator.metric_configuration import MetricConfiguration
logger = logging.getLogger(__name__)
try:
import sqlalchemy as sa
except ImportError:
sa = None
try:
from sqlalchemy.engine import reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import OperationalError
from sqlalchemy.sql import Selectable
from sqlalchemy.sql.elements import TextClause, quoted_name
except ImportError:
reflection = None
DefaultDialect = None
Selectable = None
TextClause = None
quoted_name = None
OperationalError = None
try:
import psycopg2
import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2
except (ImportError, KeyError):
sqlalchemy_psycopg2 = None
try:
import sqlalchemy_redshift.dialect
except ImportError:
sqlalchemy_redshift = None
try:
import snowflake.sqlalchemy.snowdialect
if sa:
# Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
sa.dialects.registry.register("snowflake", "snowflake.sqlalchemy", "dialect")
except (ImportError, KeyError, AttributeError):
snowflake = None
try:
import pybigquery.sqlalchemy_bigquery
###
# NOTE: 20210816 - jdimatteo: A convention we rely on is for SqlAlchemy dialects
# to define an attribute "dialect". A PR has been submitted to fix this upstream
# with https://github.com/googleapis/python-bigquery-sqlalchemy/pull/251. If that
# fix isn't present, add this "dialect" attribute here:
if not hasattr(pybigquery.sqlalchemy_bigquery, "dialect"):
pybigquery.sqlalchemy_bigquery.dialect = (
pybigquery.sqlalchemy_bigquery.BigQueryDialect
)
# Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in Azure (our CI/CD pipeline) in certain cases, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
sa.dialects.registry.register(
"bigquery", "pybigquery.sqlalchemy_bigquery", "dialect"
)
try:
getattr(pybigquery.sqlalchemy_bigquery, "INTEGER")
bigquery_types_tuple = None
except AttributeError:
# In older versions of the pybigquery driver, types were not exported, so we use a hack
logger.warning(
"Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later."
)
from collections import namedtuple
BigQueryTypes = namedtuple(
"BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map)
)
bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)
except (ImportError, AttributeError):
bigquery_types_tuple = None
pybigquery = None
def _get_dialect_type_module(dialect):
"""Given a dialect, returns the dialect type, which is defines the engine/system that is used to communicates
with the database/database implementation. Currently checks for RedShift/BigQuery dialects"""
if dialect is None:
logger.warning(
"No sqlalchemy dialect found; relying in top-level sqlalchemy types."
)
return sa
try:
# Redshift does not (yet) export types to top level; only recognize base SA types
if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect):
return dialect.sa
except (TypeError, AttributeError):
pass
# Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple
try:
if (
isinstance(
dialect,
pybigquery.sqlalchemy_bigquery.BigQueryDialect,
)
and bigquery_types_tuple is not None
):
return bigquery_types_tuple
except (TypeError, AttributeError):
pass
return dialect
class SqlAlchemyExecutionEngine(ExecutionEngine):
def __init__(
self,
name=None,
credentials=None,
data_context=None,
engine=None,
connection_string=None,
url=None,
batch_data_dict=None,
create_temp_table=True,
concurrency: Optional[ConcurrencyConfig] = None,
**kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine
):
"""Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the
desired database. Also initializes the dialect to be used and configures usage statistics.
Args:
name (str): \
The name of the SqlAlchemyExecutionEngine
credentials: \
If the Execution Engine is not provided, the credentials can be used to build the Execution
Engine. If the Engine is provided, it will be used instead
data_context (DataContext): \
An object representing a Great Expectations project that can be used to access Expectation
Suites and the Project Data itself
engine (Engine): \
A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, useful if an
Engine has already been configured and should be reused. Will override Credentials
if provided.
connection_string (string): \
                If neither the engine nor the credentials have been provided, a connection string can be used
to access the data. This will be overridden by both the engine and credentials if those are
provided.
url (string): \
                If neither the engine, the credentials, nor the connection_string have been provided,
a url can be used to access the data. This will be overridden by all other configuration
options if any are provided.
concurrency (ConcurrencyConfig): Concurrency config used to configure the sqlalchemy engine.
"""
super().__init__(name=name, batch_data_dict=batch_data_dict)
self._name = name
self._credentials = credentials
self._connection_string = connection_string
self._url = url
self._create_temp_table = create_temp_table
if engine is not None:
if credentials is not None:
logger.warning(
"Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. "
"Ignoring credentials."
)
self.engine = engine
else:
concurrency = (
concurrency if concurrency is not None else ConcurrencyConfig()
)
concurrency.add_sqlalchemy_create_engine_parameters(kwargs)
if credentials is not None:
self.engine = self._build_engine(credentials=credentials, **kwargs)
elif connection_string is not None:
self.engine = sa.create_engine(connection_string, **kwargs)
elif url is not None:
self.drivername = urlparse(url).scheme
self.engine = sa.create_engine(url, **kwargs)
else:
raise InvalidConfigError(
"Credentials or an engine are required for a SqlAlchemyExecutionEngine."
)
# Get the dialect **for purposes of identifying types**
if self.engine.dialect.name.lower() in [
"postgresql",
"mysql",
"sqlite",
"oracle",
"mssql",
]:
# These are the officially included and supported dialects by sqlalchemy
self.dialect_module = import_library_module(
module_name="sqlalchemy.dialects." + self.engine.dialect.name
)
elif self.engine.dialect.name.lower() == "snowflake":
self.dialect_module = import_library_module(
module_name="snowflake.sqlalchemy.snowdialect"
)
elif self.engine.dialect.name.lower() == "redshift":
self.dialect_module = import_library_module(
module_name="sqlalchemy_redshift.dialect"
)
elif self.engine.dialect.name.lower() == "bigquery":
self.dialect_module = import_library_module(
module_name="pybigquery.sqlalchemy_bigquery"
)
else:
self.dialect_module = None
# <WILL> 20210726 - engine_backup is used by the snowflake connector, which requires connection and engine
# to be closed and disposed separately. Currently self.engine can refer to either a Connection or Engine,
# depending on the backend. This will need to be cleaned up in an upcoming refactor, so that Engine and
# Connection can be handled separately.
self._engine_backup = None
if self.engine and self.engine.dialect.name.lower() in [
"sqlite",
"mssql",
"snowflake",
"mysql",
]:
self._engine_backup = self.engine
# sqlite/mssql temp tables only persist within a connection so override the engine
self.engine = self.engine.connect()
# Send a connect event to provide dialect type
if data_context is not None and getattr(
data_context, "_usage_statistics_handler", None
):
handler = data_context._usage_statistics_handler
handler.send_usage_message(
event="execution_engine.sqlalchemy.connect",
event_payload={
"anonymized_name": handler._execution_engine_anonymizer.anonymize(
self.name
),
"sqlalchemy_dialect": self.engine.name,
},
success=True,
)
# Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values,
# and set the instance "_config" variable equal to the resulting dictionary.
self._config = {
"name": name,
"credentials": credentials,
"data_context": data_context,
"engine": engine,
"connection_string": connection_string,
"url": url,
"batch_data_dict": batch_data_dict,
"module_name": self.__class__.__module__,
"class_name": self.__class__.__name__,
}
self._config.update(kwargs)
filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
@property
def credentials(self):
return self._credentials
@property
def connection_string(self):
return self._connection_string
@property
def url(self):
return self._url
def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine":
"""
        Using a set of given credentials, constructs an Execution Engine, connecting to a database using a URL or a
private key path.
"""
# Update credentials with anything passed during connection time
drivername = credentials.pop("drivername")
schema_name = credentials.pop("schema_name", None)
if schema_name is not None:
logger.warning(
"schema_name specified creating a URL with schema is not supported. Set a default "
"schema on the user connecting to your database."
)
create_engine_kwargs = kwargs
connect_args = credentials.pop("connect_args", None)
if connect_args:
create_engine_kwargs["connect_args"] = connect_args
if "private_key_path" in credentials:
options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
drivername, credentials
)
else:
options = sa.engine.url.URL(drivername, **credentials)
self.drivername = drivername
engine = sa.create_engine(options, **create_engine_kwargs)
return engine
def _get_sqlalchemy_key_pair_auth_url(
self, drivername: str, credentials: dict
) -> Tuple["sa.engine.url.URL", Dict]:
"""
Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided
        values into a private key. If the passphrase is incorrect, this will fail and an exception is raised.
Args:
drivername(str) - The name of the driver class
credentials(dict) - A dictionary of database credentials used to access the database
Returns:
a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.
"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
private_key_path = credentials.pop("private_key_path")
private_key_passphrase = credentials.pop("private_key_passphrase")
with Path(private_key_path).expanduser().resolve().open(mode="rb") as key:
try:
p_key = serialization.load_pem_private_key(
key.read(),
password=private_key_passphrase.encode()
if private_key_passphrase
else None,
backend=default_backend(),
)
except ValueError as e:
if "incorrect password" in str(e).lower():
raise DatasourceKeyPairAuthBadPassphraseError(
datasource_name="SqlAlchemyDatasource",
message="Decryption of key failed, was the passphrase incorrect?",
) from e
else:
raise e
pkb = p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
credentials_driver_name = credentials.pop("drivername", None)
create_engine_kwargs = {"connect_args": {"private_key": pkb}}
return (
sa.engine.url.URL(drivername or credentials_driver_name, **credentials),
create_engine_kwargs,
)
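    # A sketch of the credentials shape this path expects (keys other than
    # drivername/private_key_path/private_key_passphrase are illustrative):
    #   {"drivername": "snowflake", "username": "...", "host": "...",
    #    "private_key_path": "~/keys/rsa_key.p8", "private_key_passphrase": "..."}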
def get_domain_records(
self,
domain_kwargs: Dict,
) -> Selectable:
"""
Uses the given domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to
obtain and/or query a batch. Returns in the format of an SqlAlchemy table/column(s) object.
Args:
domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
Returns:
An SqlAlchemy table/column(s) (the selectable object for obtaining data on which to compute)
"""
batch_id = domain_kwargs.get("batch_id")
if batch_id is None:
# We allow no batch id specified if there is only one batch
if self.active_batch_data:
data_object = self.active_batch_data
else:
raise GreatExpectationsError(
"No batch is specified, but could not identify a loaded batch."
)
else:
if batch_id in self.loaded_batch_data_dict:
data_object = self.loaded_batch_data_dict[batch_id]
else:
raise GreatExpectationsError(
f"Unable to find batch with batch_id {batch_id}"
)
if "table" in domain_kwargs and domain_kwargs["table"] is not None:
# TODO: Add logic to handle record_set_name once implemented
            # (i.e. multiple record sets (tables) in one batch)
if domain_kwargs["table"] != data_object.selectable.name:
selectable = sa.Table(
domain_kwargs["table"],
sa.MetaData(),
                    schema=data_object._schema_name,
)
else:
selectable = data_object.selectable
elif "query" in domain_kwargs:
raise ValueError(
"query is not currently supported by SqlAlchemyExecutionEngine"
)
else:
selectable = data_object.selectable
# Filtering by row condition.
if (
"row_condition" in domain_kwargs
and domain_kwargs["row_condition"] is not None
):
condition_parser = domain_kwargs["condition_parser"]
if condition_parser == "great_expectations__experimental__":
parsed_condition = parse_condition_to_sqlalchemy(
domain_kwargs["row_condition"]
)
selectable = sa.select(
"*", from_obj=selectable, whereclause=parsed_condition
)
else:
raise GreatExpectationsError(
"SqlAlchemyExecutionEngine only supports the great_expectations condition_parser."
)
if "column" in domain_kwargs:
return selectable
if (
"column_A" in domain_kwargs
and "column_B" in domain_kwargs
and "ignore_row_if" in domain_kwargs
):
if self.active_batch_data.use_quoted_name:
# Checking if case-sensitive and using appropriate name
# noinspection PyPep8Naming
column_A_name = quoted_name(domain_kwargs["column_A"], quote=True)
# noinspection PyPep8Naming
column_B_name = quoted_name(domain_kwargs["column_B"], quote=True)
else:
# noinspection PyPep8Naming
column_A_name = domain_kwargs["column_A"]
# noinspection PyPep8Naming
column_B_name = domain_kwargs["column_B"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "both_values_are_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.and_(
sa.column(column_A_name) == None,
sa.column(column_B_name) == None,
)
)
)
)
elif ignore_row_if == "either_value_is_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.or_(
sa.column(column_A_name) == None,
sa.column(column_B_name) == None,
)
)
)
)
else:
if ignore_row_if not in ["neither", "never"]:
raise ValueError(
f'Unrecognized value of ignore_row_if ("{ignore_row_if}").'
)
if ignore_row_if == "never":
warnings.warn(
f"""The correct "no-action" value of the "ignore_row_if" directive for the column pair case is \
"neither" (the use of "{ignore_row_if}" will be deprecated). Please update code accordingly.
""",
DeprecationWarning,
)
return selectable
if "column_list" in domain_kwargs and "ignore_row_if" in domain_kwargs:
if self.active_batch_data.use_quoted_name:
# Checking if case-sensitive and using appropriate name
column_list = [
                quoted_name(column_name, quote=True)
for column_name in domain_kwargs["column_list"]
]
else:
column_list = domain_kwargs["column_list"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "all_values_are_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.and_(
*(
sa.column(column_name) == None
for column_name in column_list
)
)
)
)
)
elif ignore_row_if == "any_value_is_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.or_(
*(
sa.column(column_name) == None
for column_name in column_list
)
)
)
)
)
else:
if ignore_row_if != "never":
raise ValueError(
f'Unrecognized value of ignore_row_if ("{ignore_row_if}").'
)
return selectable
return selectable
def get_compute_domain(
self,
domain_kwargs: Dict,
domain_type: Union[str, MetricDomainTypes],
accessor_keys: Optional[Iterable[str]] = None,
) -> Tuple[Selectable, dict, dict]:
"""Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object.
Args:
domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would
like to be using, or a corresponding string value representing it. String types include "identity",
"column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the
class MetricDomainTypes.
accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when
describing the domain and simply transferred with their associated values into accessor_domain_kwargs.
Returns:
            A tuple consisting of the SqlAlchemy selectable, the compute domain kwargs, and the accessor domain kwargs
"""
selectable = self.get_domain_records(
domain_kwargs=domain_kwargs,
)
# Extracting value from enum if it is given for future computation
domain_type = MetricDomainTypes(domain_type)
        # Warn the user that accessor keys will be ignored for any domain type other than "table"
if (
domain_type != MetricDomainTypes.TABLE
and accessor_keys is not None
and len(list(accessor_keys)) > 0
):
logger.warning(
'Accessor keys ignored since Metric Domain Type is not "table"'
)
compute_domain_kwargs = copy.deepcopy(domain_kwargs)
accessor_domain_kwargs = {}
if domain_type == MetricDomainTypes.TABLE:
if accessor_keys is not None and len(list(accessor_keys)) > 0:
for key in accessor_keys:
accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key)
if len(domain_kwargs.keys()) > 0:
# Warn user if kwarg not "normal".
unexpected_keys: set = set(compute_domain_kwargs.keys()).difference(
{
"batch_id",
"table",
"row_condition",
"condition_parser",
}
)
if len(unexpected_keys) > 0:
unexpected_keys_str: str = ", ".join(
map(lambda element: f'"{element}"', unexpected_keys)
)
logger.warning(
f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".'
)
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.COLUMN:
if "column" not in compute_domain_kwargs:
raise GreatExpectationsError(
"Column not provided in compute_domain_kwargs"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column"] = quoted_name(
compute_domain_kwargs.pop("column"), quote=True
)
else:
accessor_domain_kwargs["column"] = compute_domain_kwargs.pop("column")
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.COLUMN_PAIR:
if not (
"column_A" in compute_domain_kwargs
and "column_B" in compute_domain_kwargs
):
raise GreatExpectationsError(
"column_A or column_B not found within compute_domain_kwargs"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column_A"] = quoted_name(
compute_domain_kwargs.pop("column_A"), quote=True
)
accessor_domain_kwargs["column_B"] = quoted_name(
compute_domain_kwargs.pop("column_B"), quote=True
)
else:
accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop(
"column_A"
)
accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop(
"column_B"
)
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.MULTICOLUMN:
if "column_list" not in domain_kwargs:
raise GreatExpectationsError(
"column_list not found within domain_kwargs"
)
column_list = compute_domain_kwargs.pop("column_list")
if len(column_list) < 2:
raise GreatExpectationsError(
"column_list must contain at least 2 columns"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column_list"] = [
quoted_name(column_name, quote=True) for column_name in column_list
]
else:
accessor_domain_kwargs["column_list"] = column_list
return selectable, compute_domain_kwargs, accessor_domain_kwargs
# Letting selectable fall through
return selectable, compute_domain_kwargs, accessor_domain_kwargs
def resolve_metric_bundle(
self,
        metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict, dict]],
) -> dict:
"""For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds
bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail
if bundling the metrics together is not possible.
Args:
            metric_fn_bundle (Iterable[Tuple[MetricConfiguration, Callable, dict, dict, dict]]): \
                An iterable of bundled metrics, each supplying a MetricProvider's MetricConfiguration (its unique
                identifier), its metric provider function (the function that actually executes the metric), and the
                associated compute-domain, accessor-domain, and metric-provider keyword arguments.
Returns:
A dictionary of metric names and their corresponding now-queried values.
"""
resolved_metrics = {}
# We need a different query for each domain (where clause).
queries: Dict[Tuple, dict] = {}
for (
metric_to_resolve,
engine_fn,
compute_domain_kwargs,
accessor_domain_kwargs,
metric_provider_kwargs,
) in metric_fn_bundle:
if not isinstance(compute_domain_kwargs, IDDict):
compute_domain_kwargs = IDDict(compute_domain_kwargs)
domain_id = compute_domain_kwargs.to_id()
if domain_id not in queries:
queries[domain_id] = {
"select": [],
"ids": [],
"domain_kwargs": compute_domain_kwargs,
}
queries[domain_id]["select"].append(
engine_fn.label(metric_to_resolve.metric_name)
)
queries[domain_id]["ids"].append(metric_to_resolve.id)
for query in queries.values():
domain_kwargs = query["domain_kwargs"]
selectable = self.get_domain_records(
domain_kwargs=domain_kwargs,
)
assert len(query["select"]) == len(query["ids"])
try:
res = self.engine.execute(
sa.select(query["select"]).select_from(selectable)
).fetchall()
logger.debug(
f"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(domain_kwargs).to_id()}"
)
except OperationalError as oe:
exception_message: str = "An SQL execution Exception occurred. "
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(oe).__name__}: "{str(oe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
raise ExecutionEngineError(message=exception_message)
assert (
len(res) == 1
), "all bundle-computed metrics must be single-value statistics"
assert len(query["ids"]) == len(
res[0]
), "unexpected number of metrics returned"
for idx, id in enumerate(query["ids"]):
resolved_metrics[id] = convert_to_json_serializable(res[0][idx])
return resolved_metrics
def close(self):
"""
Note: Will 20210729
This is a helper function that will close and dispose Sqlalchemy objects that are used to connect to a database.
Databases like Snowflake require the connection and engine to be instantiated and closed separately, and not
doing so has caused problems with hanging connections.
        Currently the ExecutionEngine does not support handling connections and engines separately, and will actually
        override the engine with a connection in some cases, obscuring which object is actually used by the
        ExecutionEngine to connect to the external database. This will be handled in an upcoming refactor, which will
allow this function to eventually become:
self.connection.close()
self.engine.dispose()
More background can be found here: https://github.com/great-expectations/great_expectations/pull/3104/
"""
if self._engine_backup:
self.engine.close()
self._engine_backup.dispose()
else:
self.engine.dispose()
### Splitter methods for partitioning tables ###
def _split_on_whole_table(self, table_name: str, batch_identifiers: dict):
"""'Split' by returning the whole table"""
# return sa.column(column_name) == batch_identifiers[column_name]
return 1 == 1
def _split_on_column_value(
self, table_name: str, column_name: str, batch_identifiers: dict
):
"""Split using the values in the named column"""
return sa.column(column_name) == batch_identifiers[column_name]
def _split_on_converted_datetime(
self,
table_name: str,
column_name: str,
batch_identifiers: dict,
date_format_string: str = "%Y-%m-%d",
):
"""Convert the values in the named column to the given date_format, and split on that"""
return (
sa.func.strftime(
date_format_string,
sa.column(column_name),
)
== batch_identifiers[column_name]
)
def _split_on_divided_integer(
self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
return (
sa.cast(sa.column(column_name) / divisor, sa.Integer)
== batch_identifiers[column_name]
)
def _split_on_mod_integer(
self, table_name: str, column_name: str, mod: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
return sa.column(column_name) % mod == batch_identifiers[column_name]
def _split_on_multi_column_values(
self, table_name: str, column_names: List[str], batch_identifiers: dict
):
"""Split on the joint values in the named columns"""
return sa.and_(
*(
sa.column(column_name) == column_value
for column_name, column_value in batch_identifiers.items()
)
)
def _split_on_hashed_column(
self,
table_name: str,
column_name: str,
hash_digits: int,
batch_identifiers: dict,
):
"""Split on the hashed value of the named column"""
return (
sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits)
== batch_identifiers[column_name]
)
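    # Usage sketch (illustrative table/values): the clauses returned by the
    # splitters above become WHERE conditions in _build_selectable_from_batch_spec, e.g.
    #   clause = self._split_on_column_value("events", "date", {"date": "2021-01-01"})
    #   sa.select("*").select_from(sa.table("events")).where(clause)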
### Sampling methods ###
# _sample_using_limit
# _sample_using_random
# _sample_using_mod
# _sample_using_a_list
# _sample_using_md5
def _sample_using_random(
self,
p: float = 0.1,
):
"""Take a random sample of rows, retaining proportion p
Note: the Random function behaves differently on different dialects of SQL
"""
return sa.func.random() < p
def _sample_using_mod(
self,
column_name,
mod: int,
value: int,
):
"""Take the mod of named column, and only keep rows that match the given value"""
return sa.column(column_name) % mod == value
def _sample_using_a_list(
self,
column_name: str,
value_list: list,
):
"""Match the values in the named column against value_list, and only keep the matches"""
return sa.column(column_name).in_(value_list)
def _sample_using_md5(
self,
column_name: str,
hash_digits: int = 1,
hash_value: str = "f",
):
"""Hash the values in the named column, and split on that"""
return (
sa.func.right(
sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits
)
== hash_value
)
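    # e.g. _sample_using_md5("id", hash_digits=1, hash_value="f") keeps only rows
    # whose md5(id) ends in "f" -- roughly 1/16 of the table (illustrative column).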
def _build_selectable_from_batch_spec(self, batch_spec) -> Union[Selectable, str]:
table_name: str = batch_spec["table_name"]
if "splitter_method" in batch_spec:
splitter_fn = getattr(self, batch_spec["splitter_method"])
split_clause = splitter_fn(
table_name=table_name,
batch_identifiers=batch_spec["batch_identifiers"],
**batch_spec["splitter_kwargs"],
)
else:
split_clause = True
if "sampling_method" in batch_spec:
if batch_spec["sampling_method"] == "_sample_using_limit":
                # SQLAlchemy's semantics for LIMIT are different from those of normal WHERE clauses,
# so the business logic for building the query needs to be different.
if self.engine.dialect.name.lower() == "oracle":
# limit doesn't compile properly for oracle so we will append rownum to query string later
raw_query = (
sa.select("*")
.select_from(
sa.table(
table_name, schema=batch_spec.get("schema_name", None)
)
)
.where(split_clause)
)
query = str(
raw_query.compile(
self.engine, compile_kwargs={"literal_binds": True}
)
)
query += "\nAND ROWNUM <= %d" % batch_spec["sampling_kwargs"]["n"]
return query
else:
return (
sa.select("*")
.select_from(
sa.table(
table_name, schema=batch_spec.get("schema_name", None)
)
)
.where(split_clause)
.limit(batch_spec["sampling_kwargs"]["n"])
)
else:
sampler_fn = getattr(self, batch_spec["sampling_method"])
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(
sa.and_(
split_clause,
sampler_fn(**batch_spec["sampling_kwargs"]),
)
)
)
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(split_clause)
)
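    # A batch_spec sketch exercising both the splitter and the sampler paths
    # (table/column values are illustrative):
    #   {"table_name": "events", "schema_name": "public",
    #    "splitter_method": "_split_on_column_value",
    #    "splitter_kwargs": {"column_name": "date"},
    #    "batch_identifiers": {"date": "2021-01-01"},
    #    "sampling_method": "_sample_using_mod",
    #    "sampling_kwargs": {"column_name": "id", "mod": 10, "value": 0}}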
def get_batch_data_and_markers(
self, batch_spec: BatchSpec
) -> Tuple[Any, BatchMarkers]:
if not isinstance(
batch_spec, (SqlAlchemyDatasourceBatchSpec, RuntimeQueryBatchSpec)
):
raise InvalidBatchSpecError(
f"""SqlAlchemyExecutionEngine accepts batch_spec only of type SqlAlchemyDatasourceBatchSpec or
RuntimeQueryBatchSpec (illegal type "{str(type(batch_spec))}" was received).
"""
)
batch_data: Optional[SqlAlchemyBatchData] = None
batch_markers: BatchMarkers = BatchMarkers(
{
"ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
}
)
temp_table_name: Optional[str]
if "bigquery_temp_table" in batch_spec:
temp_table_name = batch_spec.get("bigquery_temp_table")
else:
temp_table_name = None
source_table_name = batch_spec.get("table_name", None)
source_schema_name = batch_spec.get("schema_name", None)
if isinstance(batch_spec, RuntimeQueryBatchSpec):
# query != None is already checked when RuntimeQueryBatchSpec is instantiated
query: str = batch_spec.query
batch_spec.query = "SQLQuery"
batch_data = SqlAlchemyBatchData(
execution_engine=self,
query=query,
temp_table_name=temp_table_name,
create_temp_table=batch_spec.get(
"create_temp_table", self._create_temp_table
),
source_table_name=source_table_name,
source_schema_name=source_schema_name,
)
elif isinstance(batch_spec, SqlAlchemyDatasourceBatchSpec):
if self.engine.dialect.name.lower() == "oracle":
selectable: str = self._build_selectable_from_batch_spec(
batch_spec=batch_spec
)
else:
selectable: Selectable = self._build_selectable_from_batch_spec(
batch_spec=batch_spec
)
batch_data = SqlAlchemyBatchData(
execution_engine=self,
selectable=selectable,
temp_table_name=temp_table_name,
create_temp_table=batch_spec.get(
"create_temp_table", self._create_temp_table
),
source_table_name=source_table_name,
source_schema_name=source_schema_name,
)
return batch_data, batch_markers
|
testUsdImportFrameRange.py
|
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import os
import unittest
from pxr import Usd
from maya import cmds
from maya import standalone
class testUsdImportFrameRange(unittest.TestCase):
def _LoadUsdWithRange(self, start=None, end=None):
# Import the USD file.
usdFilePath = os.path.abspath('MovingCube.usda')
cmds.loadPlugin('pxrUsd')
if start is not None and end is not None:
cmds.usdImport(file=usdFilePath, readAnimData=True,
frameRange=(start, end))
else:
cmds.usdImport(file=usdFilePath, readAnimData=True)
self.stage = Usd.Stage.Open(usdFilePath)
self.assertTrue(self.stage)
    @classmethod
    def tearDownClass(cls):
        standalone.uninitialize()
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
def setUp(self):
cmds.file(new=True, force=True)
def testUsdImport(self):
"""
Tests a simple import with frame range specified.
"""
self._LoadUsdWithRange(1, 15)
numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
keyframeCount=True)
self.assertEqual(numKeyFrames, 14)
keyTimes = cmds.keyframe("pCube1.translateX", index=(0,14), query=True)
self.assertEqual(keyTimes, [float(x) for x in range(1, 16) if x != 5.0])
def testUsdImportNoRangeSpecified(self):
"""
Tests an import with animation but no range specified.
"""
self._LoadUsdWithRange()
numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
keyframeCount=True)
self.assertEqual(numKeyFrames, 29)
keyTimes = cmds.keyframe("pCube1.translateX", index=(0,29), query=True)
self.assertEqual(keyTimes, [float(x) for x in range(1, 31) if x != 5.0])
def testUsdImportOverlyLargeRange(self):
"""
Tests an import frame range that is larger than the time range of
animation available in USD prims.
"""
self._LoadUsdWithRange(-100, 100)
numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
keyframeCount=True)
self.assertEqual(numKeyFrames, 29)
keyTimes = cmds.keyframe("pCube1.translateX", index=(0,29), query=True)
self.assertEqual(keyTimes, [float(x) for x in range(1, 31) if x != 5.0])
def testUsdImportOutOfRange(self):
"""
Tests an import frame range that doesn't intersect with the time range
of animation available in USD prims.
"""
self._LoadUsdWithRange(-200, -100)
numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
keyframeCount=True)
self.assertEqual(numKeyFrames, 0)
def testUsdImportSingle(self):
"""
Tests an import frame range that is only one frame.
"""
self._LoadUsdWithRange(29, 29)
xValue = cmds.getAttr("pCube1.translateX")
self.assertAlmostEqual(xValue, 11.7042500857406)
numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
keyframeCount=True)
self.assertEqual(numKeyFrames, 0) # Only one frame, so no real animation.
keyTimes = cmds.keyframe("pCube1.translateX", index=(0,0), query=True)
self.assertEqual(keyTimes, None) # Only one frame, so no real animation.
if __name__ == '__main__':
unittest.main(verbosity=2)
|
filament.go
|
// +build filament
/*
* Copyright 2019-2020 by Nedim Sabic Sabic
* https://www.fibratus.io
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package filament
import (
"errors"
"expvar"
"fmt"
"github.com/rabbitstack/fibratus/pkg/alertsender"
"github.com/rabbitstack/fibratus/pkg/config"
"github.com/rabbitstack/fibratus/pkg/filament/cpython"
"github.com/rabbitstack/fibratus/pkg/filter"
"github.com/rabbitstack/fibratus/pkg/handle"
"github.com/rabbitstack/fibratus/pkg/kevent"
"github.com/rabbitstack/fibratus/pkg/ps"
"github.com/rabbitstack/fibratus/pkg/util/multierror"
"github.com/rabbitstack/fibratus/pkg/util/term"
log "github.com/sirupsen/logrus"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
// initialize alert senders
_ "github.com/rabbitstack/fibratus/pkg/alertsender/mail"
_ "github.com/rabbitstack/fibratus/pkg/alertsender/slack"
)
// pyver designates the current Python version
const pyver = "37"
// useEmbeddedPython instructs the filament engine to use the embedded Python distribution.
var useEmbeddedPython = true
const (
intervalFn = "interval"
columnsFn = "columns"
sortbyFn = "sort_by"
kfilterFn = "kfilter"
addRowFn = "add_row"
maxRowsFn = "max_rows"
titleFn = "title"
renderTableFn = "render_table"
findHandleFn = "find_handle"
findHandlesFn = "find_handles"
findProcessFn = "find_process"
findProcessesFn = "find_processes"
emitAlertFn = "emit_alert"
onInitFn = "on_init"
onStopFn = "on_stop"
onNextKeventFn = "on_next_kevent"
onIntervalFn = "on_interval"
doc = "__doc__"
)
var (
keventErrors = expvar.NewMap("filament.kevent.errors")
keventProcessErrors = expvar.NewInt("filament.kevent.process.errors")
kdictErrors = expvar.NewInt("filament.kdict.errors")
batchFlushes = expvar.NewInt("filament.kevent.batch.flushes")
errFilamentsDir = func(path string) error { return fmt.Errorf("%s does not exist or is not a directory", path) }
errNoDoc = errors.New("filament description is required")
errNoOnNextKevent = errors.New("required on_next_kevent function is not defined")
errOnNextKeventNotCallable = errors.New("on_next_kevent is not callable")
errOnNextKeventMismatchArgs = func(c uint32) error { return fmt.Errorf("expected 1 argument for on_next_kevent but found %d args", c) }
tableOutput io.Writer
)
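// A minimal filament script for orientation (sketch; the script is hypothetical,
// but the docstring requirement and the function names mirror the constants and
// validation above):
//
//	"""Renders a table driven by kernel events."""
//
//	def on_init():
//	    columns(["process", "count"])
//	    sort_by("count")
//	    interval(1)
//
//	def on_next_kevent(kevent):
//	    ...
//
//	def on_interval():
//	    render_table()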
type kbatch []*kevent.Kevent
func (k *kbatch) append(kevt *kevent.Kevent) {
if *k == nil {
*k = make([]*kevent.Kevent, 0)
}
*k = append(*k, kevt)
}
func (k *kbatch) reset() { *k = nil }
func (k kbatch) len() int { return len(k) }
type filament struct {
name string
sortBy string
interval time.Duration
columns []string
fexpr string
fnerrs chan error
close chan struct{}
gil *cpython.GIL
tick *time.Ticker
mod *cpython.Module
config *config.Config
psnap ps.Snapshotter
hsnap handle.Snapshotter
filter filter.Filter
initErrors []error
onNextKevent *cpython.PyObject
onStop *cpython.PyObject
table tab
}
// New creates a new instance of the filament by starting an embedded Python interpreter. It imports the filament
// module and anchors required functions for controlling the filament options as well as providing access to
// the kernel event flow.
func New(
name string,
psnap ps.Snapshotter,
hsnap handle.Snapshotter,
config *config.Config,
) (Filament, error) {
if useEmbeddedPython {
exe, err := os.Executable()
if err != nil {
return nil, err
}
pylib := filepath.Join(filepath.Dir(exe), "..", "Python", fmt.Sprintf("python%s.zip", pyver))
if _, err := os.Stat(pylib); err != nil {
return nil, fmt.Errorf("python lib not found: %v", err)
}
// set the default module search path so it points to our embedded Python distribution
cpython.SetPath(pylib)
}
// initialize the Python interpreter
if err := cpython.Initialize(); err != nil {
return nil, err
}
	// set the PYTHONPATH to the filaments directory so the interpreter
// is aware of our filament module prior to its loading
path := config.Filament.Path
fstat, err := os.Stat(path)
if err != nil || !fstat.IsDir() {
return nil, errFilamentsDir(path)
}
filaments, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
// check if the filament is present in the directory
var exists bool
for _, f := range filaments {
if strings.TrimSuffix(f.Name(), filepath.Ext(f.Name())) == name {
exists = true
}
}
if !exists {
return nil, fmt.Errorf("%q filament does not exist. Run 'fibratus list filaments' to view available filaments", name)
}
cpython.AddPythonPath(path)
mod, err := cpython.NewModule(name)
if err != nil {
if err = cpython.FetchErr(); err != nil {
return nil, err
}
return nil, err
}
	// ensure required attributes are present before proceeding with
	// further initialization. For instance, if the documentation
	// string is not provided, the on_next_kevent function is missing,
	// or it has a wrong signature, we won't run the filament
doc, err := mod.GetAttrString(doc)
if err != nil || doc.IsNull() {
return nil, errNoDoc
}
defer doc.DecRef()
if !mod.HasAttr(onNextKeventFn) {
return nil, errNoOnNextKevent
}
onNextKevent, err := mod.GetAttrString(onNextKeventFn)
if err != nil || onNextKevent.IsNull() {
return nil, errNoOnNextKevent
}
if !onNextKevent.IsCallable() {
return nil, errOnNextKeventNotCallable
}
argCount := onNextKevent.CallableArgCount()
if argCount != 1 {
return nil, errOnNextKeventMismatchArgs(argCount)
}
f := &filament{
name: name,
mod: mod,
config: config,
psnap: psnap,
hsnap: hsnap,
close: make(chan struct{}, 1),
fnerrs: make(chan error, 100),
gil: cpython.NewGIL(),
columns: make([]string, 0),
onNextKevent: onNextKevent,
interval: time.Second,
initErrors: make([]error, 0),
table: newTable(),
}
if mod.HasAttr(onStopFn) {
f.onStop, _ = mod.GetAttrString(onStopFn)
}
// register all the functions for interacting with filament
// within the Python module
err = f.mod.RegisterFn(addRowFn, f.addRowFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(renderTableFn, f.renderTableFn, cpython.MethNoArgs)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(titleFn, f.titleFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(sortbyFn, f.sortByFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(maxRowsFn, f.maxRowsFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(columnsFn, f.columnsFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(kfilterFn, f.kfilterFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(intervalFn, f.intervalFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(emitAlertFn, f.emitAlertFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(findHandleFn, f.findHandleFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(findHandlesFn, f.findHandlesFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(findProcessFn, f.findProcessFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
err = f.mod.RegisterFn(findProcessesFn, f.findProcessesFn, cpython.DefaultMethFlags)
if err != nil {
return nil, err
}
// invoke the on_init function if it has been declared in the filament
if mod.HasAttr(onInitFn) {
onInit, _ := mod.GetAttrString(onInitFn)
if !onInit.IsNull() {
onInit.Call()
if err := cpython.FetchErr(); err != nil {
return nil, fmt.Errorf("filament init error: %v", err)
}
if len(f.initErrors) > 0 {
return nil, multierror.Wrap(f.initErrors...)
}
}
}
// initialize the console frame buffer
var fb io.Writer
if len(f.columns) > 0 {
fb, err = term.NewFrameBuffer()
if err != nil {
return nil, fmt.Errorf("couldn't create console frame buffer: %v", err)
}
}
if fb != nil {
f.table.setWriter(fb)
f.table.setColumnConfigs(f.columns, term.GetColumns()/2+15)
} else if tableOutput != nil {
f.table.setWriter(tableOutput)
} else {
f.table.setWriter(os.Stdout)
}
if len(f.columns) > 0 && f.sortBy != "" {
var sortBy bool
for _, col := range f.columns {
if col == f.sortBy {
sortBy = true
break
}
}
if !sortBy {
return nil, fmt.Errorf("%s column can't be sorted since it is not defined", f.sortBy)
}
}
// compile filter from the expression
if f.fexpr != "" {
f.filter = filter.New(f.fexpr, config)
if err := f.filter.Compile(); err != nil {
return nil, err
}
}
	// if the on_interval function has been declared in the module, we
	// schedule the ticker with the interval value set during filament
	// bootstrap in on_init, falling back to the default interval otherwise
if mod.HasAttr(onIntervalFn) {
onInterval, err := mod.GetAttrString(onIntervalFn)
if err == nil && !onInterval.IsNull() {
f.tick = time.NewTicker(f.interval)
go f.onInterval(onInterval)
}
}
// we acquired the GIL as a side effect of threading initialization (the call to cpython.Initialize())
// but now we have to reset the current thread state and release the GIL. It is the responsibility of
// the caller to acquire the GIL before executing any Python code from now on
f.gil.SaveThread()
return f, nil
}
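// For reference, a minimal filament script that New can load might look like
// the sketch below. This is a hypothetical example: the Python-side names are
// assumed to mirror the Go-side function constants registered above (columns,
// interval, kfilter, add_row, title, render_table, ...), and the kevent field
// names are illustrative only.
//
//	columns(["seq", "process"])
//	interval(1)
//
//	def on_init():
//	    kfilter("kevt.name = 'CreateProcess'")
//
//	def on_next_kevent(kevent):
//	    add_row([kevent["seq"], kevent["process"]])
//
//	def on_interval():
//	    title("Process spawns")
//	    render_table()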
func (f *filament) Run(kevents chan *kevent.Kevent, errs chan error) error {
var batch kbatch
var flusher = time.NewTicker(time.Second)
for {
select {
case <-f.close:
flusher.Stop()
return nil
default:
}
select {
case kevt := <-kevents:
batch.append(kevt)
case err := <-errs:
keventErrors.Add(err.Error(), 1)
case <-flusher.C:
batchFlushes.Add(1)
if batch.len() > 0 {
err := f.pushKevents(batch)
if err != nil {
log.Warnf("on_next_kevent failed: %v", err)
keventProcessErrors.Add(1)
}
batch.reset()
}
case err := <-f.fnerrs:
return err
case <-f.close:
flusher.Stop()
return nil
}
}
}
func (f *filament) pushKevents(b kbatch) error {
f.gil.Lock()
defer f.gil.Unlock()
for _, kevt := range b {
kdict, err := newKDict(kevt)
kevt.Release()
if err != nil {
kdict.DecRef()
kdictErrors.Add(1)
continue
}
r := f.onNextKevent.Call(kdict.Object())
if r != nil {
r.DecRef()
}
kdict.DecRef()
if err := cpython.FetchErr(); err != nil {
return err
}
}
return nil
}
func (f *filament) Close() error {
if f.onStop != nil && !f.onStop.IsNull() {
f.gil.Lock()
f.onStop.Call()
f.gil.Unlock()
}
// signal the Run loop and, if the ticker was scheduled, the on_interval goroutine
f.close <- struct{}{}
if f.tick != nil {
f.close <- struct{}{}
f.tick.Stop()
}
return nil
}
func (f *filament) Filter() filter.Filter { return f.filter }
func (f *filament) intervalFn(_, args cpython.PyArgs) cpython.PyRawObject {
f.interval = time.Second * time.Duration(args.GetInt(1))
if f.interval <= 0 {
f.initErrors = append(f.initErrors, errors.New("invalid interval value specified"))
}
return cpython.NewPyNone()
}
func (f *filament) sortByFn(_, args cpython.PyArgs) cpython.PyRawObject {
f.sortBy = args.GetString(1)
f.table.sortBy(f.sortBy)
return cpython.NewPyNone()
}
func (f *filament) maxRowsFn(_, args cpython.PyArgs) cpython.PyRawObject {
f.table.maxRows(args.GetInt(1))
return cpython.NewPyNone()
}
func (f *filament) columnsFn(_, args cpython.PyArgs) cpython.PyRawObject {
var err error
f.columns, err = args.GetStringSlice(1)
if err != nil {
f.initErrors = append(f.initErrors, err)
}
f.table.appendHeader(f.columns)
return cpython.NewPyNone()
}
func (f *filament) kfilterFn(_, args cpython.PyArgs) cpython.PyRawObject {
f.fexpr = args.GetString(1)
return cpython.NewPyNone()
}
func (f *filament) addRowFn(_, args cpython.PyArgs) cpython.PyRawObject {
s, err := args.GetSlice(1)
if err != nil {
f.fnerrs <- err
return cpython.NewPyNone()
}
if len(s) != len(f.columns) {
f.fnerrs <- fmt.Errorf("add_row has %d row(s) but expected %d rows(s)", len(s), len(f.columns))
return cpython.NewPyNone()
}
f.table.appendRow(s)
return cpython.NewPyLong(int64(len(s)))
}
func (f *filament) renderTableFn(_ cpython.PyArgs, args cpython.PyArgs) cpython.PyRawObject {
f.table.render()
f.table.reset()
return cpython.NewPyNone()
}
func (f *filament) titleFn(_ cpython.PyArgs, args cpython.PyArgs) cpython.PyRawObject {
f.table.title(args.GetString(1))
return cpython.NewPyNone()
}
var keywords = []string{"", "", "severity", "tags"}
func (f *filament) emitAlertFn(_, args cpython.PyArgs, kwargs cpython.PyKwargs) cpython.PyRawObject {
f.gil.Lock()
defer f.gil.Unlock()
senders := alertsender.FindAll()
if len(senders) == 0 {
log.Warn("no alertsenders registered. Alert won't be sent")
return cpython.NewPyNone()
}
title, text, sever, tags := cpython.PyArgsParseKeywords(args, kwargs, keywords)
for _, s := range senders {
alert := alertsender.NewAlert(
title,
text,
tags,
alertsender.ParseSeverityFromString(sever),
)
if err := s.Send(alert); err != nil {
log.Warnf("unable to emit alert from filament: %v", err)
}
}
return cpython.NewPyNone()
}
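// The finder functions below are not implemented yet: they only acquire the
// GIL and hand None back to the Python caller.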
func (f *filament) findProcessFn(_, args cpython.PyArgs) cpython.PyRawObject {
f.gil.Lock()
defer f.gil.Unlock()
return cpython.NewPyNone()
}
func (f *filament) findHandleFn(_, args cpython.PyArgs) cpython.PyRawObject {
f.gil.Lock()
defer f.gil.Unlock()
return cpython.NewPyNone()
}
func (f *filament) findProcessesFn(_, args cpython.PyArgs) cpython.PyRawObject {
f.gil.Lock()
defer f.gil.Unlock()
return cpython.NewPyNone()
}
func (f *filament) findHandlesFn(_, args cpython.PyArgs) cpython.PyRawObject {
f.gil.Lock()
defer f.gil.Unlock()
return cpython.NewPyNone()
}
func (f *filament) onInterval(fn *cpython.PyObject) {
for {
select {
case <-f.tick.C:
f.gil.Lock()
r := fn.Call()
if r != nil {
r.DecRef()
}
if err := cpython.FetchErr(); err != nil {
f.fnerrs <- err
}
f.gil.Unlock()
case <-f.close:
}
}
}
|
New
|
main.rs
|
use anyhow::{Context, Result};
use chrono::{Duration, NaiveTime};
use clap::Parser;
use std::{
fs,
fs::File,
io::{BufRead, BufReader, Write},
ops::Add,
};
#[derive(Parser)]
#[clap(
version = "0.1.0",
author = "abhayk <[email protected]>",
about = r#"A tool to add or subtract offsets to the timestamps in a .srt subtitle file.
After offsets are applied the original file will be backed up to <file>.orig"#
)]
struct Opts {
#[clap(short, long, about = "The path to the subtitle file")]
file: String,
#[clap(
short,
long,
about = "The shift offset. To increment by half a second provide +500, To decrement -500.",
allow_hyphen_values = true
)]
offset: i64,
}
fn main() -> Result<()> {
let opts: Opts = Opts::parse();
process_file(&opts.file, opts.offset)
}
fn process_file(input_file_path: &str, offset: i8) -> Result<()> {
let input_file = File::open(input_file_path).context("Failed to read the input file")?;
let tmp_output_file_path = String::from(input_file_path) + ".tmp";
let mut tmp_output_file =
File::create(&tmp_output_file_path).context("Failed to create the output file")?;
let separator = " --> ";
let buffered = BufReader::new(input_file);
for line in buffered.lines() {
let line = line.context("Failed while reading the file")?;
let line = if line.contains(separator) {
process_duration(&line, offset, separator)
.with_context(|| format!("Failed to process the line `{}`", line))?
} else {
line
};
writeln!(tmp_output_file, "{}", line).context("Failed while writing to the output file")?;
}
fs::rename(input_file_path, String::from(input_file_path) + ".orig")
.context("Failed while taking a backup of the input file")?;
fs::rename(tmp_output_file_path, input_file_path)
.context("Failed while trying to replace the original file with the updated version")?;
Ok(())
}
fn process_duration(line: &str, offset: i8, separator: &str) -> Result<String> {
let result: Vec<String> = line
.split(separator)
.map(|item| apply_offset(item, offset))
.collect::<Result<_>>()?;
Ok(result.join(separator))
}
fn apply_offset(input: &str, offset: i8) -> Result<String> {
let format = "%H:%M:%S,%3f";
let time = NaiveTime::parse_from_str(input, format)?
.add(Duration::milliseconds(offset as i64))
.format(format)
.to_string();
Ok(time)
}
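// Example invocation (hypothetical binary name):
//
//     srt-offset --file movie.srt --offset -500
//
// shifts every timestamp half a second earlier and keeps the untouched
// original at movie.srt.orig.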
#[cfg(test)]
mod tests {
use crate::{apply_offset, process_duration};
#[test]
fn apply_offset_with_zero_offset() {
assert_eq!(apply_offset("01:29:13,905", 0).unwrap(), "01:29:13,905");
}
#[test]
fn apply_offset_with_positive_offset() {
assert_eq!(apply_offset("01:29:13,905", 100).unwrap(), "01:29:14,005");
}
#[test]
fn apply_offset_with_negative_offset() {
assert_eq!(apply_offset("01:29:13,905", -100).unwrap(), "01:29:13,805");
}
#[test]
fn apply_offset_with_invalid_format() {
assert!(apply_offset("01:29:13:905", 10).is_err());
}
#[test]
fn process_duration_valid_input() {
assert_eq!(
process_duration("00:19:28,220 --> 00:19:29,753", 100, " --> ").unwrap(),
"00:19:28,320 --> 00:19:29,853"
)
}
#[test]
fn process_duration_invalid_input() {
assert!(process_duration("They're gonna send someone.", 100, " --> ").is_err());
}
}
|
gardenlet.go
|
// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"regexp"
goruntime "runtime"
"strings"
"time"
cmdutils "github.com/gardener/gardener/cmd/utils"
gardencore "github.com/gardener/gardener/pkg/apis/core"
gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/pkg/client/kubernetes/clientmap"
clientmapbuilder "github.com/gardener/gardener/pkg/client/kubernetes/clientmap/builder"
"github.com/gardener/gardener/pkg/client/kubernetes/clientmap/keys"
"github.com/gardener/gardener/pkg/features"
"github.com/gardener/gardener/pkg/gardenlet/apis/config"
configv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1"
configvalidation "github.com/gardener/gardener/pkg/gardenlet/apis/config/validation"
"github.com/gardener/gardener/pkg/gardenlet/bootstrap"
"github.com/gardener/gardener/pkg/gardenlet/bootstrap/certificate"
"github.com/gardener/gardener/pkg/gardenlet/controller"
gardenletfeatures "github.com/gardener/gardener/pkg/gardenlet/features"
"github.com/gardener/gardener/pkg/healthz"
"github.com/gardener/gardener/pkg/logger"
"github.com/gardener/gardener/pkg/server"
"github.com/gardener/gardener/pkg/server/routes"
"github.com/gardener/gardener/pkg/utils"
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/secrets"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
certificatesv1 "k8s.io/api/certificates/v1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
eventsv1 "k8s.io/api/events/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/clock"
kubernetesclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/version"
"k8s.io/component-base/version/verflag"
)
// Options has all the context and parameters needed to run a Gardenlet.
type Options struct {
// ConfigFile is the location of the Gardenlet's configuration file.
ConfigFile string
config *config.GardenletConfiguration
scheme *runtime.Scheme
codecs serializer.CodecFactory
}
// AddFlags adds flags for a specific Gardenlet to the specified FlagSet.
func (o *Options) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.")
}
// NewOptions returns a new Options object.
func NewOptions() (*Options, error) {
o := &Options{
config: new(config.GardenletConfiguration),
}
o.scheme = runtime.NewScheme()
o.codecs = serializer.NewCodecFactory(o.scheme)
if err := config.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := configv1alpha1.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := gardencore.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := gardencorev1beta1.AddToScheme(o.scheme); err != nil {
return nil, err
}
return o, nil
}
// loadConfigFromFile loads the content of file and decodes it as a
// GardenletConfiguration object.
func (o *Options) loadConfigFromFile(file string) (*config.GardenletConfiguration, error) {
data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
return o.decodeConfig(data)
}
// decodeConfig decodes data as a GardenletConfiguration object.
func (o *Options) decodeConfig(data []byte) (*config.GardenletConfiguration, error) {
gardenletConfig := &config.GardenletConfiguration{}
if _, _, err := o.codecs.UniversalDecoder().Decode(data, nil, gardenletConfig); err != nil {
return nil, err
}
return gardenletConfig, nil
}
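// A minimal configuration file decoded by the functions above might look like
// the following sketch (illustrative only; the API group and kind follow the
// imported config v1alpha1 package, the remaining values are made up):
//
//	apiVersion: gardenlet.config.gardener.cloud/v1alpha1
//	kind: GardenletConfiguration
//	gardenClientConnection:
//	  kubeconfig: /etc/gardenlet/garden-kubeconfig
//	seedClientConnection:
//	  kubeconfig: /etc/gardenlet/seed-kubeconfig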
func (o *Options) configFileSpecified() error {
if len(o.ConfigFile) == 0 {
return fmt.Errorf("missing Gardenlet config file")
}
return nil
}
// Validate validates all the required options.
func (o *Options) validate(args []string) error {
if len(args) != 0 {
return errors.New("arguments are not supported")
}
return nil
}
func run(ctx context.Context, o *Options) error {
c, err := o.loadConfigFromFile(o.ConfigFile)
if err != nil {
return fmt.Errorf("unable to read the configuration file: %w", err)
}
if errs := configvalidation.ValidateGardenletConfiguration(c, nil, false); len(errs) > 0 {
return fmt.Errorf("errors validating the configuration: %+v", errs)
}
o.config = c
// Add feature flags
if err := gardenletfeatures.FeatureGate.SetFromMap(o.config.FeatureGates); err != nil {
return err
}
kubernetes.UseCachedRuntimeClients = gardenletfeatures.FeatureGate.Enabled(features.CachedRuntimeClients)
if gardenletfeatures.FeatureGate.Enabled(features.ReversedVPN) && !gardenletfeatures.FeatureGate.Enabled(features.APIServerSNI) {
return fmt.Errorf("inconsistent feature gate: APIServerSNI is required for ReversedVPN (APIServerSNI: %t, ReversedVPN: %t)",
gardenletfeatures.FeatureGate.Enabled(features.APIServerSNI), gardenletfeatures.FeatureGate.Enabled(features.ReversedVPN))
}
gardenlet, err := NewGardenlet(ctx, o.config)
if err != nil {
return err
}
return gardenlet.Run(ctx)
}
// NewCommandStartGardenlet creates a *cobra.Command object with default parameters
func NewCommandStartGardenlet() *cobra.Command {
opts, err := NewOptions()
if err != nil {
panic(err)
}
cmd := &cobra.Command{
Use: "gardenlet",
Short: "Launch the Gardenlet",
Long: `In essence, the Gardener is an extension API server along with a bundle
of Kubernetes controllers which introduce new API objects in an existing Kubernetes
cluster (which is called Garden cluster) in order to use them for the management of
further Kubernetes clusters (which are called Shoot clusters).
To do that reliably and to offer a certain quality of service, it needs to control
the main components of a Kubernetes cluster (etcd, API server, controller manager, scheduler).
These so-called control plane components are hosted in Kubernetes clusters themselves
(which are called Seed clusters).`,
RunE: func(cmd *cobra.Command, args []string) error {
verflag.PrintAndExitIfRequested()
if err := opts.configFileSpecified(); err != nil {
return err
}
if err := opts.validate(args); err != nil {
return err
}
return run(cmd.Context(), opts)
},
}
flags := cmd.Flags()
verflag.AddFlags(flags)
opts.AddFlags(flags)
return cmd
}
// Gardenlet represents all the parameters required to start the
// Gardenlet.
type Gardenlet struct {
Config *config.GardenletConfiguration
Identity *gardencorev1beta1.Gardener
GardenClusterIdentity string
ClientMap clientmap.ClientMap
Log logr.Logger
Recorder record.EventRecorder
LeaderElection *leaderelection.LeaderElectionConfig
HealthManager healthz.Manager
CertificateManager *certificate.Manager
ClientCertificateExpirationTimestamp *metav1.Time
}
// NewGardenlet is the main entry point of instantiating a new Gardenlet.
func NewGardenlet(ctx context.Context, cfg *config.GardenletConfiguration) (*Gardenlet, error) {
if cfg == nil {
return nil, errors.New("config is required")
}
// Initialize logrus and zap logger (for the migration period, we will use both in parallel)
logrusLogger := logger.NewLogger(*cfg.LogLevel, *cfg.LogFormat)
log, err := logger.NewZapLogger(*cfg.LogLevel, *cfg.LogFormat)
if err != nil {
return nil, fmt.Errorf("error instantiating zap logger: %w", err)
}
log.Info("Starting gardenlet...", "version", version.Get())
log.Info("Feature Gates", "featureGates", gardenletfeatures.FeatureGate.String())
if flag := flag.Lookup("v"); flag != nil {
if err := flag.Value.Set(fmt.Sprintf("%d", cfg.KubernetesLogLevel)); err != nil {
return nil, err
}
}
// Prepare a Kubernetes client object for the Garden cluster which contains all the Clientsets
// that can be used to access the Kubernetes API.
if kubeconfig := os.Getenv("GARDEN_KUBECONFIG"); kubeconfig != "" {
cfg.GardenClientConnection.Kubeconfig = kubeconfig
}
if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" {
cfg.SeedClientConnection.Kubeconfig = kubeconfig
}
var (
clientCertificateExpirationTimestamp *metav1.Time
kubeconfigFromBootstrap []byte
csrName string
seedName string
)
// constructs a seed client for `SeedClientConnection.kubeconfig` or if not set,
// creates a seed client based on the service account token mounted into the gardenlet container running in Kubernetes
// when running outside of Kubernetes, `SeedClientConnection.kubeconfig` has to be set either directly or via the environment variable "KUBECONFIG"
seedClient, err := kubernetes.NewClientFromFile(
"",
cfg.SeedClientConnection.ClientConnectionConfiguration.Kubeconfig,
kubernetes.WithClientConnectionOptions(cfg.SeedClientConnection.ClientConnectionConfiguration),
kubernetes.WithDisabledCachedClient(),
)
if err != nil {
return nil, err
}
if cfg.GardenClientConnection.KubeconfigSecret != nil {
kubeconfigFromBootstrap, csrName, seedName, err = bootstrapKubeconfig(ctx, logrusLogger, seedClient.Client(), cfg)
if err != nil {
return nil, err
}
} else {
log.Info("No kubeconfig secret given in the configuration under `.gardenClientConnection.kubeconfigSecret`. Skipping the kubeconfig bootstrap process and certificate rotation.")
}
if kubeconfigFromBootstrap == nil {
log.Info("Falling back to the kubeconfig specified in the configuration under `.gardenClientConnection.kubeconfig`")
if len(cfg.GardenClientConnection.Kubeconfig) == 0 {
return nil, fmt.Errorf("the configuration file needs to either specify a Garden API Server kubeconfig under `.gardenClientConnection.kubeconfig` or provide bootstrapping information. " +
"To configure the Gardenlet for bootstrapping, provide the secret containing the bootstrap kubeconfig under `.gardenClientConnection.kubeconfigSecret` and also the secret name where the created kubeconfig should be stored for further use via`.gardenClientConnection.kubeconfigSecret`")
}
} else {
gardenClientCertificate, err := certificate.GetCurrentCertificate(logrusLogger, kubeconfigFromBootstrap, cfg.GardenClientConnection)
if err != nil {
return nil, err
}
clientCertificateExpirationTimestamp = &metav1.Time{Time: gardenClientCertificate.Leaf.NotAfter}
log.Info("The client certificate used to communicate with the garden cluster has expiration date", "expirationDate", gardenClientCertificate.Leaf.NotAfter)
}
restCfg, err := kubernetes.RESTConfigFromClientConnectionConfiguration(&cfg.GardenClientConnection.ClientConnectionConfiguration, kubeconfigFromBootstrap)
if err != nil {
return nil, err
}
gardenClientMapBuilder := clientmapbuilder.NewGardenClientMapBuilder().
WithRESTConfig(restCfg).
// gardenlet does not have the required RBAC permissions for listing/watching the following resources, so let's prevent any
// attempts to cache them
WithUncached(
&gardencorev1alpha1.ExposureClass{},
&gardencorev1alpha1.ShootState{},
&gardencorev1beta1.CloudProfile{},
&gardencorev1beta1.ControllerDeployment{},
&gardencorev1beta1.Project{},
&gardencorev1beta1.SecretBinding{},
&certificatesv1.CertificateSigningRequest{},
&certificatesv1beta1.CertificateSigningRequest{},
&coordinationv1.Lease{},
&corev1.Namespace{},
&corev1.ConfigMap{},
&corev1.Event{},
&eventsv1.Event{},
).
ForSeed(cfg.SeedConfig.Name)
seedClientMapBuilder := clientmapbuilder.NewSeedClientMapBuilder().
WithClientConnectionConfig(&cfg.SeedClientConnection.ClientConnectionConfiguration)
shootClientMapBuilder := clientmapbuilder.NewShootClientMapBuilder().
WithClientConnectionConfig(&cfg.ShootClientConnection.ClientConnectionConfiguration)
clientMap, err := clientmapbuilder.NewDelegatingClientMapBuilder().
WithGardenClientMapBuilder(gardenClientMapBuilder).
WithSeedClientMapBuilder(seedClientMapBuilder).
WithShootClientMapBuilder(shootClientMapBuilder).
Build()
if err != nil {
return nil, fmt.Errorf("failed to build ClientMap: %w", err)
}
k8sGardenClient, err := clientMap.GetClient(ctx, keys.ForGarden())
if err != nil {
return nil, fmt.Errorf("failed to get garden client: %w", err)
}
// Delete bootstrap auth data if certificate was newly acquired
if len(csrName) > 0 && len(seedName) > 0 {
log.Info("Deleting bootstrap authentication data used to request a certificate")
if err := bootstrap.DeleteBootstrapAuth(ctx, k8sGardenClient.Client(), k8sGardenClient.Client(), csrName, seedName); err != nil {
return nil, err
}
}
// Set up leader election if enabled and prepare event recorder.
var (
leaderElectionConfig *leaderelection.LeaderElectionConfig
recorder = cmdutils.CreateRecorder(k8sGardenClient.Kubernetes(), "gardenlet")
)
if cfg.LeaderElection.LeaderElect {
seedRestCfg, err := kubernetes.RESTConfigFromClientConnectionConfiguration(&cfg.SeedClientConnection.ClientConnectionConfiguration, nil)
if err != nil {
return nil, err
}
k8sSeedClientLeaderElection, err := kubernetesclientset.NewForConfig(seedRestCfg)
if err != nil {
return nil, fmt.Errorf("failed to create client for leader election: %w", err)
}
leaderElectionConfig, err = cmdutils.MakeLeaderElectionConfig(
*cfg.LeaderElection,
k8sSeedClientLeaderElection,
cmdutils.CreateRecorder(k8sSeedClientLeaderElection, "gardenlet"),
)
if err != nil {
return nil, err
}
}
identity, err := determineGardenletIdentity()
if err != nil {
return nil, err
}
gardenClusterIdentity := &corev1.ConfigMap{}
if err := k8sGardenClient.Client().Get(ctx, kutil.Key(metav1.NamespaceSystem, v1beta1constants.ClusterIdentity), gardenClusterIdentity); err != nil {
return nil, fmt.Errorf("unable to get Gardener`s cluster-identity ConfigMap: %w", err)
}
clusterIdentity, ok := gardenClusterIdentity.Data[v1beta1constants.ClusterIdentity]
if !ok {
return nil, errors.New("unable to extract Gardener`s cluster identity from cluster-identity ConfigMap")
}
// create the certificate manager to schedule certificate rotations
var certificateManager *certificate.Manager
if cfg.GardenClientConnection.KubeconfigSecret != nil {
certificateManager = certificate.NewCertificateManager(clientMap, seedClient.Client(), cfg)
}
return &Gardenlet{
Identity: identity,
GardenClusterIdentity: clusterIdentity,
Config: cfg,
Log: log,
Recorder: recorder,
ClientMap: clientMap,
LeaderElection: leaderElectionConfig,
CertificateManager: certificateManager,
ClientCertificateExpirationTimestamp: clientCertificateExpirationTimestamp,
}, nil
}
// Run runs the Gardenlet. This should never exit.
func (g *Gardenlet) Run(ctx context.Context) error {
controllerCtx, controllerCancel := context.WithCancel(ctx)
defer controllerCancel()
// Initialize /healthz manager.
healthGracePeriod := time.Duration((*g.Config.Controllers.Seed.LeaseResyncSeconds)*(*g.Config.Controllers.Seed.LeaseResyncMissThreshold)) * time.Second
g.HealthManager = healthz.NewPeriodicHealthz(clock.RealClock{}, healthGracePeriod)
if g.CertificateManager != nil {
g.CertificateManager.ScheduleCertificateRotation(controllerCtx, controllerCancel, g.Recorder)
}
// Start HTTPS server.
if g.Config.Server.HTTPS.TLS == nil {
g.Log.Info("No TLS server certificates provided... self-generating them now...")
_, _, tempDir, err := secrets.SelfGenerateTLSServerCertificate("gardenlet", []string{
"gardenlet",
fmt.Sprintf("gardenlet.%s", v1beta1constants.GardenNamespace),
fmt.Sprintf("gardenlet.%s.svc", v1beta1constants.GardenNamespace),
}, nil)
if err != nil {
return err
}
g.Config.Server.HTTPS.TLS = &config.TLSServer{
ServerCertPath: filepath.Join(tempDir, secrets.DataKeyCertificate),
ServerKeyPath: filepath.Join(tempDir, secrets.DataKeyPrivateKey),
}
g.Log.Info("TLS server certificates successfully self-generated.")
}
g.startServer(ctx)
// Prepare a reusable run function.
run := func(ctx context.Context) error {
g.HealthManager.Start()
return g.startControllers(ctx)
}
leaderElectionCtx, leaderElectionCancel := context.WithCancel(context.Background())
// If leader election is enabled, run via LeaderElector until done and exit.
if g.LeaderElection != nil {
g.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
g.Log.Info("Acquired leadership, starting controllers")
if err := run(controllerCtx); err != nil {
g.Log.Error(err, "failed to run controllers")
}
leaderElectionCancel()
},
OnStoppedLeading: func() {
g.Log.Info("Lost leadership, terminating")
controllerCancel()
},
}
leaderElector, err := leaderelection.NewLeaderElector(*g.LeaderElection)
if err != nil {
return fmt.Errorf("couldn't create leader elector: %w", err)
}
leaderElector.Run(leaderElectionCtx)
return nil
}
// Leader election is disabled, thus run directly until done.
leaderElectionCancel()
return run(controllerCtx)
}
func (g *Gardenlet) startServer(ctx context.Context) {
builder := server.
NewBuilder().
WithBindAddress(g.Config.Server.HTTPS.BindAddress).
WithPort(g.Config.Server.HTTPS.Port).
WithTLS(g.Config.Server.HTTPS.TLS.ServerCertPath, g.Config.Server.HTTPS.TLS.ServerKeyPath).
WithHandler("/metrics", promhttp.Handler()).
WithHandlerFunc("/healthz", healthz.HandlerFunc(g.HealthManager))
if g.Config.Debugging != nil && g.Config.Debugging.EnableProfiling {
routes.Profiling{}.AddToBuilder(builder)
if g.Config.Debugging.EnableContentionProfiling {
goruntime.SetBlockProfileRate(1)
}
}
go builder.Build().Start(ctx)
}
func (g *Gardenlet) startControllers(ctx context.Context) error {
return controller.NewGardenletControllerFactory(
g.ClientMap,
g.Config,
g.Identity,
g.GardenClusterIdentity,
g.Recorder,
g.HealthManager,
g.ClientCertificateExpirationTimestamp,
).Run(ctx)
}
// We want to determine the Docker container id of the currently running Gardenlet because
// we need to identify for still ongoing operations whether another Gardenlet instance is
// still operating the respective Shoots. When running locally, we generate a random string because
// there is no container id.
func determineGardenletIdentity() (*gardencorev1beta1.Gardener, error) {
var (
validID = regexp.MustCompile(`([0-9a-f]{64})`)
gardenletID string
gardenletName string
err error
)
gardenletName, err = os.Hostname()
if err != nil {
return nil, fmt.Errorf("unable to get hostname: %w", err)
}
// If running inside a Kubernetes cluster (as container) we can read the container id from the proc file system.
// Otherwise generate a random string for the gardenletID
if cGroupFile, err := os.Open("/proc/self/cgroup"); err == nil {
defer cGroupFile.Close()
reader := bufio.NewReader(cGroupFile)
var cgroupV1 string
for {
line, err := reader.ReadString('\n')
if err != nil {
break
}
// Store cgroup-v1 result for fall back
if strings.HasPrefix(line, "1:name=systemd") {
cgroupV1 = line
}
// Always prefer cgroup-v2
if strings.HasPrefix(line, "0::") {
if id := extractID(line); validID.MatchString(id) {
gardenletID = id
break
}
}
}
// Fall-back to cgroup-v1 if possible
if len(gardenletID) == 0 && len(cgroupV1) > 0 {
gardenletID = extractID(cgroupV1)
}
}
if gardenletID == "" {
gardenletID, err = utils.GenerateRandomString(64)
if err != nil {
return nil, fmt.Errorf("unable to generate gardenletID: %w", err)
}
}
return &gardencorev1beta1.Gardener{
ID: gardenletID,
Name: gardenletName,
Version: version.Get().GitVersion,
}, nil
}
func extractID(line string) string {
var (
id string
splitBySlash = strings.Split(line, "/")
)
if len(splitBySlash) == 0 {
return ""
}
id = strings.TrimSpace(splitBySlash[len(splitBySlash)-1])
id = strings.TrimSuffix(id, ".scope")
id = strings.TrimPrefix(id, "docker-")
return id
}
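// For example (illustrative entries), both of the following cgroup lines
//
//	12:devices:/docker/<64-hex-container-id>
//	0::/system.slice/docker-<64-hex-container-id>.scope
//
// reduce to the bare 64-character container id.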
|
mag_pwr_all.py
|
from objectives.results._objective_result import *
from objectives.results._add_sub_stat import add_stat_all, sub_stat_all
MAG_PWR_ADDRESS = 0x161d
add_mag_pwr = add_stat_all(MAG_PWR_ADDRESS, "mag_pwr")
sub_mag_pwr = sub_stat_all(MAG_PWR_ADDRESS, "mag_pwr")
class Field(field_result.Result):
def src(self, count):
if count < 0:
return [
field.LongCall(START_ADDRESS_SNES + sub_mag_pwr, -count),
]
elif count > 0:
return [
field.LongCall(START_ADDRESS_SNES + add_mag_pwr, count),
]
return []
class Battle(battle_result.Result):
def src(self, count):
if count < 0:
return [
asm.LDA(-count, asm.IMM8),
asm.STA(field.LongCall.ARG_ADDRESS, asm.DIR),
asm.JSL(START_ADDRESS_SNES + sub_mag_pwr),
]
elif count > 0:
return [
asm.LDA(count, asm.IMM8),
asm.STA(field.LongCall.ARG_ADDRESS, asm.DIR),
asm.JSL(START_ADDRESS_SNES + add_mag_pwr),
]
return []
class Result(ObjectiveResult):
NAME = "MagPwr All"
def __init__(self, min_count, max_count):
self.count = random.randint(min_count, max_count)
super().__init__(Field, Battle, self.count)
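# Illustrative usage: Result(1, 3) rolls a random count between 1 and 3 and
# generates the field/battle code that adds that much mag_pwr to the whole
# party; a negative count subtracts instead.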
|
m3_captial_t_class.py
|
"""
A CapitalT class and methods that use the Cross class.
Authors: David Mutchler, Vibha Alangar, Dave Fisher, Amanda Stouder,
their colleagues and Xiaolong Chen (Harry).
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
import math
def main():
"""
Calls the test functions.
As you implement CapitalT method uncomment the appropriate tests.
"""
# --------------------------------------------------------------
# Uncomment only 1 test at a time as you develop your code.
# --------------------------------------------------------------
print('Un-comment the calls in MAIN one by one')
print(' to run the testing code as you complete the TODOs.')
run_test_simple_t()
run_test_set_colors()
run_test_move_by()
run_test_clone()
def run_test_simple_t():
"""
Tests for the __init__ method and attach_to method.
See the simple_t PDF for expected output.
"""
print()
print('--------------------------------------------------')
print('Testing __init__ and attach_to ')
print('--------------------------------------------------')
window = rg.RoseWindow(600, 400, 'Test 1 - Simple Ts')
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
print("Expected: Point(250.0, 40.0) Point(350.0, 60.0)")
print("Actual: ", t1.h_rect.get_upper_left_corner(), t1.h_rect.get_lower_right_corner())
print("Expected: Point(290.0, 40.0) Point(310.0, 240.0)")
print("Actual: ", t1.v_rect.get_upper_left_corner(), t1.v_rect.get_lower_right_corner())
t1.attach_to(window)
t2 = CapitalT(rg.Point(150, 150), 100, 150, 40)
t2.attach_to(window)
t3 = CapitalT(rg.Point(450, 150), 10, 15, 4)
t3.attach_to(window)
window.render()
print("See graphics window and compare to the simple_t PDF")
window.close_on_mouse_click()
def run_test_set_colors():
""" Tests for the set_colors method. See the set_colors PDF for expected output. """
window = rg.RoseWindow(600, 400, 'Test 2 - Colorful Ts')
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_colors('red', 'magenta')
t1.attach_to(window)
t2 = CapitalT(rg.Point(150, 150), 100, 150, 40)
t2.set_colors('green', 'purple')
t2.attach_to(window)
t3 = CapitalT(rg.Point(450, 150), 10, 15, 4)
t3.set_colors('blue', 'gray')
t3.attach_to(window)
window.render()
window.close_on_mouse_click()
def run_test_move_by():
""" Tests for the move_by method. See the move_by PDF for expected output. """
window = rg.RoseWindow(600, 400, 'Test 3 - Moving T')
little_red_t = CapitalT(rg.Point(300, 50), 60, 80, 5)
little_red_t.set_colors('red', 'gray')
little_red_t.attach_to(window)
window.render(0.5)
little_red_t.move_by(0, 100)
window.render(0.5)
little_red_t.move_by(0, 100)
window.render(0.5)
for k in range(40):
little_red_t.move_by(5, -2)
window.render(0.05)
window.close_on_mouse_click()
def run_test_clone():
""" Tests for the clone method. See the clone PDF for expected output. """
window = rg.RoseWindow(650, 400, 'Test 4 - Cloning Ts')
first_t = CapitalT(rg.Point(75, 50), 80, 80, 40)
first_t.set_colors('blue', 'cyan')
for k in range(6):
t = first_t.clone()
if k < 2:
t.set_colors('white', 'black')
t.move_by(100 * k, 20 * k)
t.attach_to(window)
first_t.move_by(0, 200)
first_t.attach_to(window)
window.render()
window.close_on_mouse_click()
########################################################################
# The CapitalT class (and its methods) begins here.
########################################################################
class CapitalT(object):
"""
Manages a CapitalT graphics object which is made up of two rectangles.
See the PDFs, especially dimensions.pdf, to help you understand this.
"""
def __init__(self, intersection_center, width, height, letter_thickness):
"""
What comes in:
-- self
-- an rg.Point for the intersection center of the CapitalT
-- This point is also center of the horizontal rectangle.
-- an int for the width of the CapitalT (the width of the horizontal rectangle)
-- an int for the height of the CapitalT (the height of the vertical rectangle)
-- an int for the thickness of each rectangle (the letter's thickness)
What goes out: Nothing (i.e., None).
Side effects: Sets two instance variables named:
-- h_rect (to represent the horizontal rectangle in the T, the top bar)
-- v_rect (to represent the vertical rectangle in the T, the | part of the T)
*** See the dimensions PDF for the exact placement of the rectangles in the T. ***
Each rectangle is an rg.Rectangle. Unlike prior modules you are NOT
allowed to make any other instance variables. You may only use
exactly these two and must figure out how to do the problem with ONLY
those two instance variables.
Example:
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
-- t1.h_rect would have an upper left corner of (250, 40)
-- t1.h_rect would have a lower right corner of (350, 60)
-- t1.v_rect would have an upper left corner of (290, 40)
-- t1.v_rect would have a lower right corner of (310, 240)
Type hints:
:type intersection_center: rg.Point
:type width: int
:type height: int
:type letter_thickness: int
"""
hupperright = rg.Point(intersection_center.x + width / 2, intersection_center.y - letter_thickness / 2)
hlowerleft = rg.Point(intersection_center.x - width / 2, intersection_center.y + letter_thickness / 2)
self.h_rect = rg.Rectangle(hupperright, hlowerleft)
vupperright = rg.Point(intersection_center.x + letter_thickness / 2, hupperright.y)
vlowerleft = rg.Point(intersection_center.x - letter_thickness / 2, hlowerleft.y + (height - letter_thickness))
self.v_rect = rg.Rectangle(vupperright, vlowerleft)
# --------------------------------------------------------------
# DONE: 3.
# READ the above specification, including the Example.
# Implement this method
# Note: you will need to also implement attach_to before testing
# --------------------------------------------------------------
def attach_to(self, window):
"""
What comes in:
-- self
-- an rg.RoseWindow
What goes out: Nothing (i.e., None).
Side effects:
-- Attaches both instance rectangles to the given window.
-- Hint: Attach h_rect second to make it draw in front of v_rect
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.attach_to(window)
Type hints:
:type window: rg.RoseWindow
"""
self.v_rect.attach_to(window)
self.h_rect.attach_to(window)
# --------------------------------------------------------------
# DONE: 4.
# READ the above specification, including the Example.
# Implement and test this method by looking at the console and
# the graphics window (compare it to simple_t.pdf)
# --------------------------------------------------------------
def set_colors(self, fill_color, outline_color):
"""
What comes in:
-- self
-- a string that represents a valid rosegraphics color
-- a string that represents a valid rosegraphics color
What goes out: Nothing (i.e., None).
Side effects:
-- sets the fill_color of both rectangles to the given fill color
-- sets the outline_color of both rectangles to the given outline color
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_colors('red', 'blue')
Type hints:
:type fill_color: str
:type outline_color: str
"""
self.h_rect.fill_color = fill_color
self.v_rect.fill_color = fill_color
self.h_rect.outline_color = outline_color
self.v_rect.outline_color = outline_color
# --------------------------------------------------------------
# DONE: 5.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# set_colors.pdf.
# --------------------------------------------------------------
def move_by(self, dx, dy):
"""
What comes in:
-- self
-- an int amount to move in the x direction
-- an int amount to move in the y direction
What goes out: Nothing (i.e., None).
Side effects:
-- Moves both h_rect and v_rect the specified dx and dy amounts.
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.attach_to(window)
window.render(0.5)
t1.move_by(100, 200) # Moves the T 100 pixels right and 200 down.
window.render() # necessary to see the change
Type hints:
:type dx: int
:type dy: int
"""
self.h_rect.corner_1.x += dx
self.h_rect.corner_2.x += dx
self.h_rect.corner_1.y += dy
self.h_rect.corner_2.y += dy
self.v_rect.corner_1.x += dx
self.v_rect.corner_2.x += dx
self.v_rect.corner_1.y += dy
self.v_rect.corner_2.y += dy
# --------------------------------------------------------------
# DONE: 6.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# move_by.pdf. Note: the pdf shows the different locations
# that the T moves through, but there is only 1 T at any moment.
# --------------------------------------------------------------
def clone(self):
"""
What comes in:
-- self
What goes out:
-- Returns a new CapitalT that is located in the same position as
this CapitalT with the same colors for the rectangles.
Side effects:
-- None
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_colors('red', 'blue')
t2 = t1.clone() # t2 is at the same location WITH THE SAME COLORS
Type hints:
:rtype: CapitalT
"""
h = self.h_rect
v = self.v_rect
# the intersection center is the center of the horizontal bar, i.e. the
# midpoint of its upper-right and lower-left corners
intersect = rg.Point((h.get_upper_right_corner().x + h.get_lower_left_corner().x) / 2, (h.get_upper_right_corner().y + h.get_lower_left_corner().y) / 2)
thickness = math.fabs(h.get_upper_right_corner().y - h.get_lower_left_corner().y)
clone = CapitalT(intersect, h.get_width(), v.get_height(), thickness)
clone.set_colors(self.h_rect.fill_color, self.h_rect.outline_color)
return clone
# --------------------------------------------------------------
# DONE: 7.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# clone.pdf.
# --------------------------------------------------------------
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
exceptions.py
|
class StoryPytestError(Exception):
"""Base error of all stories-pytest errors."""
pass
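# Illustrative (hypothetical) usage:
#     raise StoryPytestError("story context has not been initialized")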
|
expandable.wrapper.delegate.ts
|
import { Maybe, objectIsEmpty } from '@dereekb/util';
import { FieldWrapper, FormlyFieldProps, FormlyFieldConfig } from '@ngx-formly/core';
import { map, shareReplay, startWith, switchMap, BehaviorSubject, of } from 'rxjs';
import { Directive, OnDestroy, OnInit } from '@angular/core';
import { AbstractControl } from '@angular/forms';
import { filterMaybe } from '@dereekb/rxjs';
export interface AbstractFormExpandableSectionConfig<T extends object = object> {
expandLabel?: string;
/**
* Optional function to use for checking value existence.
*/
hasValueFn?: (value: T) => boolean;
}
export interface FormExpandableSectionWrapperFieldProps<T extends object = object, S extends AbstractFormExpandableSectionConfig<T> = AbstractFormExpandableSectionConfig<T>> extends Pick<FormlyFieldProps, 'label'> {
expandWrapper?: S;
}
export const DEFAULT_HAS_VALUE_FN = (x: object) => !objectIsEmpty(x);
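// A sketch of how a field might opt into this wrapper (hypothetical config;
// the wrapper registration name 'expandable' and the field keys are
// assumptions, not taken from this file):
//
// const field: FormlyFieldConfig<FormExpandableSectionWrapperFieldProps> = {
//   wrappers: ['expandable'],
//   props: { expandWrapper: { expandLabel: 'Add address' } },
//   fieldGroup: [{ key: 'line1', type: 'input' }]
// };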
@Directive()
export class AbstractFormExpandableSectionWrapperDirective<T extends object = object, S extends AbstractFormExpandableSectionConfig<T> = AbstractFormExpandableSectionConfig<T>> extends FieldWrapper<FormlyFieldConfig<FormExpandableSectionWrapperFieldProps<T, S>>> implements OnInit, OnDestroy {
protected _formControlObs = new BehaviorSubject<Maybe<AbstractControl>>(undefined);
readonly formControl$ = this._formControlObs.pipe(filterMaybe());
protected _toggleOpen = new BehaviorSubject<Maybe<boolean>>(undefined);
readonly show$ = this._toggleOpen.pipe(
switchMap((toggleOpen: Maybe<boolean>) => {
if (toggleOpen != null) {
return of(toggleOpen);
} else {
return this.hasValue$;
}
}),
shareReplay(1)
);
readonly hasValue$ = this.formControl$.pipe(
switchMap((x) =>
x.valueChanges.pipe(
startWith(x.value),
map((value) => {
return this.hasValueFn(value);
}),
shareReplay(1)
)
)
);
get expandableSection(): Maybe<S> {
return this.props.expandWrapper;
}
get hasValueFn(): (value: T) => boolean {
return this.expandableSection?.hasValueFn ?? (DEFAULT_HAS_VALUE_FN as (value: T) => boolean);
}
get expandLabel(): Maybe<string> {
let label: Maybe<string> = this.expandableSection?.expandLabel ?? this.field?.props?.label;
if (label == null) {
const firstFieldGroup = this.field.fieldGroup?.[0];
if (firstFieldGroup) {
label = firstFieldGroup.props?.label ?? (firstFieldGroup.key as string);
}
}
return label;
}
open(): void {
this._toggleOpen.next(true);
}
ngOnInit(): void {
this._formControlObs.next(this.formControl);
}
ngOnDestroy(): void {
this._toggleOpen.complete();
this._formControlObs.complete();
}
}
|
symbol.rs
|
//! An "interner" is a data structure that associates values with usize tags and
//! allows bidirectional lookup; i.e., given a value, one can easily find the
//! tag, and vice versa.
use arena::DroplessArena;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::newtype_index;
use rustc_macros::symbols;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use std::cmp::{PartialEq, Ordering, PartialOrd, Ord};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::str;
use crate::hygiene::SyntaxContext;
use crate::{Span, DUMMY_SP, GLOBALS};
#[cfg(test)]
mod tests;
symbols! {
// After modifying this list, adjust `is_special` and `is_used_keyword`/`is_unused_keyword`;
// this should rarely be necessary, though, if the keywords are kept in alphabetic order.
Keywords {
// Special reserved identifiers used internally for elided lifetimes,
// unnamed method parameters, crate root module, error recovery etc.
Invalid: "",
PathRoot: "{{root}}",
DollarCrate: "$crate",
Underscore: "_",
// Keywords that are used in stable Rust.
As: "as",
Break: "break",
Const: "const",
Continue: "continue",
Crate: "crate",
Else: "else",
Enum: "enum",
Extern: "extern",
False: "false",
Fn: "fn",
For: "for",
If: "if",
Impl: "impl",
In: "in",
Let: "let",
Loop: "loop",
Match: "match",
Mod: "mod",
Move: "move",
Mut: "mut",
Pub: "pub",
Ref: "ref",
Return: "return",
SelfLower: "self",
SelfUpper: "Self",
Static: "static",
Struct: "struct",
Super: "super",
Trait: "trait",
True: "true",
Type: "type",
Unsafe: "unsafe",
Use: "use",
Where: "where",
While: "while",
// Keywords that are used in unstable Rust or reserved for future use.
Abstract: "abstract",
Become: "become",
Box: "box",
Do: "do",
Final: "final",
Macro: "macro",
Override: "override",
Priv: "priv",
Typeof: "typeof",
Unsized: "unsized",
Virtual: "virtual",
Yield: "yield",
// Edition-specific keywords that are used in stable Rust.
Dyn: "dyn", // >= 2018 Edition only
// Edition-specific keywords that are used in unstable Rust or reserved for future use.
Async: "async", // >= 2018 Edition only
Await: "await", // >= 2018 Edition only
Try: "try", // >= 2018 Edition only
// Special lifetime names
UnderscoreLifetime: "'_",
StaticLifetime: "'static",
// Weak keywords, have special meaning only in specific contexts.
Auto: "auto",
Catch: "catch",
Default: "default",
Union: "union",
}
// Symbols that can be referred to with syntax_pos::sym::*. The symbol is
// the stringified identifier unless otherwise specified (e.g.
// `proc_dash_macro` represents "proc-macro").
//
// As well as the symbols listed, there are symbols for the strings
// "0", "1", ..., "9", which are accessible via `sym::integer`.
Symbols {
aarch64_target_feature,
abi,
abi_amdgpu_kernel,
abi_msp430_interrupt,
abi_ptx,
abi_sysv64,
abi_thiscall,
abi_unadjusted,
abi_vectorcall,
abi_x86_interrupt,
aborts,
advanced_slice_patterns,
adx_target_feature,
alias,
align,
alignstack,
all,
allocator,
allocator_internals,
alloc_error_handler,
allow,
allowed,
allow_fail,
allow_internal_unsafe,
allow_internal_unstable,
allow_internal_unstable_backcompat_hack,
always,
and,
any,
arbitrary_enum_discriminant,
arbitrary_self_types,
Arguments,
ArgumentV1,
arm_target_feature,
asm,
assert,
associated_consts,
associated_type_bounds,
associated_type_defaults,
associated_types,
async_await,
async_closure,
attr,
attributes,
attr_literals,
augmented_assignments,
automatically_derived,
avx512_target_feature,
await_macro,
begin_panic,
bench,
bin,
bind_by_move_pattern_guards,
block,
bool,
borrowck_graphviz_postflow,
borrowck_graphviz_preflow,
box_patterns,
box_syntax,
braced_empty_structs,
C,
cdylib,
cfg,
cfg_attr,
cfg_attr_multi,
cfg_doctest,
cfg_target_feature,
cfg_target_has_atomic,
cfg_target_thread_local,
cfg_target_vendor,
char,
clippy,
clone,
Clone,
clone_closures,
clone_from,
closure_to_fn_coercion,
cmp,
cmpxchg16b_target_feature,
cold,
column,
compile_error,
compiler_builtins,
concat,
concat_idents,
conservative_impl_trait,
console,
const_compare_raw_pointers,
const_constructor,
const_fn,
const_fn_union,
const_generics,
const_indexing,
const_in_array_repeat_expressions,
const_let,
const_panic,
const_raw_ptr_deref,
const_raw_ptr_to_usize_cast,
const_transmute,
contents,
context,
convert,
Copy,
copy_closures,
core,
core_intrinsics,
crate_id,
crate_in_paths,
crate_local,
crate_name,
crate_type,
crate_visibility_modifier,
custom_attribute,
custom_derive,
custom_inner_attributes,
custom_test_frameworks,
c_variadic,
Debug,
declare_lint_pass,
decl_macro,
Decodable,
Default,
default_lib_allocator,
default_type_parameter_fallback,
default_type_params,
deny,
deprecated,
deref,
deref_mut,
derive,
direct,
doc,
doc_alias,
doc_cfg,
doc_keyword,
doc_masked,
doc_spotlight,
doctest,
document_private_items,
dotdoteq_in_patterns,
dotdot_in_tuple_patterns,
double_braced_crate: "{{crate}}",
double_braced_impl: "{{impl}}",
double_braced_misc: "{{misc}}",
double_braced_closure: "{{closure}}",
double_braced_constructor: "{{constructor}}",
double_braced_constant: "{{constant}}",
double_braced_opaque: "{{opaque}}",
dropck_eyepatch,
dropck_parametricity,
drop_types_in_const,
dylib,
dyn_trait,
eh_personality,
eh_unwind_resume,
enable,
Encodable,
env,
eq,
err,
Err,
Eq,
Equal,
except,
exclusive_range_pattern,
exhaustive_integer_patterns,
exhaustive_patterns,
existential_type,
expected,
export_name,
expr,
extern_absolute_paths,
external_doc,
extern_crate_item_prelude,
extern_crate_self,
extern_in_paths,
extern_prelude,
extern_types,
f16c_target_feature,
f32,
f64,
feature,
ffi_returns_twice,
field,
field_init_shorthand,
file,
fmt,
fmt_internals,
fn_must_use,
forbid,
format_args,
format_args_nl,
from,
From,
from_desugaring,
from_error,
from_generator,
from_method,
from_ok,
from_usize,
fundamental,
future,
Future,
FxHashSet,
FxHashMap,
gen_future,
generators,
generic_associated_types,
generic_param_attrs,
global_allocator,
global_asm,
globs,
hash,
Hash,
HashSet,
HashMap,
hexagon_target_feature,
hidden,
homogeneous_aggregate,
html_favicon_url,
html_logo_url,
html_no_source,
html_playground_url,
html_root_url,
i128,
i128_type,
i16,
i32,
i64,
i8,
ident,
if_let,
if_while_or_patterns,
ignore,
impl_header_lifetime_elision,
impl_lint_pass,
impl_trait_in_bindings,
import_shadowing,
index,
index_mut,
in_band_lifetimes,
include,
include_bytes,
include_str,
inclusive_range_syntax,
infer_outlives_requirements,
infer_static_outlives_requirements,
inline,
intel,
into_iter,
IntoIterator,
into_result,
intrinsics,
irrefutable_let_patterns,
isize,
issue,
issue_5723_bootstrap,
issue_tracker_base_url,
item,
item_like_imports,
iter,
Iterator,
keyword,
kind,
label,
label_break_value,
lang,
lang_items,
let_chains,
lhs,
lib,
lifetime,
line,
link,
linkage,
link_args,
link_cfg,
link_llvm_intrinsics,
link_name,
link_section,
LintPass,
lint_reasons,
literal,
local_inner_macros,
log_syntax,
loop_break_value,
macro_at_most_once_rep,
macro_escape,
macro_export,
macro_lifetime_matcher,
macro_literal_matcher,
macro_reexport,
macro_rules,
macros_in_extern,
macro_use,
macro_vis_matcher,
main,
managed_boxes,
marker,
marker_trait_attr,
masked,
match_beginning_vert,
match_default_bindings,
may_dangle,
member_constraints,
message,
meta,
min_const_fn,
min_const_unsafe_fn,
mips_target_feature,
mmx_target_feature,
module,
module_path,
more_struct_aliases,
movbe_target_feature,
must_use,
naked,
naked_functions,
name,
needs_allocator,
needs_panic_runtime,
negate_unsigned,
never,
never_type,
new,
next,
__next,
nll,
no_builtins,
no_core,
no_crate_inject,
no_debug,
no_default_passes,
no_implicit_prelude,
no_inline,
no_link,
no_main,
no_mangle,
non_ascii_idents,
None,
non_exhaustive,
non_modrs_mods,
no_stack_check,
no_start,
no_std,
not,
note,
Ok,
omit_gdb_pretty_printer_section,
on,
on_unimplemented,
oom,
ops,
optimize,
optimize_attribute,
optin_builtin_traits,
option,
Option,
option_env,
opt_out_copy,
or,
Ord,
Ordering,
Output,
overlapping_marker_traits,
packed,
panic,
panic_handler,
panic_impl,
panic_implementation,
panic_runtime,
parent_trait,
partial_cmp,
param_attrs,
PartialEq,
PartialOrd,
passes,
pat,
path,
pattern_parentheses,
Pending,
pin,
Pin,
pinned,
platform_intrinsics,
plugin,
plugin_registrar,
plugins,
Poll,
poll_with_tls_context,
powerpc_target_feature,
precise_pointer_size_matching,
prelude,
prelude_import,
primitive,
proc_dash_macro: "proc-macro",
proc_macro,
proc_macro_attribute,
proc_macro_def_site,
proc_macro_derive,
proc_macro_expr,
proc_macro_gen,
proc_macro_hygiene,
proc_macro_internals,
proc_macro_mod,
proc_macro_non_items,
proc_macro_path_invoc,
profiler_runtime,
pub_restricted,
pushpop_unsafe,
quad_precision_float,
question_mark,
quote,
Range,
RangeFrom,
RangeFull,
RangeInclusive,
RangeTo,
RangeToInclusive,
raw_identifiers,
Ready,
reason,
recursion_limit,
reexport_test_harness_main,
reflect,
relaxed_adts,
repr,
repr128,
repr_align,
repr_align_enum,
repr_packed,
repr_simd,
repr_transparent,
re_rebalance_coherence,
result,
Result,
Return,
rhs,
rlib,
rt,
rtm_target_feature,
rust,
rust_2015_preview,
rust_2018_preview,
rust_begin_unwind,
rustc,
RustcDecodable,
RustcEncodable,
rustc_allocator,
rustc_allocator_nounwind,
rustc_allow_const_fn_ptr,
rustc_args_required_const,
rustc_attrs,
rustc_builtin_macro,
rustc_clean,
rustc_const_unstable,
rustc_conversion_suggestion,
rustc_def_path,
rustc_deprecated,
rustc_diagnostic_macros,
rustc_dirty,
rustc_dummy,
rustc_dump_env_program_clauses,
rustc_dump_program_clauses,
rustc_dump_user_substs,
rustc_error,
rustc_expected_cgu_reuse,
rustc_if_this_changed,
rustc_inherit_overflow_checks,
rustc_layout,
rustc_layout_scalar_valid_range_end,
rustc_layout_scalar_valid_range_start,
rustc_macro_transparency,
rustc_mir,
rustc_nonnull_optimization_guaranteed,
rustc_object_lifetime_default,
rustc_on_unimplemented,
rustc_outlives,
rustc_paren_sugar,
rustc_partition_codegened,
rustc_partition_reused,
rustc_peek,
rustc_peek_definite_init,
rustc_peek_maybe_init,
rustc_peek_maybe_uninit,
rustc_private,
rustc_proc_macro_decls,
rustc_promotable,
rustc_regions,
rustc_stable,
rustc_std_internal_symbol,
rustc_symbol_name,
rustc_synthetic,
rustc_test_marker,
rustc_then_this_would_need,
rustc_variance,
rustdoc,
rustfmt,
rust_eh_personality,
rust_eh_unwind_resume,
rust_oom,
__rust_unstable_column,
rvalue_static_promotion,
sanitizer_runtime,
_Self,
self_in_typedefs,
self_struct_ctor,
should_panic,
simd,
simd_ffi,
since,
size,
slice_patterns,
slicing_syntax,
Some,
specialization,
speed,
spotlight,
sse4a_target_feature,
stable,
staged_api,
start,
static_in_const,
staticlib,
static_nobundle,
static_recursion,
std,
std_inject,
str,
stringify,
stmt,
stmt_expr_attributes,
stop_after_dataflow,
struct_field_attributes,
struct_inherit,
structural_match,
struct_variant,
sty,
suggestion,
target_feature,
target_has_atomic,
target_thread_local,
task,
tbm_target_feature,
termination_trait,
termination_trait_test,
test,
test_2018_feature,
test_accepted_feature,
test_case,
test_removed_feature,
test_runner,
then_with,
thread_local,
tool_attributes,
tool_lints,
trace_macros,
trait_alias,
transmute,
transparent,
transparent_enums,
transparent_unions,
trivial_bounds,
Try,
try_blocks,
try_trait,
tt,
tuple_indexing,
Ty,
ty,
type_alias_impl_trait,
TyCtxt,
TyKind,
type_alias_enum_variants,
type_ascription,
type_length_limit,
type_macros,
u128,
u16,
u32,
u64,
u8,
unboxed_closures,
underscore_const_names,
underscore_imports,
underscore_lifetimes,
uniform_paths,
universal_impl_trait,
unmarked_api,
unreachable_code,
unrestricted_attribute_tokens,
unsafe_no_drop_flag,
unsized_locals,
unsized_tuple_coercion,
unstable,
untagged_unions,
unwind,
unwind_attributes,
unwrap_or,
used,
use_extern_macros,
use_nested_groups,
usize,
v1,
val,
vec,
Vec,
vis,
visible_private_types,
volatile,
warn,
warn_directory_ownership,
wasm_import_module,
wasm_target_feature,
while_let,
windows,
windows_subsystem,
Yield,
}
}
#[derive(Copy, Clone, Eq)]
pub struct Ident {
pub name: Symbol,
pub span: Span,
}
impl Ident {
#[inline]
/// Constructs a new identifier from a symbol and a span.
pub const fn new(name: Symbol, span: Span) -> Ident {
Ident { name, span }
}
/// Constructs a new identifier with an empty syntax context.
#[inline]
pub const fn with_empty_ctxt(name: Symbol) -> Ident {
Ident::new(name, DUMMY_SP)
}
#[inline]
pub fn invalid() -> Ident {
Ident::with_empty_ctxt(kw::Invalid)
}
/// Maps an interned string to an identifier with an empty syntax context.
pub fn from_interned_str(string: InternedString) -> Ident {
Ident::with_empty_ctxt(string.as_symbol())
}
/// Maps a string to an identifier with an empty span.
pub fn from_str(string: &str) -> Ident {
Ident::with_empty_ctxt(Symbol::intern(string))
}
/// Maps a string and a span to an identifier.
pub fn from_str_and_span(string: &str, span: Span) -> Ident {
Ident::new(Symbol::intern(string), span)
}
/// Replaces `lo` and `hi` with those from `span`, but keeps the hygiene context.
pub fn with_span_pos(self, span: Span) -> Ident {
Ident::new(self.name, span.with_ctxt(self.span.ctxt()))
}
pub fn without_first_quote(self) -> Ident {
Ident::new(Symbol::intern(self.as_str().trim_start_matches('\'')), self.span)
}
/// "Normalize" ident for use in comparisons using "item hygiene".
/// Identifiers with the same string value become identical if they came from the same "modern"
/// macro (e.g., a `macro` item, but not a `macro_rules` item) and stay different if they came
/// from different "modern" macros.
/// Technically, this operation strips all non-opaque marks from ident's syntactic context.
pub fn modern(self) -> Ident {
Ident::new(self.name, self.span.modern())
}
/// "Normalize" ident for use in comparisons using "local variable hygiene".
/// Identifiers with the same string value become identical if they came from the same
/// non-transparent macro (e.g., `macro` or `macro_rules!` items) and stay different if they
/// came from different non-transparent macros.
/// Technically, this operation strips all transparent marks from ident's syntactic context.
pub fn modern_and_legacy(self) -> Ident {
Ident::new(self.name, self.span.modern_and_legacy())
}
/// Transforms an identifier into one with the same name, but gensymed.
pub fn gensym(self) -> Ident {
let name = with_interner(|interner| interner.gensymed(self.name));
Ident::new(name, self.span)
}
/// Transforms an underscore identifier into one with the same name, but
/// gensymed. Leaves non-underscore identifiers unchanged.
pub fn gensym_if_underscore(self) -> Ident {
if self.name == kw::Underscore { self.gensym() } else { self }
}
// WARNING: this function is deprecated and will be removed in the future.
pub fn is_gensymed(self) -> bool {
with_interner(|interner| interner.is_gensymed(self.name))
}
pub fn as_str(self) -> LocalInternedString {
self.name.as_str()
}
pub fn as_interned_str(self) -> InternedString {
self.name.as_interned_str()
}
}
impl PartialEq for Ident {
fn eq(&self, rhs: &Self) -> bool {
self.name == rhs.name && self.span.ctxt() == rhs.span.ctxt()
}
}
impl Hash for Ident {
fn hash<H: Hasher>(&self, state: &mut H) {
self.name.hash(state);
self.span.ctxt().hash(state);
}
}
impl fmt::Debug for Ident {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}{:?}", self.name, self.span.ctxt())
}
}
impl fmt::Display for Ident {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.name, f)
}
}
impl Encodable for Ident {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
if self.span.ctxt().modern() == SyntaxContext::empty() {
s.emit_str(&self.as_str())
} else { // FIXME(jseyfried): intercrate hygiene
let mut string = "#".to_owned();
string.push_str(&self.as_str());
s.emit_str(&string)
}
}
}
impl Decodable for Ident {
fn decode<D: Decoder>(d: &mut D) -> Result<Ident, D::Error> {
let string = d.read_str()?;
Ok(if !string.starts_with('#') {
Ident::from_str(&string)
} else { // FIXME(jseyfried): intercrate hygiene
Ident::from_str(&string[1..]).gensym()
})
}
}
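// Round-trip sketch: an ident with an empty (modern) syntax context encodes
// as its plain string ("x" -> "x"); one carrying hygiene information gets a
// '#' sentinel ("x" -> "#x") and is decoded back as a fresh gensym, which
// approximates the lost hygiene data (see the FIXMEs above).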
/// A symbol is an interned or gensymed string. A gensym is a symbol that is
/// never equal to any other symbol.
///
/// Conceptually, a gensym can be thought of as a normal symbol with an
/// invisible unique suffix. Gensyms are useful when creating new identifiers
/// that must not match any existing identifiers, e.g. during macro expansion
/// and syntax desugaring. Because gensyms should always be identifiers, all
/// gensym operations are on `Ident` rather than `Symbol`. (Indeed, in the
/// future the gensym-ness may be moved from `Symbol` to hygiene data.)
///
/// Examples:
/// ```
/// assert_eq!(Ident::from_str("x"), Ident::from_str("x"))
/// assert_ne!(Ident::from_str("x").gensym(), Ident::from_str("x"))
/// assert_ne!(Ident::from_str("x").gensym(), Ident::from_str("x").gensym())
/// ```
/// Internally, a symbol is implemented as an index, and all operations
/// (including hashing, equality, and ordering) operate on that index. The use
/// of `newtype_index!` means that `Option<Symbol>` only takes up 4 bytes,
/// because `newtype_index!` reserves the last 256 values for tagging purposes.
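/// For illustration (a sketch in the style of the example above):
/// ```
/// assert_eq!(std::mem::size_of::<Option<Symbol>>(), 4);
/// ```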
///
/// Note that `Symbol` cannot directly be a `newtype_index!` because it
/// implements `fmt::Debug`, `Encodable`, and `Decodable` in special ways.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Symbol(SymbolIndex);
newtype_index! {
pub struct SymbolIndex { .. }
}
impl Symbol {
const fn new(n: u32) -> Self {
Symbol(SymbolIndex::from_u32_const(n))
}
/// Maps a string to its interned representation.
pub fn intern(string: &str) -> Self {
with_interner(|interner| interner.intern(string))
}
    pub fn as_str(self) -> LocalInternedString {
        with_interner(|interner| unsafe {
            LocalInternedString {
                // Lifetime-extending transmute: sound because the interner lives
                // for the whole session and `LocalInternedString` is `!Send`/`!Sync`.
                string: std::mem::transmute::<&str, &str>(interner.get(self))
            }
        })
    }
pub fn as_interned_str(self) -> InternedString {
with_interner(|interner| InternedString {
symbol: interner.interned(self)
})
}
pub fn as_u32(self) -> u32 {
self.0.as_u32()
}
}
impl fmt::Debug for Symbol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let is_gensymed = with_interner(|interner| interner.is_gensymed(*self));
if is_gensymed {
write!(f, "{}({:?})", self, self.0)
} else {
write!(f, "{}", self)
}
}
}
impl fmt::Display for Symbol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.as_str(), f)
}
}
impl Encodable for Symbol {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_str(&self.as_str())
}
}
impl Decodable for Symbol {
fn decode<D: Decoder>(d: &mut D) -> Result<Symbol, D::Error> {
Ok(Symbol::intern(&d.read_str()?))
}
}
// The `&'static str`s in this type actually point into the arena.
//
// Note that normal symbols are indexed upward from 0, and gensyms are indexed
// downward from SymbolIndex::MAX_AS_U32.
#[derive(Default)]
pub struct Interner {
arena: DroplessArena,
names: FxHashMap<&'static str, Symbol>,
strings: Vec<&'static str>,
gensyms: Vec<Symbol>,
}
impl Interner {
fn prefill(init: &[&'static str]) -> Self {
Interner {
strings: init.into(),
names: init.iter().copied().zip((0..).map(Symbol::new)).collect(),
..Default::default()
}
}
pub fn intern(&mut self, string: &str) -> Symbol {
if let Some(&name) = self.names.get(string) {
return name;
}
let name = Symbol::new(self.strings.len() as u32);
        // `from_utf8_unchecked` is safe since the bytes were copied from a `&str`
        // and are therefore known to be valid UTF-8.
let string: &str = unsafe {
str::from_utf8_unchecked(self.arena.alloc_slice(string.as_bytes()))
};
// It is safe to extend the arena allocation to `'static` because we only access
// these while the arena is still alive.
let string: &'static str = unsafe {
&*(string as *const str)
};
self.strings.push(string);
self.names.insert(string, name);
name
}
fn interned(&self, symbol: Symbol) -> Symbol {
if (symbol.0.as_usize()) < self.strings.len() {
symbol
} else {
self.gensyms[(SymbolIndex::MAX_AS_U32 - symbol.0.as_u32()) as usize]
}
}
fn gensymed(&mut self, symbol: Symbol) -> Symbol {
self.gensyms.push(symbol);
Symbol::new(SymbolIndex::MAX_AS_U32 - self.gensyms.len() as u32 + 1)
}
fn is_gensymed(&mut self, symbol: Symbol) -> bool {
symbol.0.as_usize() >= self.strings.len()
}
// Get the symbol as a string. `Symbol::as_str()` should be used in
// preference to this function.
pub fn get(&self, symbol: Symbol) -> &str {
match self.strings.get(symbol.0.as_usize()) {
Some(string) => string,
None => {
let symbol = self.gensyms[(SymbolIndex::MAX_AS_U32 - symbol.0.as_u32()) as usize];
self.strings[symbol.0.as_usize()]
}
}
}
}
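// A minimal test sketch (illustrative addition) of the indexing scheme
// described above: normal symbols count up from 0, gensyms count down from
// `SymbolIndex::MAX_AS_U32`, so the two ranges never collide and both
// resolve back to the underlying string.
#[cfg(test)]
mod interner_tests {
    use super::*;

    #[test]
    fn intern_and_gensym_indices_do_not_collide() {
        let mut i = Interner::default();
        let dog = i.intern("dog");
        // Re-interning the same string yields the same symbol.
        assert_eq!(i.intern("dog"), dog);
        // A gensym gets a fresh index from the top of the range.
        let g = i.gensymed(dog);
        assert!(i.is_gensymed(g));
        assert!(!i.is_gensymed(dog));
        // Both indices resolve to the same underlying string.
        assert_eq!(i.get(dog), "dog");
        assert_eq!(i.get(g), "dog");
    }
}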
// This module has a very short name because it's used a lot.
pub mod kw {
use super::Symbol;
keywords!();
}
// This module has a very short name because it's used a lot.
pub mod sym {
use std::convert::TryInto;
use super::Symbol;
symbols!();
// Get the symbol for an integer. The first few non-negative integers each
// have a static symbol and therefore are fast.
pub fn integer<N: TryInto<usize> + Copy + ToString>(n: N) -> Symbol {
if let Result::Ok(idx) = n.try_into() {
if let Option::Some(&sym) = digits_array.get(idx) {
return sym;
}
}
Symbol::intern(&n.to_string())
}
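    // Sketch of the fast/slow paths (hypothetical values): `integer(7u32)` is
    // served from the prefilled `digits_array` without touching the interner,
    // while `integer(1234u32)` falls back to `Symbol::intern("1234")`.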
}
impl Symbol {
fn is_used_keyword_2018(self) -> bool {
self == kw::Dyn
}
fn is_unused_keyword_2018(self) -> bool {
self >= kw::Async && self <= kw::Try
}
/// Used for sanity checking rustdoc keyword sections.
pub fn is_doc_keyword(self) -> bool {
self <= kw::Union
}
/// A keyword or reserved identifier that can be used as a path segment.
pub fn is_path_segment_keyword(self) -> bool {
self == kw::Super ||
self == kw::SelfLower ||
self == kw::SelfUpper ||
self == kw::Crate ||
self == kw::PathRoot ||
self == kw::DollarCrate
}
/// This symbol can be a raw identifier.
pub fn can_be_raw(self) -> bool {
self != kw::Invalid && self != kw::Underscore && !self.is_path_segment_keyword()
}
}
impl Ident {
    /// Returns `true` for reserved identifiers used internally for elided lifetimes,
    /// unnamed method parameters, crate root module, error recovery, etc.
pub fn is_special(self) -> bool {
self.name <= kw::Underscore
}
/// Returns `true` if the token is a keyword used in the language.
pub fn is_used_keyword(self) -> bool {
// Note: `span.edition()` is relatively expensive, don't call it unless necessary.
self.name >= kw::As && self.name <= kw::While ||
self.name.is_used_keyword_2018() && self.span.rust_2018()
}
/// Returns `true` if the token is a keyword reserved for possible future use.
pub fn is_unused_keyword(self) -> bool {
// Note: `span.edition()` is relatively expensive, don't call it unless necessary.
self.name >= kw::Abstract && self.name <= kw::Yield ||
self.name.is_unused_keyword_2018() && self.span.rust_2018()
}
/// Returns `true` if the token is either a special identifier or a keyword.
pub fn is_reserved(self) -> bool {
self.is_special() || self.is_used_keyword() || self.is_unused_keyword()
}
/// A keyword or reserved identifier that can be used as a path segment.
pub fn is_path_segment_keyword(self) -> bool {
self.name.is_path_segment_keyword()
}
    /// We see this identifier in a normal identifier position, like a variable name or a type.
    /// How was it written originally? Did it use the raw form? Let's try to guess.
pub fn is_raw_guess(self) -> bool {
self.name.can_be_raw() && self.is_reserved()
}
}
// Runs `f` with the global symbol interner stored in the thread-local `GLOBALS`.
#[inline]
fn with_interner<T, F: FnOnce(&mut Interner) -> T>(f: F) -> T {
GLOBALS.with(|globals| f(&mut *globals.symbol_interner.lock()))
}
/// An alternative to `Symbol` and `InternedString`, useful when the chars
/// within the symbol need to be accessed. It is best used for temporary
/// values.
///
/// Because the interner outlives any thread which uses this type, we can
/// safely treat `string`, which points to interner data, as an immortal string,
/// as long as this type never crosses between threads.
//
// FIXME: ensure that the interner outlives any thread which uses
// `LocalInternedString`, by creating a new thread right after constructing the
// interner.
#[derive(Clone, Copy, Hash, PartialOrd, Eq, Ord)]
pub struct LocalInternedString {
string: &'static str,
}
impl LocalInternedString {
/// Maps a string to its interned representation.
pub fn intern(string: &str) -> Self {
let string = with_interner(|interner| {
let symbol = interner.intern(string);
interner.strings[symbol.0.as_usize()]
});
LocalInternedString {
string: unsafe { std::mem::transmute::<&str, &str>(string) }
}
}
pub fn as_interned_str(self) -> InternedString {
InternedString {
symbol: Symbol::intern(self.string)
}
}
#[inline]
pub fn get(&self) -> &str {
        // This returns a valid string since the interner outlives `self`: the interner
        // is created on a thread which outlives every thread that can access this type.
// This type cannot move to a thread which outlives the interner since it does
// not implement Send.
self.string
}
}
impl<U: ?Sized> std::convert::AsRef<U> for LocalInternedString
where
str: std::convert::AsRef<U>
{
#[inline]
fn as_ref(&self) -> &U {
self.string.as_ref()
}
}
impl<T: std::ops::Deref<Target = str>> std::cmp::PartialEq<T> for LocalInternedString {
fn eq(&self, other: &T) -> bool {
self.string == other.deref()
}
}
impl std::cmp::PartialEq<LocalInternedString> for str {
fn eq(&self, other: &LocalInternedString) -> bool {
self == other.string
}
}
impl<'a> std::cmp::PartialEq<LocalInternedString> for &'a str {
fn eq(&self, other: &LocalInternedString) -> bool {
*self == other.string
}
}
impl std::cmp::PartialEq<LocalInternedString> for String {
fn eq(&self, other: &LocalInternedString) -> bool {
self == other.string
}
}
impl<'a> std::cmp::PartialEq<LocalInternedString> for &'a String {
fn eq(&self, other: &LocalInternedString) -> bool {
*self == other.string
}
}
impl !Send for LocalInternedString {}
impl !Sync for LocalInternedString {}
impl std::ops::Deref for LocalInternedString {
type Target = str;
#[inline]
fn deref(&self) -> &str { self.string }
}
impl fmt::Debug for LocalInternedString {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(self.string, f)
}
}
impl fmt::Display for LocalInternedString {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self.string, f)
}
}
impl Decodable for LocalInternedString {
fn decode<D: Decoder>(d: &mut D) -> Result<LocalInternedString, D::Error> {
Ok(LocalInternedString::intern(&d.read_str()?))
}
}
impl Encodable for LocalInternedString {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_str(self.string)
}
}
/// An alternative to `Symbol` that is focused on string contents. It has two
/// main differences to `Symbol`.
///
/// First, its implementations of `Hash`, `PartialOrd` and `Ord` work with the
/// string chars rather than the symbol integer. This is useful when hash
/// stability is required across compile sessions, or a guaranteed sort
/// ordering is required.
///
/// Second, gensym-ness is irrelevant. E.g.:
/// ```
/// assert_ne!(Symbol::gensym("x"), Symbol::gensym("x"))
/// assert_eq!(Symbol::gensym("x").as_interned_str(), Symbol::gensym("x").as_interned_str())
/// ```
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct InternedString {
symbol: Symbol,
}
impl InternedString {
/// Maps a string to its interned representation.
pub fn intern(string: &str) -> Self {
InternedString {
symbol: Symbol::intern(string)
}
}
pub fn with<F: FnOnce(&str) -> R, R>(self, f: F) -> R {
let str = with_interner(|interner| {
interner.get(self.symbol) as *const str
});
        // This is safe because the interner keeps the string alive until the interner
        // itself is dropped, and we know the interner is still alive here: it is
        // accessed through a scoped thread-local, which was alive at the start of this scope.
unsafe { f(&*str) }
}
fn with2<F: FnOnce(&str, &str) -> R, R>(self, other: &InternedString, f: F) -> R {
let (self_str, other_str) = with_interner(|interner| {
(interner.get(self.symbol) as *const str,
interner.get(other.symbol) as *const str)
});
// This is safe for the same reason that `with` is safe.
unsafe { f(&*self_str, &*other_str) }
}
pub fn as_symbol(self) -> Symbol {
self.symbol
}
pub fn as_str(self) -> LocalInternedString {
self.symbol.as_str()
}
}
impl Hash for InternedString {
fn hash<H: Hasher>(&self, state: &mut H) {
self.with(|str| str.hash(state))
}
}
impl PartialOrd<InternedString> for InternedString {
fn partial_cmp(&self, other: &InternedString) -> Option<Ordering> {
if self.symbol == other.symbol {
return Some(Ordering::Equal);
}
self.with2(other, |self_str, other_str| self_str.partial_cmp(other_str))
}
}
impl Ord for InternedString {
    fn cmp(&self, other: &InternedString) -> Ordering {
if self.symbol == other.symbol {
return Ordering::Equal;
}
self.with2(other, |self_str, other_str| self_str.cmp(other_str))
}
}
impl fmt::Debug for InternedString {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.with(|str| fmt::Debug::fmt(&str, f))
}
}
impl fmt::Display for InternedString {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.with(|str| fmt::Display::fmt(&str, f))
}
}
impl Decodable for InternedString {
fn decode<D: Decoder>(d: &mut D) -> Result<InternedString, D::Error> {
Ok(InternedString::intern(&d.read_str()?))
}
}
impl Encodable for InternedString {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.with(|string| s.emit_str(string))
}
}
|
build.rs
|
fn main() {
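    // `cargo:rustc-link-search=all=<path>` adds <path> to the linker search
    // list for every library kind; the paths below point at prebuilt kernel
    // artifacts for the x86_64, aarch64, and host targets.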
let path = "../../kernel/target/x86_64/release/deps";
println!("cargo:rustc-link-search=all={}", path);
let path = "../../kernel/target/aarch64/release/deps";
println!("cargo:rustc-link-search=all={}", path);
let path = "../../kernel/target/release/deps";
println!("cargo:rustc-link-search=all={}", path);
}
|
s3client.py
|
import os
from pathlib import Path, PurePosixPath
from typing import Any, Dict, Iterable, Optional, Union
from ..client import Client, register_client_class
from ..cloudpath import implementation_registry
from .s3path import S3Path
try:
from boto3.session import Session
from boto3.s3.transfer import TransferConfig
from botocore.config import Config
from botocore.exceptions import ClientError
import botocore.session
except ModuleNotFoundError:
implementation_registry["s3"].dependencies_loaded = False
@register_client_class("s3")
class S3Client(Client):
"""Client class for AWS S3 which handles authentication with AWS for [`S3Path`](../s3path/)
instances. See documentation for the [`__init__` method][cloudpathlib.s3.s3client.S3Client.__init__]
for detailed authentication options."""
def __init__(
self,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
no_sign_request: Optional[bool] = False,
botocore_session: Optional["botocore.session.Session"] = None,
profile_name: Optional[str] = None,
boto3_session: Optional["Session"] = None,
local_cache_dir: Optional[Union[str, os.PathLike]] = None,
endpoint_url: Optional[str] = None,
boto3_transfer_config: Optional["TransferConfig"] = None,
):
"""Class constructor. Sets up a boto3 [`Session`](
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html).
Directly supports the same authentication interface, as well as the same environment
variables supported by boto3. See [boto3 Session documentation](
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/session.html).
If no authentication arguments or environment variables are provided, then the client will
be instantiated as anonymous, which will only have access to public buckets.
Args:
aws_access_key_id (Optional[str]): AWS access key ID.
aws_secret_access_key (Optional[str]): AWS secret access key.
            aws_session_token (Optional[str]): Session key for your AWS account. This is only
                needed when you are using temporary credentials.
            no_sign_request (Optional[bool]): If `True`, credentials are not looked up and unsigned
                requests are used to fetch resources; only public resources are accessible. This is
                equivalent to `--no-sign-request` in the AWS CLI (https://docs.aws.amazon.com/cli/latest/reference/).
botocore_session (Optional[botocore.session.Session]): An already instantiated botocore
Session.
profile_name (Optional[str]): Profile name of a profile in a shared credentials file.
boto3_session (Optional[Session]): An already instantiated boto3 Session.
local_cache_dir (Optional[Union[str, os.PathLike]]): Path to directory to use as cache
for downloaded files. If None, will use a temporary directory.
            endpoint_url (Optional[str]): S3 server endpoint URL to use for the constructed boto3 S3 resource and client.
                Set this to access a custom-deployed S3-compatible object store such as MinIO or Ceph.
            boto3_transfer_config (Optional[TransferConfig]): Instantiated TransferConfig for managing S3 transfers.
(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/s3.html#boto3.s3.transfer.TransferConfig)
"""
endpoint_url = endpoint_url or os.getenv("AWS_ENDPOINT_URL")
if boto3_session is not None:
self.sess = boto3_session
else:
self.sess = Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
botocore_session=botocore_session,
profile_name=profile_name,
)
if no_sign_request:
self.s3 = self.sess.resource(
"s3",
endpoint_url=endpoint_url,
config=Config(signature_version=botocore.session.UNSIGNED),
)
self.client = self.sess.client(
"s3",
endpoint_url=endpoint_url,
config=Config(signature_version=botocore.session.UNSIGNED),
)
else:
self.s3 = self.sess.resource("s3", endpoint_url=endpoint_url)
self.client = self.sess.client("s3", endpoint_url=endpoint_url)
self.boto3_transfer_config = boto3_transfer_config
super().__init__(local_cache_dir=local_cache_dir)
def _get_metadata(self, cloud_path: S3Path) -> Dict[str, Any]:
data = self.s3.ObjectSummary(cloud_path.bucket, cloud_path.key).get()
return {
"last_modified": data["LastModified"],
"size": data["ContentLength"],
"etag": data["ETag"],
"mime": data["ContentType"],
"extra": data["Metadata"],
}
def _download_file(self, cloud_path: S3Path, local_path: Union[str, os.PathLike]) -> Path:
local_path = Path(local_path)
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.download_file(str(local_path), Config=self.boto3_transfer_config)
return local_path
def _is_file_or_dir(self, cloud_path: S3Path) -> Optional[str]:
# short-circuit the root-level bucket
if not cloud_path.key:
return "dir"
# get first item by listing at least one key
s3_obj = self._s3_file_query(cloud_path)
if s3_obj is None:
return None
# since S3 only returns files when filtering objects:
# if the first item key is equal to the path key, this is a file
if s3_obj.key == cloud_path.key:
# "fake" directories on S3 can be created in the console UI
# these are 0-size keys that end in `/`
# Ref: https://github.com/boto/boto3/issues/377
if s3_obj.key.endswith("/") and s3_obj.content_length == 0:
return "dir"
else:
return "file"
else:
return "dir"
def _exists(self, cloud_path: S3Path) -> bool:
return self._s3_file_query(cloud_path) is not None
    def _s3_file_query(self, cloud_path: S3Path):
        """Boto3 query used for quick checks of existence and whether the path is a file or a directory."""
        # first, check if this is an object that we can access directly
        try:
            obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
            obj.load()
            return obj
        # else, confirm it is a dir by filtering to the first item under the prefix
        except ClientError:
            return next(
                (
                    obj
                    for obj in (
                        self.s3.Bucket(cloud_path.bucket)
                        .objects.filter(Prefix=cloud_path.key)
                        .limit(1)
                    )
                ),
                None,
            )
def _list_dir(self, cloud_path: S3Path, recursive=False) -> Iterable[S3Path]:
bucket = self.s3.Bucket(cloud_path.bucket)
prefix = cloud_path.key
if prefix and not prefix.endswith("/"):
prefix += "/"
yielded_dirs = set()
if recursive:
for o in bucket.objects.filter(Prefix=prefix):
# get directory from this path
for parent in PurePosixPath(o.key[len(prefix) :]).parents:
# if we haven't surfaced their directory already
if parent not in yielded_dirs and str(parent) != ".":
yield self.CloudPath(f"s3://{cloud_path.bucket}/{prefix}{parent}")
yielded_dirs.add(parent)
yield self.CloudPath(f"s3://{o.bucket_name}/{o.key}")
else:
            # non-recursive listing is best done with the older client API rather than the resource API
paginator = self.client.get_paginator("list_objects")
for result in paginator.paginate(
Bucket=cloud_path.bucket, Prefix=prefix, Delimiter="/"
):
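                # with Delimiter="/", S3 groups keys one level below the prefix
                # into CommonPrefixes (the "subdirectories") and returns direct
                # children as Contents, so no parent tracking is needed here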
# sub directory names
for result_prefix in result.get("CommonPrefixes", []):
yield self.CloudPath(f"s3://{cloud_path.bucket}/{result_prefix.get('Prefix')}")
# files in the directory
for result_key in result.get("Contents", []):
if result_key.get('Size') > 0:
yield self.CloudPath(f"s3://{cloud_path.bucket}/{result_key.get('Key')}")
def _move_file(self, src: S3Path, dst: S3Path, remove_src: bool = True) -> S3Path:
        # if source and destination are the same, this is just a "touch":
        # copy the object onto itself with the REPLACE metadata directive
if src == dst:
o = self.s3.Object(src.bucket, src.key)
o.copy_from(
CopySource={"Bucket": src.bucket, "Key": src.key},
Metadata=self._get_metadata(src).get("extra", {}),
MetadataDirective="REPLACE",
)
else:
target = self.s3.Object(dst.bucket, dst.key)
target.copy({"Bucket": src.bucket, "Key": src.key})
if remove_src:
self._remove(src)
return dst
def _remove(self, cloud_path: S3Path) -> None:
try:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
# will throw if not a file
obj.load()
resp = obj.delete()
assert resp.get("ResponseMetadata").get("HTTPStatusCode") == 204
except ClientError:
            # try to delete as a directory instead
bucket = self.s3.Bucket(cloud_path.bucket)
prefix = cloud_path.key
if prefix and not prefix.endswith("/"):
prefix += "/"
resp = bucket.objects.filter(Prefix=prefix).delete()
            # ensure the directory was deleted; if cloud_path did not exist at all,
            # resp will be [], so there is nothing to check
if resp:
assert resp[0].get("ResponseMetadata").get("HTTPStatusCode") == 200
def _upload_file(self, local_path: Union[str, os.PathLike], cloud_path: S3Path) -> S3Path:
obj = self.s3.Object(cloud_path.bucket, cloud_path.key)
obj.upload_file(str(local_path), Config=self.boto3_transfer_config)
return cloud_path
S3Client.S3Path = S3Client.CloudPath # type: ignore
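# Usage sketch (hypothetical bucket and key; anonymous access only works for
# public buckets, mirroring `aws s3 ... --no-sign-request`):
if __name__ == "__main__":
    client = S3Client(no_sign_request=True)
    path = client.CloudPath("s3://some-public-bucket/path/to/file.csv")
    if path.exists():
        # first access downloads the object into the local cache directory
        print(path.fspath)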
|
component---src-pages-index-jsx-f901ab7da514504425f7.js
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[2],{137:function(e,t,a){"use strict";a.r(t);a(140);var r=a(0),i=a.n(r),n=a(138),l=(a(144),a(145),a(164)),o=a(146),c=a.n(o),m=a(154),s=a.n(m),d=function(){var e=s.a.siteTitle,t=s.a.siteDescription,a="/"===s.a.pathPrefix?"":s.a.pathPrefix,r=s.a.siteUrl+a+s.a.siteLogo,n=[{"@context":"http://schema.org","@type":"WebSite",url:s.a.siteUrl+s.a.pathPrefix,name:e,alternateName:s.a.siteTitleAlt?s.a.siteTitleAlt:""}];return i.a.createElement(c.a,null,i.a.createElement("html",{lang:s.a.siteLanguage}),i.a.createElement("title",null,e),i.a.createElement("link",{rel:"apple-touch-icon",href:"/favicons/apple-touch-icon.png"}),i.a.createElement("link",{rel:"icon",type:"image/png",sizes:"32x32",href:"/favicons/favicon-32x32.png"}),i.a.createElement("link",{rel:"icon",type:"image/png",sizes:"16x16",href:"/favicons/favicon-16x16.png"}),i.a.createElement("link",{rel:"shortcut icon",href:"favicon.ico"}),i.a.createElement("meta",{name:"msapplication-TileColor",content:s.a.backgroundColor}),i.a.createElement("meta",{name:"msapplication-config",content:"browserconfig.xml"}),i.a.createElement("meta",{name:"description",content:t}),i.a.createElement("meta",{name:"image",content:r}),i.a.createElement("script",{type:"application/ld+json"},JSON.stringify(n)),i.a.createElement("meta",{property:"og:locale",content:s.a.ogLanguage}),i.a.createElement("meta",{property:"og:site_name",content:s.a.ogSiteName}),i.a.createElement("meta",{property:"og:title",content:e}),i.a.createElement("meta",{property:"og:description",content:t}),i.a.createElement("meta",{property:"og:image",content:r}),i.a.createElement("meta",{property:"fb:app_id",content:s.a.siteFBAppID?s.a.siteFBAppID:""}),i.a.createElement("meta",{name:"twitter:card",content:"summary_large_image"}),i.a.createElement("meta",{name:"twitter:creator",content:s.a.userTwitter?s.a.userTwitter:""}),i.a.createElement("meta",{name:"twitter:title",content:e}),i.a.createElement("meta",{name:"twitter:description",content:t}),i.a.createElement("meta",{name:"twitter:image",content:r}))},p=a(4),g=a.n(p),f=a(139),h=Object(n.b)("svg",{target:"e13mltvi0"})({stroke:"currentColor",position:"absolute"},";color:",function(e){return e.stroke},";width:",function(e){return e.svgWidth},";fill:",function(e){return e.fill},";left:",function(e){return e.left},";top:",function(e){return e.top},";"),b={triangle:{shape:i.a.createElement("polygon",{strokeWidth:"1px",strokeLinejoin:"round",strokeMiterlimit:"10",points:"14.921,2.27 28.667,25.5 1.175,25.5 "}),viewBox:"0 0 30 30"},circle:{shape:i.a.createElement("path",{d:"M15,30A15,15,0,1,1,30,15,15,15,0,0,1,15,30ZM15,6.23A8.77,8.77,0,1,0,23.77,15,8.77,8.77,0,0,0,15,6.23Z"}),viewBox:"0 0 30 30"},arrowUp:{shape:i.a.createElement(i.a.Fragment,null,i.a.createElement("path",{d:"M28.74,20.81H1.26a1.26,1.26,0,0,1-1-2L14.16.5a1.25,1.25,0,0,1,1-.5h0a1.24,1.24,0,0,1,1,.51L29.75,18.8a1.25,1.25,0,0,1-1,2ZM3.81,18.29H26.22L15.16,3.37Z"})," ",i.a.createElement("path",{d:"M28.74,42H1.26a1.28,1.28,0,0,1-1.13-.71A1.26,1.26,0,0,1,.26,40l13.9-18.29a1.28,1.28,0,0,1,1-.5h0a1.24,1.24,0,0,1,1,.51L29.75,40a1.26,1.26,0,0,1,.12,1.32A1.28,1.28,0,0,1,28.74,42ZM3.81,39.47H26.22L15.16,24.55Z"})),viewBox:"0 0 30 
42"},upDown:{shape:i.a.createElement(i.a.Fragment,null,i.a.createElement("path",{d:"M28.74,44.58a1.28,1.28,0,0,1-1-.51L15.16,27.13l-12.89,17a1.26,1.26,0,1,1-2-1.53l13.9-18.29a1.34,1.34,0,0,1,1-.5,1.24,1.24,0,0,1,1,.51L29.75,42.56a1.27,1.27,0,0,1-1,2Z"}),i.a.createElement("path",{d:"M14.83,20.82h0a1.28,1.28,0,0,1-1-.52L.25,2a1.27,1.27,0,0,1,2-1.51L14.84,17.45,27.73.5a1.26,1.26,0,0,1,2,1.53L15.84,20.32A1.28,1.28,0,0,1,14.83,20.82Z"})),viewBox:"0 0 30 44.58"},box:{shape:i.a.createElement("path",{d:"M28,2V28H2V2H28m.13-2H1.88A1.88,1.88,0,0,0,0,1.88V28.13A1.88,1.88,0,0,0,1.88,30H28.13A1.87,1.87,0,0,0,30,28.13V1.88A1.88,1.88,0,0,0,28.13,0Z"}),viewBox:"0 0 30 30"},hexa:{shape:i.a.createElement("polygon",{strokeLinejoin:"round",strokeMiterlimit:"10",points:"27.5,21.904 15,28.809 2.5,21.904 2.5,8.095 15,1.19 27.5,8.095 "}),viewBox:"0 0 30 30"}},u=function(e){var t=e.stroke,a=e.fill,r=e.width,n=e.icon,l=e.left,o=e.top,c=e.className;return i.a.createElement(h,{viewBox:b[n].viewBox,stroke:t,fill:a,svgWidth:f.width[""+r],left:l,top:o,className:c},b[n].shape)},w=u;u.propTypes={stroke:g.a.string,fill:g.a.string,width:g.a.number,icon:g.a.oneOf(["triangle","circle","arrowUp","upDown","box","hexa"]).isRequired,left:g.a.string,top:g.a.string,className:g.a.string},u.defaultProps={stroke:"transparent",width:8,fill:"none",left:"0%",top:"0%",className:"fancy-icon"};a(160);var x=Object(n.b)("a",{target:"e1383vsv0"})("width:100%;",{boxShadow:"0 15px 30px 0 rgba(0, 0, 0, .11), 0 5px 15px 0 rgba(0, 0, 0, .08)",position:"relative",textDecoration:"none",borderRadius:".5rem",paddingLeft:"2rem",paddingRight:"2rem",paddingTop:"2rem",paddingBottom:"2rem",color:"#fff","@media (min-width: 600px)":{paddingTop:"6rem",paddingBottom:"6rem"}},";background:",function(e){return e.bg},";transition:transform 0.4s cubic-bezier(0.175,0.885,0.32,1.275);&:hover{transform:translateY(-5px);}"),E=Object(n.b)("div",{target:"e1383vsv1"})({opacity:".75",fontFamily:'"Open Sans", -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif',fontSize:".875rem","@media (min-width: 600px)":{fontSize:"1rem"}},";text-shadow:0 2px 10px rgba(0,0,0,0.3);"),y=Object(n.b)("div",{target:"e1383vsv2"})({color:"#fff",textTransform:"uppercase",fontSize:"1.5rem",letterSpacing:".05em",fontFamily:'"Open Sans", -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif',paddingTop:"2rem","@media (min-width: 600px)":{fontSize:"1.875rem"},"@media (min-width: 1200px)":{fontSize:"2.25rem"}},";text-shadow:0 2px 10px rgba(0,0,0,0.3);"),k=function(e){var t=e.title,a=e.link,r=e.children,n=e.bg;return i.a.createElement(x,{href:a,target:"_blank",rel:"noopener noreferrer",bg:n},i.a.createElement(E,null,r),i.a.createElement(y,null,t))},v=k;k.propTypes={title:g.a.string.isRequired,link:g.a.string.isRequired,children:g.a.oneOfType([g.a.string,g.a.element]).isRequired,bg:g.a.string.isRequired};var S=Object(n.c)("from{transform:rotate(0deg);}to{transform:rotate(360deg);}"),j=Object(n.c)('0%{d:path("M 0 100 Q 250 50 400 200 Q 550 350 800 300 L 800 0 L 0 0 L 0 100 Z");}50%{d:path("M 0 100 Q 200 150 400 200 Q 600 250 800 300 L 800 0 L 0 0 L 0 100 Z");}100%{d:path("M 0 100 Q 150 350 400 200 Q 650 50 800 300 L 800 0 L 0 0 L 0 100 
Z");}'),L=Object(n.c)("from{transform:translateY(0);}to{transform:translateY(30px);}"),I=Object(n.c)("from{transform:translateY(0);}to{transform:translateY(200px);}"),O=Object(n.b)("div",{target:"e18yluu40"})("animation:",L," 4s ease-in-out infinite alternate;",{top:"0",right:"0",bottom:"0",left:"0",position:"absolute"},";"),C=Object(n.b)("div",{target:"e18yluu41"})("animation:",I," 18s ease-in-out infinite alternate;",{top:"0",right:"0",bottom:"0",left:"0",position:"absolute"},";"),A=Object(n.a)("animation:",j," 20s linear infinite alternate;"),D=Object(n.a)({display:"none","@media (min-width: 1200px)":{display:"block"}},";"),M=a(162),N=a.n(M),z=a(163),B=a.n(z),F=a(72);Object(F.injectGlobal)("*,*:before,*:after{box-sizing:inherit;}html{text-rendering:optimizeLegibility;overflow-x:hidden;box-sizing:border-box;-ms-overflow-style:scrollbar;-webkit-tap-highlight-color:rgba(0,0,0,0);background-color:#161719;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;}html,body{width:100%;height:100%;margin:0;padding:0;}");var T=Object(n.b)(l.b,{target:"e1jmbv3i0"})({position:"absolute",width:"100%",height:"100%"},";background:",function(e){return e.bg},";svg{fill:",function(e){return e.fill},";}clip-path:",function(e){return e.clipPath},";"),H=Object(n.b)(T,{target:"e1jmbv3i1"})("clip-path:polygon(0 15%,100% 25%,100% 85%,0 75%);"),R=Object(n.b)(l.b,{target:"e1jmbv3i2"})({padding:"1.5rem",justifyContent:"center",alignItems:"center",display:"flex",zIndex:"50","@media (min-width: 600px)":{padding:"3rem"},"@media (min-width: 900px)":{padding:"6rem"}},";"),U=Object(n.b)("div",{target:"e1jmbv3i3"})({width:"100%","@media (min-width: 1200px)":{width:"66.66667%"}},";"),P=Object(n.b)("div",{target:"e1jmbv3i4"})({width:"100%",textAlign:"center","@media (min-width: 1600px)":{width:"66.66667%"},"@media (min-width: 900px)":{textAlign:"left"}},";"),Z=Object(n.b)("h1",{target:"e1jmbv3i5"})({fontSize:"3rem",fontFamily:'"Cantata One", Constantia, Lucida Bright, Lucidabright, Lucida Serif, Lucida, DejaVu Serif, Bitstream Vera Serif, Liberation Serif, Georgia, serif',color:"#fff",marginBottom:"1.5rem",letterSpacing:".05em","@media (min-width: 900px)":{fontSize:"5rem"}},";text-shadow:0 5px 35px rgba(255,255,255,0.15);"),W=Object(n.b)("h1",{target:"e1jmbv3i6"})({fontSize:"2.25rem",fontFamily:'"Cantata One", Constantia, Lucida Bright, Lucidabright, Lucida Serif, Lucida, DejaVu Serif, Bitstream Vera Serif, Liberation Serif, Georgia, serif',color:"#fff",marginBottom:"2rem",letterSpacing:".05em",position:"relative",display:"inline-block","@media (min-width: 900px)":{fontSize:"2.25rem"}},";text-shadow:0 2px 10px rgba(0,0,0,0.2);&:before{content:'';width:40px;height:40px;background:url(",N.a,");position:absolute;background-size:40px;animation:",S," 4s linear infinite;left:-60px;top:5px;}"),Q=Object(n.b)("p",{target:"e1jmbv3i7"})({fontSize:"1.5rem",fontFamily:'"Open Sans", -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif',color:"#fff",marginTop:"2rem","@media (min-width: 900px)":{fontSize:"2.25rem"},"@media (min-width: 1600px)":{width:"75%"}},";text-shadow:0 2px 15px rgba(0,0,0,0.2);"),V=Object(n.b)("div",{target:"e1jmbv3i8"})({display:"flex",flexWrap:"wrap",justifyContent:"space-between",marginTop:"2rem"},";display:grid;grid-gap:4rem;grid-template-columns:repeat(2,1fr);@media (max-width:1200px){grid-gap:3rem;}@media 
(max-width:900px){grid-template-columns:1fr;grid-gap:2rem;}"),G=Object(n.b)("div",{target:"e1jmbv3i9"})({position:"absolute",bottom:"0",width:"100%"},";transform:matrix(1,0,0,-1,0,0);"),Y=Object(n.b)("div",{target:"e1jmbv3i10"})({position:"relative",height:"100%"},";svg{width:100%;height:40vh;}"),J=Object(n.b)("div",{target:"e1jmbv3i11"})({display:"flex",flexDirection:"column",alignItems:"center",marginTop:"2rem","@media (min-width: 900px)":{flexDirection:"row"}},";"),_=Object(n.b)("img",{target:"e1jmbv3i12"})({borderRadius:"9999px",width:"8rem",boxShadow:"0 15px 30px 0 rgba(0, 0, 0, .11), 0 5px 15px 0 rgba(0, 0, 0, .08)",height:"auto","@media (min-width: 1200px)":{width:"12rem"}},";"),q=Object(n.b)("span",{target:"e1jmbv3i13"})({color:"#fff",paddingTop:"3rem",fontSize:"1.5rem","@media (min-width: 900px)":{paddingTop:"0",paddingLeft:"3rem",fontSize:"1.875rem"},"@media (min-width: 1200px)":{fontSize:"2.25rem"}},";"),K=Object(n.b)("p",{target:"e1jmbv3i14"})({color:"#dae4e9",fontSize:"1.125rem",fontFamily:'"Open Sans", -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif',paddingTop:"1.5rem",textAlign:"justify","@media (min-width: 600px)":{fontSize:"1.25rem",paddingTop:"3rem"},"@media (min-width: 900px)":{fontSize:"1.5rem"}},";"),X=Object(n.b)("p",{target:"e1jmbv3i15"})({color:"#dae4e9",fontFamily:'"Open Sans", -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif',fontSize:"1.25rem","@media (min-width: 600px)":{fontSize:"1.5rem"},"@media (min-width: 900px)":{fontSize:"1.875rem"}},";a{color:#e07628;text-decoration:none;}"),$=Object(n.b)("footer",{target:"e1jmbv3i16"})({textAlign:"center",color:"#9babb4",position:"absolute",bottom:"0",padding:"1.5rem",fontFamily:'"Open Sans", -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif',"@media (min-width: 900px)":{fontSize:"1.125rem"}},";a{color:#e07628;text-decoration:none;}");t.default=function(){return 
i.a.createElement(i.a.Fragment,null,i.a.createElement(d,null),i.a.createElement(l.a,{pages:5},i.a.createElement(T,{speed:.2,offset:0},i.a.createElement(O,null,i.a.createElement(w,{icon:"triangle",className:D,width:48,stroke:f.colors.orange,left:"10%",top:"20%"}),i.a.createElement(w,{icon:"hexa",width:48,stroke:f.colors.red,left:"60%",top:"70%"}),i.a.createElement(w,{icon:"box",width:6,fill:f.colors["grey-darker"],left:"60%",top:"15%"})),i.a.createElement(C,null,i.a.createElement(w,{icon:"arrowUp",className:D,width:16,fill:f.colors["blue-dark"],left:"80%",top:"10%"}),i.a.createElement(w,{icon:"triangle",width:12,stroke:f.colors.white,left:"90%",top:"50%"}),i.a.createElement(w,{icon:"circle",width:16,fill:f.colors["grey-darker"],left:"70%",top:"90%"}),i.a.createElement(w,{icon:"triangle",width:16,stroke:f.colors["grey-darkest"],left:"30%",top:"65%"}),i.a.createElement(w,{icon:"circle",width:6,fill:f.colors["grey-darkest"],left:"75%",top:"10%"}),i.a.createElement(w,{icon:"upDown",className:D,width:8,fill:f.colors["grey-darkest"],left:"45%",top:"10%"})),i.a.createElement(w,{icon:"circle",className:D,width:24,fill:f.colors["grey-darker"],left:"5%",top:"70%"}),i.a.createElement(w,{icon:"circle",width:6,fill:f.colors["grey-darkest"],left:"4%",top:"20%"}),i.a.createElement(w,{icon:"circle",width:12,fill:f.colors["grey-darkest"],left:"50%",top:"60%"}),i.a.createElement(w,{icon:"upDown",width:8,fill:f.colors["grey-darkest"],left:"95%",top:"90%"}),i.a.createElement(w,{icon:"upDown",className:D,width:24,fill:f.colors["grey-darker"],left:"40%",top:"80%"}),i.a.createElement(w,{icon:"triangle",width:8,stroke:f.colors["grey-darker"],left:"25%",top:"5%"}),i.a.createElement(w,{icon:"circle",width:64,fill:f.colors.green,left:"95%",top:"5%"}),i.a.createElement(w,{icon:"box",className:D,width:64,fill:f.colors.purple,left:"5%",top:"90%"}),i.a.createElement(w,{icon:"box",width:6,fill:f.colors["grey-darkest"],left:"10%",top:"10%"}),i.a.createElement(w,{icon:"box",width:12,fill:f.colors["grey-darkest"],left:"40%",top:"30%"}),i.a.createElement(w,{icon:"hexa",width:16,stroke:f.colors["grey-darker"],left:"10%",top:"50%"}),i.a.createElement(w,{icon:"hexa",width:8,stroke:f.colors["grey-darker"],left:"80%",top:"70%"})),i.a.createElement(R,{speed:.4,offset:0},i.a.createElement(U,null,i.a.createElement(Z,null,"Hello, ",i.a.createElement("br",null)," I'm John Doe."),i.a.createElement(Q,null,"I'm creating noice web experiences for the next generation of consumer-facing companies."))),i.a.createElement(H,{bg:"linear-gradient(to right, SlateBlue 0%, DeepSkyBlue 100%)",speed:-.2,offset:1.1,factor:2}),i.a.createElement(R,{speed:.4,offset:1.2,factor:2},i.a.createElement(P,null,i.a.createElement(W,null,"Projects"),i.a.createElement(V,null,i.a.createElement(v,{title:"Freiheit",link:"https://www.behance.net/gallery/58937147/Freiheit",bg:"linear-gradient(to right, #D4145A 0%, #FBB03B 100%)"},"This project is my entry to Adobe's #ChallengeYourPerspective contest."),i.a.createElement(v,{title:"Harry Potter",link:"https://www.behance.net/gallery/52915793/Harry-Potter",bg:"linear-gradient(to right, #662D8C 0%, #ED1E79 100%)"},"I entered the DOCMA 2017 award with this Harry Potter inspired image."),i.a.createElement(v,{title:"Tomb Raider",link:"https://www.behance.net/gallery/43907099/Tomb-Raider",bg:"linear-gradient(to right, #009245 0%, #FCEE21 100%)"},"Recreation of a Tomb Raider Wallpaper (Fan Art)"),i.a.createElement(v,{title:"Eagle",link:"https://www.behance.net/gallery/38068151/Eagle",bg:"linear-gradient(to right, #D585FF 0%, 
#00FFEE 100%)"},"A fantasy image manipulation relocating the habitat of wild animals.")))),i.a.createElement(T,{speed:.1,offset:1,factor:2},i.a.createElement(O,null,i.a.createElement(w,{icon:"box",width:6,fill:f.colors.white,left:"85%",top:"75%"}),i.a.createElement(w,{icon:"upDown",width:8,fill:f.colors.teal,left:"70%",top:"20%"}),i.a.createElement(w,{icon:"triangle",width:8,stroke:f.colors.orange,left:"25%",top:"5%"}),i.a.createElement(w,{icon:"circle",className:D,width:24,fill:f.colors.white,left:"17%",top:"60%"})),i.a.createElement(C,null,i.a.createElement(w,{icon:"arrowUp",className:D,width:16,fill:f.colors.green,left:"20%",top:"90%"}),i.a.createElement(w,{icon:"triangle",width:12,stroke:f.colors.white,left:"90%",top:"30%"}),i.a.createElement(w,{icon:"circle",width:16,fill:f.colors.yellow,left:"70%",top:"90%"}),i.a.createElement(w,{icon:"triangle",className:D,width:16,stroke:f.colors.teal,left:"18%",top:"75%"}),i.a.createElement(w,{icon:"circle",width:6,fill:f.colors.white,left:"75%",top:"10%"}),i.a.createElement(w,{icon:"upDown",className:D,width:8,fill:f.colors.green,left:"45%",top:"10%"})),i.a.createElement(w,{icon:"circle",width:6,fill:f.colors.white,left:"4%",top:"20%"}),i.a.createElement(w,{icon:"circle",width:12,fill:f.colors.pink,left:"80%",top:"60%"}),i.a.createElement(w,{icon:"box",width:6,fill:f.colors.orange,left:"10%",top:"10%"}),i.a.createElement(w,{icon:"box",width:12,fill:f.colors.yellow,left:"29%",top:"26%"}),i.a.createElement(w,{icon:"hexa",width:16,stroke:f.colors.red,left:"75%",top:"30%"}),i.a.createElement(w,{icon:"hexa",width:8,stroke:f.colors.yellow,left:"80%",top:"70%"})),i.a.createElement(T,{bg:"#23262b",clipPath:"polygon(0 16%, 100% 4%, 100% 82%, 0 94%)",speed:.2,offset:3}),i.a.createElement(T,{speed:.1,offset:3},i.a.createElement(O,null,i.a.createElement(w,{icon:"box",className:D,width:6,fill:f.colors.blue,left:"50%",top:"75%"}),i.a.createElement(w,{icon:"upDown",className:D,width:8,fill:f.colors["grey-darkest"],left:"70%",top:"20%"}),i.a.createElement(w,{icon:"triangle",width:8,stroke:f.colors["grey-darkest"],left:"25%",top:"5%"}),i.a.createElement(w,{icon:"upDown",className:D,width:24,fill:f.colors.orange,left:"80%",top:"80%"})),i.a.createElement(C,null,i.a.createElement(w,{icon:"arrowUp",className:D,width:16,fill:f.colors.purple,left:"5%",top:"80%"}),i.a.createElement(w,{icon:"triangle",width:12,stroke:f.colors.white,left:"95%",top:"50%"}),i.a.createElement(w,{icon:"circle",width:6,fill:f.colors.white,left:"85%",top:"15%"}),i.a.createElement(w,{icon:"upDown",className:D,width:8,fill:f.colors["grey-darkest"],left:"45%",top:"10%"})),i.a.createElement(w,{icon:"circle",width:6,fill:f.colors.white,left:"4%",top:"20%"}),i.a.createElement(w,{icon:"circle",width:12,fill:f.colors["grey-darkest"],left:"70%",top:"60%"}),i.a.createElement(w,{icon:"box",width:6,fill:f.colors.orange,left:"10%",top:"10%"}),i.a.createElement(w,{icon:"box",width:12,fill:f.colors["grey-darkest"],left:"20%",top:"30%"}),i.a.createElement(w,{icon:"hexa",width:8,stroke:f.colors["grey-darkest"],left:"80%",top:"70%"})),i.a.createElement(R,{speed:.4,offset:3},i.a.createElement(P,null,i.a.createElement(W,null,"About"),i.a.createElement(J,null,i.a.createElement(_,{src:B.a,alt:"John Doe"}),i.a.createElement(q,null,"The English language can not fully capture the depth and complexity of my thoughts. So I'm incorporating Emoji into my speech to better express myself. Winky face.")),i.a.createElement(K,null,"You know the way you feel when you see a picture of two otters holding hands? 
That's how you're gonna feel every day. My mother cried the day I was born because she knew she’d never be prettier than me. You should make me your campaign manager. I was born for politics. I have great hair and I love lying. Captain? The kids want to know where Paulie the Pigeon is. I told them he got sucked up into an airplane engine, is that all right?"))),i.a.createElement(T,{fill:"#23262b",speed:.2,offset:4},i.a.createElement(G,null,i.a.createElement(Y,null,i.a.createElement("svg",{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 800 338.05",preserveAspectRatio:"none"},i.a.createElement("path",{className:A},i.a.createElement("animate",{attributeName:"d",values:"M 0 100 Q 250 50 400 200 Q 550 350 800 300 L 800 0 L 0 0 L 0 100 Z;M 0 100 Q 200 150 400 200 Q 600 250 800 300 L 800 0 L 0 0 L 0 100 Z;M 0 100 Q 150 350 400 200 Q 650 50 800 300 L 800 0 L 0 0 L 0 100 Z",repeatCount:"indefinite",dur:"30s"})))))),i.a.createElement(R,{speed:.4,offset:4},i.a.createElement(P,null,i.a.createElement(W,null,"Get in touch"),i.a.createElement(X,null,"Say ",i.a.createElement("a",{href:"mailto:[email protected]"},"Hi")," or find me on other platforms:"," ",i.a.createElement("a",{href:"https://dribbble.com/LekoArts"},"Dribbble")," &"," ",i.a.createElement("a",{href:"https://www.instagram.com/lekoarts.de/"},"Instagram"))),i.a.createElement($,null,"© 2018 by Gatsby Starter Portfolio Cara."," ",i.a.createElement("a",{href:"https://github.com/LekoArts/gatsby-starter-portfolio-cara"},"Github Repository"),".")),i.a.createElement(T,{speed:.1,offset:4},i.a.createElement(O,null,i.a.createElement(w,{icon:"upDown",className:D,width:8,fill:f.colors["grey-darkest"],left:"70%",top:"20%"}),i.a.createElement(w,{icon:"triangle",width:8,stroke:f.colors["grey-darkest"],left:"25%",top:"5%"})),i.a.createElement(C,null,i.a.createElement(w,{icon:"triangle",width:12,stroke:f.colors.white,left:"95%",top:"50%"}),i.a.createElement(w,{icon:"circle",width:6,fill:f.colors.white,left:"85%",top:"15%"}),i.a.createElement(w,{icon:"upDown",className:D,width:8,fill:f.colors["grey-darkest"],left:"45%",top:"10%"})),i.a.createElement(w,{icon:"circle",width:6,fill:f.colors.white,left:"4%",top:"20%"}),i.a.createElement(w,{icon:"circle",width:12,fill:f.colors["grey-darkest"],left:"70%",top:"60%"}),i.a.createElement(w,{icon:"box",width:12,fill:f.colors["grey-darkest"],left:"20%",top:"30%"}),i.a.createElement(w,{icon:"hexa",width:8,stroke:f.colors["grey-darkest"],left:"80%",top:"70%"}))))}},139:function(e,t,a){a(31);a(155)();var 
r={transparent:"transparent","blue-black":"#161719","blue-grey":"#23262b",black:"#222b2f","grey-darkest":"#273238","grey-darker":"#364349","grey-dark":"#70818a",grey:"#9babb4","grey-light":"#dae4e9","grey-lighter":"#f3f7f9","grey-lightest":"#fafcfc",white:"#ffffff","red-darkest":"#420806","red-darker":"#6a1b19","red-dark":"#cc1f1a",red:"#bf381a","red-light":"#ef5753","red-lighter":"#f9acaa","red-lightest":"#fcebea","orange-darkest":"#542605","orange-darker":"#7f4012","orange-dark":"#c1611f",orange:"#e07628","orange-light":"#ffa31b","orange-lighter":"#fcd9b6","orange-lightest":"#fff5eb","yellow-darkest":"#453411","yellow-darker":"#684f1d","yellow-dark":"#f2d024",yellow:"#e9af32","yellow-light":"#fff382","yellow-lighter":"#fff9c2","yellow-lightest":"#fcfbeb","green-darkest":"#032d19","green-darker":"#0b4228","green-dark":"#1f9d55",green:"#38c172","green-light":"#51d88a","green-lighter":"#a2f5bf","green-lightest":"#e3fcec","teal-darkest":"#0d3331","teal-darker":"#174e4b","teal-dark":"#38a89d",teal:"#4dc0b5","teal-light":"#64d5ca","teal-lighter":"#a0f0ed","teal-lightest":"#e8fffe","blue-darkest":"#0a224e","blue-darker":"#103d60","blue-dark":"#2779bd",blue:"#a0d8f1","blue-light":"#6cb2eb","blue-lighter":"#bcdefa","blue-lightest":"#eff8ff","indigo-darkest":"#191e38","indigo-darker":"#2f365f","indigo-dark":"#5661b3",indigo:"#6574cd","indigo-light":"#7886d7","indigo-lighter":"#b2b7ff","indigo-lightest":"#e6e8ff","purple-darkest":"#1f133f","purple-darker":"#352465","purple-dark":"#794acf",purple:"#9561e2","purple-light":"#a779e9","purple-lighter":"#d6bbfc","purple-lightest":"#f3ebff","pink-darkest":"#45051e","pink-darker":"#72173a","pink-dark":"#eb5286",pink:"#f66d9b","pink-light":"#fa7ea8","pink-lighter":"#ffbbca","pink-lightest":"#ffebef"};e.exports={colors:r,screens:{sm:"400px",md:"600px",lg:"900px",xl:"1200px",xxl:"1600px"},fonts:{sans:['"Open Sans"',"-apple-system","BlinkMacSystemFont","Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue","sans-serif"],serif:['"Cantata One"',"Constantia","Lucida Bright","Lucidabright","Lucida Serif","Lucida","DejaVu Serif","Bitstream Vera Serif","Liberation Serif","Georgia","serif"],mono:["Menlo","Monaco","Consolas","Liberation Mono","Courier 
New","monospace"]},textSizes:{xs:".75rem",sm:".875rem",base:"1rem",lg:"1.125rem",xl:"1.25rem","2xl":"1.5rem","3xl":"1.875rem","4xl":"2.25rem","5xl":"3rem","6xl":"5rem","7xl":"8rem"},fontWeights:{hairline:100,thin:200,light:300,normal:400,medium:500,semibold:600,bold:700,extrabold:800,black:900},leading:{none:1,tight:1.25,normal:1.5,loose:2},tracking:{tight:"-0.05em",normal:"0",wide:"0.05em"},textColors:r,backgroundColors:r,borderWidths:{default:"1px",0:"0",2:"2px",4:"4px",8:"8px"},borderColors:Object.assign({default:r["grey-light"]},r),borderRadius:{none:"0",sm:".125rem",default:".25rem",lg:".5rem",full:"9999px"},width:{auto:"auto",px:"1px",1:"0.25rem",2:"0.5rem",3:"0.75rem",4:"1rem",6:"1.5rem",8:"2rem",10:"2.5rem",12:"3rem",16:"4rem",24:"6rem",32:"8rem",48:"12rem",64:"16rem","1/2":"50%","1/3":"33.33333%","2/3":"66.66667%","1/4":"25%","3/4":"75%","1/5":"20%","2/5":"40%","3/5":"60%","4/5":"80%","1/6":"16.66667%","5/6":"83.33333%",full:"100%",screen:"100vw"},height:{auto:"auto",px:"1px",1:"0.25rem",2:"0.5rem",3:"0.75rem",4:"1rem",6:"1.5rem",8:"2rem",10:"2.5rem",12:"3rem",16:"4rem",24:"6rem",32:"8rem",48:"12rem",64:"16rem",full:"100%",screen:"100vh"},minWidth:{0:"0",full:"100%"},minHeight:{0:"0",full:"100%",screen:"100vh"},maxWidth:{xs:"20rem",sm:"30rem",md:"40rem",lg:"50rem",xl:"60rem","2xl":"70rem","3xl":"80rem","4xl":"90rem","5xl":"100rem",full:"100%"},maxHeight:{full:"100%",screen:"100vh"},padding:{px:"1px",0:"0",1:"0.25rem",2:"0.5rem",3:"0.75rem",4:"1rem",6:"1.5rem",8:"2rem",12:"3rem",16:"4rem",24:"6rem",32:"8rem"},margin:{auto:"auto",px:"1px",0:"0",1:"0.25rem",2:"0.5rem",3:"0.75rem",4:"1rem",6:"1.5rem",8:"2rem"},negativeMargin:{px:"1px",0:"0",1:"0.25rem",2:"0.5rem",3:"0.75rem",4:"1rem",6:"1.5rem",8:"2rem"},shadows:{default:"0 2px 4px 0 rgba(0,0,0,0.10)",md:"0 4px 8px 0 rgba(0,0,0,0.12), 0 2px 4px 0 rgba(0,0,0,0.08)",lg:"0 15px 30px 0 rgba(0,0,0,0.11), 0 5px 15px 0 rgba(0,0,0,0.08)",inner:"inset 0 2px 4px 0 rgba(0,0,0,0.06)",none:"none"},zIndex:{auto:"auto",0:0,10:10,20:20,30:30,40:40,50:50},opacity:{0:"0",25:".25",50:".5",75:".75",100:"1"},options:{prefix:"",important:!1}}},154:function(e,t,a){var r=a(139);e.exports={pathPrefix:"/",siteTitle:"Harbinger Industries",siteTitleAlt:"Harbinger",siteUrl:"https://harbinger-industries.net",siteLanguage:"en",siteLogo:"/logos/logo-1024.png",siteDescription:"Work / Portfolio / Music site of Avik Nandy.",userTwitter:"@av1k",ogSiteName:"Harbinger Industries",ogLanguage:"en_US",themeColor:r.colors.orange,backgroundColor:r.colors.blue}},162:function(e,t){e.exports="data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hsaW5rIiB3aWR0aD0iMTAwMCIgaGVpZ2h0PSIxMDAwIiB2aWV3Qm94PSIwIDAgMzAgMzUiPgogICAgPHBhdGgKICAgICAgICAgICAgZmlsbD0ibm9uZSIKICAgICAgICAgICAgc3Ryb2tlPSJ3aGl0ZSIKICAgICAgICAgICAgZD0iTTE0LjkyMSwyLjI3IDI4LjY2NywyNS41IDEuMTc1LDI1LjUgeiIKICAgIC8+Cjwvc3ZnPg=="},163:function(e,t,a){e.exports=a.p+"static/avatar-c99035c8ff4376bb578c17e597c4baed.jpg"}}]);
//# sourceMappingURL=component---src-pages-index-jsx-f901ab7da514504425f7.js.map
|
plugin.ts
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
import * as path from "path";
import {
AzureSolutionSettings,
Func,
FxError,
NodeType,
PluginContext,
QTreeNode,
ReadonlyPluginConfig,
Result,
Stage,
} from "@microsoft/teamsfx-api";
import { StorageManagementClient } from "@azure/arm-storage";
import { StringDictionary } from "@azure/arm-appservice/esm/models";
import { WebSiteManagementClient, WebSiteManagementModels } from "@azure/arm-appservice";
import { v4 as uuid } from "uuid";
import { AzureClientFactory, AzureLib } from "./utils/azure-client";
import {
ConfigFunctionAppError,
GetConnectionStringError,
InitAzureSDKError,
InstallNpmPackageError,
InstallTeamsfxBindingError,
ProvisionError,
ValidationError,
runWithErrorCatchAndThrow,
FunctionNameConflictError,
FetchConfigError,
} from "./resources/errors";
import {
DefaultProvisionConfigs,
DefaultValues,
DependentPluginInfo,
FunctionPluginInfo,
FunctionPluginPathInfo,
QuestionValidationFunc,
RegularExpr,
} from "./constants";
import { DialogUtils } from "./utils/dialog";
import { ErrorMessages, InfoMessages } from "./resources/message";
import {
FunctionConfigKey,
FunctionLanguage,
NodeVersion,
QuestionKey,
ResourceType,
} from "./enums";
import { FunctionDeploy } from "./ops/deploy";
import { FunctionNaming, FunctionProvision } from "./ops/provision";
import { FunctionScaffold } from "./ops/scaffold";
import { FxResult, FunctionPluginResultFactory as ResultFactory } from "./result";
import { Logger } from "./utils/logger";
import {
PostProvisionSteps,
PreDeploySteps,
ProvisionSteps,
StepGroup,
step,
} from "./resources/steps";
import { functionNameQuestion } from "./questions";
import { DotnetChecker } from "./utils/depsChecker/dotnetChecker";
import { Messages, isLinux, dotnetManualInstallHelpLink } from "./utils/depsChecker/common";
import { DepsCheckerError } from "./utils/depsChecker/errors";
import { getNodeVersion } from "./utils/node-version";
import { funcPluginAdapter } from "./utils/depsChecker/funcPluginAdapter";
import { funcPluginLogger } from "./utils/depsChecker/funcPluginLogger";
import { funcPluginTelemetry } from "./utils/depsChecker/funcPluginTelemetry";
type Site = WebSiteManagementModels.Site;
type AppServicePlan = WebSiteManagementModels.AppServicePlan;
type SiteAuthSettings = WebSiteManagementModels.SiteAuthSettings;
export interface FunctionConfig {
/* Config from solution */
resourceGroupName?: string;
subscriptionId?: string;
resourceNameSuffix?: string;
location?: string;
functionName?: string;
/* Config exported by Function plugin */
functionLanguage?: FunctionLanguage;
functionAppName?: string;
defaultFunctionName?: string;
storageAccountName?: string;
appServicePlanName?: string;
functionEndpoint?: string;
/* Intermediate */
skipDeploy: boolean;
}
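// Lifecycle note (informal summary of the methods below): each plugin entry
// point first pulls solution-level settings (subscription, resource group,
// language) into `this.config` via syncConfigFromContext(), and any generated
// names are written back through syncConfigToContext() so they persist
// across stages.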
export class
|
{
config: FunctionConfig = {
skipDeploy: false,
};
private syncConfigFromContext(ctx: PluginContext): void {
const solutionConfig: ReadonlyPluginConfig | undefined = ctx.configOfOtherPlugins.get(
DependentPluginInfo.solutionPluginName
);
this.config.resourceNameSuffix = solutionConfig?.get(
DependentPluginInfo.resourceNameSuffix
) as string;
this.config.resourceGroupName = solutionConfig?.get(
DependentPluginInfo.resourceGroupName
) as string;
this.config.subscriptionId = solutionConfig?.get(DependentPluginInfo.subscriptionId) as string;
this.config.location = solutionConfig?.get(DependentPluginInfo.location) as string;
this.config.functionLanguage = solutionConfig?.get(
DependentPluginInfo.programmingLanguage
) as FunctionLanguage;
this.config.defaultFunctionName = ctx.config.get(
FunctionConfigKey.defaultFunctionName
) as string;
this.config.functionAppName = ctx.config.get(FunctionConfigKey.functionAppName) as string;
this.config.storageAccountName = ctx.config.get(FunctionConfigKey.storageAccountName) as string;
this.config.appServicePlanName = ctx.config.get(FunctionConfigKey.appServicePlanName) as string;
/* Always validate after sync for safety and security. */
this.validateConfig();
}
private syncConfigToContext(ctx: PluginContext): void {
Object.entries(this.config)
.filter((kv) =>
FunctionPluginInfo.FunctionPluginPersistentConfig.find(
(x: FunctionConfigKey) => x === kv[0]
)
)
.forEach((kv) => {
if (kv[1]) {
ctx.config.set(kv[0], kv[1].toString());
}
});
}
private validateConfig(): void {
if (
this.config.functionLanguage &&
!Object.values(FunctionLanguage).includes(this.config.functionLanguage)
) {
throw new ValidationError(FunctionConfigKey.functionLanguage);
}
if (
this.config.resourceNameSuffix &&
!RegularExpr.validResourceSuffixPattern.test(this.config.resourceNameSuffix)
) {
throw new ValidationError(FunctionConfigKey.resourceNameSuffix);
}
if (
this.config.functionAppName &&
!RegularExpr.validFunctionAppNamePattern.test(this.config.functionAppName)
) {
throw new ValidationError(FunctionConfigKey.functionAppName);
}
if (
this.config.storageAccountName &&
!RegularExpr.validStorageAccountNamePattern.test(this.config.storageAccountName)
) {
throw new ValidationError(FunctionConfigKey.storageAccountName);
}
if (
this.config.appServicePlanName &&
!RegularExpr.validAppServicePlanNamePattern.test(this.config.appServicePlanName)
) {
throw new ValidationError(FunctionConfigKey.appServicePlanName);
}
if (
this.config.defaultFunctionName &&
!RegularExpr.validFunctionNamePattern.test(this.config.defaultFunctionName)
) {
throw new ValidationError(FunctionConfigKey.defaultFunctionName);
}
}
public async callFunc(func: Func, ctx: PluginContext): Promise<FxResult> {
if (func.method === QuestionValidationFunc.validateFunctionName) {
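      // Note: name-validation replies are returned as *successful* results
      // whose payload is the human-readable message; an empty Success means
      // the name is acceptable.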
const workingPath: string = this.getFunctionProjectRootPath(ctx);
const name = func.params as string;
if (!name || !RegularExpr.validFunctionNamePattern.test(name)) {
return ResultFactory.Success(ErrorMessages.invalidFunctionName);
}
const stage: Stage | undefined = ctx.answers?.get(QuestionKey.stage) as Stage;
if (stage === Stage.create) {
return ResultFactory.Success();
}
const language: FunctionLanguage =
(ctx.answers?.get(QuestionKey.programmingLanguage) as FunctionLanguage) ??
(ctx.configOfOtherPlugins
.get(DependentPluginInfo.solutionPluginName)
?.get(DependentPluginInfo.programmingLanguage) as FunctionLanguage);
// If language is unknown, skip checking and let scaffold handle the error.
if (language && (await FunctionScaffold.doesFunctionPathExist(workingPath, language, name))) {
return ResultFactory.Success(ErrorMessages.functionAlreadyExists);
}
}
return ResultFactory.Success();
}
public getQuestions(stage: Stage, ctx: PluginContext): Result<QTreeNode | undefined, FxError> {
const res = new QTreeNode({
type: NodeType.group,
});
if (stage === Stage.update) {
res.addChild(functionNameQuestion);
}
return ResultFactory.Success(res);
}
public async preScaffold(ctx: PluginContext): Promise<FxResult> {
this.syncConfigFromContext(ctx);
const workingPath: string = this.getFunctionProjectRootPath(ctx);
const functionLanguage: FunctionLanguage = this.checkAndGet(
this.config.functionLanguage,
FunctionConfigKey.functionLanguage
);
const name: string =
(ctx.answers?.get(QuestionKey.functionName) as string) ?? DefaultValues.functionName;
if (await FunctionScaffold.doesFunctionPathExist(workingPath, functionLanguage, name)) {
throw new FunctionNameConflictError();
}
this.config.functionName = name;
this.syncConfigToContext(ctx);
return ResultFactory.Success();
}
public async scaffold(ctx: PluginContext): Promise<FxResult> {
const workingPath: string = this.getFunctionProjectRootPath(ctx);
const functionName: string = this.checkAndGet(
this.config.functionName,
FunctionConfigKey.functionName
);
const functionLanguage: FunctionLanguage = this.checkAndGet(
this.config.functionLanguage,
FunctionConfigKey.functionLanguage
);
await FunctionScaffold.scaffoldFunction(
workingPath,
functionLanguage,
DefaultValues.functionTriggerType,
functionName,
{
appName: ctx.app.name.short,
functionName: functionName,
}
);
if (!this.config.defaultFunctionName) {
this.config.defaultFunctionName = this.config.functionName;
}
this.syncConfigToContext(ctx);
return ResultFactory.Success();
}
public async preProvision(ctx: PluginContext): Promise<FxResult> {
this.syncConfigFromContext(ctx);
if (
!this.config.functionAppName ||
!this.config.storageAccountName ||
!this.config.appServicePlanName
) {
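// Generate any missing resource names once here; the uuid-based suffix keeps
// the generated names unique across provisions.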
const teamsAppName: string = ctx.app.name.short;
const suffix: string = this.config.resourceNameSuffix ?? uuid().substr(0, 6);
if (!this.config.functionAppName) {
this.config.functionAppName = FunctionNaming.generateFunctionAppName(
teamsAppName,
DefaultProvisionConfigs.nameSuffix,
suffix
);
Logger.info(InfoMessages.generateFunctionAppName(this.config.functionAppName));
}
if (!this.config.storageAccountName) {
this.config.storageAccountName = FunctionNaming.generateStorageAccountName(
teamsAppName,
DefaultProvisionConfigs.nameSuffix,
suffix
);
Logger.info(InfoMessages.generateStorageAccountName(this.config.storageAccountName));
}
if (!this.config.appServicePlanName) {
this.config.appServicePlanName = this.config.functionAppName;
Logger.info(InfoMessages.generateAppServicePlanName(this.config.appServicePlanName));
}
}
this.syncConfigToContext(ctx);
return ResultFactory.Success();
}
private async getValidNodeVersion(ctx: PluginContext): Promise<NodeVersion> {
const currentNodeVersion = await getNodeVersion(this.getFunctionProjectRootPath(ctx));
const candidateNodeVersions = Object.values(NodeVersion);
return (
candidateNodeVersions.find((v: NodeVersion) => v === currentNodeVersion) ??
DefaultValues.nodeVersion
);
}
public async provision(ctx: PluginContext): Promise<FxResult> {
const resourceGroupName = this.checkAndGet(
this.config.resourceGroupName,
FunctionConfigKey.resourceGroupName
);
const subscriptionId = this.checkAndGet(
this.config.subscriptionId,
FunctionConfigKey.subscriptionId
);
const location = this.checkAndGet(this.config.location, FunctionConfigKey.location);
const appServicePlanName = this.checkAndGet(
this.config.appServicePlanName,
FunctionConfigKey.appServicePlanName
);
const storageAccountName = this.checkAndGet(
this.config.storageAccountName,
FunctionConfigKey.storageAccountName
);
const functionAppName = this.checkAndGet(
this.config.functionAppName,
FunctionConfigKey.functionAppName
);
const functionLanguage = this.checkAndGet(
this.config.functionLanguage,
FunctionConfigKey.functionLanguage
);
const credential = this.checkAndGet(
await ctx.azureAccountProvider?.getAccountCredentialAsync(),
FunctionConfigKey.credential
);
const nodeVersion = await this.getValidNodeVersion(ctx);
const storageManagementClient: StorageManagementClient = await runWithErrorCatchAndThrow(
new InitAzureSDKError(),
() => AzureClientFactory.getStorageManagementClient(credential, subscriptionId)
);
Logger.info(
InfoMessages.checkResource(ResourceType.storageAccount, storageAccountName, resourceGroupName)
);
await runWithErrorCatchAndThrow(new ProvisionError(ResourceType.storageAccount), () =>
step(
StepGroup.ProvisionStepGroup,
ProvisionSteps.ensureStorageAccount,
async () =>
await AzureLib.ensureStorageAccount(
storageManagementClient,
resourceGroupName,
storageAccountName,
DefaultProvisionConfigs.storageConfig(location)
)
)
);
const storageConnectionString: string | undefined = await runWithErrorCatchAndThrow(
new GetConnectionStringError(),
async () =>
await step(StepGroup.ProvisionStepGroup, ProvisionSteps.getConnectionString, async () =>
AzureLib.getConnectionString(
storageManagementClient,
resourceGroupName,
storageAccountName
)
)
);
if (!storageConnectionString) {
Logger.error(ErrorMessages.failToGetConnectionString);
throw new GetConnectionStringError();
}
const webSiteManagementClient: WebSiteManagementClient = await runWithErrorCatchAndThrow(
new InitAzureSDKError(),
() => AzureClientFactory.getWebSiteManagementClient(credential, subscriptionId)
);
Logger.info(
InfoMessages.checkResource(ResourceType.appServicePlan, appServicePlanName, resourceGroupName)
);
const appServicePlan: AppServicePlan = await runWithErrorCatchAndThrow(
new ProvisionError(ResourceType.appServicePlan),
async () =>
await step(StepGroup.ProvisionStepGroup, ProvisionSteps.ensureAppServicePlans, async () =>
AzureLib.ensureAppServicePlans(
webSiteManagementClient,
resourceGroupName,
appServicePlanName,
DefaultProvisionConfigs.appServicePlansConfig(location)
)
)
);
const appServicePlanId: string | undefined = appServicePlan.id;
if (!appServicePlanId) {
Logger.error(ErrorMessages.failToGetAppServicePlanId);
throw new ProvisionError(ResourceType.appServicePlan);
}
Logger.info(
InfoMessages.checkResource(ResourceType.functionApp, appServicePlanName, resourceGroupName)
);
const site: Site = await runWithErrorCatchAndThrow(
new ProvisionError(ResourceType.functionApp),
async () =>
await step(StepGroup.ProvisionStepGroup, ProvisionSteps.ensureFunctionApp, async () =>
FunctionProvision.ensureFunctionApp(
webSiteManagementClient,
resourceGroupName,
location,
functionAppName,
functionLanguage,
appServicePlanId,
storageConnectionString,
nodeVersion
)
)
);
if (!site.defaultHostName) {
Logger.error(ErrorMessages.failToGetFunctionAppEndpoint);
throw new ProvisionError(ResourceType.functionApp);
}
if (!this.config.functionEndpoint) {
this.config.functionEndpoint = `https://${site.defaultHostName}`;
}
this.syncConfigToContext(ctx);
return ResultFactory.Success();
}
public async postProvision(ctx: PluginContext): Promise<FxResult> {
const subscriptionId = this.checkAndGet(
this.config.subscriptionId,
FunctionConfigKey.subscriptionId
);
const functionAppName = this.checkAndGet(
this.config.functionAppName,
FunctionConfigKey.functionAppName
);
const resourceGroupName = this.checkAndGet(
this.config.resourceGroupName,
FunctionConfigKey.resourceGroupName
);
const credential = this.checkAndGet(
await ctx.azureAccountProvider?.getAccountCredentialAsync(),
FunctionConfigKey.credential
);
const webSiteManagementClient: WebSiteManagementClient = await runWithErrorCatchAndThrow(
new InitAzureSDKError(),
() => AzureClientFactory.getWebSiteManagementClient(credential, subscriptionId)
);
const site: Site | undefined = await runWithErrorCatchAndThrow(
new ConfigFunctionAppError(),
async () =>
await step(StepGroup.PostProvisionStepGroup, PostProvisionSteps.findFunctionApp, async () =>
AzureLib.findFunctionApp(webSiteManagementClient, resourceGroupName, functionAppName)
)
);
if (!site) {
Logger.error(ErrorMessages.failToFindFunctionApp);
throw new ConfigFunctionAppError();
}
if (!site.siteConfig) {
Logger.info(InfoMessages.functionAppConfigIsEmpty);
site.siteConfig = {};
}
// The queried site does not contain appSettings; fetch them through a separate API call.
if (!site.siteConfig.appSettings) {
const res: StringDictionary = await runWithErrorCatchAndThrow(
new ConfigFunctionAppError(),
async () =>
await webSiteManagementClient.webApps.listApplicationSettings(
resourceGroupName,
functionAppName
)
);
if (res.properties) {
site.siteConfig.appSettings = Object.entries(res.properties).map(
(kv: [string, string]) => ({
name: kv[0],
value: kv[1],
})
);
}
}
this.collectFunctionAppSettings(ctx, site);
await runWithErrorCatchAndThrow(
new ConfigFunctionAppError(),
async () =>
await step(
StepGroup.PostProvisionStepGroup,
PostProvisionSteps.updateFunctionSettings,
async () =>
await webSiteManagementClient.webApps.update(resourceGroupName, functionAppName, site)
)
);
Logger.info(InfoMessages.functionAppSettingsUpdated);
const authSettings: SiteAuthSettings | undefined = this.collectFunctionAppAuthSettings(ctx);
if (authSettings) {
await runWithErrorCatchAndThrow(
new ConfigFunctionAppError(),
async () =>
await step(
StepGroup.PostProvisionStepGroup,
PostProvisionSteps.updateFunctionSettings,
async () =>
await webSiteManagementClient.webApps.updateAuthSettings(
resourceGroupName,
functionAppName,
authSettings
)
)
);
Logger.info(InfoMessages.functionAppAuthSettingsUpdated);
}
this.syncConfigToContext(ctx);
return ResultFactory.Success();
}
public async preDeploy(ctx: PluginContext): Promise<FxResult> {
this.syncConfigFromContext(ctx);
const workingPath: string = this.getFunctionProjectRootPath(ctx);
const functionLanguage: FunctionLanguage = this.checkAndGet(
this.config.functionLanguage,
FunctionConfigKey.functionLanguage
);
const updated: boolean = await FunctionDeploy.hasUpdatedContent(workingPath, functionLanguage);
if (!updated) {
Logger.info(InfoMessages.skipDeployment);
DialogUtils.show(ctx, InfoMessages.skipDeployment);
this.config.skipDeploy = true;
return ResultFactory.Success();
}
// NOTE: if you refactor this code, make sure this step runs before any use of the `dotnet` command.
await this.handleDotnetChecker(ctx);
await this.handleBackendExtensionsInstall(workingPath, functionLanguage);
await runWithErrorCatchAndThrow(
new InstallNpmPackageError(),
async () =>
await step(StepGroup.PreDeployStepGroup, PreDeploySteps.npmPrepare, async () =>
FunctionDeploy.build(workingPath, functionLanguage)
)
);
this.config.skipDeploy = false;
return ResultFactory.Success();
}
public async deploy(ctx: PluginContext): Promise<FxResult> {
if (this.config.skipDeploy) {
return ResultFactory.Success();
}
const workingPath: string = this.getFunctionProjectRootPath(ctx);
const subscriptionId: string = this.checkAndGet(
this.config.subscriptionId,
FunctionConfigKey.subscriptionId
);
const functionAppName: string = this.checkAndGet(
this.config.functionAppName,
FunctionConfigKey.functionAppName
);
const resourceGroupName: string = this.checkAndGet(
this.config.resourceGroupName,
FunctionConfigKey.resourceGroupName
);
const functionLanguage: FunctionLanguage = this.checkAndGet(
this.config.functionLanguage,
FunctionConfigKey.functionLanguage
);
const credential = this.checkAndGet(
await ctx.azureAccountProvider?.getAccountCredentialAsync(),
FunctionConfigKey.credential
);
const webSiteManagementClient: WebSiteManagementClient = await runWithErrorCatchAndThrow(
new InitAzureSDKError(),
() => AzureClientFactory.getWebSiteManagementClient(credential, subscriptionId)
);
await FunctionDeploy.deployFunction(
webSiteManagementClient,
workingPath,
functionAppName,
functionLanguage,
resourceGroupName
);
return ResultFactory.Success();
}
private getFunctionProjectRootPath(ctx: PluginContext): string {
return path.join(ctx.root, FunctionPluginPathInfo.solutionFolderName);
}
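// Narrows `T | undefined` to `T`: returns the value when present, otherwise
// throws a FetchConfigError naming the missing config key.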
private checkAndGet<T>(v: T | undefined, key: string): T {
if (v) {
return v;
}
throw new FetchConfigError(key);
}
public isPluginEnabled(ctx: PluginContext, plugin: string): boolean {
const selectedPlugins = (ctx.projectSettings?.solutionSettings as AzureSolutionSettings)
.activeResourcePlugins;
return selectedPlugins.includes(plugin);
}
private collectFunctionAppSettings(ctx: PluginContext, site: Site): void {
const functionEndpoint: string = this.checkAndGet(
this.config.functionEndpoint,
FunctionConfigKey.functionEndpoint
);
FunctionProvision.updateFunctionSettingsSelf(site, functionEndpoint);
const aadConfig: ReadonlyPluginConfig | undefined = ctx.configOfOtherPlugins.get(
DependentPluginInfo.aadPluginName
);
if (this.isPluginEnabled(ctx, DependentPluginInfo.aadPluginName) && aadConfig) {
Logger.info(InfoMessages.dependPluginDetected(DependentPluginInfo.aadPluginName));
const clientId: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.aadClientId) as string,
"AAD client Id"
);
const clientSecret: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.aadClientSecret) as string,
"AAD secret"
);
const oauthHost: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.oauthHost) as string,
"OAuth Host"
);
const tenantId: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.tenantId) as string,
"Tenant Id"
);
const applicationIdUris: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.applicationIdUris) as string,
"Application Id URI"
);
FunctionProvision.updateFunctionSettingsForAAD(
site,
clientId,
clientSecret,
oauthHost,
tenantId,
applicationIdUris
);
}
const frontendConfig: ReadonlyPluginConfig | undefined = ctx.configOfOtherPlugins.get(
DependentPluginInfo.frontendPluginName
);
if (this.isPluginEnabled(ctx, DependentPluginInfo.frontendPluginName) && frontendConfig) {
Logger.info(InfoMessages.dependPluginDetected(DependentPluginInfo.frontendPluginName));
const frontendEndpoint: string = this.checkAndGet(
frontendConfig.get(DependentPluginInfo.frontendEndpoint) as string,
"frontend endpoint"
);
FunctionProvision.updateFunctionSettingsForFrontend(site, frontendEndpoint);
}
const sqlConfig: ReadonlyPluginConfig | undefined = ctx.configOfOtherPlugins.get(
DependentPluginInfo.sqlPluginName
);
const identityConfig: ReadonlyPluginConfig | undefined = ctx.configOfOtherPlugins.get(
DependentPluginInfo.identityPluginName
);
if (
this.isPluginEnabled(ctx, DependentPluginInfo.sqlPluginName) &&
this.isPluginEnabled(ctx, DependentPluginInfo.identityPluginName) &&
sqlConfig &&
identityConfig
) {
Logger.info(InfoMessages.dependPluginDetected(DependentPluginInfo.sqlPluginName));
Logger.info(InfoMessages.dependPluginDetected(DependentPluginInfo.identityPluginName));
const identityId: string = this.checkAndGet(
identityConfig.get(DependentPluginInfo.identityId) as string,
"identity Id"
);
const databaseName: string = this.checkAndGet(
sqlConfig.get(DependentPluginInfo.databaseName) as string,
"database name"
);
const sqlEndpoint: string = this.checkAndGet(
sqlConfig.get(DependentPluginInfo.sqlEndpoint) as string,
"sql endpoint"
);
const identityName: string = this.checkAndGet(
identityConfig.get(DependentPluginInfo.identityName) as string,
"identity name"
);
FunctionProvision.updateFunctionSettingsForSQL(
site,
identityId,
databaseName,
sqlEndpoint,
identityName
);
}
const apimConfig: ReadonlyPluginConfig | undefined = ctx.configOfOtherPlugins.get(
DependentPluginInfo.apimPluginName
);
if (this.isPluginEnabled(ctx, DependentPluginInfo.apimPluginName) && apimConfig) {
Logger.info(InfoMessages.dependPluginDetected(DependentPluginInfo.apimPluginName));
const clientId: string = this.checkAndGet(
apimConfig.get(DependentPluginInfo.apimAppId) as string,
"APIM app Id"
);
FunctionProvision.ensureFunctionAllowAppIds(site, [clientId]);
}
}
private collectFunctionAppAuthSettings(ctx: PluginContext): SiteAuthSettings | undefined {
const aadConfig: ReadonlyPluginConfig | undefined = ctx.configOfOtherPlugins.get(
DependentPluginInfo.aadPluginName
);
const frontendConfig: ReadonlyPluginConfig | undefined = ctx.configOfOtherPlugins.get(
DependentPluginInfo.frontendPluginName
);
if (
this.isPluginEnabled(ctx, DependentPluginInfo.aadPluginName) &&
this.isPluginEnabled(ctx, DependentPluginInfo.frontendPluginName) &&
aadConfig &&
frontendConfig
) {
const clientId: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.aadClientId) as string,
"AAD client Id"
);
const oauthHost: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.oauthHost) as string,
"OAuth Host"
);
const tenantId: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.tenantId) as string,
"tenant Id"
);
const applicationIdUri: string = this.checkAndGet(
aadConfig.get(DependentPluginInfo.applicationIdUris) as string,
"Application Id URI"
);
return FunctionProvision.constructFunctionAuthSettings(
clientId,
applicationIdUri,
oauthHost,
tenantId
);
}
return undefined;
}
private async handleDotnetChecker(ctx: PluginContext): Promise<void> {
try {
await step(StepGroup.PreDeployStepGroup, PreDeploySteps.dotnetInstall, async () => {
const dotnetChecker = new DotnetChecker(
funcPluginAdapter,
funcPluginLogger,
funcPluginTelemetry
);
try {
if (await dotnetChecker.isInstalled()) {
return;
}
} catch (error) {
funcPluginLogger.debug(InfoMessages.failedToCheckDotnet(error));
funcPluginAdapter.handleDotnetError(error);
return;
}
if (isLinux()) {
// TODO: handle linux installation
if (!(await funcPluginAdapter.handleDotnetForLinux(ctx, dotnetChecker))) {
// NOTE: this is a temporary fix for Linux, to make the error message more readable.
const message = await funcPluginAdapter.generateMsg(Messages.linuxDepsNotFoundHelpLinkMessage, [dotnetChecker]);
funcPluginAdapter.handleDotnetError(
new DepsCheckerError(message, dotnetManualInstallHelpLink)
);
}
return;
}
try {
await dotnetChecker.install();
} catch (error) {
await funcPluginLogger.printDetailLog();
funcPluginLogger.error(InfoMessages.failedToInstallDotnet(error));
funcPluginAdapter.handleDotnetError(error);
}
});
} finally {
funcPluginLogger.cleanup();
}
}
private async handleBackendExtensionsInstall(
workingPath: string,
functionLanguage: FunctionLanguage
): Promise<void> {
await runWithErrorCatchAndThrow(
new InstallTeamsfxBindingError(),
async () =>
await step(StepGroup.PreDeployStepGroup, PreDeploySteps.installTeamsfxBinding, async () => {
try {
await FunctionDeploy.installFuncExtensions(workingPath, functionLanguage);
} catch (error) {
// Wrap the original error in a UserError so the extensibility model pops up a dialog correctly.
funcPluginAdapter.handleDotnetError(error);
}
})
);
}
}
|
FunctionPluginImpl
|
client.rs
|
use regex::Regex;
use serde::{Serialize, Deserialize};
use crate::types::{PlotParameters, DiagramError, Format};
use std::error::Error;
use crate::types::WSDEnum;
// Represents a response from the websequencediagrams website
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct WebSequenceDiagramResponse {
img: String,
errors: Vec<String>,
// TODO(mkl): add additional fields
}
/// Contains the result of plotting a diagram
pub struct WSDResult {
/// Content of the diagram
pub diagram: Vec<u8>,
/// Vector of errors.
/// Errors are not fatal: even if errors are reported,
/// the remaining lines may still be plotted.
pub errors: Vec<DiagramError>,
/// Actual format of the diagram
/// The actual format may differ from the requested one, e.g. when PDF is requested
/// but no API key is provided.
/// The format is determined from the returned URL:
/// "?png=mscKTO107" for png
pub actual_format: Format
}
/// Plots a diagram using the websequencediagrams public API
pub fn get_diagram(spec: &str, parameters: &PlotParameters) -> Result<WSDResult, Box<Error>> {
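// The public API is a two-step flow: first POST the diagram spec and plot
// parameters to index.php, which returns JSON containing a relative image URL;
// then GET that URL to download the rendered diagram bytes.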
// If PDF is requested but the API key is invalid, a PNG is returned instead.
let mut params = vec![
("message".to_owned(), spec.to_owned()),
("style".to_owned(), parameters.style.wsd_value()),
("format".to_owned(), parameters.format.wsd_value()),
("apiVersion".to_owned(), "1".to_owned()),
];
if let Some(ref api_key) = parameters.api_key {
params.push(("apikey".to_owned(), api_key.clone()));
}
if let Some(ref paper_size) = parameters.paper_size {
params.push(("paper".to_owned(), paper_size.wsd_value()));
}
if let Some(ref paper_orientation) = parameters.paper_orientation {
params.push(("landscape".to_owned(), paper_orientation.wsd_value()));
}
if let Some(ref scale) = parameters.scale {
params.push(("scale".to_owned(), format!("{}", scale)));
}
// URL for first request
let first_request_url = "http://www.websequencediagrams.com/index.php";
let first_response = reqwest::Client::new()
.post(first_request_url)
.form(¶ms)
.send();
let first_response: WebSequenceDiagramResponse = match first_response {
Ok(mut r) => {
let mut v = vec![];
// Save the response body so we can inspect it if something goes wrong
std::io::copy(&mut r, &mut v)
.map_err(|err| format!("error reading response from server {} : {:?}", first_request_url, err))?;
if !r.status().is_success() {
return Err(format!(
"Error response from server: {} HTTP code={:?} response={}",
first_request_url,
r.status(),
String::from_utf8_lossy(&v)
).into())
}
serde_json::from_reader(&v[..])
.map_err(|err|
format!(
"Cannot deserialize websequencegiagram response: {:?} Response: {}",
err,
String::from_utf8_lossy(&v)
)
)
}
Err(err) => {
Err(format!("error sending request to {} : {}", first_request_url, err))
}
}?;
let actual_format = determine_actual_format(&first_response.img)
.map_err(|err| format!("cannot determine actual format from url: {} : {:?}", &first_response.img, err))?;
let second_request_url = format!("http://www.websequencediagrams.com/index.php{}", first_response.img);
// Second request contains actual diagram
let mut second_response = reqwest::Client::new()
.get(&second_request_url)
.send()
.map_err(|err| format!("Error sending request for diagram to {} : {:?}", second_request_url, err))?;
if !second_response.status().is_success() {
return Err(format!("Request for diagram was unsuccesfull url: {} code: {:?}", second_request_url, second_response.status()).into());
}
let mut data = vec![];
std::io::copy(&mut second_response, &mut data)
.map_err(|err|
format!("Error reading diagram from {} : {:?}", second_request_url, err)
)?;
let errors_parsed = first_response.errors
.iter()
.map(|error| DiagramError::from_wsd_error_str(error));
let mut errors = vec![];
for error in errors_parsed {
match error {
Ok(error) => errors.push(error),
Err(err) => return Err(format!("cannot parse wsd error message {:?}",err).into())
}
}
Ok(WSDResult {
diagram: data,
errors,
actual_format,
})
}
fn determine_actual_format(url: &str) -> Result<Format, Box<Error>> {
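// The format is encoded as the query key of the URL returned by the server,
// e.g. "?png=mscKTO107" -> png (see the tests below).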
let re = Regex::new(r"(?ix)
\?
(?P<format>\w+) # format
=
.*
")?;
let caps = if let Some(caps) = re.captures(url) {
caps
} else {
return Err("Error parsing diagram url.".into())
};
let format_str = if let Some(format_match) = caps.name("format"){
format_match.as_str()
} else {
return Err("Error parsing diagram url. Group `format` not found".into())
};
match Format::from_str(format_str) {
Some(x) => Ok(x),
None =>
|
}
}
#[cfg(test)]
mod tests {
use crate::types::{DiagramError, Format};
use crate::client::determine_actual_format;
#[test]
fn determine_actual_format_test() {
// "?png=mscKTO107" for png
// "?pdf=mscKTO107" for pdf
// "?svg=mscKTO107" for svg
assert_eq!(determine_actual_format("?png=mscKTO107").unwrap(), Format::Png);
assert_eq!(determine_actual_format("?pdf=mscKTO107").unwrap(), Format::Pdf);
assert_eq!(determine_actual_format("?svg=mscKTO107").unwrap(), Format::Svg);
assert!(determine_actual_format("?xxx=mscKTO107").is_err());
}
}
|
{
Err(format!("unknown format in diagram url. Known formats are: {}. Got: {}", Format::help_str(), format_str).into())
}
|
host.rs
|
use core::{
marker::PhantomData,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
};
use rustacuda::{
context::Context,
error::{CudaError, CudaResult},
function::Function,
memory::{DeviceBox, DeviceBuffer, LockedBuffer},
module::Module,
stream::Stream,
};
use rustacuda_core::{DeviceCopy, DevicePointer};
#[cfg(feature = "derive")]
#[doc(cfg(feature = "derive"))]
pub use rust_cuda_derive::{check_kernel, link_kernel, specialise_kernel_call};
use crate::{
common::{DeviceAccessible, DeviceConstRef, DeviceMutRef, RustToCuda},
ptx_jit::{CudaKernel, PtxJITCompiler, PtxJITResult},
safety::SafeDeviceCopy,
};
pub trait Launcher {
type KernelTraitObject: ?Sized;
type CompilationWatcher;
fn get_launch_package(&mut self) -> LaunchPackage<Self>;
/// # Errors
///
/// Should only return a `CudaError` if some implementation-defined
/// critical kernel function configuration failed.
#[allow(unused_variables)]
fn on_compile(kernel: &Function, watcher: &mut Self::CompilationWatcher) -> CudaResult<()> {
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct LaunchConfig {
pub grid: rustacuda::function::GridSize,
pub block: rustacuda::function::BlockSize,
pub shared_memory_size: u32,
}
pub struct LaunchPackage<'l, L: ?Sized + Launcher> {
pub config: LaunchConfig,
pub kernel: &'l mut TypedKernel<L::KernelTraitObject>,
pub stream: &'l mut Stream,
pub watcher: &'l mut L::CompilationWatcher,
}
pub enum
|
<'k> {
Cached(&'k Function<'k>),
Recompiled(&'k Function<'k>),
}
pub struct TypedKernel<KernelTraitObject: ?Sized> {
compiler: PtxJITCompiler,
kernel: Option<CudaKernel>,
entry_point: alloc::boxed::Box<std::ffi::CStr>,
marker: PhantomData<KernelTraitObject>,
}
impl<KernelTraitObject: ?Sized> TypedKernel<KernelTraitObject> {
/// # Errors
///
/// Returns a `CudaError` if `ptx` or `entry_point` contain nul bytes.
pub fn new(ptx: &str, entry_point: &str) -> CudaResult<Self> {
let ptx_cstring = std::ffi::CString::new(ptx).map_err(|_| CudaError::InvalidPtx)?;
let compiler = crate::ptx_jit::PtxJITCompiler::new(&ptx_cstring);
let entry_point_cstring =
std::ffi::CString::new(entry_point).map_err(|_| CudaError::InvalidValue)?;
let entry_point = entry_point_cstring.into_boxed_c_str();
Ok(Self {
compiler,
kernel: None,
entry_point,
marker: PhantomData,
})
}
/// # Errors
///
/// Returns a `CudaError` if `ptx` (from [`Self::new`]) is not a valid
/// PTX source, or it does not contain an entry point named `entry_point`
/// (from [`Self::new`]).
pub fn compile_with_ptx_jit_args(
&mut self,
arguments: Option<&[Option<&[u8]>]>,
) -> CudaResult<KernelJITResult> {
let ptx_jit = self.compiler.with_arguments(arguments);
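// Recompile only when no kernel has been compiled yet or the PTX JIT produced
// new source for these arguments; otherwise reuse the cached kernel.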
let kernel_jit = if self.kernel.is_none() || matches!(ptx_jit, PtxJITResult::Recomputed(_))
{
let ptx_cstr = match ptx_jit {
PtxJITResult::Cached(ptx_cstr) | PtxJITResult::Recomputed(ptx_cstr) => ptx_cstr,
};
let recomputed_kernel = CudaKernel::new(ptx_cstr, &self.entry_point)?;
// Replace the existing compiled kernel, drop the old one
let kernel = self.kernel.insert(recomputed_kernel);
KernelJITResult::Recompiled(kernel.get_function())
} else {
// Safety: the `if` above guarantees that `kernel` is `Some` and `ptx_jit` is cached
let kernel = match &self.kernel {
Some(kernel) => kernel,
None => unsafe { core::hint::unreachable_unchecked() },
};
KernelJITResult::Cached(kernel.get_function())
};
Ok(kernel_jit)
}
}
pub trait LendToCuda: RustToCuda {
/// Lends an immutable copy of `&self` to CUDA:
/// - code in the CUDA kernel can only access `&self` through the
/// `DeviceConstRef` inside the closure
/// - after the closure, `&self` will not have changed
///
/// # Errors
///
/// Returns a `rustacuda::errors::CudaError` iff an error occurs inside CUDA
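///
/// # Example
///
/// A minimal usage sketch (assuming a hypothetical `data: MyData` with
/// `MyData: RustToCuda`):
///
/// ```rust,ignore
/// data.lend_to_cuda(|device_ref| {
///     // pass `device_ref.for_device()` to a kernel launch here
///     Ok::<(), CudaError>(())
/// })?;
/// ```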
fn lend_to_cuda<
O,
E: From<CudaError>,
F: FnOnce(
HostAndDeviceConstRef<DeviceAccessible<<Self as RustToCuda>::CudaRepresentation>>,
) -> Result<O, E>,
>(
&self,
inner: F,
) -> Result<O, E>;
/// Lends a mutable copy of `&mut self` to CUDA:
/// - code in the CUDA kernel can only access `&mut self` through the
/// `DeviceMutRef` inside the closure
/// - after the closure, `&mut self` might have changed in the following
/// ways:
/// - to avoid aliasing, each CUDA thread gets its own shallow copy of
/// `&mut self`, i.e. any shallow changes will NOT be reflected after
/// the closure
/// - each CUDA thread can access the same heap allocated storage, i.e.
/// any deep changes will be reflected after the closure
///
/// # Errors
///
/// Returns a `rustacuda::errors::CudaError` iff an error occurs inside CUDA
fn lend_to_cuda_mut<
O,
E: From<CudaError>,
F: FnOnce(
HostAndDeviceMutRef<DeviceAccessible<<Self as RustToCuda>::CudaRepresentation>>,
) -> Result<O, E>,
>(
&mut self,
inner: F,
) -> Result<O, E>;
/// Moves `self` to CUDA iff `self` is `SafeDeviceCopy`
///
/// # Errors
///
/// Returns a `rustacuda::errors::CudaError` iff an error occurs inside CUDA
fn move_to_cuda<
O,
E: From<CudaError>,
F: FnOnce(
HostAndDeviceOwned<DeviceAccessible<<Self as RustToCuda>::CudaRepresentation>>,
) -> Result<O, E>,
>(
self,
inner: F,
) -> Result<O, E>
where
Self: Sized + SafeDeviceCopy,
<Self as RustToCuda>::CudaRepresentation: SafeDeviceCopy,
<Self as RustToCuda>::CudaAllocation: EmptyCudaAlloc;
}
impl<T: RustToCuda> LendToCuda for T {
fn lend_to_cuda<
O,
E: From<CudaError>,
F: FnOnce(
HostAndDeviceConstRef<DeviceAccessible<<Self as RustToCuda>::CudaRepresentation>>,
) -> Result<O, E>,
>(
&self,
inner: F,
) -> Result<O, E> {
let (cuda_repr, alloc) = unsafe { self.borrow(NullCudaAlloc) }?;
let result = HostAndDeviceConstRef::with_new(&cuda_repr, inner);
core::mem::drop(cuda_repr);
core::mem::drop(alloc);
result
}
fn lend_to_cuda_mut<
O,
E: From<CudaError>,
F: FnOnce(
HostAndDeviceMutRef<DeviceAccessible<<Self as RustToCuda>::CudaRepresentation>>,
) -> Result<O, E>,
>(
&mut self,
inner: F,
) -> Result<O, E> {
let (mut cuda_repr, alloc) = unsafe { self.borrow(NullCudaAlloc) }?;
let result = HostAndDeviceMutRef::with_new(&mut cuda_repr, inner);
core::mem::drop(cuda_repr);
let _: NullCudaAlloc = unsafe { self.restore(alloc) }?;
result
}
fn move_to_cuda<
O,
E: From<CudaError>,
F: FnOnce(
HostAndDeviceOwned<DeviceAccessible<<Self as RustToCuda>::CudaRepresentation>>,
) -> Result<O, E>,
>(
self,
inner: F,
) -> Result<O, E>
where
Self: Sized + SafeDeviceCopy,
<Self as RustToCuda>::CudaRepresentation: SafeDeviceCopy,
<Self as RustToCuda>::CudaAllocation: EmptyCudaAlloc,
{
let (cuda_repr, alloc) = unsafe { self.borrow(NullCudaAlloc) }?;
let result = HostAndDeviceOwned::with_new(cuda_repr, inner);
core::mem::drop(alloc);
result
}
}
pub(crate) mod private {
pub mod alloc {
pub trait Sealed {}
}
pub mod drop {
pub trait Sealed: Sized {
fn drop(val: Self) -> Result<(), (rustacuda::error::CudaError, Self)>;
}
}
pub mod empty {
pub trait Sealed {}
}
}
pub trait EmptyCudaAlloc: private::empty::Sealed {}
impl<T: private::empty::Sealed> EmptyCudaAlloc for T {}
pub trait CudaAlloc: private::alloc::Sealed {}
impl<T: private::alloc::Sealed> CudaAlloc for T {}
impl<T: CudaAlloc> private::alloc::Sealed for Option<T> {}
pub struct NullCudaAlloc;
impl private::alloc::Sealed for NullCudaAlloc {}
impl private::empty::Sealed for NullCudaAlloc {}
pub struct CombinedCudaAlloc<A: CudaAlloc, B: CudaAlloc>(A, B);
impl<A: CudaAlloc, B: CudaAlloc> private::alloc::Sealed for CombinedCudaAlloc<A, B> {}
impl<A: CudaAlloc + EmptyCudaAlloc, B: CudaAlloc + EmptyCudaAlloc> private::empty::Sealed
for CombinedCudaAlloc<A, B>
{
}
impl<A: CudaAlloc, B: CudaAlloc> CombinedCudaAlloc<A, B> {
pub fn new(front: A, tail: B) -> Self {
Self(front, tail)
}
pub fn split(self) -> (A, B) {
(self.0, self.1)
}
}
pub struct CudaDropWrapper<C: private::drop::Sealed>(Option<C>);
impl<C: private::drop::Sealed> private::alloc::Sealed for CudaDropWrapper<C> {}
impl<C: private::drop::Sealed> From<C> for CudaDropWrapper<C> {
fn from(val: C) -> Self {
Self(Some(val))
}
}
impl<C: private::drop::Sealed> Drop for CudaDropWrapper<C> {
fn drop(&mut self) {
if let Some(val) = self.0.take() {
if let Err((_err, val)) = C::drop(val) {
core::mem::forget(val);
}
}
}
}
impl<C: private::drop::Sealed> Deref for CudaDropWrapper<C> {
type Target = C;
fn deref(&self) -> &Self::Target {
self.0.as_ref().unwrap()
}
}
impl<C: private::drop::Sealed> DerefMut for CudaDropWrapper<C> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.0.as_mut().unwrap()
}
}
macro_rules! impl_sealed_drop_collection {
($type:ident) => {
impl<C: DeviceCopy> private::drop::Sealed for $type<C> {
fn drop(val: Self) -> Result<(), (CudaError, Self)> {
Self::drop(val)
}
}
};
}
impl_sealed_drop_collection!(DeviceBuffer);
impl_sealed_drop_collection!(DeviceBox);
impl_sealed_drop_collection!(LockedBuffer);
macro_rules! impl_sealed_drop_value {
($type:ident) => {
impl private::drop::Sealed for $type {
fn drop(val: Self) -> Result<(), (CudaError, Self)> {
Self::drop(val)
}
}
};
}
impl_sealed_drop_value!(Module);
impl_sealed_drop_value!(Stream);
impl_sealed_drop_value!(Context);
#[repr(transparent)]
#[allow(clippy::module_name_repetitions)]
pub struct HostDeviceBox<T: DeviceCopy>(DevicePointer<T>);
impl<T: DeviceCopy> private::alloc::Sealed for HostDeviceBox<T> {}
impl<T: DeviceCopy> HostDeviceBox<T> {
/// # Errors
///
/// Returns a `CudaError` iff copying from `value` into `self` failed.
pub fn copy_from(&mut self, value: &T) -> CudaResult<()> {
// Safety: pointer comes from `DeviceBox::into_device`
// i.e. this function completes the roundtrip
let mut device_box = unsafe { ManuallyDrop::new(DeviceBox::from_device(self.0)) };
rustacuda::memory::CopyDestination::copy_from(&mut *device_box, value)
}
/// # Errors
///
/// Returns a `CudaError` iff copying from `self` into `value` failed.
pub fn copy_to(&self, value: &mut T) -> CudaResult<()> {
// Safety: pointer comes from `DeviceBox::into_device`
// i.e. this function completes the roundtrip
let device_box = unsafe { ManuallyDrop::new(DeviceBox::from_device(self.0)) };
rustacuda::memory::CopyDestination::copy_to(&*device_box, value)
}
}
impl<T: DeviceCopy> From<DeviceBox<T>> for HostDeviceBox<T> {
fn from(device_box: DeviceBox<T>) -> Self {
Self(DeviceBox::into_device(device_box))
}
}
impl<T: DeviceCopy> From<HostDeviceBox<T>> for DeviceBox<T> {
fn from(host_device_box: HostDeviceBox<T>) -> Self {
// Safety: pointer comes from `DeviceBox::into_device`
// i.e. this function completes the roundtrip
unsafe { DeviceBox::from_device(host_device_box.0) }
}
}
impl<T: DeviceCopy> Drop for HostDeviceBox<T> {
fn drop(&mut self) {
// Safety: pointer comes from `DeviceBox::into_device`
// i.e. this function completes the roundtrip
let device_box = unsafe { DeviceBox::from_device(self.0) };
core::mem::drop(CudaDropWrapper::from(device_box));
}
}
#[allow(clippy::module_name_repetitions)]
pub struct HostAndDeviceMutRef<'a, T: DeviceCopy> {
device_box: &'a mut HostDeviceBox<T>,
host_ref: &'a mut T,
}
impl<'a, T: DeviceCopy> HostAndDeviceMutRef<'a, T> {
/// # Safety
///
/// `device_box` must contain EXACTLY the device copy of `host_ref`
pub unsafe fn new(device_box: &'a mut HostDeviceBox<T>, host_ref: &'a mut T) -> Self {
Self {
device_box,
host_ref,
}
}
/// # Errors
///
/// Returns a `rustacuda::errors::CudaError` iff `value` cannot be moved
/// to CUDA or an error occurs inside `inner`.
pub fn with_new<
O,
E: From<CudaError>,
F: for<'b> FnOnce(HostAndDeviceMutRef<'b, T>) -> Result<O, E>,
>(
host_ref: &mut T,
inner: F,
) -> Result<O, E> {
let mut device_box: HostDeviceBox<_> = DeviceBox::new(host_ref)?.into();
// Safety: `device_box` contains exactly the device copy of `host_ref`
let result = inner(HostAndDeviceMutRef {
device_box: &mut device_box,
host_ref,
});
// Copy back any changes made
device_box.copy_to(host_ref)?;
core::mem::drop(device_box);
result
}
#[must_use]
pub fn for_device<'b>(&'b mut self) -> DeviceMutRef<'a, T>
where
'a: 'b,
{
DeviceMutRef {
pointer: self.device_box.0.as_raw_mut(),
reference: PhantomData,
}
}
#[must_use]
pub fn for_host<'b: 'a>(&'b self) -> &'a T {
self.host_ref
}
#[must_use]
pub fn as_ref<'b>(&'b self) -> HostAndDeviceConstRef<'b, T>
where
'a: 'b,
{
// Safety: `device_box` contains EXACTLY the device copy of `host_ref`
// by construction of `HostAndDeviceMutRef`
unsafe { HostAndDeviceConstRef::new(self.device_box, self.host_ref) }
}
#[must_use]
pub fn as_mut<'b>(&'b mut self) -> HostAndDeviceMutRef<'b, T>
where
'a: 'b,
{
// Safety: `device_box` contains EXACTLY the device copy of `host_ref`
// by construction of `HostAndDeviceMutRef`
unsafe { HostAndDeviceMutRef::new(self.device_box, self.host_ref) }
}
}
#[allow(clippy::module_name_repetitions)]
pub struct HostAndDeviceConstRef<'a, T: DeviceCopy> {
device_box: &'a HostDeviceBox<T>,
host_ref: &'a T,
}
impl<'a, T: DeviceCopy> Clone for HostAndDeviceConstRef<'a, T> {
fn clone(&self) -> Self {
*self
}
}
impl<'a, T: DeviceCopy> Copy for HostAndDeviceConstRef<'a, T> {}
impl<'a, T: DeviceCopy> HostAndDeviceConstRef<'a, T> {
/// # Safety
///
/// `device_box` must contain EXACTLY the device copy of `host_ref`
pub unsafe fn new(device_box: &'a HostDeviceBox<T>, host_ref: &'a T) -> Self {
Self {
device_box,
host_ref,
}
}
/// # Errors
///
/// Returns a `rustacuda::errors::CudaError` iff `value` cannot be moved
/// to CUDA or an error occurs inside `inner`.
pub fn with_new<
O,
E: From<CudaError>,
F: for<'b> FnOnce(HostAndDeviceConstRef<'b, T>) -> Result<O, E>,
>(
host_ref: &T,
inner: F,
) -> Result<O, E> {
let device_box: HostDeviceBox<_> = DeviceBox::new(host_ref)?.into();
// Safety: `device_box` contains exactly the device copy of `host_ref`
let result = inner(HostAndDeviceConstRef {
device_box: &device_box,
host_ref,
});
core::mem::drop(device_box);
result
}
#[must_use]
pub fn for_device<'b>(&'b self) -> DeviceConstRef<'a, T>
where
'a: 'b,
{
DeviceConstRef {
pointer: self.device_box.0.as_raw(),
reference: PhantomData,
}
}
#[must_use]
pub fn for_host(&'a self) -> &'a T {
self.host_ref
}
#[must_use]
pub fn as_ref<'b>(&'b self) -> HostAndDeviceConstRef<'b, T>
where
'a: 'b,
{
*self
}
}
#[allow(clippy::module_name_repetitions)]
pub struct HostAndDeviceOwned<'a, T: SafeDeviceCopy + DeviceCopy> {
device_box: &'a mut HostDeviceBox<T>,
host_val: &'a mut T,
}
impl<'a, T: SafeDeviceCopy + DeviceCopy> HostAndDeviceOwned<'a, T> {
/// # Errors
///
/// Returns a `rustacuda::errors::CudaError` iff `value` cannot be moved
/// to CUDA or an error occurs inside `inner`.
pub fn with_new<
O,
E: From<CudaError>,
F: for<'b> FnOnce(HostAndDeviceOwned<'b, T>) -> Result<O, E>,
>(
mut value: T,
inner: F,
) -> Result<O, E> {
let mut device_box: HostDeviceBox<_> = DeviceBox::new(&value)?.into();
// Safety: `device_box` contains exactly the device copy of `value`
let result = inner(HostAndDeviceOwned {
device_box: &mut device_box,
host_val: &mut value,
});
core::mem::drop(device_box);
core::mem::drop(value);
result
}
#[must_use]
pub fn for_device(self) -> DeviceMutRef<'a, T> {
DeviceMutRef {
pointer: self.device_box.0.as_raw_mut(),
reference: PhantomData,
}
}
#[must_use]
pub fn for_host(&'a mut self) -> &'a T {
self.host_val
}
}
|
KernelJITResult
|
manager.py
|
# coding=utf-8
import sys
import signal
import time
from multiprocessing import Process
from allocator import Allocator, Event
class Manager(object):
"""A manager manage multi allocators, when told to stop, manager would tell the allocator to stop."""
def __init__(self, cfg_list):
self.allocator_list = []
self.event_list = []
for cfg in cfg_list:
event = Event()
cfg.update({'poison': event})
self.allocator_list.append(Allocator(**cfg))
self.event_list.append(event)
def
|
(self):
"""start all the allocators"""
self.process_list = []
for allocator in self.allocator_list:
process = Process(target=allocator.start)
process.start()
self.process_list.append(process)
def stop_all(self, signum, frame):
"""stop all the allocators"""
for event in self.event_list:
event.set()
for process in self.process_list:
process.join()
sys.exit()
@classmethod
def trigger(cls, cfg_list):
"""outer interface"""
manager = cls(cfg_list)
manager.start_all()
signal.signal(signal.SIGINT, manager.stop_all)
signal.signal(signal.SIGTERM, manager.stop_all)
while True: # An infinite loop can cause many problems; a finite loop would be safer.
time.sleep(2)
manager.stop_all(None, None)
|
start_all
|
order_fill_or_kill_requests_schema.d.ts
|
export declare const orderFillOrKillRequestsSchema: {
id: string;
type: string;
items: {
|
signedOrder: {
$ref: string;
};
fillTakerAmount: {
$ref: string;
};
};
required: string[];
type: string;
};
};
|
properties: {
|
storage.py
|
import hashlib
import json
import os
import posixpath
import re
from collections import OrderedDict
from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.cache import (
InvalidCacheBackendError, cache as default_cache, caches,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes
from django.utils.functional import LazyObject
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super().__init__(location, base_url, *args, **kwargs)
# FileSystemStorage falls back to MEDIA_ROOT when location
# is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super().path(name)
class HashedFilesMixin:
default_template = """url("%s")"""
max_post_process_passes = 5
patterns = (
("*.styling", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Return a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None, filename=None):
# `filename` is the name of file to hash if `content` isn't given.
# `name` is the base name to construct the new hashed filename from.
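# e.g. hashed_name('app/base.styling') -> 'app/base.5e0040571e1a.styling'
# (hash value illustrative).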
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
if filename:
filename = urlsplit(unquote(filename)).path.strip()
filename = filename or clean_name
opened = False
if content is None:
if not self.exists(filename):
raise ValueError("The file '%s' could not be found with %r." % (filename, self))
try:
content = self.open(filename)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
"""
Return the non-hashed URL in DEBUG mode.
"""
if settings.DEBUG and not force:
|
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
args = (clean_name,)
if hashed_files is not None:
args += (hashed_files,)
hashed_name = hashed_name_func(*args)
final_url = super().url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url(self, name, force=False):
"""
Return the non-hashed URL in DEBUG mode.
"""
return self._url(self.stored_name, name, force)
def url_converter(self, name, hashed_files, template=None):
"""
Return the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Convert the matched URL to a normalized and hashed URL.
This requires figuring out which files the matched URL resolves
to and calling the url() method of the storage.
"""
matched, url = matchobj.groups()
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r'^[a-z]+:', url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
# CSS / JS?). Note that STATIC_URL cannot be empty.
if url.startswith('/') and not url.startswith(settings.STATIC_URL):
return matched
# Strip off the fragment so a path-like fragment won't interfere.
url_path, fragment = urldefrag(url)
if url_path.startswith('/'):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL):]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == '/' else name.replace(os.sep, '/')
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
# Determine the hashed name of the target file with the storage backend.
hashed_url = self._url(
self._stored_name, unquote(target_name),
force=True, hashed_files=hashed_files,
)
transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
# Restore the fragment that was stripped off earlier.
if fragment:
transformed_url += ('?#' if '?#' in url else '#') + fragment
# Return the hashed version to the file
return template % unquote(transformed_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given OrderedDict of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = OrderedDict()
# build a list of adjustable files
adjustable_paths = [
path for path in paths
if matches_patterns(path, self._patterns)
]
# Do a single pass first. Post-process all files once, then repeat for
# adjustable files.
for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
paths = {path: paths[path] for path in adjustable_paths}
for i in range(self.max_post_process_passes):
substitutions = False
for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files):
yield name, hashed_name, processed
substitutions = substitutions or subst
if not substitutions:
break
if substitutions:
yield 'All', None, RuntimeError('Max post-process passes exceeded.')
# Store the processed paths
self.hashed_files.update(hashed_files)
def _post_process(self, paths, adjustable_paths, hashed_files):
# Sort the files by directory level
def path_level(name):
return len(name.split(os.sep))
for name in sorted(paths, key=path_level, reverse=True):
substitutions = True
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
# generate the hash with the original content, even for
# adjustable files.
if hash_key not in hashed_files:
hashed_name = self.hashed_name(name, original_file)
else:
hashed_name = hashed_files[hash_key]
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
old_hashed_name = hashed_name
content = original_file.read().decode(settings.FILE_CHARSET)
for extension, patterns in self._patterns.items():
if matches_patterns(path, (extension,)):
for pattern, template in patterns:
converter = self.url_converter(name, hashed_files, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc, False
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
# Save intermediate file for reference
saved_name = self._save(hashed_name, content_file)
hashed_name = self.hashed_name(name, content_file)
if self.exists(hashed_name):
self.delete(hashed_name)
saved_name = self._save(hashed_name, content_file)
hashed_name = self.clean_name(saved_name)
# If the file hash stayed the same, this file didn't change
if old_hashed_name == hashed_name:
substitutions = False
processed = True
if not processed:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = self.clean_name(saved_name)
# and then set the cache accordingly
hashed_files[hash_key] = hashed_name
yield name, hashed_name, processed, substitutions
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def _stored_name(self, name, hashed_files):
# Normalize the path to avoid multiple names for the same file like
# ../foo/bar.styling and ../foo/../foo/bar.styling which normalize to the same
# path.
name = posixpath.normpath(name)
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
return cache_name
def stored_name(self, name):
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name:
return cache_name
# No cached name found, recalculate it from the files.
intermediate_name = name
for i in range(self.max_post_process_passes + 1):
cache_name = self.clean_name(
self.hashed_name(name, content=None, filename=intermediate_name)
)
if intermediate_name == cache_name:
# Store the hashed name if there was a miss.
self.hashed_files[hash_key] = cache_name
return cache_name
else:
# Move on to the next intermediate file.
intermediate_name = cache_name
# If the cache name can't be determined after the max number of passes,
# the intermediate files on disk may be corrupt; avoid an infinite loop.
raise ValueError("The name '%s' could not be hashed with %r." % (name, self))
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
manifest_strict = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode()
except IOError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return OrderedDict()
try:
stored = json.loads(content, object_pairs_hook=OrderedDict)
except ValueError:
pass
else:
version = stored.get('version')
if version == '1.0':
return stored.get('paths', OrderedDict())
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
def post_process(self, *args, **kwargs):
self.hashed_files = OrderedDict()
yield from super().post_process(*args, **kwargs)
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode()
self._save(self.manifest_name, ContentFile(contents))
def stored_name(self, name):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
hash_key = self.hash_key(clean_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
if self.manifest_strict:
raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
cache_name = self.clean_name(self.hashed_name(name))
unparsed_name = list(parsed_name)
unparsed_name[2] = cache_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
class _MappingCache:
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(caches['staticfiles'])
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
|
hashed_name, fragment = name, ''
|
example.routing.component.spec.ts
|
import { TestBed, inject } from "@angular/core/testing";
import { Router } from "@angular/router";
import { ExampleRoutingComponent } from "./example.routing.component";
describe('Testing routing in a component using a Stub', () => {
let component, fixture;
class
|
{
navigateByUrl() { }
}
beforeEach(() => {
TestBed.configureTestingModule({
declarations: [ExampleRoutingComponent],
providers: [{
provide: Router, useClass: RouterStub // replace 'Router' with our Stub
}]
}).compileComponents();
})
beforeEach(() => {
fixture = TestBed.createComponent(ExampleRoutingComponent);
component = fixture.debugElement.componentInstance;
});
it('test back() method', inject([Router], (router: Router) => {
const spy = spyOn(router, 'navigateByUrl');
component.back(); // invoking our back method that should call the spy in turn
expect(spy.calls.any()).toBe(true);
}));
});
|
RouterStub
|
main.go
|
// Copyright © 2020 lvzhihao <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package main
import "github.com/lvzhihao/webdeploy/cmd"
func main() {
|
cmd.Execute()
}
|
|
lib.rs
|
/**
* Copyright (c) 2020 Ayush Kumar Mishra
*
* This source code is licensed under the MIT License found in
* the LICENSE file in the root directory of this source tree.
*/
extern crate proc_macro;
use proc_macro::{TokenStream, TokenTree};
|
use trace_caller::backtrace::{Backtrace, BacktraceFrame};
let (trace, curr_file, curr_line) = (Backtrace::new(), file!(), line!());
let frames = trace.frames();
let previous_symbol = frames
.iter()
.flat_map(BacktraceFrame::symbols)
.skip_while(|s| {
s.filename()
.map(|p| !p.ends_with(curr_file))
.unwrap_or(true)
|| s.lineno() != Some(curr_line)
})
.nth(1)
.unwrap();
println!(\"Called from {:?} at line {:?}\",
previous_symbol.filename().unwrap(), previous_symbol.lineno().unwrap());
"
.parse()
.unwrap();
item.into_iter()
.map(|tt| match tt {
TokenTree::Group(ref g) if g.delimiter() == proc_macro::Delimiter::Brace => {
prefix.extend(g.stream());
TokenTree::Group(proc_macro::Group::new(
proc_macro::Delimiter::Brace,
prefix.clone(),
))
}
other => other,
})
.collect()
}
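// Usage sketch for the attribute defined below (assumes the generated prefix
// compiles against the `trace_caller::backtrace` re-export shown above):
//
//   #[trace]
//   fn add(a: i32, b: i32) -> i32 { a + b }
//
//   // add(1, 2) first prints the caller's file and line, then runs the body.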
|
/// This attribute will enable a function to access the caller's source location.
#[proc_macro_attribute]
pub fn trace(_attr: TokenStream, item: TokenStream) -> TokenStream {
let mut prefix: TokenStream = "
|
ethernet.rs
|
#![allow(dead_code)]
#![allow(unused_imports)]
#![allow(unused_variables)]
use core::cell::RefCell;
use core::sync::atomic::{AtomicU32, Ordering};
use cortex_m::interrupt::Mutex;
use cortex_m_rt::exception;
use stm32h7xx_hal as hal;
use hal::prelude::*;
use hal::hal::digital::v2::OutputPin;
use hal::gpio::Speed::*;
use hal::{ethernet, ethernet::PHY};
use hal::pac;
use pac::interrupt;
use embedded_timeout_macros::{
block_timeout,
repeat_timeout,
TimeoutError,
};
use smoltcp;
use smoltcp::iface::{
EthernetInterface, EthernetInterfaceBuilder, Neighbor, NeighborCache,
Route, Routes,
};
use smoltcp::socket::{SocketHandle, SocketSet, SocketSetItem};
use smoltcp::socket::{UdpSocket, UdpSocketBuffer, UdpPacketMetadata};
use smoltcp::storage::PacketMetadata;
use smoltcp::time::{Duration, Instant};
use smoltcp::wire::{EthernetAddress, IpAddress, IpCidr, Ipv6Cidr, IpEndpoint, Ipv4Address};
use heapless::Vec;
use crate::timer::CountDownTimer as Timer;
use crate::pins;
// - global constants ---------------------------------------------------------
pub const MAX_UDP_PACKET_SIZE: usize = 576;
//pub const MAX_UDP_PACKET_SIZE: usize = 4096;
// - global static state ------------------------------------------------------
#[link_section = ".sram3.eth"]
pub static mut ETHERNET_DESCRIPTOR_RING: ethernet::DesRing = ethernet::DesRing::new();
static mut ETHERNET_MUTEX: Mutex<RefCell<Option<Interface>>> = Mutex::new(RefCell::new(None));
pub static ATOMIC_TIME: AtomicU32 = AtomicU32::new(0);
// - statically allocated storage ---------------------------------------------
static mut ETHERNET_STORAGE: Storage = Storage::new();
static mut ETHERNET_SOCKETS_STORAGE: Vec<UdpSocketStorage, heapless::consts::U8>
= Vec(heapless::i::Vec::new());
pub struct Storage<'a> {
ip_addrs: [IpCidr; 1],
socket_set_entries: [Option<SocketSetItem<'a>>; 8],
neighbor_cache_storage: [Option<(IpAddress, Neighbor)>; 8],
routes_storage: [Option<(IpCidr, Route)>; 1],
}
impl<'a> Storage<'a> {
const fn new() -> Self {
Storage {
ip_addrs: [IpCidr::Ipv6(Ipv6Cidr::SOLICITED_NODE_PREFIX)],
socket_set_entries: [None, None, None, None, None, None, None, None],
neighbor_cache_storage: [None; 8],
routes_storage: [None; 1],
}
}
}
#[derive(Debug)]
pub struct UdpSocketStorage <'a> {
socket_handle: Option<SocketHandle>,
udp_rx_metadata: [PacketMetadata<IpEndpoint>; 1],
udp_tx_metadata: [PacketMetadata<IpEndpoint>; 1],
udp_rx_buffer: [u8; MAX_UDP_PACKET_SIZE],
udp_tx_buffer: [u8; MAX_UDP_PACKET_SIZE],
_marker: core::marker::PhantomData<&'a ()>
}
impl<'a> UdpSocketStorage<'a> {
const fn new() -> Self {
Self {
socket_handle: None,
udp_rx_metadata: [UdpPacketMetadata::EMPTY],
udp_tx_metadata: [UdpPacketMetadata::EMPTY],
udp_rx_buffer: [0u8; MAX_UDP_PACKET_SIZE],
udp_tx_buffer: [0u8; MAX_UDP_PACKET_SIZE],
_marker: core::marker::PhantomData,
}
}
}
// - types --------------------------------------------------------------------
#[derive(Debug)]
pub enum Error {
LinkTimedOut,
ToDo,
}
// - ethernet::Interface ------------------------------------------------------
pub struct Interface<'a> {
pins: self::Pins,
lan8742a: Option<hal::ethernet::phy::LAN8742A<hal::ethernet::EthernetMAC>>,
interface: Option<EthernetInterface<'a, ethernet::EthernetDMA<'a>>>,
pub sockets: Option<SocketSet<'static>>,
_marker: core::marker::PhantomData<&'a ()>,
}
impl<'a> Interface<'a> {
fn new(pins: self::Pins) -> Self {
Self {
pins: pins,
lan8742a: None,
interface: None,
sockets: None,
_marker: core::marker::PhantomData,
}
}
pub unsafe fn free(mut self) -> (pins::ethernet::Pins,
hal::ethernet::phy::LAN8742A<hal::ethernet::EthernetMAC>) {
// halt interrupts
let eth_dma = &*pac::ETHERNET_DMA::ptr();
eth_dma.dmacier.modify(|_, w|
w.nie().clear_bit() // normal interrupt summary enable
.rie().clear_bit() // receive interrupt enable
.tie().clear_bit() // transmit interrupt enable
);
cortex_m::peripheral::NVIC::mask(pac::Interrupt::ETH);
// reclaim the objects used to create this structure
let owned_resources = (
pins::ethernet::Pins {
ref_clk: self.pins.ref_clk.into_analog(),
md_io: self.pins.md_io.into_analog(),
md_clk: self.pins.md_clk.into_analog(),
crs: self.pins.crs.into_analog(),
rx_d0: self.pins.rx_d0.into_analog(),
rx_d1: self.pins.rx_d1.into_analog(),
tx_en: self.pins.tx_en.into_analog(),
tx_d0: self.pins.tx_d0.into_analog(),
tx_d1: self.pins.tx_d1.into_analog()
},
self.lan8742a.take().unwrap(),
);
// clean out static global singleton
cortex_m::interrupt::free(|cs| {
ETHERNET_MUTEX.borrow(cs).replace(None);
});
owned_resources
}
pub fn start(pins: pins::ethernet::Pins,
mac_address: &[u8; 6],
ip_address: &[u8; 4],
eth1mac: hal::rcc::rec::Eth1Mac,
ccdr_clocks: &hal::rcc::CoreClocks,
timeout_timer: Timer<pac::TIM17>) -> Result<Timer<pac::TIM17>, Error>
|
pub fn interrupt_free<F, R>(f: F) -> R where
F: FnOnce(&mut Interface<'static>) -> R {
cortex_m::interrupt::free(|cs| {
if let Some (ethernet_interface) = unsafe { ETHERNET_MUTEX.borrow(cs).borrow_mut().as_mut() } {
f(ethernet_interface)
} else {
panic!("Ethernet interface has not been started");
}
})
}
fn up(&mut self,
mac_address: &[u8; 6],
ip_address: &[u8; 4],
eth1mac: hal::rcc::rec::Eth1Mac,
ccdr_clocks: &hal::rcc::CoreClocks,
mut timeout_timer: Timer<pac::TIM17>) -> Result<Timer<pac::TIM17>, Error> {
let dp = unsafe { pac::Peripherals::steal() };
let ethernet_address = EthernetAddress::from_bytes(mac_address);
let (eth_dma, eth_mac) = unsafe {
ethernet::new_unchecked(
dp.ETHERNET_MAC,
dp.ETHERNET_MTL,
dp.ETHERNET_DMA,
&mut ETHERNET_DESCRIPTOR_RING,
ethernet_address,
eth1mac,
ccdr_clocks,
)
};
// initialise PHY
let mut lan8742a: hal::ethernet::phy::LAN8742A<hal::ethernet::EthernetMAC>
= ethernet::phy::LAN8742A::new(eth_mac.set_phy_addr(0));
lan8742a.phy_reset();
lan8742a.phy_init();
// wait for link to come up
timeout_timer.start(10_000.ms());
let result: Result<(), TimeoutError<()>> = block_timeout!(
&mut timeout_timer,
{
if lan8742a.poll_link() {
Ok(())
} else {
Err(nb::Error::WouldBlock)
}
}
);
match result {
Ok(()) => (),
Err(_) => {
return Err(Error::LinkTimedOut);
},
}
// enable ethernet interrupt
let cp = unsafe { &mut pac::CorePeripherals::steal() };
unsafe {
ethernet::enable_interrupt();
cp.NVIC.set_priority(pac::Interrupt::ETH, 196); // mid prio
cortex_m::peripheral::NVIC::unmask(pac::Interrupt::ETH);
}
// --------------------------------------------------------------------
unsafe {
ETHERNET_STORAGE.ip_addrs = [IpCidr::new(Ipv4Address::from_bytes(ip_address).into(), 0)];
}
let neighbor_cache = NeighborCache::new(unsafe { &mut ETHERNET_STORAGE.neighbor_cache_storage[..] });
let routes = Routes::new(unsafe { &mut ETHERNET_STORAGE.routes_storage[..] });
let interface = EthernetInterfaceBuilder::new(eth_dma)
.ethernet_addr(ethernet_address)
.neighbor_cache(neighbor_cache)
.ip_addrs(unsafe { &mut ETHERNET_STORAGE.ip_addrs[..] })
.routes(routes)
.finalize();
let sockets = SocketSet::new(unsafe { &mut ETHERNET_STORAGE.socket_set_entries[..] });
self.lan8742a = Some(lan8742a);
self.interface = Some(interface);
self.sockets = Some(sockets);
Ok(timeout_timer)
}
pub fn poll_link(&mut self) -> bool {
self.lan8742a.as_mut().unwrap().poll_link()
}
// poll ethernet interface
pub fn poll(&mut self) -> Result<bool, smoltcp::Error> {
let timestamp = Instant::from_millis(self.now());
self.interface.as_mut().unwrap().poll(&mut self.sockets.as_mut().unwrap(), timestamp)
}
pub fn poll_delay(&mut self) -> Option<Duration> {
let timestamp = Instant::from_millis(self.now());
self.interface.as_mut().unwrap().poll_delay(&mut self.sockets.as_mut().unwrap(), timestamp)
}
/// returns an absolute time value in milliseconds
pub fn now(&self) -> i64 {
ATOMIC_TIME.load(Ordering::Relaxed).into()
}
pub fn new_udp_socket(&mut self) -> SocketHandle {
unsafe {
ETHERNET_SOCKETS_STORAGE.push(UdpSocketStorage::new()).unwrap(); // TODO handle result
}
let len = unsafe { ETHERNET_SOCKETS_STORAGE.len() };
let socket_storage = unsafe { &mut ETHERNET_SOCKETS_STORAGE[len - 1] };
let udp_socket = UdpSocket::new(
UdpSocketBuffer::new(&mut socket_storage.udp_rx_metadata[..],
&mut socket_storage.udp_rx_buffer[..]),
UdpSocketBuffer::new(&mut socket_storage.udp_tx_metadata[..],
&mut socket_storage.udp_tx_buffer[..]),
);
let socket_handle = self.sockets.as_mut().unwrap().add(udp_socket);
socket_handle
}
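// Usage sketch (assuming this module is mounted at `crate::ethernet`): create
// and bind a socket from non-interrupt context via `interrupt_free`:
//
//   let handle = crate::ethernet::interrupt_free(|eth| eth.new_udp_socket());
//   crate::ethernet::interrupt_free(|eth| {
//       let mut socket = eth.sockets.as_mut().unwrap().get::<UdpSocket>(handle);
//       socket.bind(IpEndpoint::new(IpAddress::Unspecified, 8000)).ok();
//   });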
}
// - Pins ---------------------------------------------------------------------
type AlternateFunction11 = hal::gpio::Alternate<hal::gpio::AF11>;
// Also see: https://github.com/stm32-rs/stm32-eth/blob/master/src/setup.rs
use hal::gpio::gpioa;
use hal::gpio::gpiob;
use hal::gpio::gpioc;
use hal::gpio::gpiog;
pub struct Pins {
pub ref_clk: gpioa::PA1 <AlternateFunction11>, // REFCLK, // RmiiRefClk
pub md_io: gpioa::PA2 <AlternateFunction11>, // IO, // MDIO
pub md_clk: gpioc::PC1 <AlternateFunction11>, // CLK, // MDC
pub crs: gpioa::PA7 <AlternateFunction11>, // CRS, // RmiiCrsDv
pub rx_d0: gpioc::PC4 <AlternateFunction11>, // RXD0, // RmiiRxD0
pub rx_d1: gpioc::PC5 <AlternateFunction11>, // RXD1, // RmiiRxD1
pub tx_en: gpiog::PG11<AlternateFunction11>, // TXEN, // RmiiTxEN
pub tx_d0: gpiog::PG13<AlternateFunction11>, // TXD0, // RmiiTxD0
pub tx_d1: gpiob::PB13<AlternateFunction11>, // TXD1, // RmiiTxD1
}
// - interrupts and exceptions ------------------------------------------------
#[interrupt]
fn ETH() {
unsafe { ethernet::interrupt_handler() };
}
#[exception]
fn SysTick() {
ATOMIC_TIME.fetch_add(1, Ordering::Relaxed);
}
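// SysTick is configured (in `start`, below) to fire every 1 ms, so ATOMIC_TIME
// counts milliseconds since the interface came up; `Interface::now()` hands
// that value to smoltcp as its clock.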
|
{
let pins = self::Pins {
ref_clk: pins.ref_clk.into_alternate_af11().set_speed(VeryHigh),
md_io: pins.md_io.into_alternate_af11().set_speed(VeryHigh),
md_clk: pins.md_clk.into_alternate_af11().set_speed(VeryHigh),
crs: pins.crs.into_alternate_af11().set_speed(VeryHigh),
rx_d0: pins.rx_d0.into_alternate_af11().set_speed(VeryHigh),
rx_d1: pins.rx_d1.into_alternate_af11().set_speed(VeryHigh),
tx_en: pins.tx_en.into_alternate_af11().set_speed(VeryHigh),
tx_d0: pins.tx_d0.into_alternate_af11().set_speed(VeryHigh),
tx_d1: pins.tx_d1.into_alternate_af11().set_speed(VeryHigh)
};
let mut interface = Interface::new(pins);
let timeout_timer = match interface.up(mac_address,
ip_address,
eth1mac,
ccdr_clocks,
timeout_timer) {
Ok(timeout_timer) => {
timeout_timer
},
Err(e) => {
return Err(e);
}
};
// wrap ethernet interface in mutex
cortex_m::interrupt::free(|cs| {
unsafe {
ETHERNET_MUTEX.borrow(cs).replace(Some(interface));
}
});
// configure systick timer to 1ms
let syst = unsafe { &mut pac::CorePeripherals::steal().SYST };
let c_ck_mhz = ccdr_clocks.c_ck().0 / 1_000_000;
let syst_calib = 0x3E8;
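// reload = (1000 ticks/ms * core clock in MHz) - 1, i.e. one SysTick
// interrupt per millisecond (0x3E8 == 1000)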
syst.set_clock_source(cortex_m::peripheral::syst::SystClkSource::Core);
syst.set_reload((syst_calib * c_ck_mhz) - 1);
syst.enable_interrupt();
syst.enable_counter();
Ok(timeout_timer)
}
|
sqs.go
|
package sqs
import (
"fmt"
"github.com/pjmyburg/virago/config"
)
// SQS represents an instance of the SQS mock
type SQS struct {
queues map[string]queue
}
type queue struct {
url string
in chan message
inFlight chan message
}
type message struct {
body string
}
// New creates a new SQS instance and creates queues using the supplied config on startup
func
|
(conf *config.Config) *SQS {
queues := make(map[string]queue)
for _, confQ := range conf.Queues {
queues[confQ.Name] = queue{
url: fmt.Sprintf("https://%s.queue.amazonaws.com/%s/%s", config.Region, config.AccountID, confQ.Name),
in: nil,
inFlight: nil,
}
}
return &SQS{
queues: queues,
}
}
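// Usage sketch (hypothetical queue config):
//
//   conf := &config.Config{ /* Queues: one entry named "jobs" */ }
//   s := New(conf)
//   // s.queues["jobs"].url == "https://<region>.queue.amazonaws.com/<account>/jobs"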
|
New
|
connection.rs
|
#![allow(clippy::if_same_then_else)]
use crate::{
connection_utils::{self, ConnectionError},
TimeSync, VideoFrame, BATTERY_SENDER, INPUT_SENDER, TIME_SYNC_SENDER,
VIDEO_ERROR_REPORT_SENDER, VIEWS_CONFIG_SENDER,
};
use alvr_common::{
glam::{Quat, Vec2, Vec3},
log,
prelude::*,
ALVR_NAME, ALVR_VERSION,
};
use alvr_session::{CodecType, SessionDesc, TrackingSpace};
use alvr_sockets::{
spawn_cancelable, ClientConfigPacket, ClientControlPacket, ClientHandshakePacket, Haptics,
HeadsetInfoPacket, PeerType, PlayspaceSyncPacket, PrivateIdentity, ProtoControlSocket,
ServerControlPacket, ServerHandshakePacket, StreamSocketBuilder, VideoFrameHeaderPacket, AUDIO,
HAPTICS, INPUT, VIDEO,
};
use futures::future::BoxFuture;
use jni::{
objects::{GlobalRef, JClass},
JavaVM,
};
use serde_json as json;
use settings_schema::Switch;
use std::{
future, mem, ptr, slice,
sync::{
atomic::{AtomicBool, Ordering},
mpsc as smpsc, Arc,
},
time::Duration,
};
use tokio::{
sync::{mpsc as tmpsc, Mutex},
task,
time::{self, Instant},
};
#[cfg(target_os = "android")]
use crate::audio;
const INITIAL_MESSAGE: &str = "Searching for server...\n(open ALVR on your PC)";
const NETWORK_UNREACHABLE_MESSAGE: &str = "Cannot connect to the internet";
const CLIENT_UNTRUSTED_MESSAGE: &str = "On the PC, click \"Trust\"\nnext to the client entry";
const INCOMPATIBLE_VERSIONS_MESSAGE: &str = concat!(
"Server and client have\n",
"incompatible types.\n",
"Please update either the app\n",
"on the PC or on the headset"
);
const STREAM_STARTING_MESSAGE: &str = "The stream will begin soon\nPlease wait...";
const SERVER_RESTART_MESSAGE: &str = "The server is restarting\nPlease wait...";
const SERVER_DISCONNECTED_MESSAGE: &str = "The server has disconnected.";
const CONTROL_CONNECT_RETRY_PAUSE: Duration = Duration::from_millis(500);
const RETRY_CONNECT_MIN_INTERVAL: Duration = Duration::from_secs(1);
const PLAYSPACE_SYNC_INTERVAL: Duration = Duration::from_millis(500);
const NETWORK_KEEPALIVE_INTERVAL: Duration = Duration::from_secs(1);
const CLEANUP_PAUSE: Duration = Duration::from_millis(500);
// close stream on Drop (manual disconnection or execution canceling)
struct StreamCloseGuard {
is_connected: Arc<AtomicBool>,
}
impl Drop for StreamCloseGuard {
fn drop(&mut self) {
self.is_connected.store(false, Ordering::Relaxed);
}
}
fn set_loading_message(
java_vm: &JavaVM,
activity_ref: &GlobalRef,
hostname: &str,
message: &str,
) -> StrResult {
let message = format!(
"ALVR v{}\nhostname: {hostname}\n \n{message}",
*ALVR_VERSION,
);
// Note: env = java_vm.attach_current_thread() cannot be saved into a variable because it is
// not Send (compile error). This makes sense since tokio could move the execution of this
// task to another thread at any time, and env is valid only within a specific thread. For
// the same reason, other jni objects cannot be made into variables and the arguments must
// be created inline within the call_method() call
trace_err!(trace_err!(java_vm.attach_current_thread())?.call_method(
activity_ref,
"setLoadingMessage",
"(Ljava/lang/String;)V",
&[trace_err!(trace_err!(java_vm.attach_current_thread())?.new_string(message))?.into()],
))?;
Ok(())
}
async fn connection_pipeline(
headset_info: &HeadsetInfoPacket,
device_name: String,
private_identity: &PrivateIdentity,
java_vm: Arc<JavaVM>,
activity_ref: Arc<GlobalRef>,
nal_class_ref: Arc<GlobalRef>,
) -> StrResult {
let hostname = &private_identity.hostname;
let handshake_packet = ClientHandshakePacket {
alvr_name: ALVR_NAME.into(),
version: ALVR_VERSION.clone(),
device_name,
hostname: hostname.clone(),
reserved1: "".into(),
reserved2: "".into(),
};
let (mut proto_socket, server_ip) = tokio::select! {
res = connection_utils::announce_client_loop(handshake_packet) => {
match res? {
ConnectionError::ServerMessage(message) => {
info!("Server response: {message:?}");
let message_str = match message {
ServerHandshakePacket::ClientUntrusted => CLIENT_UNTRUSTED_MESSAGE,
ServerHandshakePacket::IncompatibleVersions =>
INCOMPATIBLE_VERSIONS_MESSAGE,
};
set_loading_message(&*java_vm, &*activity_ref, hostname, message_str)?;
return Ok(());
}
ConnectionError::NetworkUnreachable => {
info!("Network unreachable");
set_loading_message(
&*java_vm,
&*activity_ref,
hostname,
NETWORK_UNREACHABLE_MESSAGE,
)?;
time::sleep(RETRY_CONNECT_MIN_INTERVAL).await;
set_loading_message(
&*java_vm,
&*activity_ref,
&private_identity.hostname,
INITIAL_MESSAGE,
)
.ok();
return Ok(());
}
}
},
pair = async {
loop {
if let Ok(pair) = ProtoControlSocket::connect_to(PeerType::Server).await {
break pair;
}
time::sleep(CONTROL_CONNECT_RETRY_PAUSE).await;
}
} => pair
};
trace_err!(proto_socket.send(&(headset_info, server_ip)).await)?;
let config_packet = trace_err!(proto_socket.recv::<ClientConfigPacket>().await)?;
let (control_sender, mut control_receiver) = proto_socket.split();
let control_sender = Arc::new(Mutex::new(control_sender));
match control_receiver.recv().await {
Ok(ServerControlPacket::StartStream) => {
info!("Stream starting");
set_loading_message(&*java_vm, &*activity_ref, hostname, STREAM_STARTING_MESSAGE)?;
}
Ok(ServerControlPacket::Restarting) => {
info!("Server restarting");
set_loading_message(&*java_vm, &*activity_ref, hostname, SERVER_RESTART_MESSAGE)?;
return Ok(());
}
Err(e) => {
info!("Server disconnected. Cause: {e}");
set_loading_message(
&*java_vm,
&*activity_ref,
hostname,
SERVER_DISCONNECTED_MESSAGE,
)?;
return Ok(());
}
_ => {
info!("Unexpected packet");
set_loading_message(&*java_vm, &*activity_ref, hostname, "Unexpected packet")?;
return Ok(());
}
}
let settings = {
let mut session_desc = SessionDesc::default();
session_desc.merge_from_json(&trace_err!(json::from_str(&config_packet.session_desc))?)?;
session_desc.to_settings()
};
let stream_socket_builder = StreamSocketBuilder::listen_for_server(
settings.connection.stream_port,
settings.connection.stream_protocol,
)
.await?;
if let Err(e) = control_sender
.lock()
.await
.send(&ClientControlPacket::StreamReady)
.await
{
info!("Server disconnected. Cause: {e}");
set_loading_message(
&*java_vm,
&*activity_ref,
hostname,
SERVER_DISCONNECTED_MESSAGE,
)?;
return Ok(());
}
let stream_socket = tokio::select! {
res = stream_socket_builder.accept_from_server(
server_ip,
settings.connection.stream_port,
) => res?,
_ = time::sleep(Duration::from_secs(5)) => {
return fmt_e!("Timeout while setting up streams");
}
};
let stream_socket = Arc::new(stream_socket);
info!("Connected to server");
let is_connected = Arc::new(AtomicBool::new(true));
let _stream_guard = StreamCloseGuard {
is_connected: Arc::clone(&is_connected),
};
trace_err!(trace_err!(java_vm.attach_current_thread())?.call_method(
&*activity_ref,
"setDarkMode",
"(Z)V",
&[settings.extra.client_dark_mode.into()],
))?;
// create this before initializing the stream on cpp side
let (views_config_sender, mut views_config_receiver) = tmpsc::unbounded_channel();
*VIEWS_CONFIG_SENDER.lock() = Some(views_config_sender);
let (battery_sender, mut battery_receiver) = tmpsc::unbounded_channel();
*BATTERY_SENDER.lock() = Some(battery_sender);
unsafe {
crate::setStreamConfig(crate::StreamConfig {
eyeWidth: config_packet.eye_resolution_width,
eyeHeight: config_packet.eye_resolution_height,
refreshRate: config_packet.fps,
enableFoveation: matches!(settings.video.foveated_rendering, Switch::Enabled(_)),
foveationCenterSizeX: if let Switch::Enabled(foveation_vars) =
&settings.video.foveated_rendering
{
foveation_vars.center_size_x
} else {
3_f32 / 5_f32
},
foveationCenterSizeY: if let Switch::Enabled(foveation_vars) =
&settings.video.foveated_rendering
{
foveation_vars.center_size_y
} else {
2_f32 / 5_f32
},
foveationCenterShiftX: if let Switch::Enabled(foveation_vars) =
&settings.video.foveated_rendering
|
else {
2_f32 / 5_f32
},
foveationCenterShiftY: if let Switch::Enabled(foveation_vars) =
&settings.video.foveated_rendering
{
foveation_vars.center_shift_y
} else {
1_f32 / 10_f32
},
foveationEdgeRatioX: if let Switch::Enabled(foveation_vars) =
&settings.video.foveated_rendering
{
foveation_vars.edge_ratio_x
} else {
2_f32
},
foveationEdgeRatioY: if let Switch::Enabled(foveation_vars) =
&settings.video.foveated_rendering
{
foveation_vars.edge_ratio_y
} else {
2_f32
},
trackingSpaceType: matches!(settings.headset.tracking_space, TrackingSpace::Stage) as _,
extraLatencyMode: settings.headset.extra_latency_mode,
});
}
trace_err!(trace_err!(java_vm.attach_current_thread())?.call_method(
&*activity_ref,
"onServerConnected",
"(FIZLjava/lang/String;)V",
&[
config_packet.fps.into(),
(matches!(settings.video.codec, CodecType::HEVC) as i32).into(),
settings.video.client_request_realtime_decoder.into(),
trace_err!(trace_err!(java_vm.attach_current_thread())?
.new_string(config_packet.dashboard_url))?
.into()
],
))?;
let tracking_clientside_prediction = match &settings.headset.controllers {
Switch::Enabled(controllers) => controllers.clientside_prediction,
Switch::Disabled => false,
};
// setup stream loops
// let (debug_sender, mut debug_receiver) = tmpsc::unbounded_channel();
// let debug_loop = {
// let control_sender = Arc::clone(&control_sender);
// async move {
// while let Some(data) = debug_receiver.recv().await {
// control_sender
// .lock()
// .await
// .send(&ClientControlPacket::Reserved(data))
// .await
// .ok();
// }
// Ok(())
// }
// };
let input_send_loop = {
let mut socket_sender = stream_socket.request_stream(INPUT).await?;
async move {
let (data_sender, mut data_receiver) = tmpsc::unbounded_channel();
*INPUT_SENDER.lock() = Some(data_sender);
while let Some(input) = data_receiver.recv().await {
socket_sender
.send_buffer(socket_sender.new_buffer(&input, 0)?)
.await
.ok();
}
Ok(())
}
};
let time_sync_send_loop = {
let control_sender = Arc::clone(&control_sender);
async move {
let (data_sender, mut data_receiver) = tmpsc::unbounded_channel();
*TIME_SYNC_SENDER.lock() = Some(data_sender);
while let Some(time_sync) = data_receiver.recv().await {
control_sender
.lock()
.await
.send(&ClientControlPacket::TimeSync(time_sync))
.await
.ok();
}
Ok(())
}
};
let video_error_report_send_loop = {
let control_sender = Arc::clone(&control_sender);
async move {
let (data_sender, mut data_receiver) = tmpsc::unbounded_channel();
*VIDEO_ERROR_REPORT_SENDER.lock() = Some(data_sender);
while let Some(()) = data_receiver.recv().await {
control_sender
.lock()
.await
.send(&ClientControlPacket::VideoErrorReport)
.await
.ok();
}
Ok(())
}
};
let views_config_send_loop = {
let control_sender = Arc::clone(&control_sender);
async move {
while let Some(config) = views_config_receiver.recv().await {
control_sender
.lock()
.await
.send(&ClientControlPacket::ViewsConfig(config))
.await
.ok();
}
Ok(())
}
};
let battery_send_loop = {
let control_sender = Arc::clone(&control_sender);
async move {
while let Some(packet) = battery_receiver.recv().await {
control_sender
.lock()
.await
.send(&ClientControlPacket::Battery(packet))
.await
.ok();
}
Ok(())
}
};
let (legacy_receive_data_sender, legacy_receive_data_receiver) = smpsc::channel();
let legacy_receive_data_sender = Arc::new(Mutex::new(legacy_receive_data_sender));
let video_receive_loop = {
let mut receiver = stream_socket
.subscribe_to_stream::<VideoFrameHeaderPacket>(VIDEO)
.await?;
let legacy_receive_data_sender = legacy_receive_data_sender.clone();
async move {
loop {
let packet = receiver.recv().await?;
let mut buffer = vec![0_u8; mem::size_of::<VideoFrame>() + packet.buffer.len()];
let header = VideoFrame {
type_: 9, // ALVR_PACKET_TYPE_VIDEO_FRAME
packetCounter: packet.header.packet_counter,
trackingFrameIndex: packet.header.tracking_frame_index,
videoFrameIndex: packet.header.video_frame_index,
sentTime: packet.header.sent_time,
frameByteSize: packet.header.frame_byte_size,
fecIndex: packet.header.fec_index,
fecPercentage: packet.header.fec_percentage,
};
buffer[..mem::size_of::<VideoFrame>()].copy_from_slice(unsafe {
&mem::transmute::<_, [u8; mem::size_of::<VideoFrame>()]>(header)
});
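// Note: the transmute-based copy above assumes `VideoFrame` is a #[repr(C)]
// plain-old-data struct whose layout matches the native packet header.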
buffer[mem::size_of::<VideoFrame>()..].copy_from_slice(&packet.buffer);
legacy_receive_data_sender.lock().await.send(buffer).ok();
}
}
};
let haptics_receive_loop = {
let mut receiver = stream_socket
.subscribe_to_stream::<Haptics>(HAPTICS)
.await?;
async move {
loop {
let packet = receiver.recv().await?.header;
unsafe {
crate::onHapticsFeedbackNative(
packet.path,
packet.duration.as_secs_f32(),
packet.frequency,
packet.amplitude,
)
};
}
}
};
// The main stream loop must run on a dedicated thread because it accesses the JNI env
// many times per second; with a future, the env would have to be attached and detached
// continuously. When the parent function exits or gets canceled, this loop runs to completion.
let legacy_stream_socket_loop = task::spawn_blocking({
let java_vm = Arc::clone(&java_vm);
let activity_ref = Arc::clone(&activity_ref);
let nal_class_ref = Arc::clone(&nal_class_ref);
let codec = settings.video.codec;
let enable_fec = settings.connection.enable_fec;
move || -> StrResult {
let env = trace_err!(java_vm.attach_current_thread())?;
let env_ptr = env.get_native_interface() as _;
let activity_obj = activity_ref.as_obj();
let nal_class: JClass = nal_class_ref.as_obj().into();
unsafe {
crate::initializeSocket(
env_ptr,
*activity_obj as _,
**nal_class as _,
matches!(codec, CodecType::HEVC) as _,
enable_fec,
);
let mut idr_request_deadline = None;
while let Ok(mut data) = legacy_receive_data_receiver.recv() {
// Request the IDR packet again every 2s in case it was missed
// (due to a dropped burst of packets at the start of the stream or otherwise).
if !crate::IDR_PARSED.load(Ordering::Relaxed) {
if let Some(deadline) = idr_request_deadline {
if deadline < Instant::now() {
crate::IDR_REQUEST_NOTIFIER.notify_waiters();
idr_request_deadline = None;
}
} else {
idr_request_deadline = Some(Instant::now() + Duration::from_secs(2));
}
}
crate::legacyReceive(data.as_mut_ptr(), data.len() as _);
}
crate::closeSocket(env_ptr);
}
Ok(())
}
});
let tracking_interval = Duration::from_secs_f32(1_f32 / 360_f32);
let tracking_loop = async move {
let mut deadline = Instant::now();
loop {
unsafe { crate::onTrackingNative(tracking_clientside_prediction) };
deadline += tracking_interval;
time::sleep_until(deadline).await;
}
};
unsafe impl Send for crate::GuardianData {}
let playspace_sync_loop = {
let control_sender = Arc::clone(&control_sender);
async move {
loop {
let guardian_data = unsafe { crate::getGuardianData() };
if guardian_data.shouldSync {
let perimeter_points = if guardian_data.perimeterPointsCount == 0 {
None
} else {
let perimeter_slice = unsafe {
slice::from_raw_parts(
guardian_data.perimeterPoints,
guardian_data.perimeterPointsCount as _,
)
};
let perimeter_points = perimeter_slice
.iter()
.map(|p| Vec2::from_slice(&[p[0], p[2]]))
.collect::<Vec<_>>();
Some(perimeter_points)
};
let packet = PlayspaceSyncPacket {
position: Vec3::from_slice(&guardian_data.position),
rotation: Quat::from_xyzw(
guardian_data.rotation[0],
guardian_data.rotation[1],
guardian_data.rotation[2],
guardian_data.rotation[3],
),
area_width: guardian_data.areaWidth,
area_height: guardian_data.areaHeight,
perimeter_points,
};
control_sender
.lock()
.await
.send(&ClientControlPacket::PlayspaceSync(packet))
.await
.ok();
}
time::sleep(PLAYSPACE_SYNC_INTERVAL).await;
}
}
};
let game_audio_loop: BoxFuture<_> = if let Switch::Enabled(desc) = settings.audio.game_audio {
#[cfg(target_os = "android")]
{
let game_audio_receiver = stream_socket.subscribe_to_stream(AUDIO).await?;
Box::pin(audio::play_audio_loop(
config_packet.game_audio_sample_rate,
desc.config,
game_audio_receiver,
))
}
#[cfg(not(target_os = "android"))]
Box::pin(future::pending())
} else {
Box::pin(future::pending())
};
let microphone_loop: BoxFuture<_> = if let Switch::Enabled(config) = settings.audio.microphone {
#[cfg(target_os = "android")]
{
let microphone_sender = stream_socket.request_stream(AUDIO).await?;
Box::pin(audio::record_audio_loop(
config.sample_rate,
microphone_sender,
))
}
#[cfg(not(target_os = "android"))]
Box::pin(future::pending())
} else {
Box::pin(future::pending())
};
let keepalive_sender_loop = {
let control_sender = Arc::clone(&control_sender);
let java_vm = Arc::clone(&java_vm);
let activity_ref = Arc::clone(&activity_ref);
async move {
loop {
let res = control_sender
.lock()
.await
.send(&ClientControlPacket::KeepAlive)
.await;
if let Err(e) = res {
info!("Server disconnected. Cause: {e}");
set_loading_message(
&*java_vm,
&*activity_ref,
hostname,
SERVER_DISCONNECTED_MESSAGE,
)?;
break Ok(());
}
time::sleep(NETWORK_KEEPALIVE_INTERVAL).await;
}
}
};
let control_loop = {
let java_vm = Arc::clone(&java_vm);
let activity_ref = Arc::clone(&activity_ref);
async move {
loop {
tokio::select! {
_ = crate::IDR_REQUEST_NOTIFIER.notified() => {
control_sender.lock().await.send(&ClientControlPacket::RequestIdr).await?;
}
control_packet = control_receiver.recv() =>
match control_packet {
Ok(ServerControlPacket::Restarting) => {
info!("Server restarting");
set_loading_message(
&*java_vm,
&*activity_ref,
hostname,
SERVER_RESTART_MESSAGE
)?;
break Ok(());
}
Ok(ServerControlPacket::TimeSync(data)) => {
let time_sync = TimeSync {
type_: 7, // ALVR_PACKET_TYPE_TIME_SYNC
mode: data.mode,
serverTime: data.server_time,
clientTime: data.client_time,
sequence: 0,
packetsLostTotal: data.packets_lost_total,
packetsLostInSecond: data.packets_lost_in_second,
averageTotalLatency: 0,
averageSendLatency: data.average_send_latency,
averageTransportLatency: data.average_transport_latency,
averageDecodeLatency: data.average_decode_latency,
idleTime: data.idle_time,
fecFailure: data.fec_failure,
fecFailureInSecond: data.fec_failure_in_second,
fecFailureTotal: data.fec_failure_total,
fps: data.fps,
serverTotalLatency: data.server_total_latency,
trackingRecvFrameIndex: data.tracking_recv_frame_index,
};
let mut buffer = vec![0_u8; mem::size_of::<TimeSync>()];
buffer.copy_from_slice(unsafe {
&mem::transmute::<_, [u8; mem::size_of::<TimeSync>()]>(time_sync)
});
legacy_receive_data_sender.lock().await.send(buffer).ok();
},
Ok(_) => (),
Err(e) => {
info!("Server disconnected. Cause: {e}");
set_loading_message(
&*java_vm,
&*activity_ref,
hostname,
SERVER_DISCONNECTED_MESSAGE
)?;
break Ok(());
}
}
}
}
}
};
let receive_loop = async move { stream_socket.receive_loop().await };
// Run many tasks concurrently. Threading is managed by the runtime, for best performance.
tokio::select! {
res = spawn_cancelable(receive_loop) => {
if let Err(e) = res {
info!("Server disconnected. Cause: {e}");
}
set_loading_message(
&*java_vm,
&*activity_ref,
hostname,
SERVER_DISCONNECTED_MESSAGE
)?;
Ok(())
},
res = spawn_cancelable(game_audio_loop) => res,
res = spawn_cancelable(microphone_loop) => res,
res = spawn_cancelable(tracking_loop) => res,
res = spawn_cancelable(playspace_sync_loop) => res,
res = spawn_cancelable(input_send_loop) => res,
res = spawn_cancelable(time_sync_send_loop) => res,
res = spawn_cancelable(video_error_report_send_loop) => res,
res = spawn_cancelable(views_config_send_loop) => res,
res = spawn_cancelable(battery_send_loop) => res,
res = spawn_cancelable(video_receive_loop) => res,
res = spawn_cancelable(haptics_receive_loop) => res,
res = legacy_stream_socket_loop => trace_err!(res)?,
// keep these loops on the current task
res = keepalive_sender_loop => res,
res = control_loop => res,
// res = debug_loop => res,
}
}
pub async fn connection_lifecycle_loop(
headset_info: HeadsetInfoPacket,
device_name: &str,
private_identity: PrivateIdentity,
java_vm: Arc<JavaVM>,
activity_ref: Arc<GlobalRef>,
nal_class_ref: Arc<GlobalRef>,
) {
set_loading_message(
&*java_vm,
&*activity_ref,
&private_identity.hostname,
INITIAL_MESSAGE,
)
.ok();
loop {
tokio::join!(
async {
let maybe_error = connection_pipeline(
&headset_info,
device_name.to_owned(),
&private_identity,
Arc::clone(&java_vm),
Arc::clone(&activity_ref),
Arc::clone(&nal_class_ref),
)
.await;
if let Err(e) = maybe_error {
let message = format!("Connection error:\n{e}\nCheck the PC for more details");
error!("{message}");
set_loading_message(
&*java_vm,
&*activity_ref,
&private_identity.hostname,
&message,
)
.ok();
}
// let any running task or socket shutdown
time::sleep(CLEANUP_PAUSE).await;
},
time::sleep(RETRY_CONNECT_MIN_INTERVAL),
);
}
}
|
{
foveation_vars.center_shift_x
}
|
AWSEMRCluster_SimpleScalingPolicyConfiguration.go
|
// Package types contains functions that return a resource or property type when called.
// This code is autogenerated.
// Do not edit it by hand.
package types
|
func AWSEMRCluster_SimpleScalingPolicyConfigurationProperty() cf.PropertyType {
return cf.PropertyType{Documentation:"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticmapreduce-cluster-simplescalingpolicyconfiguration.html", Properties:map[string]cf.Property{"CoolDown":cf.Property{Documentation:"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticmapreduce-cluster-simplescalingpolicyconfiguration.html#cfn-elasticmapreduce-cluster-simplescalingpolicyconfiguration-cooldown", DuplicatesAllowed:false, ItemType:"", PrimitiveItemType:"", PrimitiveType:"Integer", Required:false, Type:"", UpdateType:"Mutable"}, "ScalingAdjustment":cf.Property{Documentation:"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticmapreduce-cluster-simplescalingpolicyconfiguration.html#cfn-elasticmapreduce-cluster-simplescalingpolicyconfiguration-scalingadjustment", DuplicatesAllowed:false, ItemType:"", PrimitiveItemType:"", PrimitiveType:"Integer", Required:true, Type:"", UpdateType:"Mutable"}, "AdjustmentType":cf.Property{Documentation:"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticmapreduce-cluster-simplescalingpolicyconfiguration.html#cfn-elasticmapreduce-cluster-simplescalingpolicyconfiguration-adjustmenttype", DuplicatesAllowed:false, ItemType:"", PrimitiveItemType:"", PrimitiveType:"String", Required:false, Type:"", UpdateType:"Mutable"}}}
}
|
import "github.com/awslabs/aws-cloudformation-template-builder/spec/cf"
|
factory.go
|
// Copyright Jetstack Ltd. See LICENSE for details.
// Code generated by informer-gen. DO NOT EDIT.
package internalversion
import (
reflect "reflect"
sync "sync"
time "time"
internalversion "github.com/jetstack/tarmak/pkg/wing/client/clientset/internalversion"
internalinterfaces "github.com/jetstack/tarmak/pkg/wing/client/informers/internalversion/internalinterfaces"
wing "github.com/jetstack/tarmak/pkg/wing/client/informers/internalversion/wing"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client internalversion.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client internalversion.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client internalversion.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func
|
(client internalversion.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
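// Usage sketch: options are applied left to right over the defaults above.
//
//   factory := NewSharedInformerFactoryWithOptions(
//       client,
//       30*time.Second,
//       WithNamespace("kube-system"),
//       WithCustomResyncConfig(map[v1.Object]time.Duration{}),
//   )
//   factory.Start(stopCh)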
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// WaitForCacheSync waits until all started informers' caches have synced.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Wing() wing.Interface
}
func (f *sharedInformerFactory) Wing() wing.Interface {
return wing.New(f, f.namespace, f.tweakListOptions)
}
|
NewSharedInformerFactoryWithOptions
|
se.rs
|
mod pp;
use crate::{serde_ext, Error, ErrorType};
pub use pp::*;
use serde_ext::ser;
use std::io::Write;
use std::result::Result;
use std::str;
use value_trait::generator::BaseGenerator;
macro_rules! iomap {
($e:expr) => {
($e).map_err(|err| Error::generic(ErrorType::IO(err)))
};
}
/// Write a value to a vector
/// # Errors
/// when the data can not be written
#[inline]
pub fn to_vec<T>(to: &T) -> crate::Result<Vec<u8>>
where
T: ser::Serialize + ?Sized,
{
let v = Vec::with_capacity(512);
let mut s = Serializer(v);
to.serialize(&mut s).map(|_| s.0)
}
/// Write a value to a string
///
/// # Errors
/// when the data can not be written
#[inline]
pub fn to_string<T>(to: &T) -> crate::Result<String>
where
T: ser::Serialize + ?Sized,
{
to_vec(to).map(|v| unsafe { String::from_utf8_unchecked(v) })
}
/// Write a value to a string
/// # Errors
/// when the data can not be written
#[inline]
pub fn to_writer<T, W>(writer: W, to: &T) -> crate::Result<()>
where
T: ser::Serialize + ?Sized,
W: Write,
{
let mut s = Serializer(writer);
to.serialize(&mut s)
}
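// Usage sketch:
//
//   let v = vec![1u8, 2, 3];
//   assert_eq!(to_string(&v).unwrap(), "[1,2,3]");
//   to_writer(std::io::stdout(), &v).unwrap();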
struct Serializer<W: Write>(W);
impl<'w, W> BaseGenerator for Serializer<W>
where
W: Write,
{
type T = W;
#[inline]
fn get_writer(&mut self) -> &mut Self::T {
&mut self.0
}
#[inline]
fn write_min(&mut self, _slice: &[u8], min: u8) -> std::io::Result<()> {
self.0.write_all(&[min])
}
}
struct SerializeSeq<'s, W: Write + 's> {
s: &'s mut Serializer<W>,
first: bool,
}
impl<'s, W> ser::SerializeSeq for SerializeSeq<'s, W>
where
W: Write,
{
type Ok = ();
type Error = Error;
#[inline]
fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: serde_ext::Serialize,
{
let SerializeSeq {
ref mut s,
ref mut first,
..
} = *self;
if *first {
*first = false;
value.serialize(&mut **s)
} else {
iomap!(s.write(b",")).and_then(|_| value.serialize(&mut **s))
}
}
#[inline]
fn end(self) -> Result<Self::Ok, Self::Error> {
if self.first {
Ok(())
} else {
iomap!(self.s.write(b"]"))
}
}
}
impl<'s, W> ser::SerializeTuple for SerializeSeq<'s, W>
where
W: Write,
{
type Ok = ();
type Error = Error;
#[inline]
fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: serde_ext::Serialize,
{
let SerializeSeq {
ref mut s,
ref mut first,
} = *self;
if *first {
*first = false;
value.serialize(&mut **s)
} else {
iomap!(s.write(b",")).and_then(|_| value.serialize(&mut **s))
}
}
#[inline]
fn end(self) -> Result<Self::Ok, Self::Error> {
if self.first {
Ok(())
} else {
iomap!(self.s.write(b"]"))
}
}
}
impl<'s, W> ser::SerializeTupleStruct for SerializeSeq<'s, W>
where
W: Write,
{
type Ok = ();
type Error = Error;
#[inline]
fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: serde_ext::Serialize,
{
let SerializeSeq {
ref mut s,
ref mut first,
} = *self;
if *first {
*first = false;
value.serialize(&mut **s)
} else {
iomap!(s.write(b",")).and_then(|_| value.serialize(&mut **s))
}
}
#[inline]
fn end(self) -> Result<Self::Ok, Self::Error> {
if self.first {
Ok(())
} else {
iomap!(self.s.write(b"]"))
}
}
}
impl<'s, W> ser::SerializeTupleVariant for SerializeSeq<'s, W>
where
W: Write,
{
type Ok = ();
type Error = Error;
#[inline]
fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: serde_ext::Serialize,
{
let SerializeSeq {
ref mut s,
ref mut first,
} = *self;
if *first {
*first = false;
value.serialize(&mut **s)
} else {
iomap!(s.write(b",")).and_then(|_| value.serialize(&mut **s))
}
}
#[inline]
fn end(self) -> Result<Self::Ok, Self::Error> {
if self.first {
Ok(())
} else {
iomap!(self.s.write(b"}"))
}
}
}
struct SerializeMap<'s, W: Write + 's> {
s: &'s mut Serializer<W>,
first: bool,
}
impl<'s, W> ser::SerializeMap for SerializeMap<'s, W>
where
W: Write,
{
type Ok = ();
type Error = Error;
#[inline]
fn serialize_key<T: ?Sized>(&mut self, key: &T) -> Result<(), Self::Error>
where
T: serde_ext::Serialize,
{
let SerializeMap {
ref mut s,
ref mut first,
..
} = *self;
if *first {
*first = false;
key.serialize(&mut **s).and_then(|_| iomap!(s.write(b":")))
} else {
iomap!(s.write(b","))
.and_then(|_| key.serialize(&mut **s))
.and_then(|_| iomap!(s.write(b":")))
}
}
#[inline]
fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
where
T: serde_ext::Serialize,
{
let SerializeMap { ref mut s, .. } = *self;
value.serialize(&mut **s)
}
#[inline]
fn end(self) -> Result<Self::Ok, Self::Error> {
if self.first {
Ok(())
} else {
iomap!(self.s.write(b"}"))
}
}
}
impl<'s, W> ser::SerializeStruct for SerializeMap<'s, W>
where
W: Write,
{
type Ok = ();
type Error = Error;
#[inline]
fn serialize_field<T: ?Sized>(
&mut self,
key: &'static str,
value: &T,
) -> Result<(), Self::Error>
where
T: serde_ext::Serialize,
{
let SerializeMap {
ref mut s,
ref mut first,
..
} = *self;
if *first {
*first = false;
iomap!(s.write_simple_string(key).and_then(|_| s.write(b":")))
.and_then(|_| value.serialize(&mut **s))
} else {
iomap!(s
.write(b",")
.and_then(|_| s.write_simple_string(key))
.and_then(|_| s.write(b":")))
.and_then(|_| value.serialize(&mut **s))
}
}
#[inline]
fn
|
(self) -> Result<Self::Ok, Self::Error> {
if self.first {
Ok(())
} else {
iomap!(self.s.write(b"}"))
}
}
}
impl<'s, W> ser::SerializeStructVariant for SerializeMap<'s, W>
where
W: Write,
{
type Ok = ();
type Error = Error;
#[inline]
fn serialize_field<T: ?Sized>(
&mut self,
key: &'static str,
value: &T,
) -> Result<(), Self::Error>
where
T: serde_ext::Serialize,
{
let SerializeMap {
ref mut s,
ref mut first,
..
} = *self;
if *first {
*first = false;
iomap!(s.write_simple_string(key).and_then(|_| s.write(b":")))
.and_then(|_| value.serialize(&mut **s))
} else {
iomap!(s
.write(b",")
.and_then(|_| s.write_simple_string(key))
.and_then(|_| s.write(b":")))
.and_then(|_| value.serialize(&mut **s))
}
}
#[inline]
fn end(self) -> Result<Self::Ok, Self::Error> {
if self.first {
Ok(())
} else {
iomap!(self.s.write(b"}"))
}
}
}
impl<'w, W> ser::Serializer for &'w mut Serializer<W>
where
W: Write,
{
type Ok = ();
type Error = Error;
type SerializeSeq = SerializeSeq<'w, W>;
type SerializeTuple = SerializeSeq<'w, W>;
type SerializeTupleStruct = SerializeSeq<'w, W>;
type SerializeTupleVariant = SerializeSeq<'w, W>;
type SerializeMap = SerializeMap<'w, W>;
type SerializeStruct = SerializeMap<'w, W>;
type SerializeStructVariant = SerializeMap<'w, W>;
#[inline]
fn serialize_bool(self, v: bool) -> Result<Self::Ok, Self::Error> {
if v {
iomap!(self.write(b"true"))
} else {
iomap!(self.write(b"false"))
}
}
#[inline]
fn serialize_i8(self, v: i8) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_i16(self, v: i16) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_i32(self, v: i32) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_i64(self, v: i64) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_i128(self, v: i128) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_u8(self, v: u8) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_u16(self, v: u16) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_u32(self, v: u32) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_u64(self, v: u64) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_u128(self, v: u128) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_int(v))
}
#[inline]
fn serialize_f32(self, v: f32) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_float(f64::from(v)))
}
#[inline]
fn serialize_f64(self, v: f64) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_float(v))
}
#[inline]
fn serialize_char(self, v: char) -> Result<Self::Ok, Self::Error> {
// A char encoded as UTF-8 takes 4 bytes at most.
// taken from: https://docs.serde.rs/src/serde_json/ser.rs.html#213
let mut buf = [0; 4];
iomap!(self.write_simple_string(v.encode_utf8(&mut buf)))
}
#[inline]
fn serialize_str(self, v: &str) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_string(v))
}
#[inline]
fn serialize_bytes(self, v: &[u8]) -> Result<Self::Ok, Self::Error> {
iomap!(self.write(b"[").and_then(|_| {
if let Some((first, rest)) = v.split_first() {
self.write_int(*first).and_then(|_| {
for v in rest {
if let Err(e) = self.write(b",").and_then(|_| self.write_int(*v)) {
return Err(e);
}
}
self.write(b"]")
})
} else {
self.write(b"]")
}
}))
}
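// Note: byte slices are emitted as a JSON array of integers,
// e.g. b"\x01\x02" serializes to [1,2].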
#[inline]
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
self.serialize_unit()
}
#[inline]
fn serialize_some<T: ?Sized>(self, value: &T) -> Result<Self::Ok, Self::Error>
where
T: serde_ext::Serialize,
{
value.serialize(self)
}
#[inline]
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
iomap!(self.write(b"null"))
}
#[inline]
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
self.serialize_unit()
}
#[inline]
fn serialize_unit_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
iomap!(self.write_simple_string(variant))
}
#[inline]
fn serialize_newtype_struct<T: ?Sized>(
self,
_name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: serde_ext::Serialize,
{
value.serialize(self)
}
#[inline]
fn serialize_newtype_variant<T: ?Sized>(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: serde_ext::Serialize,
{
iomap!(self
.write(b"{")
.and_then(|_| self.write_simple_string(variant))
.and_then(|_| self.write(b":")))
.and_then(|_| value.serialize(&mut *self))
.and_then(|_| iomap!(self.write(b"}")))
}
#[inline]
fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
if len == Some(0) {
iomap!(self.write(b"[]"))
} else {
iomap!(self.write(b"["))
}
.map(move |_| SerializeSeq {
s: self,
first: true,
})
}
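// Design note: `serialize_seq` emits the opening bracket eagerly ("[]" when the
// length is known to be zero), so `end()` only needs to close non-empty
// sequences; the `first` flag doubles as the "anything written yet?" marker
// that decides both the comma separators and whether `end()` writes `]`.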
#[inline]
fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple, Self::Error> {
self.serialize_seq(Some(len))
}
#[inline]
fn serialize_tuple_struct(
self,
_name: &'static str,
len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
self.serialize_seq(Some(len))
}
#[inline]
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
iomap!(self
.write(b"{")
.and_then(|_| self.write_simple_string(variant))
.and_then(|_| self.write(b":")))
.and_then(move |_| self.serialize_seq(Some(len)))
}
#[inline]
fn serialize_map(self, len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
if len == Some(0) {
iomap!(self.write(b"{}"))
} else {
iomap!(self.write(b"{"))
}
.map(move |_| SerializeMap {
s: self,
first: true,
})
}
#[inline]
fn serialize_struct(
self,
_name: &'static str,
len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
self.serialize_map(Some(len))
}
#[inline]
fn serialize_struct_variant(
self,
_name: &'static str,
_variant_index: u32,
variant: &'static str,
len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
iomap!(self
.write(b"{")
.and_then(|_| self.write_simple_string(variant))
.and_then(|_| self.write(b":")))
.and_then(move |_| self.serialize_map(Some(len)))
}
}
#[cfg(test)]
mod test {
use crate::{OwnedValue as Value, StaticNode};
use proptest::prelude::*;
#[cfg(not(feature = "128bit"))]
fn arb_json_value() -> BoxedStrategy<Value> {
let leaf = prop_oneof![
Just(Value::Static(StaticNode::Null)),
any::<bool>().prop_map(Value::from),
//(-1.0e306f64..1.0e306f64).prop_map(Value::from), // damn you float!
any::<i64>().prop_map(Value::from),
any::<u64>().prop_map(Value::from),
".*".prop_map(Value::from),
];
leaf.prop_recursive(
8, // 8 levels deep
256, // Shoot for maximum size of 256 nodes
10, // We put up to 10 items per collection
|inner| {
prop_oneof![
// Take the inner strategy and make the two recursive cases.
prop::collection::vec(inner.clone(), 0..10).prop_map(Value::from),
prop::collection::hash_map(".*", inner, 0..10).prop_map(Value::from),
]
},
)
.boxed()
}
#[cfg(feature = "128bit")]
fn arb_json_value() -> BoxedStrategy<Value> {
let leaf = prop_oneof![
Just(Value::Static(StaticNode::Null)),
any::<bool>().prop_map(Value::from),
//(-1.0e306f64..1.0e306f64).prop_map(Value::from), // damn you float!
any::<i64>().prop_map(Value::from),
any::<u64>().prop_map(Value::from),
any::<i128>().prop_map(Value::from),
any::<u128>().prop_map(Value::from),
".*".prop_map(Value::from),
];
leaf.prop_recursive(
8, // 8 levels deep
256, // Shoot for maximum size of 256 nodes
10, // We put up to 10 items per collection
|inner| {
prop_oneof![
// Take the inner strategy and make the two recursive cases.
prop::collection::vec(inner.clone(), 0..10).prop_map(Value::from),
prop::collection::hash_map(".*", inner, 0..10).prop_map(Value::from),
]
},
)
.boxed()
}
proptest! {
#![proptest_config(ProptestConfig {
// Setting both fork and timeout is redundant since timeout implies
// fork, but both are shown for clarity.
// Disabled for code coverage, enable to track bugs
// fork: true,
.. ProptestConfig::default()
})]
#[test]
fn prop_json_encode_decode(val in arb_json_value()) {
let mut encoded = crate::to_vec(&val).unwrap();
println!("{}", String::from_utf8_lossy(&encoded));
let res: Value = crate::from_slice(encoded.as_mut_slice()).expect("can't convert");
assert_eq!(val, res);
}
}
}
|
end
|
users.go
|
package tc
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import "database/sql"
import "encoding/json"
import "errors"
import "fmt"
import "github.com/apache/trafficcontrol/lib/go-rfc"
import "github.com/apache/trafficcontrol/lib/go-util"
import "github.com/go-ozzo/ozzo-validation"
import "github.com/go-ozzo/ozzo-validation/is"
// UserCredentials contains Traffic Ops login credentials
type UserCredentials struct {
Username string `json:"u"`
Password string `json:"p"`
}
// UserToken represents a request payload containing a UUID token for authentication
type UserToken struct {
Token string `json:"t"`
}
// UserV13 contains non-nullable TO user information
type UserV13 struct {
Username string `json:"username"`
PublicSSHKey string `json:"publicSshKey"`
Role int `json:"role"`
RoleName string `json:"rolename"`
ID int `json:"id"`
UID int `json:"uid"`
GID int `json:"gid"`
Company string `json:"company"`
Email string `json:"email"`
FullName string `json:"fullName"`
NewUser bool `json:"newUser"`
LastUpdated string `json:"lastUpdated"`
AddressLine1 string `json:"addressLine1"`
AddressLine2 string `json:"addressLine2"`
City string `json:"city"`
Country string `json:"country"`
PhoneNumber string `json:"phoneNumber"`
PostalCode string `json:"postalCode"`
RegistrationSent TimeNoMod `json:"registrationSent"`
StateOrProvince string `json:"stateOrProvince"`
Tenant string `json:"tenant"`
TenantID int `json:"tenantId"`
}
// commonUserFields is unexported, but its contents are still visible when it is embedded.
// LastUpdated is a new field for some structs.
type commonUserFields struct {
AddressLine1 *string `json:"addressLine1" db:"address_line1"`
AddressLine2 *string `json:"addressLine2" db:"address_line2"`
City *string `json:"city" db:"city"`
Company *string `json:"company" db:"company"`
Country *string `json:"country" db:"country"`
Email *string `json:"email" db:"email"`
FullName *string `json:"fullName" db:"full_name"`
GID *int `json:"gid"`
ID *int `json:"id" db:"id"`
NewUser *bool `json:"newUser" db:"new_user"`
PhoneNumber *string `json:"phoneNumber" db:"phone_number"`
PostalCode *string `json:"postalCode" db:"postal_code"`
PublicSSHKey *string `json:"publicSshKey" db:"public_ssh_key"`
Role *int `json:"role" db:"role"`
StateOrProvince *string `json:"stateOrProvince" db:"state_or_province"`
Tenant *string `json:"tenant"`
TenantID *int `json:"tenantId" db:"tenant_id"`
Token *string `json:"-" db:"token"`
UID *int `json:"uid"`
//Username *string `json:"username" db:"username"` //not including major change due to naming incompatibility
LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"`
}
// User fields in v14 have been updated to be nullable
type User struct {
Username *string `json:"username" db:"username"`
RegistrationSent *TimeNoMod `json:"registrationSent" db:"registration_sent"`
LocalPassword *string `json:"localPasswd,omitempty" db:"local_passwd"`
ConfirmLocalPassword *string `json:"confirmLocalPasswd,omitempty" db:"confirm_local_passwd"`
RoleName *string `json:"roleName,omitempty" db:"-"`
commonUserFields
}
// UserCurrent represents the profile for the authenticated user
type UserCurrent struct {
UserName *string `json:"username"`
LocalUser *bool `json:"localUser"`
RoleName *string `json:"roleName"`
commonUserFields
}
// CurrentUserUpdateRequest differs from a regular User/UserCurrent in that many of its fields are
// *parsed* but not *unmarshaled*. This allows a handler to distinguish between "null" and
// "undefined" values.
type CurrentUserUpdateRequest struct {
// User, for whatever reason, contains all of the actual data.
User CurrentUserUpdateRequestUser `json:"user"`
}
// CurrentUserUpdateRequestUser holds all of the actual data in a request to update the current user.
type CurrentUserUpdateRequestUser struct {
AddressLine1 json.RawMessage `json:"addressLine1"`
AddressLine2 json.RawMessage `json:"addressLine2"`
City json.RawMessage `json:"city"`
Company json.RawMessage `json:"company"`
ConfirmLocalPasswd *string `json:"confirmLocalPasswd"`
Country json.RawMessage `json:"country"`
Email json.RawMessage `json:"email"`
FullName json.RawMessage `json:"fullName"`
GID json.RawMessage `json:"gid"`
ID json.RawMessage `json:"id"`
LocalPasswd *string `json:"localPasswd"`
PhoneNumber json.RawMessage `json:"phoneNumber"`
PostalCode json.RawMessage `json:"postalCode"`
PublicSSHKey json.RawMessage `json:"publicSshKey"`
Role json.RawMessage `json:"role"`
StateOrProvince json.RawMessage `json:"stateOrProvince"`
TenantID json.RawMessage `json:"tenantId"`
UID json.RawMessage `json:"uid"`
Username json.RawMessage `json:"username"`
}
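// A minimal sketch (not part of the original API) of the null-vs-undefined
// distinction described above: an explicit JSON null decodes into a non-nil
// RawMessage holding the bytes `null`, while an absent key leaves the field nil.
func exampleNullVsUndefined() {
	var u CurrentUserUpdateRequestUser
	_ = json.Unmarshal([]byte(`{"city": null}`), &u)
	// "city" was present: u.City is non-nil, so a handler knows to clear it.
	// "company" was absent: u.Company is nil, so a handler must not touch it.
	fmt.Println(u.City != nil, u.Company == nil) // prints: true true
}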
// UnmarshalAndValidate validates the request and returns a User into which the request's information
// has been unmarshalled.
func (u *CurrentUserUpdateRequestUser) UnmarshalAndValidate(user *User) error {
errs := []error{}
if u.AddressLine1 != nil {
if err := json.Unmarshal(u.AddressLine1, &user.AddressLine1); err != nil {
errs = append(errs, fmt.Errorf("addressLine1: %v", err))
}
}
if u.AddressLine2 != nil {
if err := json.Unmarshal(u.AddressLine2, &user.AddressLine2); err != nil {
errs = append(errs, fmt.Errorf("addressLine2: %v", err))
}
}
if u.City != nil {
if err := json.Unmarshal(u.City, &user.City); err != nil {
errs = append(errs, fmt.Errorf("city: %v", err))
}
}
if u.Company != nil {
if err := json.Unmarshal(u.Company, &user.Company); err != nil {
errs = append(errs, fmt.Errorf("company: %v", err))
}
}
user.ConfirmLocalPassword = u.ConfirmLocalPasswd
user.LocalPassword = u.LocalPasswd
if u.Country != nil {
if err := json.Unmarshal(u.Country, &user.Country); err != nil {
errs = append(errs, fmt.Errorf("country: %v", err))
}
}
if u.Email != nil {
if err := json.Unmarshal(u.Email, &user.Email); err != nil {
errs = append(errs, fmt.Errorf("email: %v", err))
} else if err = validation.Validate(user.Email, is.Email); err != nil { // validate the pointer (ozzo indirects non-nil pointers); dereferencing would panic on an explicit null
errs = append(errs, err)
}
}
if u.FullName != nil {
if err := json.Unmarshal(u.FullName, &user.FullName); err != nil {
errs = append(errs, fmt.Errorf("fullName: %v", err))
} else if user.FullName == nil || *user.FullName == "" {
// Perl enforced this
errs = append(errs, fmt.Errorf("fullName: cannot be set to 'null' or empty string"))
}
}
if u.GID != nil {
if err := json.Unmarshal(u.GID, &user.GID); err != nil {
errs = append(errs, fmt.Errorf("gid: %v", err))
}
}
if u.ID != nil {
var uid int
if err := json.Unmarshal(u.ID, &uid); err != nil {
errs = append(errs, fmt.Errorf("id: %v", err))
} else if user.ID != nil && *user.ID != uid {
errs = append(errs, errors.New("id: cannot change user id"))
} else {
user.ID = &uid
}
}
if u.PhoneNumber != nil {
if err := json.Unmarshal(u.PhoneNumber, &user.PhoneNumber); err != nil {
errs = append(errs, fmt.Errorf("phoneNumber: %v", err))
}
}
if u.PostalCode != nil {
if err := json.Unmarshal(u.PostalCode, &user.PostalCode); err != nil {
errs = append(errs, fmt.Errorf("postalCode: %v", err))
}
}
if u.PublicSSHKey != nil {
if err := json.Unmarshal(u.PublicSSHKey, &user.PublicSSHKey); err != nil {
errs = append(errs, fmt.Errorf("publicSshKey: %v", err))
}
}
if u.Role != nil {
if err := json.Unmarshal(u.Role, &user.Role); err != nil {
errs = append(errs, fmt.Errorf("role: %v", err))
} else if user.Role == nil {
errs = append(errs, errors.New("role: cannot be null"))
}
}
if u.StateOrProvince != nil {
if err := json.Unmarshal(u.StateOrProvince, &user.StateOrProvince); err != nil {
errs = append(errs, fmt.Errorf("stateOrProvince: %v", err))
}
}
if u.TenantID != nil {
if err := json.Unmarshal(u.TenantID, &user.TenantID); err != nil {
errs = append(errs, fmt.Errorf("tenantID: %v", err))
} else if user.TenantID == nil {
errs = append(errs, errors.New("tenantID: cannot be null"))
}
}
if u.UID != nil {
if err := json.Unmarshal(u.UID, &user.UID); err != nil {
errs = append(errs, fmt.Errorf("uid: %v", err))
}
}
if u.Username != nil {
if err := json.Unmarshal(u.Username, &user.Username); err != nil {
errs = append(errs, fmt.Errorf("username: %v", err))
} else if user.Username == nil || *user.Username == "" {
errs = append(errs, errors.New("username: cannot be null or empty string"))
}
}
return util.JoinErrs(errs)
}
// ------------------- Response structs -------------------- //
// Response structs should only be used in the client //
// The client's use of these will eventually be deprecated //
// --------------------------------------------------------- //
type UsersResponseV13 struct {
Response []UserV13 `json:"response"`
}
type UsersResponse struct {
Response []User `json:"response"`
}
type CreateUserResponse struct {
Response User `json:"response"`
Alerts
}
type UpdateUserResponse struct {
Response User `json:"response"`
Alerts
}
type DeleteUserResponse struct {
Alerts
}
type UserCurrentResponse struct {
Response UserCurrent `json:"response"`
}
type UserDeliveryServiceDeleteResponse struct {
Alerts
}
type UserPasswordResetRequest struct {
Email rfc.EmailAddress `json:"email"`
}
// UserRegistrationRequest is the request submitted by operators when they want to register a new
// user.
type UserRegistrationRequest struct {
Email rfc.EmailAddress `json:"email"`
// Role - despite being named "Role" - is actually merely the *ID* of a Role to give the new user.
Role uint `json:"role"`
TenantID uint `json:"tenantId"`
}
// Validate implements the
// github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator interface.
func (urr *UserRegistrationRequest) Validate(tx *sql.Tx) error {
var errs = []error{}
if urr.Role == 0 {
errs = append(errs, errors.New("role: required and cannot be zero."))
}
if urr.TenantID == 0 {
errs = append(errs, errors.New("tenantId: required and cannot be zero."))
}
// This can only happen if an email isn't present in the request; the JSON parse handles actually
// invalid email addresses.
if urr.Email.Address.Address == "" {
errs = append(errs, errors.New("email: required"))
}
return util.JoinErrs(errs)
}
|
Midi_Pre_Processor.py
|
import pretty_midi
import glob
import os
import copy
from collections import Counter
from multiprocessing.dummy import Pool as ThreadPool
from tqdm import tqdm
# Import shared files
import sys
sys.path.append('..')
from Shared_Files.Global_Util import *
from Shared_Files.Constants import *
import warnings
warnings.filterwarnings("ignore")
class MidiPreProcessor:
"""
Reads across multiple Notes sets stores meta Notes on each
set and associated files for Notes analysis and model training.
"""
def __init__(self, path_to_full_data_set,
genre_sub_sample_set=sys.maxsize,
generate_validation=False):
"""
:param path_to_full_data_set:
Pass in a string path to the directory holding all dataset(s)
:param genre_sub_sample_set:
Parses each genre into a subset based on the passed integer value.
:param generate_validation:
Boolean to mark files to be used as validation
"""
# Progress-bar for threading-pool
self.__pbar = None
# ---
self.__all_possible_instr_note_pairs = set()
self.__all_possible_instr_note_pairs_counter = Counter()
self.__instr_note_pairs_dict = dict()
self.__all_instruments = set()
# Files to ignore when splitting data into train/test
self.__blacklisted_files_validation = set()
# Maps each genre to another dict that stores
# each file's metadata
self.__genre_file_dict = dict()
self.__genre_instr_note_counters = dict()
# Stores all corrupted files found
self.__corrupted_files_paths = []
# Store files that are too small (determined by the input sequence)
self.__small_files_paths = []
# Init encoders and decoders
self.__master_instr_note_encoder = dict()
self.__master_instr_note_decoder = dict()
self.__master_instr_encoder = dict()
self.__master_instr_decoder = dict()
self.__master_genre_encoder = dict()
self.__master_genre_decoder = dict()
# ---------------------------------
# Numeric counts
self.__total_file_count = 0
self.__total_intr_note_pair_size = 0
# Thread-pool the reading of each dataset's files
thread_pool_results = self.__thread_pool_datasets_reader(
self.__genre_dataset_init, path_to_full_data_set, genre_sub_sample_set)
# Init all data based on thread pool results
for genre_count, genre_dataset_result in enumerate(thread_pool_results):
# Add to set of all instr/note pairs
self.__all_possible_instr_note_pairs |= genre_dataset_result["genre_instr_note_pairs"]
# Add to set of all instruments
self.__all_instruments |= genre_dataset_result["genre_instruments"]
# Numeric value of non-unique total instr/note pairs
self.__total_intr_note_pair_size += genre_dataset_result[
"genre_size"]
# Store files based on the genre of songs
self.__genre_file_dict = {**self.__genre_file_dict,
**genre_dataset_result["genre_file_meta_data"]}
# Store counter object based on genre
self.__genre_instr_note_counters[genre_dataset_result[
"genre_name"]] = genre_dataset_result["genre_instr_note_pairs_counter"]
# Counter object of all possible instr/note
self.__all_possible_instr_note_pairs_counter += genre_dataset_result["genre_instr_note_pairs_counter"]
# ---
self.__corrupted_files_paths += genre_dataset_result[
"corrupted_files"]
self.__small_files_paths += genre_dataset_result["small_files"]
# Sort all data before encoding for my own sanity
self.__all_possible_instr_note_pairs = sorted(
self.__all_possible_instr_note_pairs)
self.__all_instruments = sorted(self.__all_instruments)
self.__instr_note_pairs_dict = {instr:[instr_note_pair
for instr_note_pair in self.__all_possible_instr_note_pairs
if instr_note_pair.find(instr) != -1]
for instr in self.__all_instruments}
# Begin creating label encoders and decoders
# -----
for label, (genre, _) in enumerate(
self.__genre_instr_note_counters.items()):
self.__master_genre_encoder[genre] = label + 1
self.__master_genre_decoder = {v: k for k, v
in self.__master_genre_encoder.items()}
# -----
for label, instr_note_pair in enumerate(
self.__all_possible_instr_note_pairs):
self.__master_instr_note_encoder[instr_note_pair] = label + 1
self.__master_instr_note_decoder = {v: k for k, v
in
self.__master_instr_note_encoder.items()}
# -----
for label, instr in enumerate(
self.__all_instruments):
self.__master_instr_encoder[instr] = label + 1
self.__master_instr_decoder = {v: k for k, v
in self.__master_instr_encoder.items()}
# -------------------------------------
# Corrupted files were found.
if self.__corrupted_files_paths:
print("The Pre Processor found {0} corrupted files".format(len(self.__corrupted_files_paths)))
print("Displaying all corrupted songs:\n")
for song in self.__corrupted_files_paths:
print("\t", song.split("/", 6)[-1])
print()
display_options_menu(menu_intro="Corrupted files found!\n"
"\tIt's fine if you don't delete"
" them.Just know the pre-processor"
" will not use them at all.",
menu_options={1: "Delete all corrupted files",
2: "Ignore"})
user_input = input("\nInput: ")
# Remove corrupted files
if user_input == "1":
self.delete_corrupted_files()
else:
pass
# ---------------------------------------------
# Small files were found.
if self.__small_files_paths:
print("The Pre Processor found {0} files that"
" are smaller or equal to than {1} Classical_Notes".format(
len(self.__small_files_paths),
MIDI_CONSTANTS.SMALL_FILE_CHECK))
print("Displaying all small songs:\n")
for song in self.__small_files_paths:
print("\t", song.split("/", 6)[-1])
print()
display_options_menu(menu_intro="Small files found!\n"
"\tIt's fine if you don't delete"
" them.Just know the pre-processor"
" will not use them at all.",
menu_options={1: "Delete all small files",
2: "Ignore"})
user_input = input("\nInput: ")
# Remove small files
if user_input == "1":
self.delete_small_files()
else:
pass
# ---------------------------------------------
if generate_validation:
# Marks files to be selected for validation
self.__generate_validation_files()
def __thread_pool_datasets_reader(self, func,
path_to_full_data_set,
genre_sub_sample_set):
"""
Thread pools out the dataset by genre
"""
# Get all folder paths for each genre
all_train_datasets_paths = [x[0] for x in os.walk(
path_to_full_data_set)]
all_train_datasets_paths.pop(0)
all_files_by_genre = []
for dataset_pth in all_train_datasets_paths:
dataset_files = [dataset_pth + "/" + file for file in
glob.glob1(dataset_pth, "*.mid")][:genre_sub_sample_set]
# Ensures files were actually extracted
if len(dataset_files):
self.__total_file_count += len(dataset_files)
all_files_by_genre.append(dataset_files)
# Init progress bar
self.__pbar = tqdm(total=self.__total_file_count)
# Begin threaded pool
pool = ThreadPool(HARDWARE_RELATED_CONSTANTS.THREAD_POOL_AMOUNT)
all_results = pool.imap_unordered(func,
all_files_by_genre)
# End threaded pool
pool.close()
pool.join()
self.__pbar.close()
self.__pbar = None
return all_results
def __genre_dataset_init(self, genre_train_files):
"""
Init full dataset attributes on MidiPreProcessor init
"""
# Store metadata on the file and genre-specific data
genre_instr_note_pairs = set()
genre_instr_note_pairs_counter = Counter()
genre_instruments = set()
genre_file_meta_data = dict()
genre_size = 0
# Store invalid file paths
corrupted_files = []
small_files = []
genre_name = genre_train_files[0].split("/")[-2].replace('_Midi', '')
for _, file in enumerate(genre_train_files):
# Update thread pool progress bar
self.__pbar.update(1)
self.__pbar.set_postfix_str(s=file.split("/", -1)[-1][:20],
refresh=True)
# Metadata on the file
midi_file_attr = self.read_midi_file(file)
# Check if flags were raised
if midi_file_attr["corrupted"]:
corrupted_files.append(file)
elif midi_file_attr["small_file_check"]:
small_files.append(file)
# File passed requirements; store metadata on genre and file
else:
genre_instruments |= midi_file_attr["instruments"]
genre_instr_note_pairs |= set(
midi_file_attr["flat_instr_note_seq"])
genre_size += midi_file_attr["flat_instr_note_seq_len"]
genre_file_meta_data[file] = {"flat_instr_note_seq":
midi_file_attr[
"flat_instr_note_seq"],
"flat_instr_note_seq_len":
midi_file_attr[
"flat_instr_note_seq_len"],
"instruments":
midi_file_attr[
"instruments"],}
genre_instr_note_pairs_counter += Counter(midi_file_attr["flat_instr_note_seq"])
return {"genre_name": genre_name,
"genre_size": genre_size,
"genre_instruments": genre_instruments,
"genre_instr_note_pairs": genre_instr_note_pairs,
"genre_instr_note_pairs_counter": genre_instr_note_pairs_counter,
"genre_file_meta_data": {genre_name: genre_file_meta_data},
"corrupted_files": corrupted_files,
"small_files": small_files,}
def __generate_validation_files(self):
"""
Mark files for the validation set
"""
self.__blacklisted_files_validation = set()
# Find the files that best fit the validation set for each genre
for genre_name, instr_note_counter in self.__genre_instr_note_counters.items():
genre_note_count = sum(instr_note_counter.values())
needed_validation_note_count = int(
(genre_note_count / self.__total_intr_note_pair_size) \
* genre_note_count)
note_count_file_dict = {file_meta_data["flat_instr_note_seq_len"]: file_name
for file_name, file_meta_data
in self.__genre_file_dict[
genre_name].items()}
note_count_file_list = list(note_count_file_dict.keys())
'''
The needed validation note count decreases by each chosen
file's note count; when it drops below this arbitrary
threshold, the validation set for this particular genre
is complete.
'''
while needed_validation_note_count > 25:
closest_file_note_count = find_nearest(
numbers=note_count_file_list,
target=needed_validation_note_count)
needed_validation_note_count -= closest_file_note_count
self.__blacklisted_files_validation.add(
note_count_file_dict[closest_file_note_count])
note_count_file_list.remove(closest_file_note_count)
def read_midi_file(self, file):
"""
Extract the instruments/notes of the midi file.
"""
# Attempt to parse midi file
try:
midi_data = pretty_midi.PrettyMIDI(file)
# Midi file couldn't be opened; Raise flag; return dummy dict
except Exception:
return {"flat_instr_note_seq": [],
"flat_instr_note_seq_len": 0,
"instruments": {},
"small_file_check": False,
"corrupted": True}
# Stores instrument note pair
flat_instr_note_seq = []
file_instruments = set()
# Move through the midi file; store data on each instrument/note
# relationship as a string
for instr in midi_data.instruments:
for note_obj in instr.notes:
program_instr_str = "Program" + PARAMETER_VAL_SPLITTER.STR + str(instr.program)\
+ INSTRUMENT_NOTE_SPLITTER.STR\
+ "Is_Drum" + PARAMETER_VAL_SPLITTER.STR + str(instr.is_drum)
file_instruments.add(program_instr_str)
flat_instr_note_seq.append(
(program_instr_str + INSTRUMENT_NOTE_SPLITTER.STR + "Note" + PARAMETER_VAL_SPLITTER.STR
+ pretty_midi.note_number_to_name(note_obj.pitch),
note_obj))
# ---
flat_instr_note_seq_len = len(flat_instr_note_seq)
# File is too small for our neural networks to take; raise flag
if flat_instr_note_seq_len <= MIDI_CONSTANTS.SMALL_FILE_CHECK:
return {"flat_instr_note_seq": flat_instr_note_seq,
"flat_instr_note_seq_len": flat_instr_note_seq_len,
"instruments": file_instruments,
"small_file_check": True,
"corrupted": False}
# Sort notes in proper sequence based on their start and end points
flat_instr_note_seq.sort(key=lambda tup: (tup[1].start, tup[1].end))
flat_instr_note_seq = [instr_note[0] for instr_note in
flat_instr_note_seq]
# Return dict for a more explicit multi return type
return {"flat_instr_note_seq": flat_instr_note_seq,
"flat_instr_note_seq_len": flat_instr_note_seq_len,
"instruments": file_instruments,
"small_file_check": False,
"corrupted": False}
# Delete the unused files from personal directory
def delete_corrupted_files(self):
for song in self.__corrupted_files_paths:
os.remove(song)
self.__corrupted_files_paths = []
def delete_small_files(self):
for song in self.__small_files_paths:
os.remove(song)
self.__small_files_paths = []
# --------------- Setters ---------------
def re_init_validation(self, new_file_list):
self.__blacklisted_files_validation = new_file_list
# --------------- Getters ---------------
def return_all_possible_instr_note_pairs(self):
return copy.deepcopy(self.__all_possible_instr_note_pairs)
def return_genre_instr_note_counters(self):
return copy.deepcopy(self.__genre_instr_note_counters)
def return_all_possible_instr_note_pairs_counter(self):
return copy.deepcopy(self.__all_possible_instr_note_pairs_counter)
# ----
def return_all_instruments(self):
return copy.deepcopy(self.__all_instruments)
def return_instr_note_pairs_dict(self):
return copy.deepcopy(self.__instr_note_pairs_dict)
# ----
def return_blacklisted_files_validation(self):
return copy.deepcopy(self.__blacklisted_files_validation)
def return_genre_file_dict(self):
return copy.deepcopy(self.__genre_file_dict)
# ----
def return_corrupted_files_paths(self):
return copy.deepcopy(self.__corrupted_files_paths)
def return_small_files_paths(self):
return copy.deepcopy(self.__small_files_paths)
# ----
def return_master_instr_note_encoder(self):
return copy.deepcopy(self.__master_instr_note_encoder)
def return_master_instr_note_decoder(self):
return copy.deepcopy(self.__master_instr_note_decoder)
# ----
def return_master_instr_encoder(self):
return copy.deepcopy(self.__master_instr_encoder)
def return_master_instr_decoder(self):
return copy.deepcopy(self.__master_instr_decoder)
# ----
def return_master_genre_encoder(self):
return copy.deepcopy(self.__master_genre_encoder)
def return_master_genre_decoder(self):
return copy.deepcopy(self.__master_genre_decoder)
# --------------- Basic Functionality ---------------
def encode_instr_note(self, instr_note_str):
return self.__master_instr_note_encoder[instr_note_str]
def encode_instr_note_seq(self, instr_note_seq):
return [self.__master_instr_note_encoder[instr_note_pair]
for instr_note_pair in instr_note_seq]
# ----
def decode_instr_note(self, instr_note_num):
return self.__master_instr_note_decoder[instr_note_num]
def decode_instr_note_seq(self, instr_note_seq):
return [self.__master_instr_note_decoder[instr_note_pair]
for instr_note_pair in instr_note_seq]
# ----
def encode_instr(self, instr_str):
return self.__master_instr_encoder[instr_str]
def decode_instr(self, instr_num):
return self.__master_instr_decoder[instr_num]
# ----
def encode_genre(self, genre_str):
return self.__master_genre_encoder[genre_str]
def decode_genre(self, genre_num):
return self.__master_genre_decoder[genre_num]
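# Minimal usage sketch (illustrative only; the dataset path and subsample
# size below are assumptions, not part of the original project):
if __name__ == "__main__":
    processor = MidiPreProcessor("../Datasets/Full_Data_Set",
                                 genre_sub_sample_set=50)
    pairs = processor.return_all_possible_instr_note_pairs()
    # Round-trip one instrument/note pair through the label encoders
    print(processor.decode_instr_note(processor.encode_instr_note(pairs[0])))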
|
main.go
|
/*
* MinIO Client (C) 2014, 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bufio"
"bytes"
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/cheggaaa/pb"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
"github.com/minio/mc/pkg/probe"
"github.com/minio/minio/pkg/words"
"github.com/pkg/profile"
"golang.org/x/crypto/ssh/terminal"
completeinstall "github.com/posener/complete/cmd/install"
)
var (
// global flags for mc.
mcFlags = []cli.Flag{}
)
// Help template for mc
var mcHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...]
COMMANDS:
{{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{if .VisibleFlags}}
GLOBAL FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
VERSION:
` + ReleaseTag +
`{{ "\n"}}{{range $key, $value := ExtraInfo}}
{{$key}}:
{{$value}}
{{end}}`
// Main starts mc application
func Main(args []string) {
if len(args) > 1 {
switch args[1] {
case "mc", "-install", "-uninstall":
mainComplete()
return
}
}
// Enable profiling supported modes are [cpu, mem, block].
// ``MC_PROFILER`` supported options are [cpu, mem, block].
switch os.Getenv("MC_PROFILER") {
case "cpu":
defer profile.Start(profile.CPUProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
case "mem":
defer profile.Start(profile.MemProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
case "block":
defer profile.Start(profile.BlockProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
}
probe.Init() // Set project's root source path.
probe.SetAppInfo("Release-Tag", ReleaseTag)
probe.SetAppInfo("Commit", ShortCommitID)
// Fetch terminal size, if not available, automatically
// set globalQuiet to true.
if w, e := pb.GetTerminalWidth(); e != nil {
globalQuiet = true
} else {
globalTermWidth = w
}
// Set the mc app name.
appName := filepath.Base(args[0])
// Run the app - exit on error.
if err := registerApp(appName).Run(args); err != nil {
os.Exit(1)
}
}
// Function invoked when invalid command is passed.
func commandNotFound(ctx *cli.Context, command string) {
msg := fmt.Sprintf("`%s` is not a mc command. See `mc --help`.", command)
closestCommands := findClosestCommands(command)
if len(closestCommands) > 0 {
msg += fmt.Sprintf("\n\nDid you mean one of these?\n")
if len(closestCommands) == 1 {
cmd := closestCommands[0]
msg += fmt.Sprintf(" `%s`", cmd)
} else {
for _, cmd := range closestCommands {
msg += fmt.Sprintf(" `%s`\n", cmd)
}
}
}
fatalIf(errDummy().Trace(), msg)
}
// Check for sane config environment early on and gracefully report.
func checkConfig() {
// Refresh the config once.
loadMcConfig = loadMcConfigFactory()
// Ensures config file is sane.
config, err := loadMcConfig()
// Verify if the path is accessible before validating the config
fatalIf(err.Trace(mustGetMcConfigPath()), "Unable to access configuration file.")
// Validate and print error messages
ok, errMsgs := validateConfigFile(config)
if !ok {
var errorMsg bytes.Buffer
for index, errMsg := range errMsgs {
// Print at most 10 errors
if index >= 10 {
break
}
errorMsg.WriteString(errMsg + "\n")
}
console.Fatal(errorMsg.String())
}
}
func migrate() {
// Fix broken config files if any.
fixConfig()
// Migrate config files if any.
migrateConfig()
// Migrate session files if any.
migrateSession()
// Migrate shared urls if any.
migrateShare()
}
// Get os/arch/platform specific information.
// Returns a map of current os/arch/platform/memstats.
func getSystemData() map[string]string {
host, e := os.Hostname()
fatalIf(probe.NewError(e), "Unable to determine the hostname.")
memstats := &runtime.MemStats{}
runtime.ReadMemStats(memstats)
mem := fmt.Sprintf("Used: %s | Allocated: %s | UsedHeap: %s | AllocatedHeap: %s",
pb.Format(int64(memstats.Alloc)).To(pb.U_BYTES),
pb.Format(int64(memstats.TotalAlloc)).To(pb.U_BYTES),
pb.Format(int64(memstats.HeapAlloc)).To(pb.U_BYTES),
pb.Format(int64(memstats.HeapSys)).To(pb.U_BYTES))
platform := fmt.Sprintf("Host: %s | OS: %s | Arch: %s", host, runtime.GOOS, runtime.GOARCH)
goruntime := fmt.Sprintf("Version: %s | CPUs: %s", runtime.Version(), strconv.Itoa(runtime.NumCPU()))
return map[string]string{
"PLATFORM": platform,
"RUNTIME": goruntime,
"MEM": mem,
}
}
// initMC - initialize 'mc'.
func initMC() {
// Check if mc config exists.
if !isMcConfigExists() {
err := saveMcConfig(newMcConfig())
fatalIf(err.Trace(), "Unable to save new mc config.")
if !globalQuiet && !globalJSON {
console.Infoln("Configuration written to `" + mustGetMcConfigPath() + "`. Please update your access credentials.")
}
}
// Check if mc session directory exists.
if !isSessionDirExists() {
fatalIf(createSessionDir().Trace(), "Unable to create session config directory.")
}
// Check if mc share directory exists.
if !isShareDirExists() {
initShareConfig()
}
// Check if certs dir exists
if !isCertsDirExists() {
fatalIf(createCertsDir().Trace(), "Unable to create `certs` directory.")
}
// Check if CAs dir exists
if !isCAsDirExists() {
fatalIf(createCAsDir().Trace(), "Unable to create `CAs` directory.")
}
// Load all authority certificates present in CAs dir
loadRootCAs()
}
func installAutoCompletion(ctx *cli.Context) {
if ctx.Bool("no-autocompletion") || ctx.GlobalBool("no-autocompletion") {
return
}
if globalQuiet || globalJSON || !terminal.IsTerminal(int(os.Stdout.Fd())) {
return
}
if runtime.GOOS == "windows" {
return
}
if completeinstall.IsInstalled("mc") {
return
}
for {
fmt.Printf("Install mc auto-completion in your shell ? (y/n): ")
reader := bufio.NewReader(os.Stdin)
char, _, err := reader.ReadRune()
if err != nil {
continue
}
switch char {
case 'y', 'Y':
// Install mc completion, ignore any error for now
err := completeinstall.Install("mc")
if err != nil {
errorIf(probe.NewError(err), "Unable to install mc auto-completion.")
} else {
console.Infoln("Auto-completion installed! Kindly restart your shell to load it.")
}
fallthrough
case 'n', 'N':
return
}
}
}
func registerBefore(ctx *cli.Context) error {
// Check if mc was compiled using a supported version of Golang.
checkGoVersion()
// Set the config directory.
setMcConfigDir(ctx.GlobalString("config-dir"))
// Migrate any old version of config / state files to newer format.
migrate()
// Set global flags.
setGlobalsFromContext(ctx)
// Initialize default config files.
initMC()
// Check if config can be read.
checkConfig()
// Install shell completions
installAutoCompletion(ctx)
return nil
}
// findClosestCommands to match a given string with commands trie tree.
func findClosestCommands(command string) []string {
var closestCommands []string
for _, value := range commandsTree.PrefixMatch(command) {
closestCommands = append(closestCommands, value.(string))
}
sort.Strings(closestCommands)
// Suggest other close commands - allow missed, wrongly added and even transposed characters
for _, value := range commandsTree.Walk(commandsTree.Root()) {
if sort.SearchStrings(closestCommands, value.(string)) < len(closestCommands) {
continue
}
// 2 is arbitrary and represents the max allowed number of typing errors
if words.DamerauLevenshteinDistance(command, value.(string)) < 2 {
closestCommands = append(closestCommands, value.(string))
}
}
return closestCommands
}
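// exampleClosestCommands is a hedged sketch (not part of mc) of the distance
// matching above: with one substituted character, "lz" is within a
// Damerau-Levenshtein distance of 2 of "ls", so "ls" would be suggested.
func exampleClosestCommands() []string {
	candidates := []string{"ls", "mb", "cp", "cat"}
	var out []string
	for _, c := range candidates {
		// distance < 2 allows a single missed, added, or transposed character
		if words.DamerauLevenshteinDistance("lz", c) < 2 {
			out = append(out, c)
		}
	}
	return out // ["ls"]
}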
// Check for updates and print a notification message
func checkUpdate(ctx *cli.Context) {
// Do not print update messages, if quiet flag is set.
if ctx.Bool("quiet") || ctx.GlobalBool("quiet") {
// Its OK to ignore any errors during doUpdate() here.
if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2 * time.Second); err == nil {
printMsg(updateMessage{
Status: "success",
Message: updateMsg,
})
} else {
printMsg(updateMessage{
Status: "success",
Message: prepareUpdateMessage("Run `mc update`", latestReleaseTime.Sub(currentReleaseTime)),
})
}
}
}
var appCmds = []cli.Command{
lsCmd,
mbCmd,
rbCmd,
catCmd,
headCmd,
pipeCmd,
shareCmd,
cpCmd,
mirrorCmd,
findCmd,
sqlCmd,
statCmd,
diffCmd,
rmCmd,
eventCmd,
watchCmd,
policyCmd,
adminCmd,
sessionCmd,
configCmd,
updateCmd,
versionCmd,
}
func registerApp(name string) *cli.App {
for _, cmd := range appCmds {
registerCmd(cmd)
}
cli.HelpFlag = cli.BoolFlag{
Name: "help, h",
Usage: "show help",
}
cli.BashCompletionFlag = cli.BoolFlag{
Name: "compgen",
Usage: "enables bash-completion for all commands and subcommands",
Hidden: true,
}
app := cli.NewApp()
app.Name = name
app.Action = func(ctx *cli.Context) {
if strings.HasPrefix(ReleaseTag, "RELEASE.") {
// Check for new updates from dl.min.io.
checkUpdate(ctx)
}
cli.ShowAppHelp(ctx)
}
app.Before = registerBefore
app.ExtraInfo = func() map[string]string {
if globalDebug {
return getSystemData()
}
return make(map[string]string)
}
app.HideHelpCommand = true
app.Usage = "MinIO Client for cloud storage and filesystems."
app.Commands = commands
app.Author = "MinIO, Inc."
app.Version = ReleaseTag
app.Flags = append(mcFlags, globalFlags...)
app.CustomAppHelpTemplate = mcHelpTemplate
app.CommandNotFound = commandNotFound // handler function declared above.
app.EnableBashCompletion = true
return app
}
// mustGetProfileDir must get the directory that the profile will be written to.
func mustGetProfileDir() string {
return filepath.Join(mustGetMcConfigDir(), globalProfileDir)
}
|
provider-day-availability-controller.ts
|
import { Request, Response } from 'express';
import { container } from 'tsyringe';
import { ListProviderDayAvailabilityService } from '@modules/appointments/services/list-provider-day-availability-service';
export class ProviderDayAvailabilityController {
public async index(request: Request, response: Response): Promise<Response> {
const { provider_id } = request.params;
const { day, month, year } = request.body;
const listProviderDayAvailability = container.resolve(
ListProviderDayAvailabilityService,
);
const availability = await listProviderDayAvailability.execute({
provider_id,
day,
month,
year,
});
return response.json(availability);
}
}
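// Hedged usage sketch (the route path and router setup are assumptions,
// not part of the original file):
// providerDayAvailabilityRouter.get(
//   '/:provider_id/day-availability',
//   (req, res) => new ProviderDayAvailabilityController().index(req, res),
// );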
|
lens.rs
|
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub(crate) fn reflens_structure_crate_output_list_data_ingestion_jobs_output_next_token(
input: &crate::output::ListDataIngestionJobsOutput,
) -> std::option::Option<&std::string::String> {
let input = match &input.next_token {
None => return None,
Some(t) => t,
};
Some(input)
}
pub(crate) fn reflens_structure_crate_output_list_datasets_output_next_token(
input: &crate::output::ListDatasetsOutput,
) -> std::option::Option<&std::string::String> {
let input = match &input.next_token {
None => return None,
Some(t) => t,
};
Some(input)
}
pub(crate) fn reflens_structure_crate_output_list_inference_executions_output_next_token(
input: &crate::output::ListInferenceExecutionsOutput,
) -> std::option::Option<&std::string::String> {
let input = match &input.next_token {
None => return None,
Some(t) => t,
};
Some(input)
}
pub(crate) fn reflens_structure_crate_output_list_inference_schedulers_output_next_token(
input: &crate::output::ListInferenceSchedulersOutput,
) -> std::option::Option<&std::string::String> {
let input = match &input.next_token {
None => return None,
Some(t) => t,
};
Some(input)
}
pub(crate) fn reflens_structure_crate_output_list_models_output_next_token(
input: &crate::output::ListModelsOutput,
) -> std::option::Option<&std::string::String> {
let input = match &input.next_token {
None => return None,
Some(t) => t,
};
Some(input)
}
|
server.go
|
package main
import (
"context"
"flag"
"fmt"
"log"
"net"
"google.golang.org/grpc"
"github.com/shijuvar/go-distributed-sys/eventstream/pb"
)
var (
port = flag.Int("port", 50051, "The server port")
)
// server is used to implement pb.EventStoreServer interface
type server struct {
pb.UnimplementedEventStoreServer
}
// CreateEvent creates a new event in the event store
func (s *server) CreateEvent(context.Context, *pb.CreateEventRequest) (*pb.CreateEventResponse, error) {
return nil, nil
}
// GetEvents returns all events for the given aggregate and event
func (s *server) GetEvents(context.Context, *pb.GetEventsRequest) (*pb.GetEventsResponse, error) {
return nil, nil
}
// GetEventsStream streams events for the given event
func (s *server) GetEventsStream(*pb.GetEventsRequest, pb.EventStore_GetEventsStreamServer) error {
return nil
}
func main() {
flag.Parse()
lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
s := grpc.NewServer()
pb.RegisterEventStoreServer(s, &server{})
log.Printf("server listening at %v", lis.Addr())
if err := s.Serve(lis); err != nil {
log.Fatalf("failed to serve: %v", err)
}
}
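// exampleClient is a minimal sketch (not part of the original file) showing
// how a client might call this server; the address mirrors the default port.
func exampleClient() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	client := pb.NewEventStoreClient(conn)
	if _, err := client.CreateEvent(context.Background(), &pb.CreateEventRequest{}); err != nil {
		log.Fatalf("CreateEvent failed: %v", err)
	}
}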
|
NestedBigIntNullableFilter.ts
|
import * as TypeGraphQL from "type-graphql";
import * as GraphQLScalars from "graphql-scalars";
import { Prisma } from "../../../client";
import { DecimalJSScalar } from "../../scalars";
@TypeGraphQL.InputType("NestedBigIntNullableFilter", {
isAbstract: true
})
export class NestedBigIntNullableFilter {
@TypeGraphQL.Field(_type => GraphQLScalars.BigIntResolver, {
nullable: true
})
equals?: bigint | undefined;
@TypeGraphQL.Field(_type => [GraphQLScalars.BigIntResolver], {
nullable: true
})
in?: bigint[] | undefined;
@TypeGraphQL.Field(_type => [GraphQLScalars.BigIntResolver], {
nullable: true
})
notIn?: bigint[] | undefined;
@TypeGraphQL.Field(_type => GraphQLScalars.BigIntResolver, {
nullable: true
})
lt?: bigint | undefined;
@TypeGraphQL.Field(_type => GraphQLScalars.BigIntResolver, {
nullable: true
})
lte?: bigint | undefined;
@TypeGraphQL.Field(_type => GraphQLScalars.BigIntResolver, {
nullable: true
})
gt?: bigint | undefined;
@TypeGraphQL.Field(_type => GraphQLScalars.BigIntResolver, {
nullable: true
})
gte?: bigint | undefined;
@TypeGraphQL.Field(_type => NestedBigIntNullableFilter, {
nullable: true
})
not?: NestedBigIntNullableFilter | undefined;
}
|
update_monitor.rs
|
//! # Examples
//!
//! How to monitor if there's a new update and install it.
//! Only available for Flatpak applications.
//!
//! ```rust,no_run
//! use ashpd::{flatpak::FlatpakProxy, WindowIdentifier};
//!
//! async fn run() -> ashpd::Result<()> {
//! let proxy = FlatpakProxy::new().await?;
//!
//! let monitor = proxy.create_update_monitor().await?;
//! let info = monitor.receive_update_available().await?;
//!
//! monitor.update(&WindowIdentifier::default()).await?;
//! let progress = monitor.receive_progress().await?;
//! println!("{:#?}", progress);
//!
//! Ok(())
//! }
//! ```
use serde_repr::{Deserialize_repr, Serialize_repr};
use zbus::zvariant::{DeserializeDict, ObjectPath, SerializeDict, Type};
use super::DESTINATION;
use crate::{
helpers::{call_method, receive_signal, session_connection},
Error, WindowIdentifier,
};
#[derive(SerializeDict, DeserializeDict, Type, Debug, Default)]
/// Specified options for an [`UpdateMonitorProxy::update`] request.
///
/// There are currently no options.
#[zvariant(signature = "dict")]
struct UpdateOptions {}
#[derive(SerializeDict, DeserializeDict, Type, Debug)]
/// A response containing the update information when an update is available.
#[zvariant(signature = "dict")]
pub struct UpdateInfo {
#[zvariant(rename = "running-commit")]
/// The currently running OSTree commit.
running_commit: String,
#[zvariant(rename = "local-commit")]
/// The locally installed OSTree commit.
local_commit: String,
#[zvariant(rename = "remote-commit")]
/// The available commit to install.
remote_commit: String,
}
impl UpdateInfo {
pub fn running_commit(&self) -> &str {
&self.running_commit
}
pub fn local_commit(&self) -> &str {
&self.local_commit
}
pub fn remote_commit(&self) -> &str {
&self.remote_commit
}
}
#[derive(Serialize_repr, Deserialize_repr, PartialEq, Eq, Copy, Clone, Debug, Type)]
#[repr(u32)]
/// The update status.
pub enum UpdateStatus {
#[doc(alias = "XDP_UPDATE_STATUS_RUNNING")]
/// Running.
Running = 0,
#[doc(alias = "XDP_UPDATE_STATUS_EMPTY")]
/// No update to install.
Empty = 1,
#[doc(alias = "XDP_UPDATE_STATUS_DONE")]
/// Done.
Done = 2,
#[doc(alias = "XDP_UPDATE_STATUS_FAILED")]
/// Failed.
Failed = 3,
}
#[derive(SerializeDict, DeserializeDict, Type, Debug)]
/// A response of the update progress signal.
#[zvariant(signature = "dict")]
pub struct UpdateProgress {
/// The number of operations that the update consists of.
pub n_ops: Option<u32>,
/// The position of the currently active operation.
pub op: Option<u32>,
/// The progress of the currently active operation, as a number between 0
/// and 100.
pub progress: Option<u32>,
/// The overall status of the update.
pub status: Option<UpdateStatus>,
/// The error name, sent when status is `UpdateStatus::Failed`.
pub error: Option<String>,
/// The error message, sent when status is `UpdateStatus::Failed`.
pub error_message: Option<String>,
}
/// The interface exposes some interactions with Flatpak on the host to the
/// sandbox. For example, it allows you to restart the applications or start a
/// more sandboxed instance.
///
/// Wrapper of the DBus interface: [`org.freedesktop.portal.Flatpak.UpdateMonitor`](https://flatpak.github.io/xdg-desktop-portal/index.html#gdbus-org.freedesktop.portal.Flatpak.UpdateMonitor).
#[derive(Debug)]
#[doc(alias = "org.freedesktop.portal.Flatpak.UpdateMonitor")]
pub struct UpdateMonitorProxy<'a>(zbus::Proxy<'a>);
impl<'a> UpdateMonitorProxy<'a> {
/// Create a new instance of [`UpdateMonitorProxy`].
///
/// **Note** An [`UpdateMonitorProxy`] is not supposed to be created
/// manually.
pub(crate) async fn new(path: ObjectPath<'a>) -> Result<UpdateMonitorProxy<'a>, Error> {
let connection = session_connection().await?;
let proxy = zbus::ProxyBuilder::new_bare(&connection)
.interface("org.freedesktop.portal.Flatpak.UpdateMonitor")?
.path(path)?
.destination(DESTINATION)?
.build()
.await?;
Ok(Self(proxy))
}
/// Get a reference to the underlying Proxy.
pub fn inner(&self) -> &zbus::Proxy<'_> {
&self.0
}
/// A signal received when there's progress during the application update.
///
/// # Specifications
///
/// See also [`Progress`](https://flatpak.github.io/xdg-desktop-portal/index.html#gdbus-signal-org-freedesktop-portal-Flatpak-UpdateMonitor.Progress).
#[doc(alias = "Progress")]
#[doc(alias = "XdpPortal::update-progress")]
pub async fn receive_progress(&self) -> Result<UpdateProgress, Error> {
receive_signal(self.inner(), "Progress").await
}
/// A signal received when there's an application update.
///
/// # Specifications
///
/// See also [`UpdateAvailable`](https://flatpak.github.io/xdg-desktop-portal/index.html#gdbus-signal-org-freedesktop-portal-Flatpak-UpdateMonitor.UpdateAvailable).
#[doc(alias = "UpdateAvailable")]
#[doc(alias = "XdpPortal::update-available")]
pub async fn receive_update_available(&self) -> Result<UpdateInfo, Error> {
receive_signal(self.inner(), "UpdateAvailable").await
}
/// Asks to install an update of the calling app.
///
/// **Note** updates are only allowed if the new version has the same
/// permissions (or less) than the currently installed version.
///
/// # Specifications
///
/// See also [`Update`](https://flatpak.github.io/xdg-desktop-portal/index.html#gdbus-method-org-freedesktop-portal-Flatpak-UpdateMonitor.Update).
#[doc(alias = "Update")]
#[doc(alias = "xdp_portal_update_install")]
pub async fn update(&self, identifier: &WindowIdentifier) -> Result<(), Error> {
let options = UpdateOptions::default();
call_method(self.inner(), "Update", &(&identifier, options)).await
}
/// Ends the update monitoring and cancels any ongoing installation.
///
/// # Specifications
///
/// See also [`Close`](https://flatpak.github.io/xdg-desktop-portal/index.html#gdbus-method-org-freedesktop-portal-Flatpak-UpdateMonitor.Close).
#[doc(alias = "Close")]
pub async fn close(&self) -> Result<(), Error> {
call_method(self.inner(), "Close", &()).await
}
}
|
bitcoin_kk_KZ.ts
|
<TS language="kk_KZ" version="2.1">
<context>
<name>AddressBookPage</name>
<message>
<source>Create a new address</source>
<translation>Жаңа адрес енгізу</translation>
</message>
<message>
<source>&New</source>
<translation>Жаңа</translation>
</message>
<message>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Таңдаған адресті тізімнен жою</translation>
</message>
<message>
<source>C&lose</source>
<translation>Жабу</translation>
</message>
<message>
<source>&Export</source>
<translation>Экспорт</translation>
</message>
<message>
<source>&Delete</source>
<translation>Жою</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<source>Enter passphrase</source>
<translation>Құпия сөзді енгізу</translation>
</message>
<message>
<source>New passphrase</source>
<translation>Жаңа құпия сөзі</translation>
</message>
<message>
<source>Repeat new passphrase</source>
<translation>Жаңа құпия сөзді қайта енгізу</translation>
</message>
</context>
<context>
<name>BanTableModel</name>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<source>&Transactions</source>
<translation>&Транзакциялар</translation>
</message>
<message>
<source>E&xit</source>
<translation>Шығу</translation>
</message>
<message>
<source>&Options...</source>
<translation>Параметрлері</translation>
</message>
<message>
<source>&Backup Wallet...</source>
<translation>Әмиянды жасыру</translation>
</message>
<message>
<source>&Change Passphrase...</source>
<translation>Құпия сөзді өзгерту</translation>
</message>
<message>
<source>Eirebit</source>
<translation>Биткоин</translation>
</message>
<message>
<source>Wallet</source>
<translation>Әмиян</translation>
</message>
<message>
<source>&Send</source>
<translation>Жіберу</translation>
</message>
<message>
<source>&Receive</source>
<translation>Алу</translation>
</message>
<message>
<source>&File</source>
<translation>Файл</translation>
</message>
<message>
<source>&Help</source>
<translation>Көмек</translation>
</message>
<message>
<source>%1 behind</source>
<translation>%1 қалмады</translation>
</message>
<message>
<source>Error</source>
<translation>қате</translation>
</message>
<message>
<source>Warning</source>
<translation>Ескерту</translation>
</message>
<message>
<source>Information</source>
<translation>Информация</translation>
</message>
<message>
<source>Up to date</source>
<translation>Жаңартылған</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<source>Amount:</source>
<translation>Саны</translation>
</message>
<message>
<source>Fee:</source>
<translation>Комиссия</translation>
</message>
<message>
<source>Dust:</source>
<translation>Шаң</translation>
</message>
<message>
<source>After Fee:</source>
<translation>Комиссия алу кейін</translation>
</message>
<message>
<source>Amount</source>
<translation>Саны</translation>
</message>
<message>
<source>Date</source>
<translation>Күні</translation>
</message>
<message>
<source>Confirmations</source>
<translation>Растау саны</translation>
</message>
<message>
<source>Confirmed</source>
<translation>Растық</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<source>&Label</source>
<translation>таңба</translation>
</message>
<message>
<source>&Address</source>
<translation>Адрес</translation>
</message>
</context>
<context>
<name>FreespaceChecker</name>
</context>
<context>
<name>HelpMessageDialog</name>
</context>
<context>
<name>Intro</name>
<message>
<source>Error</source>
<translation>қате</translation>
</message>
</context>
<context>
<name>ModalOverlay</name>
</context>
<context>
<name>OpenURIDialog</name>
</context>
<context>
<name>OptionsDialog</name>
<message>
<source>W&allet</source>
<translation>Әмиян</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
</context>
<context>
<name>PaymentServer</name>
</context>
<context>
<name>PeerTableModel</name>
</context>
<context>
<name>QObject</name>
<message>
<source>Amount</source>
<translation>Саны</translation>
</message>
<message>
<source>%1 and %2</source>
<translation>%1 немесе %2</translation>
</message>
</context>
<context>
<name>QObject::QObject</name>
</context>
<context>
<name>QRImageWidget</name>
</context>
<context>
<name>RPCConsole</name>
<message>
<source>&Information</source>
<translation>Информация</translation>
</message>
</context>
<context>
<name>ReceiveCoinsDialog</name>
<message>
<source>&Amount:</source>
<translation>Саны</translation>
</message>
</context>
<context>
<name>ReceiveRequestDialog</name>
</context>
<context>
<name>RecentRequestsTableModel</name>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<source>Amount:</source>
<translation>Саны</translation>
</message>
<message>
<source>Fee:</source>
<translation>Комиссия:</translation>
</message>
<message>
<source>After Fee:</source>
<translation>Комиссия алу кейін:</translation>
</message>
<message>
<source>Dust:</source>
<translation>Шаң</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<source>A&mount:</source>
<translation>Саны</translation>
</message>
</context>
<context>
<name>SendConfirmationDialog</name>
</context>
<context>
<name>ShutdownWindow</name>
</context>
<context>
<name>SignVerifyMessageDialog</name>
</context>
<context>
<name>SplashScreen</name>
</context>
<context>
<name>TrafficGraphWidget</name>
</context>
<context>
<name>TransactionDesc</name>
</context>
<context>
<name>TransactionDescDialog</name>
</context>
<context>
<name>TransactionTableModel</name>
</context>
<context>
<name>TransactionView</name>
</context>
<context>
<name>UnitDisplayStatusBarControl</name>
</context>
<context>
<name>WalletFrame</name>
</context>
<context>
<name>WalletModel</name>
</context>
<context>
<name>WalletView</name>
</context>
<context>
<name>bitcoin-core</name>
<message>
<source>Information</source>
<translation>Информация</translation>
</message>
<message>
<source>Transaction amount too small</source>
<translation>Транзакция өте кішкентай</translation>
</message>
<message>
<source>Transaction too large</source>
<translation>Транзакция өте үлкен</translation>
</message>
<message>
<source>Warning</source>
<translation>Ескерту</translation>
</message>
<message>
<source>Error</source>
<translation>қате</translation>
</message>
</context>
</TS>
|
__init__.py
|
from polygraphy.tools.base import Tool
from polygraphy.tools.registry import TOOL_REGISTRY
|
models.py
|
from __future__ import unicode_literals
from django.db import models
from django import forms
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from django.utils import timezone
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
views = models.IntegerField(default=0)
likes = models.IntegerField(default=0)
slug = models.SlugField(unique=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
if self.views < 0:
self.views = 0
super(Category, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'categories'
class Page(models.Model):
category = models.ForeignKey(Category)
title = models.CharField(max_length=128)
url = models.URLField()
views = models.IntegerField(default=0)
first_visit = models.DateTimeField(default=timezone.now)
last_visit = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.title
class UserProfile(models.Model):
# This line is required. Links UserProfile to a User model instance.
user = models.OneToOneField(User)
# The additional attributes we wish to include.
website = models.URLField(blank=True)
picture = models.ImageField(upload_to='profile_images', blank=True)
# Override the __str__() method to return something meaningful!
def __str__(self):
return self.user.username
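# Minimal usage sketch (illustrative; run inside a configured Django project):
# c = Category.objects.create(name="Python Basics", views=3)
# assert c.slug == "python-basics"  # set automatically by the save() override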
|
command.rs
|
// LNP Node: node running lightning network protocol and generalized lightning
// channels.
// Written in 2020 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
use std::{
convert::TryFrom,
io::{self, Read, Write},
sync::mpsc,
time::Duration,
};
use std::{str::FromStr, thread::sleep};
use internet2::{NodeAddr, RemoteSocketAddr, ToNodeAddr};
use lnp::{message, LIGHTNING_P2P_DEFAULT_PORT};
use microservices::shell::Exec;
use farcaster_core::{
blockchain::Network,
negotiation::PublicOffer,
role::{SwapRole, TradeRole},
swap::SwapId,
};
use strict_encoding::ReadExt;
use super::Command;
use crate::rpc::{request, Client, Request};
use crate::{Error, LogStyle, ServiceId};
impl Exec for Command {
type Client = Client;
type Error = Error;
fn exec(self, runtime: &mut Self::Client) -> Result<(), Self::Error> {
debug!("Performing {:?}: {}", self, self);
match self {
Command::Info { subject } => {
if let Some(subj) = subject {
if let Ok(node_addr) = NodeAddr::from_str(&subj) {
runtime.request(ServiceId::Peer(node_addr), Request::GetInfo)?;
} else if let Ok(swap_id) = SwapId::from_str(&subj) {
runtime.request(ServiceId::Swap(swap_id), Request::GetInfo)?;
} else {
let err = format!(
"{}",
"Subject parameter must be either remote node \
address or channel id represented by a hex string"
.err()
);
return Err(Error::Other(err));
}
} else {
// subject is none
runtime.request(ServiceId::Farcasterd, Request::GetInfo)?;
}
match runtime.response()? {
Request::NodeInfo(info) => println!("{}", info),
Request::PeerInfo(info) => println!("{}", info),
Request::SwapInfo(info) => println!("{}", info),
_ => {
return Err(Error::Other(
"Server returned unrecognizable response".to_string(),
))
}
}
}
Command::Peers => {
runtime.request(ServiceId::Farcasterd, Request::ListPeers)?;
runtime.report_response()?;
}
Command::ListSwaps => {
runtime.request(ServiceId::Farcasterd, Request::ListSwaps)?;
runtime.report_response()?;
}
// TODO: only list offers matching list of OfferIds
Command::ListOffers => {
runtime.request(ServiceId::Farcasterd, Request::ListOffers)?;
runtime.report_response()?;
}
// Command::ListOfferIds => {
// runtime.request(ServiceId::Farcasterd, Request::ListOfferIds)?;
// runtime.report_response()?;
// }
Command::Make {
network,
arbitrating_blockchain,
accordant_blockchain,
arbitrating_amount,
accordant_amount,
arbitrating_addr,
accordant_addr,
cancel_timelock,
punish_timelock,
fee_strategy,
maker_role,
public_ip_addr,
bind_ip_addr,
port,
overlay,
} => {
if network != Network::Testnet && network != Network::Local {
eprintln!(
"Error: {} not yet supported. Only Testnet and Local currently enabled, for your funds safety",
network
);
return Ok(());
}
if accordant_amount < monero::Amount::from_str("0.001 XMR").unwrap() {
eprintln!(
"Error: monero amount {} too low, require at least 0.001 XMR",
accordant_amount
);
return Ok(());
}
let offer = farcaster_core::negotiation::Offer {
network,
arbitrating_blockchain,
accordant_blockchain,
arbitrating_amount,
accordant_amount,
cancel_timelock,
punish_timelock,
fee_strategy,
maker_role,
};
let public_addr = RemoteSocketAddr::with_ip_addr(overlay, public_ip_addr, port);
let bind_addr = RemoteSocketAddr::with_ip_addr(overlay, bind_ip_addr, port);
let proto_offer = request::ProtoPublicOffer {
offer,
public_addr,
bind_addr,
peer_secret_key: None,
arbitrating_addr,
accordant_addr: accordant_addr.to_string(),
};
runtime.request(ServiceId::Farcasterd, Request::MakeOffer(proto_offer))?;
// report success or failure of the request to cli
runtime.report_progress()?;
// TODO: activate when we do client side offer validation, must
// be activated on farcasterd as well
// let public_offer = runtime.response()?;
// let instruction =
// format!("Share the following offer with taker:",);
// let hex = format!("{:?}", &public_offer);
// println!("{} \n {}", instruction.green_bold(),
// hex.bright_yellow_bold());
}
Command::Take {
public_offer,
bitcoin_address,
monero_address,
without_validation,
} => {
// println!("{:#?}", &public_offer);
let PublicOffer {
version: _,
offer,
node_id,
peer_address,
} = public_offer.clone();
if !without_validation {
let taker_role = offer.maker_role.other();
let arb_amount = offer.arbitrating_amount;
let acc_amount = offer.accordant_amount;
println!(
"\nWant to buy {}?\n\nCarefully validate offer!\n",
match taker_role {
SwapRole::Alice => format!("{} for {}", arb_amount, acc_amount),
SwapRole::Bob => format!("{} for {}", acc_amount, arb_amount),
}
);
println!("Trade counterparty: {}@{}\n", &node_id, peer_address);
println!("{}\n", offer);
}
if without_validation || take_offer() {
// pass offer to farcasterd to initiate the swap
runtime.request(
ServiceId::Farcasterd,
Request::TakeOffer(
(public_offer, bitcoin_address, monero_address.to_string()).into(),
),
)?;
// report success or failure of the request to cli
runtime.report_progress()?;
}
}
Command::Progress { swapid } => {
runtime.request(ServiceId::Farcasterd, Request::ReadProgress(swapid))?;
runtime.report_progress()?;
}
}
Ok(())
}
}
fn take_offer() -> bool {
println!("Take it? [y/n]");
let mut input = [0u8; 1];
std::io::stdin().read_exact(&mut input).unwrap_or(());
match std::str::from_utf8(&input[..]) {
Ok("y") | Ok("Y") => true,
Ok("n") | Ok("N") => {
println!("Rejecting offer");
false
}
_ => take_offer(),
}
}
|
Gauge.js
|
import {t} from '@lingui/macro'
import {Trans} from '@lingui/react'
import Color from 'color'
import React, {Fragment} from 'react'
import TimeLineChart from 'components/ui/TimeLineChart'
import ACTIONS from 'data/ACTIONS'
import JOBS from 'data/JOBS'
import Module from 'parser/core/Module'
import {TieredSuggestion, SEVERITY} from 'parser/core/modules/Suggestions'
import {DualStatistic} from 'parser/jobs/rdm/statistics/DualStatistic'
//import {getCooldownRemaining} from 'parser/core/modules/Cooldowns'
//import {ActionLink} from 'components/ui/DbLink'
//TODO: Should possibly look into different Icons for things in Suggestions
//Mana Gains and Expenditures
export const MANA_GAIN = {
[ACTIONS.VERSTONE.id]: {white: 9, black: 0},
[ACTIONS.VERFIRE.id]: {white: 0, black: 9},
|
[ACTIONS.VERTHUNDER.id]: {white: 0, black: 11},
[ACTIONS.VERHOLY.id]: {white: 21, black: 0},
[ACTIONS.VERFLARE.id]: {white: 0, black: 21},
[ACTIONS.JOLT.id]: {white: 3, black: 3},
[ACTIONS.JOLT_II.id]: {white: 3, black: 3},
[ACTIONS.VERAERO_II.id]: {white: 7, black: 0},
[ACTIONS.VERTHUNDER_II.id]: {white: 0, black: 7},
[ACTIONS.IMPACT.id]: {white: 3, black: 3},
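// Enchanted melee actions spend mana, hence the negative values below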
[ACTIONS.ENCHANTED_RIPOSTE.id]: {white: -30, black: -30},
[ACTIONS.ENCHANTED_ZWERCHHAU.id]: {white: -25, black: -25},
[ACTIONS.ENCHANTED_REDOUBLEMENT.id]: {white: -25, black: -25},
[ACTIONS.ENCHANTED_MOULINET.id]: {white: -20, black: -20},
[ACTIONS.ENCHANTED_REPRISE.id]: {white: -5, black: -5},
[ACTIONS.SCORCH.id]: {white: 7, black: 7},
}
export const SEVERITY_WASTED_MANA = {
1: SEVERITY.MINOR,
20: SEVERITY.MEDIUM,
80: SEVERITY.MAJOR,
}
export const SEVERITY_LOST_MANA = {
1: SEVERITY.MINOR,
20: SEVERITY.MEDIUM,
80: SEVERITY.MAJOR,
}
export const MANA_DIFFERENCE_THRESHOLD = 30
const MANA_LOST_DIVISOR = 2
export const MANA_CAP = 100
const MANAFICATION_MULTIPLIER = 2
const MANA_FLOOR = 0
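// Tracks the white/black mana deltas produced by a single cast, including
// losses to overcap, imbalance, misses/invulnerable targets, and Manafication.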
class GaugeAction {
mana = {
white: {
beforeCast: 0,
afterCast: 0,
overCapLoss: 0,
imbalanceLoss: 0,
invulnLoss: 0,
manaficationLoss: 0,
},
black: {
beforeCast: 0,
afterCast: 0,
overCapLoss: 0,
imbalanceLoss: 0,
invulnLoss: 0,
manaficationLoss: 0,
},
}
constructor(startingWhite, startingBlack) {
this.mana.white.beforeCast = startingWhite
this.mana.black.beforeCast = startingBlack
this.mana.white.afterCast = startingWhite
this.mana.black.afterCast = startingBlack
}
calculateManaFicationManaGained() {
this.mana.white.afterCast = this.mana.white.beforeCast * MANAFICATION_MULTIPLIER
this.mana.black.afterCast = this.mana.black.beforeCast * MANAFICATION_MULTIPLIER
this.calculateManaOvercap(true)
}
calculateCastManaGained(event) {
//Determine if the ability we used should yield any mana gain.
//console.log(`White: ${this._whiteMana}, Black: ${this._blackMana}`)
//console.log(`Ability: ${event.ability.name}, timestamp: ${this.parser.formatTimestamp(event.timestamp)}`)
const abilityId = event.ability.guid
const {white, black} = MANA_GAIN[abilityId] || {}
if (white || black) {
if (!event.successfulHit) {
// Melee combo skills will still consume mana but will not continue the combo; set an invuln/missed flag for downstream consumers
this.missOrInvuln = true
if (white > 0 || black > 0) {
// No mana gained from spells that do no damage due to missing or targeting an invulnerable boss (e.g. Omega M/F firewall)
this.mana.white.invulnLoss = white
this.mana.black.invulnLoss = black
return
}
}
this.mana.white.afterCast = this.mana.white.beforeCast + white
this.mana.black.afterCast = this.mana.black.beforeCast + black
this.calculateManaImbalance(white, black)
this.calculateManaOvercap(false)
}
}
calculateManaImbalance(white, black) {
if (white && this.mana.black.beforeCast - this.mana.white.beforeCast > MANA_DIFFERENCE_THRESHOLD) {
//console.log(`Imbalance White Lost, Current White: ${this._mana.white.beforecast} Current Black: ${this._mana.black.beforecast}`)
//If we have more than 30 Black mana over White, our White gains are halved
this.mana.white.imbalanceLoss = Math.ceil(white / MANA_LOST_DIVISOR)
this.mana.white.afterCast -= this.mana.white.imbalanceLoss
}
if (black && this.mana.white.beforeCast - this.mana.black.beforeCast > MANA_DIFFERENCE_THRESHOLD) {
//console.log(`Imbalance Black Lost, Current Black: ${this._mana.black.beforecast} Current White: ${this._mana.white.beforecast}`)
//If we have more than 30 White mana over Black, our Black gains are halved
this.mana.black.imbalanceLoss = Math.ceil(black / MANA_LOST_DIVISOR)
this.mana.black.afterCast -= this.mana.black.imbalanceLoss
}
}
calculateManaOvercap(isManafication) {
if (this.mana.white.afterCast > MANA_CAP) {
if (isManafication) {
this.mana.white.manaficationLoss = this.mana.white.afterCast - MANA_CAP
} else {
this.mana.white.overCapLoss = this.mana.white.afterCast - MANA_CAP
}
this.mana.white.afterCast = MANA_CAP
}
if (this.mana.black.afterCast > MANA_CAP) {
if (isManafication) {
this.mana.black.manaficationLoss = this.mana.black.afterCast - MANA_CAP
} else {
this.mana.black.overCapLoss = this.mana.black.afterCast - MANA_CAP
}
this.mana.black.afterCast = MANA_CAP
}
}
}
export default class Gauge extends Module {
static handle = 'gauge'
static title = t('rdm.gauge.title')`Gauge`
static dependencies = [
'cooldowns',
'suggestions',
'statistics',
]
//Keeps track of our current mana gauge.
_whiteMana = 0
_blackMana = 0
//Keeps track of overall wasted mana
_whiteManaWasted = 0
_blackManaWasted = 0
_whiteManaLostToImbalance = 0
_blackManaLostToImbalance = 0
_whiteManaLostToInvulnerable = 0
_blackManaLostToInvulnerable = 0
_whiteManaLostToManafication = 0
_blackManaLostToManafication = 0
// Chart handling
_history = {
white: [],
black: [],
}
constructor(...args) {
super(...args)
this.addEventHook('cast', {
by: 'player',
abilityId: ACTIONS.MANAFICATION.id,
}, this._gaugeEvent)
this.addEventHook('normaliseddamage', {by: 'player'}, this._gaugeEvent)
this.addEventHook('death', {to: 'player'}, this._onDeath)
this.addEventHook('complete', this._onComplete)
}
_pushToGraph() {
const timestamp = this.parser.currentTimestamp - this.parser.fight.start_time
this._history.white.push({t: timestamp, y: this._whiteMana})
this._history.black.push({t: timestamp, y: this._blackMana})
}
_gaugeEvent(event) {
//If the RDM had resources going into the fight, eventually we'll hit a negative number.
//This is very bad, so before we initialize the Action or do any calculations we ensure the base
//inputs are at the floor of 0.
if (this._whiteMana && this._whiteMana < MANA_FLOOR) {
this._whiteMana = MANA_FLOOR
}
if (this._blackMana && this._blackMana < MANA_FLOOR) {
this._blackMana = MANA_FLOOR
}
const gaugeAction = new GaugeAction(this._whiteMana, this._blackMana)
const abilityId = event.ability.guid
//console.log(`White: ${this._whiteMana} Black: ${this._blackMana}`)
if (abilityId === ACTIONS.MANAFICATION.id) {
//console.log('Manafication')
gaugeAction.calculateManaFicationManaGained()
// Manafication resets movement abilities
this.cooldowns.resetCooldown(ACTIONS.CORPS_A_CORPS.id)
this.cooldowns.resetCooldown(ACTIONS.DISPLACEMENT.id)
} else {
//Leaving this here: we have an issue wherein an overkill returns an amount of 0,
//but the game treats it as an ability that hit, which triggers our invuln logic.
//Sadly, removing the amount check would cause even more inaccurate information to go out.
//This log shows the issue with a Verthunder overkill of Mustardseed 1 (ID 21), a Verholy overkill of Mustardseed 2 (ID 25),
//and a Verthunder overkill of Titania herself. https://www.fflogs.com/reports/FkVjcbGqBhXyNtg7/#fight=6&type=summary
// if (event.amount === 0) {
// console.log(`${JSON.stringify(event, null, 4)}`)
// console.log(`Cast: ${event.ability.name}, timestamp: ${this.parser.formatTimestamp(event.timestamp)}`)
// }
gaugeAction.calculateCastManaGained(event)
}
this._whiteMana = gaugeAction.mana.white.afterCast
this._blackMana = gaugeAction.mana.black.afterCast
this._whiteManaWasted += gaugeAction.mana.white.overCapLoss
this._blackManaWasted += gaugeAction.mana.black.overCapLoss
this._whiteManaLostToImbalance += gaugeAction.mana.white.imbalanceLoss
this._blackManaLostToImbalance += gaugeAction.mana.black.imbalanceLoss
this._whiteManaLostToInvulnerable += gaugeAction.mana.white.invulnLoss
this._blackManaLostToInvulnerable += gaugeAction.mana.black.invulnLoss
this._whiteManaLostToManafication += gaugeAction.mana.white.manaficationLoss
this._blackManaLostToManafication += gaugeAction.mana.black.manaficationLoss
if (abilityId in MANA_GAIN || abilityId === ACTIONS.MANAFICATION.id) {
this._pushToGraph()
}
const fabricatedEvent = {
...event,
type: 'rdmcast',
mana: gaugeAction.mana,
missOrInvuln: gaugeAction.missOrInvuln,
}
//console.log(`${JSON.stringify(fabricatedEvent, null, 4)}`)
this.parser.fabricateEvent(fabricatedEvent)
}
_onDeath() {
this._whiteMana = 0
this._blackMana = 0
this._pushToGraph()
}
_onComplete() {
this.suggestions.add(new TieredSuggestion({
icon: ACTIONS.VERHOLY.icon,
content: <Fragment>
<Trans id="rdm.gauge.suggestions.white-mana-wasted-content">Ensure you don't overcap your White Mana before a combo, overcapping White Mana indicates your balance was off; and you potentially lost out on Enchanted Combo damage. You should look to execute at 80/80 or as close to it as possible.</Trans>
</Fragment>,
tiers: SEVERITY_WASTED_MANA,
value: this._whiteManaWasted,
why: <Fragment>
<Trans id="rdm.gauge.suggestions.white-mana-wasted-why">You lost {this._whiteManaWasted} White Mana due to capped Gauge resources</Trans>
</Fragment>,
}))
this.suggestions.add(new TieredSuggestion({
icon: ACTIONS.VERHOLY.icon,
content: <Fragment>
<Trans id="rdm.gauge.suggestions.white-mana-lost-content">Ensure you don't allow a difference of more than 30 betwen mana types, you lost white Mana due to Imbalance which reduces your overall mana gain and potentially costs you one or more Enchanted Combos</Trans>
</Fragment>,
tiers: SEVERITY_LOST_MANA,
value: this._whiteManaLostToImbalance,
why: <Fragment>
<Trans id="rdm.gauge.suggestions.white-mana-lost-why">You lost {this._whiteManaLostToImbalance} White Mana due to overage of black Mana</Trans>
</Fragment>,
}))
this.suggestions.add(new TieredSuggestion({
icon: ACTIONS.VERHOLY.icon,
content: <Fragment>
<Trans id="rdm.gauge.suggestions.white-mana-invuln-content">Ensure you don't target a boss that you cannot damage with your damaging spells. Spells that do no damage due to an invulnerable target or due to missing result in no mana gained, which potentially costs you one or more Enchanted Combos.</Trans>
</Fragment>,
tiers: SEVERITY_LOST_MANA,
value: this._whiteManaLostToInvulnerable,
why: <Fragment>
<Trans id="rdm.gauge.suggestions.white-mana-invuln-why">You lost {this._whiteManaLostToInvulnerable} White Mana due to misses or spells that targeted an invulnerable target</Trans>
</Fragment>,
}))
this.suggestions.add(new TieredSuggestion({
icon: ACTIONS.VERFLARE.icon,
content: <Fragment>
<Trans id="rdm.gauge.suggestions.black-mana-wasted-content">Ensure you don't overcap your Black Mana before a combo, overcapping Black Mana indicates your balance was off; and you potentially lost out on Enchanted Combo damage. You should look to execute at 80/80 or as close to it as possible.</Trans>
</Fragment>,
tiers: SEVERITY_WASTED_MANA,
value: this._blackManaWasted,
why: <Fragment>
<Trans id="rdm.gauge.suggestions.black-mana-wasted-why">You lost {this._blackManaWasted} Black Mana due to capped Gauge resources</Trans>
</Fragment>,
}))
this.suggestions.add(new TieredSuggestion({
icon: ACTIONS.VERFLARE.icon,
content: <Fragment>
<Trans id="rdm.gauge.suggestions.black-mana-lost-content">Ensure you don't allow a difference of more than 30 betwen mana types, you lost Black Mana due to Imbalance which reduces your overall mana gain and potentially costs you one or more Enchanted Combos</Trans>
</Fragment>,
tiers: SEVERITY_LOST_MANA,
value: this._blackManaLostToImbalance,
why: <Fragment>
<Trans id="rdm.gauge.suggestions.black-mana-lost-why">You lost {this._blackManaLostToImbalance} Black Mana due to overage of White Mana</Trans>
</Fragment>,
}))
this.suggestions.add(new TieredSuggestion({
icon: ACTIONS.VERFLARE.icon,
content: <Fragment>
<Trans id="rdm.gauge.suggestions.black-mana-invuln-content">Ensure you don't target a boss that you cannot damage with your damaging spells. Spells that do no damage due to an invulnerable target or due to missing result in no mana gained, which potentially costs you one or more Enchanted Combos.</Trans>
</Fragment>,
tiers: SEVERITY_LOST_MANA,
value: this._blackManaLostToInvulnerable,
why: <Fragment>
<Trans id="rdm.gauge.suggestions.black-mana-invuln-why">You lost {this._blackManaLostToInvulnerable} Black Mana due to misses or spells that targeted an invulnerable target</Trans>
</Fragment>,
}))
this.statistics.add(new DualStatistic({
label: <Trans id="rdm.gauge.title-mana-lost-to-manafication">Manafication Loss:</Trans>,
title: <Trans id="rdm.gauge.white-mana-lost-to-manafication">White</Trans>,
title2: <Trans id="rdm.gauge.black-mana-lost-to-manafication">Black</Trans>,
icon: ACTIONS.VERHOLY.icon,
icon2: ACTIONS.VERFLARE.icon,
value: this._whiteManaLostToManafication,
value2: this._blackManaLostToManafication,
info: (
<Trans id="rdm.gauge.white-mana-lost-to-manafication-statistics">
It is OK to lose some mana to Manafication over the course of a fight; however, you should strive to keep this number as low as possible.
</Trans>
),
}))
}
output() {
const whm = Color(JOBS.WHITE_MAGE.colour)
const blm = Color(JOBS.BLACK_MAGE.colour)
// Disabling magic numbers for the chart, 'cus it's a chart
/* eslint-disable no-magic-numbers */
const data = {
datasets: [{
label: 'White Mana',
data: this._history.white,
backgroundColor: whm.fade(0.5),
borderColor: whm.fade(0.2),
steppedLine: true,
}, {
label: 'Black Mana',
data: this._history.black,
backgroundColor: blm.fade(0.5),
borderColor: blm.fade(0.2),
steppedLine: true,
}],
}
return <TimeLineChart
data={data}
/>
/* eslint-enable no-magic-numbers */
}
/**
* Get the current White Mana as calculated from the most recent OnCast event
*/
get whiteMana() {
return this._whiteMana
}
/**
* Get the current Black Mana as calculated from the most recent OnCast event
*/
get blackMana() {
return this._blackMana
}
}
|
[ACTIONS.VERAERO.id]: {white: 11, black: 0},
|
euler047.go
|
/*
The first two consecutive numbers to have two distinct prime factors are:
14 = 2 × 7
15 = 3 × 5
The first three consecutive numbers to have three distinct prime factors are:
644 = 2² × 7 × 23
645 = 3 × 5 × 43
646 = 2 × 17 × 19.
Find the first four consecutive integers to have four distinct prime factors.
What is the first of these numbers?
*/
// JGB: Lots of room for improvement here. Mostly in finding the factors.
package main
import (
"fmt"
"math"
"time"
)
var p = fmt.Println
var pf = fmt.Printf
// timeTrack is used for basic benchmarking in other functions
func timeTrack(start time.Time, name string) {
elapse
|
od using internal recursion. Room for optimisation here
func primeFactors(num int) []int {
// defer timeTrack(time.Now(), "primeFactors()") // Timer function
var factorSlice []int
for i := 2; i <= num; i++ {
// if i is a factor of num
if num%i == 0 {
// add i to factorSlice
factorSlice = append(factorSlice, i)
// new number to factorise
temp := num / i
if temp > 1 {
// get the rest of the factors via recursion and append to list
factorSlice = append(factorSlice, primeFactors(temp)...)
}
// factorSlice will now contain all factors of whatever it was called with
// so return the solution
return factorSlice
}
}
return []int{0}
}
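// uniqueFactors collapses the prime factorisation of num into prime powers
// (e.g. 644 -> [4 7 23]), so each distinct prime appears exactly once.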
func uniqueFactors(num int) []int {
// defer timeTrack(time.Now(), "uniqueFactors()")
primeFactorList := primeFactors(num)
// make the map of factors
factorMap := make(map[int]int)
for i := 0; i < len(primeFactorList); i++ {
factorMap[primeFactorList[i]]++
}
// combine repeated prime factors into prime powers (e.g. [2 2 7 23] -> [4 7 23])
uniqueFactorList := []int{}
for k, v := range factorMap {
uniqueFactorList = append(uniqueFactorList, int(math.Pow(float64(k), float64(v))))
}
return uniqueFactorList
}
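// solve047 scans upwards for the first run of NUM consecutive integers that
// each have NUM distinct prime factors, requiring the prime powers to be
// unique across the run, and returns the first integer of that run.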
func solve047(lim int) int {
defer timeTrack(time.Now(), "solve047()") // Timer function
factorMap := make(map[int]bool)
currentFactors := []int{}
const NUM = 4
count := 0
reset := func() {
factorMap = make(map[int]bool)
count = 0
}
loadCurrentToMap := func() bool {
for x := 0; x < len(currentFactors); x++ {
if factorMap[currentFactors[x]] {
return false
}
factorMap[currentFactors[x]] = true
}
return true
}
for i := 10; i < lim; i++ {
currentFactors = uniqueFactors(i)
if len(currentFactors) != NUM {
reset()
continue
}
if !loadCurrentToMap() {
reset()
continue
}
count++
if count == NUM {
return i - NUM + 1
}
}
return 0
}
func main() {
// p(primeFactors(28622480))
// p(uniqueFactors(286229))
p(solve047(150000))
}
|
d := time.Since(start)
fmt.Printf("%s took %s \n", name, elapsed)
}
// meth
|
zwaymqtt.go
|
package main
import (
"os"
"log"
"flag"
"fmt"
"time"
"strings"
"regexp"
"errors"
"strconv"
"encoding/json"
"net/http"
"io/ioutil"
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
pfl "github.com/davecheney/profile"
)
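// MqttUpdate is a value to publish on an MQTT topic.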
type MqttUpdate struct {
Topic string
Value string
}
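// Gateway maps a key in the Z-Way data tree to an MQTT topic, with the
// expected value type and whether the value may be written back to Z-Way.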
type Gateway struct {
Key string
Topic string
Value string
Write bool
Type string
}
//command line variable
var zway_server string
var zway_username string
var zway_password string
var zway_home string
var zway_refresh int
var mqtt_server string
var mqtt_username string
var mqtt_password string
var mqtt_protocol string
var debug bool
var profile string
//used variables
var zway_timestamp int = 0
var zway_dataapi = "/ZWaveAPI/Data/"
var zway_zautoapi = "/ZAutomation/api/v1/"
var zway_runapi = "/ZWaveAPI/Run/"
var zway_cookiename = "ZWAYSession"
var http_client = new(http.Client)
var zway_cookie = new(http.Cookie)
var gateways []Gateway
var zway_retries int = 0
//ZWay enumerations
const (
BASIC_TYPE_CONTROLER = 1
BASIC_TYPE_STATIC_CONTROLER = 2
BASIC_TYPE_SLAVE = 3
BASIC_TYPE_ROUTING_SLAVE = 4
GENERIC_TYPE_THERMOSTAT = 8
GENERIC_TYPE_BINARY_SWITCH = 16
GENERIC_TYPE_MULTILEVEL_SWITCH = 17
GENERIC_TYPE_SWITCH_REMOTE = 18
GENERIC_TYPE_SWITCH_TOGGLE = 19
GENERIC_TYPE_SECURITY_PANEL = 23
GENERIC_TYPE_BINARY_SENSOR = 32
GENERIC_TYPE_MULTILEVEL_SENSOR = 33
GENERIC_TYPE_METER = 49
GENERIC_TYPE_ENTRY_CONTROL = 64
COMMAND_CLASS_NO_OPERATION = 0
COMMAND_CLASS_BASIC = 32
COMMAND_CLASS_CONTROLLER_REPLICATION = 33
COMMAND_CLASS_APPLICATION_STATUS = 34
COMMAND_CLASS_ZIP_SERVICES = 35
COMMAND_CLASS_ZIP_SERVER = 36
COMMAND_CLASS_SWITCH_BINARY = 37
COMMAND_CLASS_SWITCH_MULTILEVEL = 38
COMMAND_CLASS_SWITCH_ALL = 39
COMMAND_CLASS_SWITCH_TOGGLE_BINARY = 40
COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL = 41
COMMAND_CLASS_CHIMNEY_FAN = 42
COMMAND_CLASS_SCENE_ACTIVATION = 43
COMMAND_CLASS_SCENE_ACTUATOR_CONF = 44
COMMAND_CLASS_SCENE_CONTROLLER_CONF = 45
COMMAND_CLASS_ZIP_CLIENT = 46
COMMAND_CLASS_ZIP_ADV_SERVICES = 47
COMMAND_CLASS_SENSOR_BINARY = 48
COMMAND_CLASS_SENSOR_MULTILEVEL = 49
COMMAND_CLASS_METER = 50
COMMAND_CLASS_ZIP_ADV_SERVER = 51
COMMAND_CLASS_ZIP_ADV_CLIENT = 52
COMMAND_CLASS_METER_PULSE = 53
COMMAND_CLASS_THERMOSTAT_HEATING = 56
COMMAND_CLASS_METER_TABLE_CONFIG = 60
COMMAND_CLASS_METER_TABLE_MONITOR = 61
COMMAND_CLASS_METER_TABLE_PUSH = 62
COMMAND_CLASS_THERMOSTAT_MODE = 64
COMMAND_CLASS_THERMOSTAT_OPERATING_STATE = 66
COMMAND_CLASS_THERMOSTAT_SET_POINT = 67
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 68
COMMAND_CLASS_THERMOSTAT_FAN_STATE = 69
COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE = 70
COMMAND_CLASS_THERMOSTAT_SETBACK = 71
COMMAND_CLASS_DOOR_LOCK_LOGGING = 76
COMMAND_CLASS_SCHEDULE_ENTRY_LOCK = 78
COMMAND_CLASS_BASIC_WINDOW_COVERING = 80
COMMAND_CLASS_MTP_WINDOW_COVERING = 81
COMMAND_CLASS_SCHEDULE = 83
COMMAND_CLASS_CRC_16_ENCAP = 86
COMMAND_CLASS_ASSOCIATION_GROUP_INFO = 89
COMMAND_CLASS_DEVICE_RESET_LOCALLY = 90
COMMAND_CLASS_CENTRAL_SCENE = 91
COMMAND_CLASS_IP_ASSOCIATION = 92
COMMAND_CLASS_ANTITHEFT = 93
COMMAND_CLASS_ZWAVEPLUS_INFO = 94
COMMAND_CLASS_MULTI_INSTANCE = 96
COMMAND_CLASS_DOOR_LOCK = 98
COMMAND_CLASS_USER_CODE = 99
COMMAND_CLASS_BARRIER_OPERATOR = 102
COMMAND_CLASS_CONFIGURATION = 112
COMMAND_CLASS_ALARM = 113
COMMAND_CLASS_MANUFACTURER_SPECIFIC = 114
COMMAND_CLASS_POWER_LEVEL = 115
COMMAND_CLASS_PROTECTION = 117
COMMAND_CLASS_LOCK = 118
COMMAND_CLASS_NODE_NAMING = 119
COMMAND_CLASS_FIRMWARE_UPDATE = 122
COMMAND_CLASS_GROUPING_NAME = 123
COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE = 124
COMMAND_CLASS_REMOTE_ASSOCIATION = 125
COMMAND_CLASS_BATTERY = 128
COMMAND_CLASS_CLOCK = 129
COMMAND_CLASS_HAIL = 130
COMMAND_CLASS_WAKEUP = 132
COMMAND_CLASS_ASSOCIATION = 133
COMMAND_CLASS_VERSION = 134
COMMAND_CLASS_INDICATOR = 135
COMMAND_CLASS_PROPRIETRAY = 136
COMMAND_CLASS_LANGUAGE = 137
COMMAND_CLASS_TIME = 138
COMMAND_CLASS_TIME_PARAMETERS = 139
COMMAND_CLASS_GEOGRAPHIC_LOCATION = 140
COMMAND_CLASS_COMPOSITE = 141
COMMAND_CLASS_MULTICHANNEL_ASSOCIATION = 142
COMMAND_CLASS_MULTI_CMD = 143
COMMAND_CLASS_ENERGY_PRODUCTION = 144
COMMAND_CLASS_MANUFACTURER_PROPRIETRATY = 145
COMMAND_CLASS_SCREEN_MD = 146
COMMAND_CLASS_SCREEN_ATTRIBUTES = 147
COMMAND_CLASS_SIMPLE_AV_CONTROL = 148
COMMAND_CLASS_AV_CONTENT_DIRECTORY_MD = 149
COMMAND_CLASS_RENDERER_STATUS = 150
COMMAND_CLASS_AV_CONTENT_SEARCH_MD = 151
COMMAND_CLASS_SECURITY = 152
COMMAND_CLASS_AV_TAGGING_MD = 153
COMMAND_CLASS_IP_CONFIGURATION = 154
COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION = 155
COMMAND_CLASS_ALARM_SENSOR = 156
COMMAND_CLASS_SILENCE_ALARM = 157
COMMAND_CLASS_SENSOR_CONFIGURATION = 158
COMMAND_CLASS_MARK = 239
COMMAND_CLASS_NON_INEROPERABLE = 240
)
var ZWaveClassNames = [...]string{
COMMAND_CLASS_NO_OPERATION: "command no operation",
COMMAND_CLASS_BASIC: "command basic",
COMMAND_CLASS_CONTROLLER_REPLICATION: "command controller replication",
COMMAND_CLASS_APPLICATION_STATUS: "command application status",
COMMAND_CLASS_ZIP_SERVICES: "command zip services",
COMMAND_CLASS_ZIP_SERVER: "command zip server",
COMMAND_CLASS_SWITCH_BINARY: "command switch binary",
COMMAND_CLASS_SWITCH_MULTILEVEL: "command switch multilevel",
COMMAND_CLASS_SWITCH_ALL: "command switch all",
COMMAND_CLASS_SWITCH_TOGGLE_BINARY: "command switch toggle binary",
COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL: "command switch toggle multilevel",
COMMAND_CLASS_CHIMNEY_FAN: "command chimney fan",
COMMAND_CLASS_SCENE_ACTIVATION: "command scene activation",
COMMAND_CLASS_SCENE_ACTUATOR_CONF: "command scene actuator configuration",
COMMAND_CLASS_SCENE_CONTROLLER_CONF: "command scene controller configuration",
COMMAND_CLASS_ZIP_CLIENT: "command zip client",
COMMAND_CLASS_ZIP_ADV_SERVICES: "command zip adv services",
COMMAND_CLASS_SENSOR_BINARY: "command sensor binary",
COMMAND_CLASS_SENSOR_MULTILEVEL: "command sensor multilevel",
COMMAND_CLASS_METER: "command meter",
COMMAND_CLASS_ZIP_ADV_SERVER: "command zip adv server",
COMMAND_CLASS_ZIP_ADV_CLIENT: "command zip adv client",
COMMAND_CLASS_METER_PULSE: "command meter pulse",
COMMAND_CLASS_THERMOSTAT_HEATING: "command thermostat heating",
COMMAND_CLASS_METER_TABLE_CONFIG: "command meter table config",
COMMAND_CLASS_METER_TABLE_MONITOR: "command meter table monitor",
COMMAND_CLASS_METER_TABLE_PUSH: "command meter table push",
COMMAND_CLASS_THERMOSTAT_MODE: "command thermostat mode",
COMMAND_CLASS_THERMOSTAT_OPERATING_STATE: "command thermostat operating state",
COMMAND_CLASS_THERMOSTAT_SET_POINT: "command thermostat set point",
COMMAND_CLASS_THERMOSTAT_FAN_MODE: "command thermostat fan mode",
COMMAND_CLASS_THERMOSTAT_FAN_STATE: "command thermostat fan state",
COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE: "command climate control schedule",
COMMAND_CLASS_THERMOSTAT_SETBACK: "command thermostat setback",
COMMAND_CLASS_DOOR_LOCK_LOGGING: "command door lock logging",
COMMAND_CLASS_SCHEDULE_ENTRY_LOCK: "command schedule entry lock",
COMMAND_CLASS_BASIC_WINDOW_COVERING: "command basic window covering",
COMMAND_CLASS_MTP_WINDOW_COVERING: "command mtp window covering",
COMMAND_CLASS_SCHEDULE: "command schedule",
COMMAND_CLASS_CRC_16_ENCAP: "command crc 16 encap",
COMMAND_CLASS_ASSOCIATION_GROUP_INFO: "command association group info",
COMMAND_CLASS_DEVICE_RESET_LOCALLY: "command device reset locally",
COMMAND_CLASS_CENTRAL_SCENE: "command central scene",
COMMAND_CLASS_IP_ASSOCIATION: "command ip association",
COMMAND_CLASS_ANTITHEFT: "command antitheft",
COMMAND_CLASS_ZWAVEPLUS_INFO: "command zwaveplus info",
COMMAND_CLASS_MULTI_INSTANCE: "command multi instance",
COMMAND_CLASS_DOOR_LOCK: "command door lock",
COMMAND_CLASS_USER_CODE: "command user code",
COMMAND_CLASS_BARRIER_OPERATOR: "command barrier operator",
COMMAND_CLASS_CONFIGURATION: "command configuration",
COMMAND_CLASS_ALARM: "command alarm",
COMMAND_CLASS_MANUFACTURER_SPECIFIC: "command manufacturer specific",
COMMAND_CLASS_POWER_LEVEL: "command power level",
COMMAND_CLASS_PROTECTION: "command protection",
COMMAND_CLASS_LOCK: "command lock",
COMMAND_CLASS_NODE_NAMING: "command node naming",
COMMAND_CLASS_FIRMWARE_UPDATE: "command firmware update",
COMMAND_CLASS_GROUPING_NAME: "command grouping name",
COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE: "command remote association activate",
COMMAND_CLASS_REMOTE_ASSOCIATION: "command remote association",
COMMAND_CLASS_BATTERY: "command battery",
COMMAND_CLASS_CLOCK: "command clock",
COMMAND_CLASS_HAIL: "command hail",
COMMAND_CLASS_WAKEUP: "command wakeup",
COMMAND_CLASS_ASSOCIATION: "command association",
COMMAND_CLASS_VERSION: "command version",
COMMAND_CLASS_INDICATOR: "command indicator",
COMMAND_CLASS_PROPRIETRAY: "command proprietary",
COMMAND_CLASS_LANGUAGE: "command language",
COMMAND_CLASS_TIME: "command time",
COMMAND_CLASS_TIME_PARAMETERS: "command time parameters",
COMMAND_CLASS_GEOGRAPHIC_LOCATION: "command geographic location",
COMMAND_CLASS_COMPOSITE: "command composite",
COMMAND_CLASS_MULTICHANNEL_ASSOCIATION: "command multichannel association",
COMMAND_CLASS_MULTI_CMD: "command multi cmd",
COMMAND_CLASS_ENERGY_PRODUCTION: "command energy production",
COMMAND_CLASS_MANUFACTURER_PROPRIETRATY: "command manufacturer proprietary",
COMMAND_CLASS_SCREEN_MD: "command screen md",
COMMAND_CLASS_SCREEN_ATTRIBUTES: "command screen attributes",
COMMAND_CLASS_SIMPLE_AV_CONTROL: "command simple av control",
COMMAND_CLASS_AV_CONTENT_DIRECTORY_MD: "command av content directory",
COMMAND_CLASS_RENDERER_STATUS: "command renderer status",
COMMAND_CLASS_AV_CONTENT_SEARCH_MD: "command av content search md",
COMMAND_CLASS_SECURITY: "command security",
COMMAND_CLASS_AV_TAGGING_MD: "command av tagging md",
COMMAND_CLASS_IP_CONFIGURATION: "command ip configuration",
COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION:
"command association command configuration",
COMMAND_CLASS_ALARM_SENSOR: "command alarm sensor",
COMMAND_CLASS_SILENCE_ALARM: "command silence alarm",
COMMAND_CLASS_SENSOR_CONFIGURATION: "command sensor configuration",
COMMAND_CLASS_MARK: "command mark",
COMMAND_CLASS_NON_INEROPERABLE: "command non interoperable",
}
var ZWaveTypeNames = [...]string{
BASIC_TYPE_CONTROLER: "basic controller",
BASIC_TYPE_STATIC_CONTROLER: "basic static controller",
BASIC_TYPE_SLAVE: "basic slave",
BASIC_TYPE_ROUTING_SLAVE: "basic routing slave",
GENERIC_TYPE_THERMOSTAT: "generic thermostat",
GENERIC_TYPE_BINARY_SWITCH: "generic binary switch",
GENERIC_TYPE_MULTILEVEL_SWITCH: "generic multilevel switch",
GENERIC_TYPE_SWITCH_REMOTE: "generic switch remote",
GENERIC_TYPE_SWITCH_TOGGLE: "generic switch toggle",
GENERIC_TYPE_SECURITY_PANEL: "generic security panel",
GENERIC_TYPE_BINARY_SENSOR: "generic binary sensor",
GENERIC_TYPE_MULTILEVEL_SENSOR: "generic multilevel sensor",
GENERIC_TYPE_METER: "generic meter",
GENERIC_TYPE_ENTRY_CONTROL: "generic entry control",
}
func (g *Gateway) ToString() string {
w := "->"
if g.Write { w = "<>" }
return fmt.Sprintf("%s %s %s (%s)", g.Key, w, g.Topic, g.Type)
}
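// GetValue extracts this gateway's value from a Z-Way update and renders it
// as a string according to the gateway type; it returns "" when absent.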
func (g *Gateway) GetValue(update map[string]interface{}) string {
switch g.Type {
case "string":
value, err := jsonStringValue(g.Key + "." + g.Value,update)
if err == nil {
return value
}
case "int":
value, err := jsonFloatValue(g.Key + "." + g.Value,update)
if err == nil {
return fmt.Sprintf("%d", int(value))
}
case "float":
value, err := jsonFloatValue(g.Key + "." + g.Value,update)
if err == nil {
v := fmt.Sprintf("%.3f", value)
if strings.Contains(v,".") {
v = strings.TrimRight(v,"0.")
}
return v
}
case "bool":
value, err := jsonBoolValue(g.Key + "." + g.Value,update)
if err == nil {
return fmt.Sprintf("%t", value)
}
}
return ""
}
func init() {
//initialize command line parameters
flag.StringVar(&zway_server,"s","localhost:8083","Z-Way server name or ZWAY_SERVER environment variable")
flag.StringVar(&zway_username,"u","admin","Z-Way username or ZWAY_USERNAME environment variable")
flag.StringVar(&zway_password,"p","","Z-Way password or ZWAY_PASSWORD environment variable")
flag.StringVar(&zway_home,"h","razberry","mqtt topic root or ZWAY_HOME environment variable")
flag.StringVar(&mqtt_server,"m","localhost:1883","MQTT server or MQTT_SERVER environment variable")
flag.StringVar(&mqtt_username,"mu","","MQTT username or MQTT_USERNAME environment variable")
flag.StringVar(&mqtt_password,"mp","","MQTT password or MQTT_PASSWORD environment variable")
flag.StringVar(&mqtt_protocol,"proto","tcp","MQTT protocol tcp/ws/tls or MQTT_PROTOCOL environment variable")
flag.IntVar(&zway_refresh,"r",30,"Z-Way refresh rate in seconds or ZWAY_REFRESH environment variable")
flag.BoolVar(&debug,"v",false,"Show debug messages")
flag.StringVar(&profile,"profile","","Profile execution (cpu/mem/all)")
flag.Parse()
// check defaults against environment variables
if zway_server == "localhost:8083" && len(os.Getenv("ZWAY_SERVER")) > 0 {
zway_server = os.Getenv("ZWAY_SERVER")
}
if zway_username == "admin" && len(os.Getenv("ZWAY_USERNAME")) > 0 {
zway_username = os.Getenv("ZWAY_USERNAME")
}
if len(zway_password) == 0 && len(os.Getenv("ZWAY_PASSWORD")) > 0 {
zway_password = os.Getenv("ZWAY_PASSWORD")
}
if zway_home == "razberry" && len(os.Getenv("ZWAY_HOME")) > 0 {
zway_home = os.Getenv("ZWAY_HOME")
}
if zway_refresh == 30 && len(os.Getenv("ZWAY_REFRESH")) > 0 {
zway_refresh, _ = strconv.Atoi(os.Getenv("ZWAY_REFRESH"))
}
if mqtt_server == "localhost:1883" && len(os.Getenv("MQTT_SERVER")) > 0 {
mqtt_server = os.Getenv("MQTT_SERVER")
}
if len(mqtt_username) == 0 && len(os.Getenv("MQTT_USERNAME")) > 0 {
mqtt_username = os.Getenv("MQTT_USERNAME")
}
if len(mqtt_password) == 0 && len(os.Getenv("MQTT_PASSWORD")) > 0 {
mqtt_password = os.Getenv("MQTT_PASSWORD")
}
if mqtt_protocol == "tcp" && len(os.Getenv("MQTT_PROTOCOL")) > 0 {
mqtt_protocol = os.Getenv("MQTT_PROTOCOL")
}
if !debug && len(os.Getenv("ZWAYMQTT_DEBUG")) > 0 {
if os.Getenv("ZWAYMQTT_DEBUG") == "true" {
debug = true
}
}
//standardise hostname values to <host>:<port>
zway_match, err := regexp.MatchString(":[0-9]+$",zway_server)
if err != nil {
log.Fatal(fmt.Sprintf("Could not use regexp: %s", err))
}
if !zway_match {
log.Print("Setting port 8083 on given Z-Way server")
zway_server = zway_server + ":8083"
}
mqtt_match, err := regexp.MatchString(":[0-9]+$",mqtt_server)
if err != nil {
log.Fatal(fmt.Sprintf("Could not use regexp: %s", err))
}
if !mqtt_match {
log.Print("Setting port 1883 on given MQTT server")
mqtt_server = mqtt_server + ":1883"
}
}
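// getzway polls the Z-Way Data API for all changes since zway_timestamp and
// returns the raw JSON body (empty string on failure).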
func getzway() string {
if (debug) { log.Print("Getting Z-Way update.") }
url := fmt.Sprintf("http://%s%s%d", zway_server, zway_dataapi, zway_timestamp)
req, err := http.NewRequest("GET",url,nil)
if err != nil {
log.Printf("Error initializing request: %s", err)
}
if zway_cookie != nil {
req.AddCookie(zway_cookie)
}
rsp, err := http_client.Do(req)
if err != nil {
log.Printf("Could not make zway update: %s", err)
return ""
}
defer rsp.Body.Close()
bdy, err := ioutil.ReadAll(rsp.Body)
if err != nil {
log.Printf("could not read body: %s", err)
}
return string(bdy)
}
func authzway() {
//getting Zway authentication cookie
url := fmt.Sprintf("http://%s%slogin", zway_server, zway_zautoapi)
login := fmt.Sprintf("{\"login\": \"%s\", \"password\": \"%s\"}",
zway_username, zway_password)
req, err := http.NewRequest("POST",url,strings.NewReader(login))
if err != nil {
log.Printf("Error initializing request: %s", err)
}
req.Header.Set("Content-Type", "application/json")
rsp, err := http_client.Do(req)
if err != nil {
log.Fatalf("Could not login to Z-Way: %s", err)
}
cookies := rsp.Cookies()
for i := range cookies {
if cookies[i].Name == zway_cookiename && cookies[i].Path == "/" {
zway_cookie = cookies[i]
break
}
}
if zway_cookie == nil {
log.Fatal("Z-Way cookie not found.")
}
}
func jsonValue(key string, target map[string]interface{}) (interface{}, error) {
//if the value is directly found... return it
if target[key] != nil {
return target[key], nil
}
current := target
keys := strings.Split(key,".")
for i := range keys[:len(keys)-1] {
value := current[keys[i]]
if value == nil {
return nil, fmt.Errorf("Json Key not existent (%s)", keys[i])
}
current = value.(map[string]interface{})
}
key = keys[len(keys)-1]
value := current[key]
if value != nil {
return value, nil
}
return nil, errors.New("Json Value non existent.")
}
func jsonStringValue(key string, target map[string]interface{}) (string, error) {
iface, err := jsonValue(key,target)
if err != nil {
return "", err
}
return iface.(string), nil
}
func jsonIntValue(key string, target map[string]interface{}) (int, error) {
iface, err := jsonValue(key,target)
if err != nil {
return 0, err
}
return iface.(int), nil
}
func jsonFloatValue(key string, target map[string]interface{}) (float64, error) {
iface, err := jsonValue(key,target)
if err != nil {
return 0.0, err
}
return iface.(float64), nil
}
func jsonMapValue(key string, target map[string]interface{}) (map[string]interface{}, error) {
iface, err := jsonValue(key,target)
if err != nil {
return nil, err
}
return iface.(map[string]interface{}), nil
}
func jsonBoolValue(key string, target map[string]interface{}) (bool, error) {
iface, err := jsonValue(key,target)
if err != nil {
return false, err
}
return iface.(bool), nil
}
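// zwaygetcmdclassdata returns the "data" map of the given command class for
// an instance, or an error when the instance does not implement the class.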
func zwaygetcmdclassdata(cmdClasses map[string]interface{}, cmdClass int) (map[string]interface{}, error) {
iface := cmdClasses[strconv.Itoa(cmdClass)]
if iface == nil {
return nil, errors.New("Command class not implemented by instance")
}
class := iface.(map[string]interface{})
data, err := jsonMapValue("data",class)
if err != nil {
return nil, err
}
return data, nil
}
func normName(name string) string {
//trim
res := strings.Trim(name," /")
//lower
res = strings.ToLower(res)
//spaces
res = strings.Replace(res," ","_",-1)
//percents
res = strings.Replace(res,"%","pc",-1)
//deg
res = strings.Replace(res,"°","",-1)
return res
}
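// zwayparsedevices walks the Z-Way device tree once at startup and builds the
// list of gateways between device values and MQTT topics.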
func zwayparsedevices(update map[string]interface{}) {
log.Print("Parse Z-Way devices")
for node, info := range update {
m := info.(map[string]interface{})
basicType, err := jsonFloatValue("data.basicType.value",m)
if err != nil {
log.Printf("basic type not found: %s", err)
continue
}
genericType, err:= jsonFloatValue("data.genericType.value",m)
if err != nil {
log.Printf("generic type not found: %s", err)
continue
}
givenName, err := jsonStringValue("data.givenName.value",m)
if err != nil {
log.Printf("given name not found: %s", err)
continue
}
//specificType := int(jsonFloatValue("data.specificType.value",m))
isControler := false
switch int(basicType) {
case BASIC_TYPE_CONTROLER:
isControler = true
case BASIC_TYPE_STATIC_CONTROLER:
isControler = true
}
//skip if controller
if isControler {
log.Printf("Skipping node %s: %s", node, ZWaveTypeNames[int(basicType)])
continue
}
//skip if no name
if len(givenName) == 0 {
log.Printf("given name empty")
continue
}
//parsing instances
instances, err := jsonMapValue("instances",m)
if err != nil {
continue
}
for i := range instances {
instance := instances[i].(map[string]interface{})
commandClasses, err := jsonMapValue("commandClasses",instance)
if err != nil {
log.Printf("command classes not found: %s", err)
continue
}
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_BATTERY)
topic := fmt.Sprintf("%s/sensors/analogic/%s/%s/battery",
zway_home, normName(givenName),i)
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "last.value", Write:false, Type: "int"})
switch int(genericType) {
case GENERIC_TYPE_BINARY_SWITCH:
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_SWITCH_BINARY)
topic := fmt.Sprintf("%s/actuators/binary/%s/%s/switch",
zway_home, normName(givenName), i)
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:true, Type: "bool"})
case GENERIC_TYPE_MULTILEVEL_SWITCH:
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_SWITCH_MULTILEVEL)
topic := fmt.Sprintf("%s/actuators/analogic/%s/%s/switch",
zway_home, normName(givenName),i)
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:true, Type: "float"})
case GENERIC_TYPE_BINARY_SENSOR:
data, err := zwaygetcmdclassdata(commandClasses,
COMMAND_CLASS_SENSOR_BINARY)
if err != nil {
break
}
sensorType := "generic"
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_SENSOR_BINARY)
topic := fmt.Sprintf("%s/sensors/binary/%s/%s/%s",
zway_home, normName(givenName), i, sensorType)
_, err = jsonBoolValue("level.value",update)
if err == nil {
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:false, Type: "bool"})
} else {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
sensor := v.(map[string]interface{})
sensorType, err := jsonStringValue("sensorTypeString.value",sensor)
if err != nil {
log.Printf("Could not get sensor type: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_SENSOR_BINARY,k)
topic := fmt.Sprintf("%s/sensors/binary/%s/%s/%s",
zway_home,normName(givenName), i, normName(sensorType))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:false, Type: "bool"})
}
}
}
fallthrough
case GENERIC_TYPE_MULTILEVEL_SENSOR:
data, err := zwaygetcmdclassdata(commandClasses,
COMMAND_CLASS_SENSOR_MULTILEVEL)
if err == nil {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
sensor := v.(map[string]interface{})
sensorType, err := jsonStringValue("sensorTypeString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor type: %s", err)
continue
}
sensorScale, err := jsonStringValue("scaleString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor scale: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_SENSOR_MULTILEVEL,k)
topic := fmt.Sprintf("%s/sensors/analogic/%s/%s/%s/%s",
zway_home, normName(givenName), i, normName(sensorType),
normName(sensorScale))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "val.value", Write:false, Type: "float"})
}
}
}
case GENERIC_TYPE_METER:
data, err := zwaygetcmdclassdata(commandClasses,COMMAND_CLASS_METER)
if err == nil {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
sensor := v.(map[string]interface{})
sensorType, err := jsonStringValue("sensorTypeString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor type: %s", err)
continue
}
sensorScale, err := jsonStringValue("scaleString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor scale: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_METER,k)
topic := fmt.Sprintf("%s/sensors/analogic/%s/%s/%s/%s",
zway_home, normName(givenName), i, normName(sensorType),
normName(sensorScale))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "val.value", Write:false, Type: "float"})
}
}
}
case GENERIC_TYPE_THERMOSTAT:
//get the binary switch to enable/disable thermostat
nkey := fmt.Sprintf("devices.%s.instances.%s.commandClasses.%d.data",
node, i, COMMAND_CLASS_SWITCH_BINARY)
topic := fmt.Sprintf("%s/actuators/binary/%s/%s/switch",
zway_home, normName(givenName), i)
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "level.value", Write:true, Type: "bool"})
//TODO: informations about set point
data, err := zwaygetcmdclassdata(commandClasses,
COMMAND_CLASS_THERMOSTAT_SET_POINT)
if err == nil {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
setpoint := v.(map[string]interface{})
setpointType, err := jsonStringValue("modeName.value",
setpoint)
if err != nil {
log.Printf("Could not get set point mode: %s", err)
continue
}
setpointScale, err := jsonStringValue("scaleString.value",
setpoint)
if err != nil {
log.Printf("Could not get setpoint scale: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_THERMOSTAT_SET_POINT,k)
topic := fmt.Sprintf("%s/actuators/analogic/%s/%s/%s/%s",
zway_home, normName(givenName), i, normName(setpointType),
normName(setpointScale))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "val.value", Write:true, Type: "int"})
}
}
}
data, err = zwaygetcmdclassdata(commandClasses,
COMMAND_CLASS_SENSOR_MULTILEVEL)
if err == nil {
for k, v := range data {
if _, err := strconv.Atoi(k); err == nil {
sensor := v.(map[string]interface{})
sensorType, err := jsonStringValue("sensorTypeString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor type: %s", err)
continue
}
sensorScale, err := jsonStringValue("scaleString.value",
sensor)
if err != nil {
log.Printf("Could not get sensor scale: %s", err)
continue
}
nkey := fmt.Sprintf(
"devices.%s.instances.%s.commandClasses.%d.data.%s",
node, i, COMMAND_CLASS_SENSOR_MULTILEVEL,k)
topic := fmt.Sprintf("%s/sensors/analogic/%s/%s/%s/%s",
zway_home, normName(givenName), i, normName(sensorType),
normName(sensorScale))
gateways = append(gateways, Gateway{Key: nkey, Topic: topic,
Value: "val.value", Write:false, Type: "float"})
}
}
}
default:
log.Printf("device not implemented: type: %f / name: %s", genericType, givenName)
}
}
}
}
func zwayupdategateways(update map[string]interface{}, mqtt_updates chan<- MqttUpdate) {
if (debug) { log.Print("Update Z-Way devices") }
for _, g := range gateways {
//Z-Way is always the source of truth for gateway values
value := g.GetValue(update)
if len(value) > 0 {
if (debug) { log.Printf("ZWAY: %s / Value: %s", g.ToString(), value ) }
mqtt_updates <- MqttUpdate{Topic: g.Topic, Value: value}
}
}
}
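// normalizeJson recursively expands keys containing dots ("a.b.c") into
// nested maps, so jsonValue can traverse the structure uniformly.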
func normalizeJson(json map[string]interface{}) map[string]interface{} {
for k, v := range json {
if strings.IndexRune(k,'.') > -1 {
keys := strings.Split(k,".")
nkey := keys[0]
rest := strings.Join(keys[1:],".")
tmp := make(map[string]interface{})
tmp[rest] = v.(map[string]interface{})
if json[nkey] != nil {
for k2, v2 := range json[nkey].(map[string]interface{}) {
tmp[k2] = v2
}
}
json[nkey] = normalizeJson(tmp)
delete(json, k)
}
}
return json
}
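// checkzwayupdate decodes a Z-Way JSON update, builds the gateway list on the
// first pass, pushes current values to MQTT and records the update timestamp.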
func checkzwayupdate(update string,mqtt_updates chan<- MqttUpdate) {
var f interface{}
err := json.Unmarshal([]byte(update), &f)
if err != nil {
log.Printf("Error decoding json: %s", err)
}
m := f.(map[string]interface{})
m = normalizeJson(m)
if zway_timestamp == 0 {
devices, err := jsonMapValue("devices",m)
if err != nil {
log.Printf("devices not found: %s", err)
return
}
zwayparsedevices(devices)
}
zwayupdategateways(m,mqtt_updates)
zway_timestampf, err := jsonFloatValue("updateTime",m)
if err != nil {
log.Printf("timestamp not found: %s", err)
return
}
zway_timestamp = int(zway_timestampf)
}
//define a function for the default message handler
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
topic := msg.Topic()
value := string(msg.Payload())
for _, g := range gateways {
if g.Topic == topic { g.Set(value) }
}
}
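// Set writes a value received over MQTT back to the Z-Way device behind this
// gateway, after checking that the gateway is writable and the value changed.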
func (g *Gateway) Set(value string) {
if !g.Write {
if (debug) { log.Printf("MQTT: %s / Readonly", g.ToString()) }
return
}
if g.Get() == value {
if (debug) { log.Printf("MQTT: %s / Value not changed", g.ToString()) }
return
}
//check value
switch g.Type {
case "int":
if strings.Contains(value,".") {
value = strings.TrimRight(value,"0.")
}
i, err := strconv.Atoi(value)
if err != nil {
log.Printf("MQTT: %s / value not int: %s", g.ToString(), value)
return
}
value = fmt.Sprintf("%d",i)
case "float":
if strings.Contains(value,".") {
value = strings.TrimRight(value,"0.")
}
f, err := strconv.ParseFloat(value,64)
if err != nil {
log.Printf("MQTT: %s / value not float: %s", g.ToString(), value)
return
}
value = fmt.Sprintf("%.3f", f)
}
log.Printf("MQTT: %s / Value: %s ", g.ToString(), value)
key := g.Key
r := regexp.MustCompile("\\.([0-9]+)(\\.|$)")
key = r.ReplaceAllString(key, "[$1].")
r = regexp.MustCompile("\\.data$")
key = r.ReplaceAllString(key,"")
result, _ := zwayget(zway_runapi,fmt.Sprintf("%s.Set(%s)", key, value))
if result != "null" {
log.Printf("Error updating value: %s", result)
}
}
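// Get reads the gateway's current value from the Z-Way Run API.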
func (g *Gateway) Get() string {
if (debug) { log.Print("Setting Z-Way value.") }
key := g.Key
r := regexp.MustCompile("\\.([0-9]+)\\.")
key = r.ReplaceAllString(key, "[$1].")
result, _ := zwayget(zway_runapi, fmt.Sprintf("%s.%s", key, g.Value))
return result
}
func z
|
api string, path string) (string, error) {
url := fmt.Sprintf("http://%s%s%s", zway_server, api, path)
if (debug) { log.Printf("Http Get on Z-Way: %s", url) }
req, err := http.NewRequest("GET",url,nil)
if err != nil {
return "", err
}
if zway_cookie != nil {
req.AddCookie(zway_cookie)
}
rsp, err := http_client.Do(req)
if err != nil {
return "", err
}
defer rsp.Body.Close()
bdy, err := ioutil.ReadAll(rsp.Body)
if err != nil {
return "", err
}
result := string(bdy)
return result, nil
}
func main() {
//start profiling
if len(profile) > 0 {
log.Print("Profiling enabled")
cfg := pfl.Config{}
if profile=="mem" || profile=="all" {
cfg.MemProfile = true
}
if profile=="cpu" || profile=="all" {
cfg.CPUProfile = true
}
defer pfl.Start(&cfg).Stop()
}
//print the information given
log.Print("Starting Z-Way to mqtt gateway...")
log.Printf("Z-Way server: %s", zway_server)
if len(zway_password) > 0 {
log.Printf("Z-Way user: %s", zway_username)
} else {
log.Print("Not using authentication as no password given.")
}
log.Printf("Z-Way refresh rate: %d", zway_refresh)
log.Printf("MQTT server: %s", mqtt_server)
//authenticate to zway
if len(zway_password) > 0 {
authzway()
}
//connect and subscribe to mqtt
//prepare
opts := MQTT.NewClientOptions()
opts.AddBroker(mqtt_protocol+"://"+mqtt_server)
opts.SetClientID("ZWayMQTT")
opts.SetDefaultPublishHandler(f)
opts.SetAutoReconnect(true)
if len(mqtt_username) > 0 && len(mqtt_password) > 0 {
opts.SetUsername(mqtt_username)
opts.SetPassword(mqtt_password)
}
//Connect
mqtt := MQTT.NewClient(opts)
if token := mqtt.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
//create the control channel
quit := make(chan struct{})
defer close(quit)
//create zway update channel
zway_updates := make(chan string,3)
defer close(zway_updates)
//create mqtt update channel
mqtt_updates := make(chan MqttUpdate,20)
defer close(mqtt_updates)
//create the zway refresh timer
refreshes := time.NewTicker(time.Second * time.Duration(zway_refresh)).C
//make initial refresh
zway_updates <- getzway()
//subscribe only when zway started
subject := zway_home + "/actuators/#"
if token := mqtt.Subscribe(subject, 1, nil); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
os.Exit(1)
}
//start refreshes
go func() {
for range refreshes {
update := getzway()
if len(update) > 0 {
// send the update we already fetched instead of polling Z-Way a second time
zway_updates <- update
} else {
log.Print("Got empty zwave response...")
if zway_retries < 3 {
log.Printf("Reinitializing Z-Way (retry %d of 3).", zway_retries+1)
authzway()
zway_retries++
} else {
log.Print("Already retried 3 times: stopping refreshes")
<-quit
return
}
}
}
}()
//start update parsing
go func() {
for zway_update := range zway_updates {
checkzwayupdate(zway_update,mqtt_updates)
}
}()
//start mqtt updating
go func() {
for mqtt_update := range mqtt_updates {
token := mqtt.Publish(mqtt_update.Topic, 1, true, mqtt_update.Value)
token.Wait()
}
}()
//block until asked to quit
<-quit
}
|
wayget(
|
infer_resnet_action_recognition.py
|
from ikomia import dataprocess
# --------------------
# - Interface class to integrate the process with Ikomia application
# - Inherits dataprocess.CPluginProcessInterface from Ikomia API
# --------------------
class IkomiaPlugin(dataprocess.CPluginProcessInterface):
def
|
(self):
dataprocess.CPluginProcessInterface.__init__(self)
def getProcessFactory(self):
from infer_resnet_action_recognition.infer_resnet_action_recognition_process import ResNetActionRecognitionFactory
# Instantiate process object
return ResNetActionRecognitionFactory()
def getWidgetFactory(self):
from infer_resnet_action_recognition.infer_resnet_action_recognition_widget import ResNetActionRecognitionWidgetFactory
# Instantiate associated widget object
return ResNetActionRecognitionWidgetFactory()
|
__init__
|
threescale.go
|
package conversion
import (
v1 "github.com/maistra/istio-operator/pkg/apis/maistra/v1"
v2 "github.com/maistra/istio-operator/pkg/apis/maistra/v2"
)
func populateThreeScaleAddonValues(threeScale *v2.ThreeScaleAddonConfig, values map[string]interface{}) (reterr error)
|
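// populateThreeScaleAddonConfig converts raw "3scale" Helm values into the
// typed v2 config; it reports whether any 3scale values were present.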
func populateThreeScaleAddonConfig(in *v1.HelmValues, out *v2.ThreeScaleAddonConfig) (bool, error) {
rawThreeScaleValues, ok, err := in.GetMap("3scale")
if err != nil {
return false, err
} else if !ok || len(rawThreeScaleValues) == 0 {
// nothing to do
return false, nil
}
threeScale := out
threeScaleValues := v1.NewHelmValues(rawThreeScaleValues)
if enabled, ok, err := threeScaleValues.GetBool("enabled"); ok {
threeScale.Enabled = &enabled
} else if err != nil {
return false, err
}
if rawListenAddr, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_LISTEN_ADDR"); ok {
listenAddr := int32(rawListenAddr)
threeScale.ListenAddr = &listenAddr
} else if err != nil {
return false, err
}
if logGRPC, ok, err := threeScaleValues.GetBool("PARAM_THREESCALE_LOG_GRPC"); ok {
threeScale.LogGRPC = &logGRPC
} else if err != nil {
return false, err
}
if logJSON, ok, err := threeScaleValues.GetBool("PARAM_THREESCALE_LOG_JSON"); ok {
threeScale.LogJSON = &logJSON
} else if err != nil {
return false, err
}
if logLevel, ok, err := threeScaleValues.GetString("PARAM_THREESCALE_LOG_LEVEL"); ok {
threeScale.LogLevel = logLevel
} else if err != nil {
return false, err
}
metrics := &v2.ThreeScaleMetricsConfig{}
setMetrics := false
if rawPort, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_METRICS_PORT"); ok {
port := int32(rawPort)
metrics.Port = &port
setMetrics = true
} else if err != nil {
return false, err
}
if report, ok, err := threeScaleValues.GetBool("PARAM_THREESCALE_REPORT_METRICS"); ok {
metrics.Report = &report
setMetrics = true
} else if err != nil {
return false, err
}
if setMetrics {
threeScale.Metrics = metrics
}
system := &v2.ThreeScaleSystemConfig{}
setSystem := false
if cacheMaxSize, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_CACHE_ENTRIES_MAX"); ok {
system.CacheMaxSize = &cacheMaxSize
setSystem = true
} else if err != nil {
return false, err
}
if rawCacheRefreshRetries, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_CACHE_REFRESH_RETRIES"); ok {
cacheRefreshRetries := int32(rawCacheRefreshRetries)
system.CacheRefreshRetries = &cacheRefreshRetries
setSystem = true
} else if err != nil {
return false, err
}
if rawCacheRefreshInterval, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_CACHE_REFRESH_SECONDS"); ok {
cacheRefreshInterval := int32(rawCacheRefreshInterval)
system.CacheRefreshInterval = &cacheRefreshInterval
setSystem = true
} else if err != nil {
return false, err
}
if rawCacheTTL, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_CACHE_TTL_SECONDS"); ok {
cacheTTL := int32(rawCacheTTL)
system.CacheTTL = &cacheTTL
setSystem = true
} else if err != nil {
return false, err
}
if setSystem {
threeScale.System = system
}
client := &v2.ThreeScaleClientConfig{}
setClient := false
if allowInsecureConnections, ok, err := threeScaleValues.GetBool("PARAM_THREESCALE_ALLOW_INSECURE_CONN"); ok {
client.AllowInsecureConnections = &allowInsecureConnections
setClient = true
} else if err != nil {
return false, err
}
if rawTimeout, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_CLIENT_TIMEOUT_SECONDS"); ok {
timeout := int32(rawTimeout)
client.Timeout = &timeout
setClient = true
} else if err != nil {
return false, err
}
if setClient {
threeScale.Client = client
}
if rawMaxConnTimeout, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_GRPC_CONN_MAX_SECONDS"); ok {
maxConnTimeout := int32(rawMaxConnTimeout)
threeScale.GRPC = &v2.ThreeScaleGRPCConfig{
MaxConnTimeout: &maxConnTimeout,
}
} else if err != nil {
return false, err
}
backend := &v2.ThreeScaleBackendConfig{}
setBackend := false
if enableCache, ok, err := threeScaleValues.GetBool("PARAM_THREESCALE_USE_CACHED_BACKEND"); ok {
backend.EnableCache = &enableCache
setBackend = true
} else if err != nil {
return false, err
}
if rawCacheFlushInterval, ok, err := threeScaleValues.GetInt64("PARAM_THREESCALE_BACKEND_CACHE_FLUSH_INTERVAL_SECONDS"); ok {
cacheFlushInterval := int32(rawCacheFlushInterval)
backend.CacheFlushInterval = &cacheFlushInterval
setBackend = true
} else if err != nil {
return false, err
}
if policyFailClosed, ok, err := threeScaleValues.GetBool("PARAM_THREESCALE_BACKEND_CACHE_POLICY_FAIL_CLOSED"); ok {
backend.PolicyFailClosed = &policyFailClosed
setBackend = true
} else if err != nil {
return false, err
}
if setBackend {
threeScale.Backend = backend
}
return true, nil
}
|
{
if threeScale == nil {
return nil
}
threeScaleValues := make(map[string]interface{})
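// Defer writing the accumulated values so the "3scale" key is only set when
// conversion succeeded and at least one value was populated.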
defer func() {
if reterr == nil {
if len(threeScaleValues) > 0 {
if err := setHelmValue(values, "3scale", threeScaleValues); err != nil {
reterr = err
}
}
}
}()
if threeScale.Enabled != nil {
if err := setHelmBoolValue(threeScaleValues, "enabled", *threeScale.Enabled); err != nil {
return err
}
}
if threeScale.ListenAddr != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_LISTEN_ADDR", int64(*threeScale.ListenAddr)); err != nil {
return err
}
}
if threeScale.LogGRPC != nil {
if err := setHelmBoolValue(threeScaleValues, "PARAM_THREESCALE_LOG_GRPC", *threeScale.LogGRPC); err != nil {
return err
}
}
if threeScale.LogJSON != nil {
if err := setHelmBoolValue(threeScaleValues, "PARAM_THREESCALE_LOG_JSON", *threeScale.LogJSON); err != nil {
return err
}
}
if threeScale.LogLevel != "" {
if err := setHelmStringValue(threeScaleValues, "PARAM_THREESCALE_LOG_LEVEL", threeScale.LogLevel); err != nil {
return err
}
}
if threeScale.Metrics != nil {
metrics := threeScale.Metrics
if metrics.Port != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_METRICS_PORT", int64(*metrics.Port)); err != nil {
return err
}
}
if metrics.Report != nil {
if err := setHelmBoolValue(threeScaleValues, "PARAM_THREESCALE_REPORT_METRICS", *metrics.Report); err != nil {
return err
}
}
}
if threeScale.System != nil {
system := threeScale.System
if system.CacheMaxSize != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_CACHE_ENTRIES_MAX", int64(*system.CacheMaxSize)); err != nil {
return err
}
}
if system.CacheRefreshRetries != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_CACHE_REFRESH_RETRIES", int64(*system.CacheRefreshRetries)); err != nil {
return err
}
}
if system.CacheRefreshInterval != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_CACHE_REFRESH_SECONDS", int64(*system.CacheRefreshInterval)); err != nil {
return err
}
}
if system.CacheTTL != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_CACHE_TTL_SECONDS", int64(*system.CacheTTL)); err != nil {
return err
}
}
}
if threeScale.Client != nil {
client := threeScale.Client
if client.AllowInsecureConnections != nil {
if err := setHelmBoolValue(threeScaleValues, "PARAM_THREESCALE_ALLOW_INSECURE_CONN", *client.AllowInsecureConnections); err != nil {
return err
}
}
if client.Timeout != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_CLIENT_TIMEOUT_SECONDS", int64(*client.Timeout)); err != nil {
return err
}
}
}
if threeScale.GRPC != nil {
grpc := threeScale.GRPC
if grpc.MaxConnTimeout != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_GRPC_CONN_MAX_SECONDS", int64(*grpc.MaxConnTimeout)); err != nil {
return err
}
}
}
if threeScale.Backend != nil {
backend := threeScale.Backend
if backend.EnableCache != nil {
if err := setHelmBoolValue(threeScaleValues, "PARAM_THREESCALE_USE_CACHED_BACKEND", *backend.EnableCache); err != nil {
return err
}
}
if backend.CacheFlushInterval != nil {
if err := setHelmIntValue(threeScaleValues, "PARAM_THREESCALE_BACKEND_CACHE_FLUSH_INTERVAL_SECONDS", int64(*backend.CacheFlushInterval)); err != nil {
return err
}
}
if backend.PolicyFailClosed != nil {
if err := setHelmBoolValue(threeScaleValues, "PARAM_THREESCALE_BACKEND_CACHE_POLICY_FAIL_CLOSED", *backend.PolicyFailClosed); err != nil {
return err
}
}
}
return nil
}
|
deployment_controller.go
|
package canary
import (
"crypto/rand"
"fmt"
"io"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
hpav1 "k8s.io/api/autoscaling/v2beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)
// DeploymentController manages the operations for the Kubernetes Deployment kind
type DeploymentController struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker ConfigTracker
labels []string
}
// Initialize creates the primary deployment and HPA and,
// when the canary is initializing, scales the canary deployment to zero
func (c *DeploymentController) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (err error) {
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
err = c.createPrimaryDeployment(cd)
if err != nil {
return fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
}
if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing {
if !skipLivenessChecks && !cd.Spec.SkipAnalysis {
_, readyErr := c.IsPrimaryReady(cd)
if readyErr != nil {
return readyErr
}
}
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
if err := c.Scale(cd, 0); err != nil {
return err
}
}
if cd.Spec.AutoscalerRef != nil && cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
if err := c.reconcilePrimaryHpa(cd, true); err != nil {
return fmt.Errorf("creating HorizontalPodAutoscaler %s.%s failed: %v", primaryName, cd.Namespace, err)
}
}
return nil
}
// Promote copies the pod spec, secrets and config maps from canary to primary
func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
label, err := c.getSelectorLabel(canary)
if err != nil {
return fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
targetName, cd.Namespace, targetName)
}
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
}
// promote secrets and config maps
configRefs, err := c.configTracker.GetTargetConfigs(cd)
if err != nil {
return err
}
if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
return err
}
primaryCopy := primary.DeepCopy()
primaryCopy.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
primaryCopy.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
primaryCopy.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
primaryCopy.Spec.Strategy = canary.Spec.Strategy
// update spec with primary secrets and config maps
primaryCopy.Spec.Template.Spec = c.configTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)
// update pod annotations to ensure a rolling update
annotations, err := c.makeAnnotations(canary.Spec.Template.Annotations)
if err != nil {
return err
}
primaryCopy.Spec.Template.Annotations = annotations
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label)
// apply update
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
if err != nil {
return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
primaryCopy.GetName(), primaryCopy.Namespace, err)
}
// update HPA
if cd.Spec.AutoscalerRef != nil && cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
if err := c.reconcilePrimaryHpa(cd, false); err != nil {
return fmt.Errorf("updating HorizontalPodAutoscaler %s.%s failed: %v", primaryName, cd.Namespace, err)
}
}
return nil
}
// HasTargetChanged returns true if the canary deployment pod spec has changed
func (c *DeploymentController) HasTargetChanged(cd *flaggerv1.Canary) (bool, error) {
targetName := cd.Spec.TargetRef.Name
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return false, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
return hasSpecChanged(cd, canary.Spec.Template)
}
// Scale sets the canary deployment replicas
func (c *DeploymentController) Scale(cd *flaggerv1.Canary, replicas int32) error {
targetName := cd.Spec.TargetRef.Name
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
depCopy := dep.DeepCopy()
depCopy.Spec.Replicas = int32p(replicas)
_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
if err != nil {
return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
}
return nil
}
func (c *DeploymentController) ScaleFromZero(cd *flaggerv1.Canary) error {
targetName := cd.Spec.TargetRef.Name
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
replicas := int32p(1)
if dep.Spec.Replicas != nil && *dep.Spec.Replicas > 0 {
replicas = dep.Spec.Replicas
}
depCopy := dep.DeepCopy()
depCopy.Spec.Replicas = replicas
_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
if err != nil {
return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
}
return nil
}
// GetMetadata returns the pod label selector and svc ports
func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) {
targetName := cd.Spec.TargetRef.Name
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return "", nil, fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
}
return "", nil, err
}
label, err := c.getSelectorLabel(canaryDep)
if err != nil {
return "", nil, fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
targetName, cd.Namespace, targetName)
}
var ports map[string]int32
if cd.Spec.Service.PortDiscovery {
p, err := c.getPorts(cd, canaryDep)
if err != nil {
return "", nil, fmt.Errorf("port discovery failed with error: %v", err)
}
ports = p
}
return label, ports, nil
}
func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) error {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err)
|
return err
}
label, err := c.getSelectorLabel(canaryDep)
if err != nil {
return fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
targetName, cd.Namespace, targetName)
}
primaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if errors.IsNotFound(err) {
// create primary secrets and config maps
configRefs, err := c.configTracker.GetTargetConfigs(cd)
if err != nil {
return err
}
if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
return err
}
annotations, err := c.makeAnnotations(canaryDep.Spec.Template.Annotations)
if err != nil {
return err
}
replicas := int32(1)
if canaryDep.Spec.Replicas != nil && *canaryDep.Spec.Replicas > 0 {
replicas = *canaryDep.Spec.Replicas
}
// create primary deployment
primaryDep = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: primaryName,
Namespace: cd.Namespace,
Labels: map[string]string{
label: primaryName,
},
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
Version: flaggerv1.SchemeGroupVersion.Version,
Kind: flaggerv1.CanaryKind,
}),
},
},
Spec: appsv1.DeploymentSpec{
ProgressDeadlineSeconds: canaryDep.Spec.ProgressDeadlineSeconds,
MinReadySeconds: canaryDep.Spec.MinReadySeconds,
RevisionHistoryLimit: canaryDep.Spec.RevisionHistoryLimit,
Replicas: int32p(replicas),
Strategy: canaryDep.Spec.Strategy,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
label: primaryName,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: makePrimaryLabels(canaryDep.Spec.Template.Labels, primaryName, label),
Annotations: annotations,
},
// update spec with the primary secrets and config maps
Spec: c.configTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
},
},
}
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Create(primaryDep)
if err != nil {
return err
}
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
}
return nil
}
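// reconcilePrimaryHpa mirrors the canary's HPA onto the primary deployment:
// on init it creates the "<name>-primary" HPA if it is missing, and on later
// calls it copies min/max replicas and metrics over when they have drifted.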
func (c *DeploymentController) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
hpa, err := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("HorizontalPodAutoscaler %s.%s not found, retrying",
cd.Spec.AutoscalerRef.Name, cd.Namespace)
}
return err
}
hpaSpec := hpav1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: hpav1.CrossVersionObjectReference{
Name: primaryName,
Kind: hpa.Spec.ScaleTargetRef.Kind,
APIVersion: hpa.Spec.ScaleTargetRef.APIVersion,
},
MinReplicas: hpa.Spec.MinReplicas,
MaxReplicas: hpa.Spec.MaxReplicas,
Metrics: hpa.Spec.Metrics,
}
primaryHpaName := fmt.Sprintf("%s-primary", cd.Spec.AutoscalerRef.Name)
primaryHpa, err := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(primaryHpaName, metav1.GetOptions{})
// create HPA
if errors.IsNotFound(err) {
primaryHpa = &hpav1.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: primaryHpaName,
Namespace: cd.Namespace,
Labels: hpa.Labels,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
Version: flaggerv1.SchemeGroupVersion.Version,
Kind: flaggerv1.CanaryKind,
}),
},
},
Spec: hpaSpec,
}
_, err = c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Create(primaryHpa)
if err != nil {
return err
}
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
return nil
}
if err != nil {
return err
}
// update HPA
if !init && primaryHpa != nil {
diff := cmp.Diff(hpaSpec.Metrics, primaryHpa.Spec.Metrics)
if diff != "" || int32Default(hpaSpec.MinReplicas) != int32Default(primaryHpa.Spec.MinReplicas) || hpaSpec.MaxReplicas != primaryHpa.Spec.MaxReplicas {
hpaClone := primaryHpa.DeepCopy()
hpaClone.Spec.MaxReplicas = hpaSpec.MaxReplicas
hpaClone.Spec.MinReplicas = hpaSpec.MinReplicas
hpaClone.Spec.Metrics = hpaSpec.Metrics
_, upErr := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Update(hpaClone)
if upErr != nil {
return upErr
}
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s updated", primaryHpa.GetName(), cd.Namespace)
}
}
return nil
}
// makeAnnotations appends a unique ID to the annotations map
func (c *DeploymentController) makeAnnotations(annotations map[string]string) (map[string]string, error) {
idKey := "flagger-id"
res := make(map[string]string)
uuid := make([]byte, 16)
n, err := io.ReadFull(rand.Reader, uuid)
if n != len(uuid) || err != nil {
return res, err
}
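// Stamp the RFC 4122 variant bits (10xx) into byte 8 and the version bits
// (0100, i.e. version 4, random) into byte 6 so the ID is a well-formed UUIDv4.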
uuid[8] = uuid[8]&^0xc0 | 0x80
uuid[6] = uuid[6]&^0xf0 | 0x40
id := fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
for k, v := range annotations {
if k != idKey {
res[k] = v
}
}
res[idKey] = id
return res, nil
}
// getSelectorLabel returns the selector match label
func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (string, error) {
for _, l := range c.labels {
if _, ok := deployment.Spec.Selector.MatchLabels[l]; ok {
return l, nil
}
}
return "", fmt.Errorf("selector not found")
}
var sidecars = map[string]bool{
"istio-proxy": true,
"envoy": true,
}
func (c *DeploymentController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) {
return c.configTracker.HasConfigChanged(cd)
}
// getPorts returns a list of all container ports
func (c *DeploymentController) getPorts(cd *flaggerv1.Canary, deployment *appsv1.Deployment) (map[string]int32, error) {
ports := make(map[string]int32)
for _, container := range deployment.Spec.Template.Spec.Containers {
// exclude service mesh proxies based on container name
if _, ok := sidecars[container.Name]; ok {
continue
}
for i, p := range container.Ports {
// exclude canary.service.port or canary.service.targetPort
if cd.Spec.Service.TargetPort.String() == "0" {
if p.ContainerPort == cd.Spec.Service.Port {
continue
}
} else {
if cd.Spec.Service.TargetPort.Type == intstr.Int {
if p.ContainerPort == cd.Spec.Service.TargetPort.IntVal {
continue
}
}
if cd.Spec.Service.TargetPort.Type == intstr.String {
if p.Name == cd.Spec.Service.TargetPort.StrVal {
continue
}
}
}
name := fmt.Sprintf("tcp-%s-%v", container.Name, i)
if p.Name != "" {
name = p.Name
}
ports[name] = p.ContainerPort
}
}
return ports, nil
}
func makePrimaryLabels(labels map[string]string, primaryName string, label string) map[string]string {
res := make(map[string]string)
for k, v := range labels {
if k != label {
res[k] = v
}
}
res[label] = primaryName
return res
}
func int32p(i int32) *int32 {
return &i
}
func int32Default(i *int32) int32 {
if i == nil {
return 1
}
return *i
}
|
{
return fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
}
|
_classes.py
|
from typing import NamedTuple
import aiohttp as _aiohttp
Number = int | float
class ShortLong(NamedTuple):
"""Represents shorthand and longhand of a unit."""
short: str
"""Shorthand form, eg '°C'"""
long: str
"""Longhandform, eg 'Celsius'"""
class _AutomaticClient:
client: _aiohttp.ClientSession
appid: str | None
def __init__(self, client: _aiohttp.ClientSession | None = None, appid: str | None = None) -> None:
self.client = client or _aiohttp.ClientSession()
self.appid = appid or None
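# Usage sketch (hypothetical appid value): entering the context yields the
# client and exiting closes the underlying aiohttp session, e.g.
#   async with _AutomaticClient(appid="KEY") as client: ...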
async def __aenter__(self, *args):
r
|
async def __aexit__(self, *args):
await self.client.close()
# no return value here: a truthy return from __aexit__ would suppress exceptions
|
eturn self
|
shared.service.js
|
(function() {
'use strict';
/**
* @ngdoc service
* @name Shared
* @module bltDocs.main
*/
function Shared() {
this.sidebarData = [];
this.showSidebar = false;
|
angular
.module('main')
.service('Shared', Shared);
Shared.$inject = [];
})();
|
this.currentPage = '';
}
|
expect.rs
|
use std::fmt;
use std::str;
use unicase;
use header::{Header, Raw};
/// The `Expect` header.
///
/// > The "Expect" header field in a request indicates a certain set of
/// > behaviors (expectations) that need to be supported by the server in
/// > order to properly handle this request. The only such expectation
/// > defined by this specification is 100-continue.
/// >
/// > Expect = "100-continue"
///
/// # Example
/// ```
/// use hyper::header::{Headers, Expect};
/// let mut headers = Headers::new();
/// headers.set(Expect::Continue);
/// ```
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Expect {
/// The value `100-continue`.
Continue
}
impl Header for Expect {
fn header_name() -> &'static str {
static NAME: &'static str = "Expect";
NAME
}
fn parse_header(raw: &Raw) -> ::Result<Expect> {
if let Some(line) = raw.one() {
let text = unsafe {
// safe because:
// 1. we don't actually care if it's utf8, we just want to
// compare the bytes with the "case" normalized. If it's not
// utf8, then the byte comparison will fail, and we'll return
// an error. No big deal.
str::from_utf8_unchecked(line)
};
if unicase::eq_ascii(text, "100-continue") {
Ok(Expect::Continue)
} else {
Err(::Error::Header)
}
} else {
Err(::Error::Header)
}
}
fn fmt_header(&self, f: &mut ::header::Formatter) -> fmt::Result {
f.fmt_line(self)
}
}
impl fmt::Display for Expect {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
|
{
f.write_str("100-continue")
}
|
iface_manager_api.rs
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{
access_point::{state_machine as ap_fsm, types as ap_types},
client::types as client_types,
mode_management::iface_manager_types::*,
regulatory_manager::REGION_CODE_LEN,
},
anyhow::Error,
async_trait::async_trait,
fidl_fuchsia_wlan_sme as fidl_sme,
futures::channel::{mpsc, oneshot},
ieee80211::Ssid,
};
#[async_trait]
pub trait IfaceManagerApi {
/// Finds the client iface with the given network configuration, disconnects from the network,
/// and removes the client's network configuration information.
async fn disconnect(
&mut self,
network_id: ap_types::NetworkIdentifier,
reason: client_types::DisconnectReason,
) -> Result<(), Error>;
/// Selects a client iface, ensures that a ClientSmeProxy and client connectivity state machine
/// exists for the iface, and then issues a connect request to the client connectivity state
/// machine.
async fn connect(
&mut self,
connect_req: client_types::ConnectRequest,
) -> Result<oneshot::Receiver<()>, Error>;
/// Marks an existing client interface as unconfigured.
async fn record_idle_client(&mut self, iface_id: u16) -> Result<(), Error>;
/// Returns an indication of whether or not any client interfaces are unconfigured.
async fn has_idle_client(&mut self) -> Result<bool, Error>;
/// Queries the properties of the provided interface ID and internally accounts for the newly
/// added client or AP.
async fn handle_added_iface(&mut self, iface_id: u16) -> Result<(), Error>;
/// Removes all internal references of the provided interface ID.
async fn handle_removed_iface(&mut self, iface_id: u16) -> Result<(), Error>;
/// Selects a client iface and issues a scan request. On success, the `ScanTransactionProxy`
/// is returned to the caller so that the scan results can be monitored.
async fn scan(
&mut self,
scan_request: fidl_sme::ScanRequest,
) -> Result<fidl_sme::ScanTransactionProxy, Error>;
/// Selects a client iface and returns it for use with a scan
async fn get_sme_proxy_for_scan(
&mut self,
) -> Result<fidl_fuchsia_wlan_sme::ClientSmeProxy, Error>;
/// Disconnects all configured clients and disposes of all client ifaces before instructing
/// the PhyManager to stop client connections.
async fn stop_client_connections(
&mut self,
reason: client_types::DisconnectReason,
) -> Result<(), Error>;
/// Passes the call to start client connections through to the PhyManager.
async fn start_client_connections(&mut self) -> Result<(), Error>;
/// Starts an AP interface with the provided configuration.
async fn start_ap(&mut self, config: ap_fsm::ApConfig) -> Result<oneshot::Receiver<()>, Error>;
/// Stops the AP interface corresponding to the provided configuration and destroys it.
async fn stop_ap(&mut self, ssid: Ssid, password: Vec<u8>) -> Result<(), Error>;
/// Stops all AP interfaces and destroys them.
async fn stop_all_aps(&mut self) -> Result<(), Error>;
/// Returns whether or not there is an iface that can support a WPA3 connection.
async fn has_wpa3_capable_client(&mut self) -> Result<bool, Error>;
/// Sets the country code for WLAN PHYs.
async fn set_country(
&mut self,
country_code: Option<[u8; REGION_CODE_LEN]>,
) -> Result<(), Error>;
}
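/// Handle used to reach the iface manager service loop. Every method in the
/// trait impl below packages its arguments together with a oneshot responder
/// into an IfaceManagerRequest, pushes it over the mpsc channel, and awaits
/// the reply; because the handle is Clone, many callers can share the single
/// task that owns the underlying ifaces.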
#[derive(Clone)]
pub struct IfaceManager {
pub sender: mpsc::Sender<IfaceManagerRequest>,
}
#[async_trait]
impl IfaceManagerApi for IfaceManager {
async fn disconnect(
&mut self,
network_id: ap_types::NetworkIdentifier,
reason: client_types::DisconnectReason,
) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = DisconnectRequest { network_id, responder, reason };
self.sender.try_send(IfaceManagerRequest::Disconnect(req))?;
receiver.await?
}
async fn connect(
&mut self,
connect_req: client_types::ConnectRequest,
) -> Result<oneshot::Receiver<()>, Error> {
let (responder, receiver) = oneshot::channel();
let req = ConnectRequest { request: connect_req, responder };
self.sender.try_send(IfaceManagerRequest::Connect(req))?;
receiver.await?
}
async fn record_idle_client(&mut self, iface_id: u16) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = RecordIdleIfaceRequest { iface_id, responder };
self.sender.try_send(IfaceManagerRequest::RecordIdleIface(req))?;
receiver.await?;
Ok(())
}
async fn has_idle_client(&mut self) -> Result<bool, Error> {
let (responder, receiver) = oneshot::channel();
let req = HasIdleIfaceRequest { responder };
self.sender.try_send(IfaceManagerRequest::HasIdleIface(req))?;
receiver.await.map_err(|e| e.into())
}
async fn handle_added_iface(&mut self, iface_id: u16) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = AddIfaceRequest { iface_id, responder };
self.sender.try_send(IfaceManagerRequest::AddIface(req))?;
receiver.await?;
Ok(())
}
async fn handle_removed_iface(&mut self, iface_id: u16) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = RemoveIfaceRequest { iface_id, responder };
self.sender.try_send(IfaceManagerRequest::RemoveIface(req))?;
receiver.await?;
Ok(())
}
async fn scan(
&mut self,
scan_request: fidl_sme::ScanRequest,
) -> Result<fidl_sme::ScanTransactionProxy, Error> {
let (responder, receiver) = oneshot::channel();
let req = ScanRequest { scan_request, responder };
self.sender.try_send(IfaceManagerRequest::Scan(req))?;
receiver.await?
}
async fn get_sme_proxy_for_scan(
&mut self,
) -> Result<fidl_fuchsia_wlan_sme::ClientSmeProxy, Error> {
let (responder, receiver) = oneshot::channel();
let req = ScanProxyRequest { responder };
self.sender.try_send(IfaceManagerRequest::GetScanProxy(req))?;
receiver.await?
}
async fn stop_client_connections(
&mut self,
reason: client_types::DisconnectReason,
) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = StopClientConnectionsRequest { responder, reason };
self.sender.try_send(IfaceManagerRequest::StopClientConnections(req))?;
receiver.await?
}
async fn start_client_connections(&mut self) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = StartClientConnectionsRequest { responder };
self.sender.try_send(IfaceManagerRequest::StartClientConnections(req))?;
receiver.await?
}
async fn start_ap(&mut self, config: ap_fsm::ApConfig) -> Result<oneshot::Receiver<()>, Error> {
let (responder, receiver) = oneshot::channel();
let req = StartApRequest { config, responder };
self.sender.try_send(IfaceManagerRequest::StartAp(req))?;
receiver.await?
}
async fn stop_ap(&mut self, ssid: Ssid, password: Vec<u8>) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = StopApRequest { ssid, password, responder };
self.sender.try_send(IfaceManagerRequest::StopAp(req))?;
receiver.await?
}
async fn stop_all_aps(&mut self) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = StopAllApsRequest { responder };
self.sender.try_send(IfaceManagerRequest::StopAllAps(req))?;
receiver.await?
}
async fn has_wpa3_capable_client(&mut self) -> Result<bool, Error> {
let (responder, receiver) = oneshot::channel();
let req = HasWpa3IfaceRequest { responder };
self.sender.try_send(IfaceManagerRequest::HasWpa3Iface(req))?;
Ok(receiver.await?)
}
async fn set_country(
&mut self,
country_code: Option<[u8; REGION_CODE_LEN]>,
) -> Result<(), Error> {
let (responder, receiver) = oneshot::channel();
let req = SetCountryRequest { country_code, responder };
self.sender.try_send(IfaceManagerRequest::SetCountry(req))?;
receiver.await?
}
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::{access_point::types, config_management::network_config::Credential},
anyhow::format_err,
fidl::endpoints::create_proxy,
fuchsia_async as fasync,
futures::{future::BoxFuture, task::Poll, StreamExt},
pin_utils::pin_mut,
test_case::test_case,
wlan_common::{
assert_variant,
channel::{Cbw, Phy},
RadioConfig,
},
};
struct TestValues {
exec: fasync::TestExecutor,
iface_manager: IfaceManager,
receiver: mpsc::Receiver<IfaceManagerRequest>,
}
fn test_setup() -> TestValues {
let exec = fasync::TestExecutor::new().expect("failed to create an executor");
let (sender, receiver) = mpsc::channel(1);
TestValues { exec, iface_manager: IfaceManager { sender }, receiver }
}
#[derive(Clone)]
enum NegativeTestFailureMode {
RequestFailure,
OperationFailure,
ServiceFailure,
}
fn handle_negative_test_result_responder<T: std::fmt::Debug>(
responder: oneshot::Sender<Result<T, Error>>,
failure_mode: NegativeTestFailureMode,
) {
match failure_mode {
NegativeTestFailureMode::RequestFailure => {
panic!("Test bug: this request should have been handled previously")
}
NegativeTestFailureMode::OperationFailure => {
responder
.send(Err(format_err!("operation failed")))
.expect("failed to send response");
}
NegativeTestFailureMode::ServiceFailure => {
// Just drop the responder so that the client side sees a failure.
drop(responder);
}
}
}
fn handle_negative_test_responder<T: std::fmt::Debug>(
responder: oneshot::Sender<T>,
failure_mode: NegativeTestFailureMode,
) {
match failure_mode {
NegativeTestFailureMode::RequestFailure | NegativeTestFailureMode::OperationFailure => {
panic!("Test bug: invalid operation")
}
NegativeTestFailureMode::ServiceFailure => {
// Just drop the responder so that the client side sees a failure.
drop(responder);
}
}
}
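// Service-side driver for the negative tests: RequestFailure drops the
// receiver so the send itself fails, OperationFailure answers with an Err,
// and ServiceFailure drops the responder so the awaited oneshot is canceled.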
fn iface_manager_api_negative_test(
mut receiver: mpsc::Receiver<IfaceManagerRequest>,
failure_mode: NegativeTestFailureMode,
) -> BoxFuture<'static, ()> {
match failure_mode {
NegativeTestFailureMode::RequestFailure => {
// Drop the receiver so that no requests can be made.
drop(receiver);
let fut = async move {};
return Box::pin(fut);
}
_ => {}
}
let fut = async move {
let req = match receiver.next().await {
Some(req) => req,
None => panic!("no request available."),
};
match req {
IfaceManagerRequest::Disconnect(DisconnectRequest { responder, .. })
| IfaceManagerRequest::StopClientConnections(StopClientConnectionsRequest {
responder,
..
})
| IfaceManagerRequest::StartClientConnections(StartClientConnectionsRequest {
responder,
})
| IfaceManagerRequest::StopAp(StopApRequest { responder, .. })
| IfaceManagerRequest::StopAllAps(StopAllApsRequest { responder, .. })
| IfaceManagerRequest::SetCountry(SetCountryRequest { responder, .. }) => {
handle_negative_test_result_responder(responder, failure_mode);
}
IfaceManagerRequest::Connect(ConnectRequest { responder, .. }) => {
handle_negative_test_result_responder(responder, failure_mode);
}
IfaceManagerRequest::Scan(ScanRequest { responder, .. }) => {
handle_negative_test_result_responder(responder, failure_mode);
}
IfaceManagerRequest::GetScanProxy(ScanProxyRequest { responder }) => {
handle_negative_test_result_responder(responder, failure_mode);
}
IfaceManagerRequest::StartAp(StartApRequest { responder, .. }) => {
handle_negative_test_result_responder(responder, failure_mode);
}
IfaceManagerRequest::RecordIdleIface(RecordIdleIfaceRequest {
responder, ..
})
| IfaceManagerRequest::AddIface(AddIfaceRequest { responder, .. })
| IfaceManagerRequest::RemoveIface(RemoveIfaceRequest { responder, .. }) => {
handle_negative_test_responder(responder, failure_mode);
}
IfaceManagerRequest::HasIdleIface(HasIdleIfaceRequest { responder }) => {
handle_negative_test_responder(responder, failure_mode);
}
IfaceManagerRequest::HasWpa3Iface(HasWpa3IfaceRequest { responder }) => {
handle_negative_test_responder(responder, failure_mode);
}
}
};
Box::pin(fut)
}
#[fuchsia::test]
fn test_disconnect_succeeds() {
let mut test_values = test_setup();
// Issue a disconnect command and wait for the command to be sent.
let req = ap_types::NetworkIdentifier {
ssid: Ssid::from("foo"),
security_type: ap_types::SecurityType::None,
};
let req_reason = client_types::DisconnectReason::NetworkUnsaved;
let disconnect_fut = test_values.iface_manager.disconnect(req.clone(), req_reason);
pin_mut!(disconnect_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut disconnect_fut), Poll::Pending);
// Verify that the receiver sees the command and sends back a response.
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::Disconnect(DisconnectRequest {
network_id, responder, reason
}))) => {
assert_eq!(network_id, req);
assert_eq!(reason, req_reason);
responder.send(Ok(())).expect("failed to send disconnect response");
}
);
// Verify that the disconnect requester receives the response.
assert_variant!(
test_values.exec.run_until_stalled(&mut disconnect_fut),
Poll::Ready(Ok(()))
);
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn disconnect_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Issue a disconnect command and wait for the command to be sent.
let req = ap_types::NetworkIdentifier {
ssid: Ssid::from("foo"),
security_type: ap_types::SecurityType::None,
};
let disconnect_fut = test_values
.iface_manager
.disconnect(req.clone(), client_types::DisconnectReason::NetworkUnsaved);
pin_mut!(disconnect_fut);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut disconnect_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the disconnect requester receives the response.
assert_variant!(
test_values.exec.run_until_stalled(&mut disconnect_fut),
Poll::Ready(Err(_))
);
}
#[fuchsia::test]
fn test_connect_succeeds() {
let mut test_values = test_setup();
// Issue a connect command and wait for the command to be sent.
let req = client_types::ConnectRequest {
target: client_types::ConnectionCandidate {
network: client_types::NetworkIdentifier {
ssid: Ssid::from("foo"),
security_type: client_types::SecurityType::None,
},
credential: Credential::None,
bss_description: None,
observed_in_passive_scan: None,
multiple_bss_candidates: None,
},
reason: client_types::ConnectReason::FidlConnectRequest,
};
let connect_fut = test_values.iface_manager.connect(req.clone());
pin_mut!(connect_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut connect_fut), Poll::Pending);
// Verify that the receiver sees the command and sends back a response.
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::Connect(ConnectRequest {
request, responder
}))) => {
assert_eq!(request, req);
let (_, receiver) = oneshot::channel();
responder.send(Ok(receiver)).expect("failed to send connect response");
}
);
// Verify that the connect requester receives the response.
assert_variant!(test_values.exec.run_until_stalled(&mut connect_fut), Poll::Ready(Ok(_)));
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn connect_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Issue a connect command and wait for the command to be sent.
let req = client_types::ConnectRequest {
target: client_types::ConnectionCandidate {
network: client_types::NetworkIdentifier {
ssid: Ssid::from("foo"),
security_type: client_types::SecurityType::None,
},
credential: Credential::None,
bss_description: None,
observed_in_passive_scan: None,
multiple_bss_candidates: None,
},
reason: client_types::ConnectReason::FidlConnectRequest,
};
let connect_fut = test_values.iface_manager.connect(req.clone());
pin_mut!(connect_fut);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut connect_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the request completes in error.
assert_variant!(test_values.exec.run_until_stalled(&mut connect_fut), Poll::Ready(Err(_)));
}
#[fuchsia::test]
fn test_record_idle_client_succeeds() {
let mut test_values = test_setup();
// Request that an idle client be recorded.
let iface_id = 123;
let idle_client_fut = test_values.iface_manager.record_idle_client(iface_id);
pin_mut!(idle_client_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut idle_client_fut), Poll::Pending);
// Verify that the receiver sees the request.
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(
Some(IfaceManagerRequest::RecordIdleIface(RecordIdleIfaceRequest{ iface_id: 123, responder}))
) => {
responder.send(()).expect("failed to send idle iface response");
}
);
// Verify that the client sees the response.
assert_variant!(
test_values.exec.run_until_stalled(&mut idle_client_fut),
Poll::Ready(Ok(()))
);
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn test_record_idle_client_service_failure(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Request that an idle client be recorded.
let iface_id = 123;
let idle_client_fut = test_values.iface_manager.record_idle_client(iface_id);
pin_mut!(idle_client_fut);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut idle_client_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the client side finishes
assert_variant!(
test_values.exec.run_until_stalled(&mut idle_client_fut),
Poll::Ready(Err(_))
);
}
#[fuchsia::test]
fn test_has_idle_client_success() {
let mut test_values = test_setup();
// Query whether there is an idle client
let idle_client_fut = test_values.iface_manager.has_idle_client();
pin_mut!(idle_client_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut idle_client_fut), Poll::Pending);
// Verify that the service sees the query
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(
Some(IfaceManagerRequest::HasIdleIface(HasIdleIfaceRequest{ responder}))
) => responder.send(true).expect("failed to reply to idle client query")
);
// Verify that the client side finishes
assert_variant!(
test_values.exec.run_until_stalled(&mut idle_client_fut),
Poll::Ready(Ok(true))
);
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn idle_client_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Query whether there is an idle client
let idle_client_fut = test_values.iface_manager.has_idle_client();
pin_mut!(idle_client_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut idle_client_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut idle_client_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the request completes in error.
assert_variant!(
test_values.exec.run_until_stalled(&mut idle_client_fut),
Poll::Ready(Err(_))
);
}
#[fuchsia::test]
fn test_add_iface_success() {
let mut test_values = test_setup();
// Add an interface
let added_iface_fut = test_values.iface_manager.handle_added_iface(123);
pin_mut!(added_iface_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut added_iface_fut), Poll::Pending);
// Verify that the service sees the query
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(
Some(IfaceManagerRequest::AddIface(AddIfaceRequest{ iface_id: 123, responder }))
) => {
responder.send(()).expect("failed to respond while adding iface");
}
);
// Verify that the client side finishes
assert_variant!(
test_values.exec.run_until_stalled(&mut added_iface_fut),
Poll::Ready(Ok(()))
);
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn add_iface_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Add an interface
let added_iface_fut = test_values.iface_manager.handle_added_iface(123);
pin_mut!(added_iface_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut added_iface_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut added_iface_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the request completes in error.
assert_variant!(
test_values.exec.run_until_stalled(&mut added_iface_fut),
Poll::Ready(Err(_))
);
}
#[fuchsia::test]
fn test_remove_iface_success() {
let mut test_values = test_setup();
// Report the removal of an interface.
let removed_iface_fut = test_values.iface_manager.handle_removed_iface(123);
pin_mut!(removed_iface_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut removed_iface_fut), Poll::Pending);
// Verify that the service sees the query
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(
Some(IfaceManagerRequest::RemoveIface(RemoveIfaceRequest{ iface_id: 123, responder }))
) => {
responder.send(()).expect("failed to respond while adding iface");
}
);
// Verify that the client side finishes
assert_variant!(
test_values.exec.run_until_stalled(&mut removed_iface_fut),
Poll::Ready(Ok(()))
);
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn remove_iface_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Report the removal of an interface.
let removed_iface_fut = test_values.iface_manager.handle_removed_iface(123);
pin_mut!(removed_iface_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut removed_iface_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut removed_iface_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the client side finishes
assert_variant!(
test_values.exec.run_until_stalled(&mut removed_iface_fut),
Poll::Ready(Err(_))
);
}
#[fuchsia::test]
fn test_scan_success() {
let mut test_values = test_setup();
// Request a scan
let scan_fut = test_values
.iface_manager
.scan(fidl_sme::ScanRequest::Passive(fidl_sme::PassiveScanRequest {}));
pin_mut!(scan_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut scan_fut), Poll::Pending);
// Verify that the service sees the request.
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::Scan(ScanRequest{
scan_request: _,
responder
}))) => {
let (proxy, _) = create_proxy::<fidl_sme::ScanTransactionMarker>()
.expect("failed to create scan proxy");
responder.send(Ok(proxy)).expect("failed to send scan proxy");
}
);
// Verify that the client side gets the scan proxy
assert_variant!(test_values.exec.run_until_stalled(&mut scan_fut), Poll::Ready(Ok(_)));
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn scan_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Request a scan
let scan_fut = test_values
.iface_manager
.scan(fidl_sme::ScanRequest::Passive(fidl_sme::PassiveScanRequest {}));
pin_mut!(scan_fut);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(test_values.exec.run_until_stalled(&mut scan_fut), Poll::Pending);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that an error is returned.
assert_variant!(test_values.exec.run_until_stalled(&mut scan_fut), Poll::Ready(Err(_)));
}
#[fuchsia::test]
fn test_get_scan_proxy_success() {
let mut test_values = test_setup();
// Request an SME proxy for scans
let scan_proxy_fut = test_values.iface_manager.get_sme_proxy_for_scan();
pin_mut!(scan_proxy_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut scan_proxy_fut), Poll::Pending);
// Verify that the service sees the request.
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::GetScanProxy(ScanProxyRequest{
responder
}))) => {
let (proxy, _) = create_proxy::<fidl_sme::ClientSmeMarker>()
.expect("failed to create scan sme proxy");
responder.send(Ok(proxy)).expect("failed to send scan sme proxy");
}
);
// Verify that the client side gets the scan proxy
assert_variant!(
test_values.exec.run_until_stalled(&mut scan_proxy_fut),
Poll::Ready(Ok(_))
);
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn scan_proxy_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Request an SME proxy for scans
let scan_proxy_fut = test_values.iface_manager.get_sme_proxy_for_scan();
pin_mut!(scan_proxy_fut);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut scan_proxy_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that an error is returned.
assert_variant!(
test_values.exec.run_until_stalled(&mut scan_proxy_fut),
Poll::Ready(Err(_))
);
}
#[fuchsia::test]
fn test_stop_client_connections_succeeds() {
let mut test_values = test_setup();
// Request that client connections be stopped
let stop_fut = test_values.iface_manager.stop_client_connections(
client_types::DisconnectReason::FidlStopClientConnectionsRequest,
);
pin_mut!(stop_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
// Verify that the service sees the request.
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::StopClientConnections(StopClientConnectionsRequest{
responder, reason
}))) => {
assert_eq!(reason, client_types::DisconnectReason::FidlStopClientConnectionsRequest);
responder.send(Ok(())).expect("failed sending stop client connections response");
}
);
// Verify that the client side gets the response.
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Ready(Ok(())));
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn stop_client_connections_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Request that client connections be stopped
let stop_fut = test_values.iface_manager.stop_client_connections(
client_types::DisconnectReason::FidlStopClientConnectionsRequest,
);
pin_mut!(stop_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the client side gets the response.
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Ready(Err(_)));
}
#[fuchsia::test]
fn test_start_client_connections_succeeds()
|
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn start_client_connections_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Start client connections
let start_fut = test_values.iface_manager.start_client_connections();
pin_mut!(start_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Pending);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the client side gets the response.
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Ready(Err(_)));
}
fn create_ap_config() -> ap_fsm::ApConfig {
ap_fsm::ApConfig {
id: types::NetworkIdentifier {
ssid: Ssid::from("foo"),
security_type: types::SecurityType::None,
},
credential: vec![],
radio_config: RadioConfig::new(Phy::Ht, Cbw::Cbw20, 6),
mode: types::ConnectivityMode::Unrestricted,
band: types::OperatingBand::Any,
}
}
#[fuchsia::test]
fn test_start_ap_succeeds() {
let mut test_values = test_setup();
// Start an AP
let start_fut = test_values.iface_manager.start_ap(create_ap_config());
pin_mut!(start_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Pending);
// Verify the service sees the request
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::StartAp(StartApRequest{
config, responder
}))) => {
assert_eq!(config, create_ap_config());
let (_, receiver) = oneshot::channel();
responder.send(Ok(receiver)).expect("failed to send start AP response");
}
);
// Verify that the client gets the response
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Ready(Ok(_)));
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn start_ap_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Start an AP
let start_fut = test_values.iface_manager.start_ap(create_ap_config());
pin_mut!(start_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Pending);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the client gets the response
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Ready(Err(_)));
}
#[fuchsia::test]
fn test_stop_ap_succeeds() {
let mut test_values = test_setup();
// Stop an AP
let stop_fut =
test_values.iface_manager.stop_ap(Ssid::from("foo"), "bar".as_bytes().to_vec());
pin_mut!(stop_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
// Verify the service sees the request
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::StopAp(StopApRequest{
ssid, password, responder
}))) => {
assert_eq!(ssid, Ssid::from("foo"));
assert_eq!(password, "bar".as_bytes().to_vec());
responder.send(Ok(())).expect("failed to send stop AP response");
}
);
// Verify that the client gets the response
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Ready(Ok(_)));
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn stop_ap_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Stop an AP
let stop_fut =
test_values.iface_manager.stop_ap(Ssid::from("foo"), "bar".as_bytes().to_vec());
pin_mut!(stop_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the client gets the response
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Ready(Err(_)));
}
#[fuchsia::test]
fn test_stop_all_aps_succeeds() {
let mut test_values = test_setup();
// Stop all APs
let stop_fut = test_values.iface_manager.stop_all_aps();
pin_mut!(stop_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
// Verify the service sees the request
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::StopAllAps(StopAllApsRequest{
responder
}))) => {
responder.send(Ok(())).expect("failed to send stop AP response");
}
);
// Verify that the client gets the response
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Ready(Ok(_)));
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn stop_all_aps_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Stop all APs
let stop_fut = test_values.iface_manager.stop_all_aps();
pin_mut!(stop_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Pending);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the client gets the response
assert_variant!(test_values.exec.run_until_stalled(&mut stop_fut), Poll::Ready(Err(_)));
}
#[fuchsia::test]
fn test_has_wpa3_capable_client_success() {
let mut test_values = test_setup();
// Query whether there is an iface that can do WPA3.
let has_wpa3_fut = test_values.iface_manager.has_wpa3_capable_client();
pin_mut!(has_wpa3_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut has_wpa3_fut), Poll::Pending);
// Verify that the service sees the query
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(
Some(IfaceManagerRequest::HasWpa3Iface(HasWpa3IfaceRequest{ responder}))
) => responder.send(true).expect("failed to reply to wpa3 iface query")
);
// Verify that the client side finishes
assert_variant!(
test_values.exec.run_until_stalled(&mut has_wpa3_fut),
Poll::Ready(Ok(true))
);
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn has_wpa3_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Query whether there is an iface with WPA3 support
let has_wpa3_fut = test_values.iface_manager.has_wpa3_capable_client();
pin_mut!(has_wpa3_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut has_wpa3_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut has_wpa3_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the request completes in error.
assert_variant!(test_values.exec.run_until_stalled(&mut has_wpa3_fut), Poll::Ready(Err(_)));
}
#[fuchsia::test]
fn test_set_country_succeeds() {
let mut test_values = test_setup();
// Set country code
let set_country_fut = test_values.iface_manager.set_country(None);
pin_mut!(set_country_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut set_country_fut), Poll::Pending);
// Verify the service sees the request
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::SetCountry(SetCountryRequest{
country_code: None,
responder
}))) => {
responder.send(Ok(())).expect("failed to send stop AP response");
}
);
// Verify that the client gets the response
assert_variant!(
test_values.exec.run_until_stalled(&mut set_country_fut),
Poll::Ready(Ok(_))
);
}
#[test_case(NegativeTestFailureMode::RequestFailure; "request failure")]
#[test_case(NegativeTestFailureMode::OperationFailure; "operation failure")]
#[test_case(NegativeTestFailureMode::ServiceFailure; "service failure")]
#[fuchsia::test(add_test_attr = false)]
fn set_country_negative_test(failure_mode: NegativeTestFailureMode) {
let mut test_values = test_setup();
// Set country code
let set_country_fut = test_values.iface_manager.set_country(None);
pin_mut!(set_country_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut set_country_fut), Poll::Pending);
let service_fut =
iface_manager_api_negative_test(test_values.receiver, failure_mode.clone());
pin_mut!(service_fut);
match failure_mode {
NegativeTestFailureMode::RequestFailure => {}
_ => {
// Run the request and the servicing of the request
assert_variant!(
test_values.exec.run_until_stalled(&mut set_country_fut),
Poll::Pending
);
assert_variant!(
test_values.exec.run_until_stalled(&mut service_fut),
Poll::Ready(())
);
}
}
// Verify that the client gets the response
assert_variant!(
test_values.exec.run_until_stalled(&mut set_country_fut),
Poll::Ready(Err(_))
);
}
}
|
{
let mut test_values = test_setup();
// Start client connections
let start_fut = test_values.iface_manager.start_client_connections();
pin_mut!(start_fut);
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Pending);
// Verify that the service sees the request.
let next_message = test_values.receiver.next();
pin_mut!(next_message);
assert_variant!(
test_values.exec.run_until_stalled(&mut next_message),
Poll::Ready(Some(IfaceManagerRequest::StartClientConnections(StartClientConnectionsRequest{
responder
}))) => {
responder.send(Ok(())).expect("failed sending stop client connections response");
}
);
// Verify that the client side gets the response.
assert_variant!(test_values.exec.run_until_stalled(&mut start_fut), Poll::Ready(Ok(())));
}
|