file_name | prefix | suffix | middle
---|---|---|---|
index.js
|
return Math.min.apply(null, array);
}
}
exports.max = function max(array) {
if (!Array.isArray(array) || array.length === 0) {
return 0;
} else {
return Math.max.apply(null, array);
}
}
exports.avg = function avg(array) {
if (!Array.isArray(array) || array.length === 0) {
return 0;
} else {
return array.reduce((a, b) => (a + b)) / array.length;
}
}
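// Usage sketch (hypothetical values), assuming this module is required as a
// stats helper:
//   const stats = require('./index');
//   stats.min([3, 1, 2]); // 1
//   stats.max([3, 1, 2]); // 3
//   stats.avg([3, 1, 2]); // 2
// Each helper returns 0 for a non-array or empty input, as guarded above.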
|
exports.min = function min(array) {
if (!Array.isArray(array) || array.length === 0) {
return 0;
} else {
|
|
_nbdev.py
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"request_headers": "00_core.ipynb",
"get_as_raw_json": "00_core.ipynb",
"get_next_as_raw_json": "00_core.ipynb",
"timestamp_now": "00_core.ipynb",
"new_bundle": "00_core.ipynb",
"new_list": "00_core.ipynb",
"extract_references_from_resource": "00_core.ipynb",
"extract_references": "00_core.ipynb",
"get_by_reference": "00_core.ipynb",
"filter_bundle": "00_core.ipynb",
"create_single_patient_medication_bundle": "10_per_patient.ipynb",
"save_single_patient_medication_bundle": "10_per_patient.ipynb",
"handle_entry_search": "10_per_patient.ipynb",
"medication_status_filter": "10_per_patient.ipynb",
"do_not_perform_filter": "10_per_patient.ipynb",
"CM_EXCLUDE_STATUS_MAP": "20a_status_filter.ipynb",
"get_negated_list": "20a_status_filter.ipynb",
"single_patient_medication_bundle": "30_cli.ipynb",
"remove_non_utf8": "30_cli.ipynb",
"get_single_patient_medication_bundle": "50a_web_demo.ipynb",
"create_app": "50_web_app.ipynb",
"bp": "50a_web_demo.ipynb",
"index": "50a_web_demo.ipynb",
"convert_to_cdisc": "50a_web_demo.ipynb"}
modules = ["core.py",
"per_patient.py",
"status_filter.py",
"cli.py",
"web/app.py",
"web/demo.py"]
doc_url = "https://pete88b.github.io/vulcan_medication_bundle/"
git_url = "https://github.com/pete88b/vulcan_medication_bundle/tree/main/"
def
|
(name): return None
|
custom_doc_links
|
gov-dot-uk.js
|
module.exports={title:"GOV.UK",slug:"gov-dot-uk",svg:'<svg role="img" viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title>GOV.UK icon</title><path d="M2.4 13.031c.261-.64-.04-1.374-.682-1.638-.631-.262-1.362.049-1.623.688-.266.637.038 1.363.668 1.623.632.267 1.368-.037 1.623-.673m.858-3.244c.256-.636-.045-1.37-.691-1.639-.631-.258-1.367.053-1.639.687-.27.638.03 1.369.662 1.629.646.266 1.367-.039 1.639-.677m1.218-1.095c.645.264 1.367-.041 1.637-.675.256-.639-.045-1.375-.676-1.639-.632-.26-1.368.049-1.639.686-.271.638.046 1.367.678 1.626m3.156 1.053c.646.256 1.368-.045 1.64-.676.255-.646-.046-1.383-.691-1.639-.634-.271-1.37.046-1.626.676-.27.632.045 1.368.677 1.625M21.6 13.006c.271.631.993.933 1.639.661.632-.27.933-.992.661-1.639-.271-.646-1.008-.947-1.639-.676-.646.256-.945.992-.676 1.639m.752-2.57c.631-.257.931-.993.676-1.64-.271-.632-1.008-.946-1.639-.677-.646.271-.947.992-.676 1.639.256.632.993.933 1.624.678m-2.78-1.729c.631-.26.932-.992.672-1.631-.27-.646-1.001-.947-1.639-.686-.632.271-.937.992-.67 1.639.27.631 1.007.932 1.638.676m-3.412.858c.631-.271.932-1.007.677-1.639-.271-.646-.993-.947-1.64-.691-.631.271-.932 1.008-.661 1.639.271.632.993.932 1.64.662M9.425 4.935l.093 2.399 1.752-.923c.035.039.079.078.12.107-.181.718-.621 1.914-.896 2.575-.335.812.051 1.744.862 2.078.812.331 1.747-.06 2.083-.872.241-.57.121-1.188-.24-1.623-.391-.678-.781-1.533-.992-2.195.061-.045.105-.105.15-.15l1.895.977.09-2.39-1.969.632c-.016-.031-.031-.061-.045-.075l.766-2.181-2.33-.074.707 2.148c-.061.031-.105.076-.15.137l-1.896-.57zm11.896 9.109c.16.883.188 1.284-.015 1.859-.285-.286-.54-.807-.749-1.594l-.872 2.79c.52-.346.915-.565 1.364-.571-.815 1.713-1.819 2.146-2.461 2.021-.787-.158-1.15-.867-1.012-1.466.175-.842 1.052-1.052 1.448-.067.792-1.566-.52-2.078-1.368-1.624 1.313-1.28 1.468-2.426.426-3.836-1.464 1.106-1.49 2.21-.851 3.772-.842-.985-2.18-.466-1.713 1.113.627-.949 1.432-.346 1.298.565-.109.796-1.163 1.413-2.461 1.292-1.864-.165-1.954-1.457-1.983-2.525.451-.074 1.277.361 1.983 1.354l.286-2.962c-.767.782-1.474.946-2.24.962.271-.781 1.459-2.09 1.459-2.09l-3.729-.045s1.188 1.323 1.443 2.119c-.768-.014-1.459-.164-2.225-.961l.255 2.961c.706-.991 1.534-1.412 1.984-1.338-.045 1.053-.15 2.346-2.015 2.511-1.293.105-2.345-.526-2.449-1.323-.121-.901.691-1.503 1.307-.541.481-1.578-.856-2.104-1.699-1.127.662-1.564.647-2.676-.811-3.789-1.053 1.398-.902 2.541.405 3.85-.842-.467-2.165.029-1.383 1.607.405-.992 1.278-.766 1.458.076.12.601-.24 1.308-1.021 1.457-.646.121-1.639-.315-2.451-2.044.451.016.842.241 1.354.587l-.859-2.783c-.209.781-.48 1.295-.766 1.578-.186-.57-.166-.977.014-1.859l-1.803.613c.922 1.264 1.857 3.021 2.59 6.104 2.275-.376 6.157-.603 8.509-.603 2.345.015 6.229.285 8.494.691.767-3.081 1.713-4.825 2.646-6.073l-1.773-.646-.014-.015z"/></svg>',get path(){return this.svg.match(/<path\s+d="([^"]*)/)[1]},source:"https://github.com/alphagov/design-assets/tree/master/Icons",hex:"005EA5",guidelines:void 0,license:void 0};
|
||
completions.rs
|
//! Command line interface for generating a command line completion script.
use super::build;
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use std::io;
/// Creates a subcommand for generating shell completion scripts.
pub fn create_completions_subcommand<'a, 'b>() -> App<'a, 'b> {
SubCommand::with_name("completions")
.about("Generate tab-completion script for your shell")
.setting(AppSettings::Hidden)
.arg(
Arg::with_name("shell")
.value_name("SHELL")
.required(true)
.possible_values(&["bash", "zsh", "fish"])
.help("The shell to generate the script for"),
)
.help_message("Print help information")
.after_help(
r#"DISCUSSION
[Adapted from rustup-completions]
Enable tab completion for Bash, Zsh or Fish
The script is output on `stdout`, allowing one to redirect the
output to the file of their choosing. Where you place the file
will depend on which shell, and which operating system you are
using. Your particular configuration may also determine where
these scripts need to be placed.
Here are some common set ups for the three supported shells under
Unix and similar operating systems (such as GNU/Linux).
BASH:
Completion files are commonly stored in `/etc/bash_completion.d/` for
system-wide commands, but can be stored in
`~/.local/share/bash-completion/completions` for user-specific commands.
Run the command:
$ mkdir -p ~/.local/share/bash-completion/completions
$ backstaff -- completions bash >> ~/.local/share/bash-completion/completions/backstaff
This installs the completion script. You may have to log out and
log back in to your shell session for the changes to take effect.
BASH (macOS/Homebrew):
Homebrew stores bash completion files within the Homebrew directory.
With the `bash-completion` brew formula installed, run the command:
$ mkdir -p $(brew --prefix)/etc/bash_completion.d
$ backstaff -- completions bash > $(brew --prefix)/etc/bash_completion.d/backstaff.bash-completion
ZSH:
ZSH completions are commonly stored in any directory listed in
your `$fpath` variable. To use these completions, you must either
add the generated script to one of those directories, or add your
own to this list.
Adding a custom directory is often the safest bet if you are
unsure of which directory to use. First create the directory; for
this example we'll create a hidden directory inside our `$HOME`
directory:
$ mkdir ~/.zfunc
Then add the following lines to your `.zshrc` just before
`compinit`:
fpath+=~/.zfunc
Now you can install the completions script using the following
command:
$ backstaff -- completions zsh > ~/.zfunc/_backstaff
You must then either log out and log back in, or simply run
$ exec zsh
for the new completions to take effect.
FISH:
Fish completion files are commonly stored in
`$HOME/.config/fish/completions`. Run the command:
$ mkdir -p ~/.config/fish/completions
$ backstaff -- completions fish > ~/.config/fish/completions/backstaff.fish
This installs the completion script. You may have to log out and
log back in to your shell session for the changes to take effect."#,
)
}
pub fn
|
<'a, 'b>(arguments: &ArgMatches) {
let shell = arguments
.value_of("shell")
.expect("No value for required argument")
.parse()
.unwrap();
build::build().gen_completions_to(clap::crate_name!(), shell, &mut io::stdout());
}
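// A minimal dispatch sketch (hypothetical `main` wiring, assuming the parent
// `App` from `build::build()` has this subcommand attached):
//
//     let matches = build::build().get_matches();
//     if let Some(sub_matches) = matches.subcommand_matches("completions") {
//         run_completions_subcommand(sub_matches);
//     }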
|
run_completions_subcommand
|
hello_world.go
|
// This is a "stub" file. It's a little start on your solution.
// It's not a complete solution though; you have to write some code.
// Package greeting should have a package comment that summarizes what it's about.
// https://golang.org/doc/effective_go.html#commentary
package greeting
import "fmt"
// HelloWorld should have a comment documenting it.
func HelloWorld() string
|
func main() {
fmt.Println(HelloWorld())
}
|
{
// Write some code here to pass the test suite.
// Then remove all the stock comments.
// They're here to help you get started but they only clutter a finished solution.
// If you leave them in, reviewers may protest!
return "Hello, World!"
}
|
mod.rs
|
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Structs and traits which allow other parts of rust-lightning to interact with the blockchain.
use bitcoin::blockdata::script::Script;
use bitcoin::blockdata::transaction::TxOut;
use bitcoin::hash_types::{BlockHash, Txid};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, MonitorEvent};
use chain::keysinterface::ChannelKeys;
use chain::transaction::OutPoint;
pub mod chaininterface;
pub mod chainmonitor;
pub mod channelmonitor;
pub mod transaction;
pub mod keysinterface;
/// The `Access` trait defines behavior for accessing chain data and state, such as blocks and
/// UTXOs.
pub trait Access: Send + Sync {
/// Returns the transaction output of a funding transaction encoded by [`short_channel_id`].
/// Returns an error if `genesis_hash` is for a different chain or if such a transaction output
/// is unknown.
///
/// [`short_channel_id`]: https://github.com/lightningnetwork/lightning-rfc/blob/master/07-routing-gossip.md#definition-of-short_channel_id
fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, AccessError>;
}
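// A minimal sketch of an `Access` implementation (hypothetical `UtxoIndex`
// type; not part of rust-lightning itself):
//
//     struct UtxoIndex { /* chain backend */ }
//     impl Access for UtxoIndex {
//         fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64)
//             -> Result<TxOut, AccessError>
//         {
//             // Return UnknownChain for a foreign genesis_hash, UnknownTx
//             // when the funding output cannot be found.
//             Err(AccessError::UnknownTx)
//         }
//     }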
/// An error when accessing the chain via [`Access`].
///
/// [`Access`]: trait.Access.html
#[derive(Clone)]
pub enum
|
{
/// The requested chain is unknown.
UnknownChain,
/// The requested transaction doesn't exist or hasn't confirmed.
UnknownTx,
}
/// The `Watch` trait defines behavior for watching on-chain activity pertaining to channels as
/// blocks are connected and disconnected.
///
/// Each channel is associated with a [`ChannelMonitor`]. Implementations of this trait are
/// responsible for maintaining a set of monitors such that they can be updated accordingly as
/// channel state changes and HTLCs are resolved. See method documentation for specific
/// requirements.
///
/// Implementations **must** ensure that updates are successfully applied and persisted upon method
/// completion. If an update fails with a [`PermanentFailure`], then it must immediately shut down
/// without taking any further action such as persisting the current state.
///
/// If an implementation maintains multiple instances of a channel's monitor (e.g., by storing
/// backup copies), then it must ensure that updates are applied across all instances. Otherwise, it
/// could result in a revoked transaction being broadcast, allowing the counterparty to claim all
/// funds in the channel. See [`ChannelMonitorUpdateErr`] for more details about how to handle
/// multiple instances.
///
/// [`ChannelMonitor`]: channelmonitor/struct.ChannelMonitor.html
/// [`ChannelMonitorUpdateErr`]: channelmonitor/enum.ChannelMonitorUpdateErr.html
/// [`PermanentFailure`]: channelmonitor/enum.ChannelMonitorUpdateErr.html#variant.PermanentFailure
pub trait Watch: Send + Sync {
/// Keys needed by monitors for creating and signing transactions.
type Keys: ChannelKeys;
/// Watches a channel identified by `funding_txo` using `monitor`.
///
/// Implementations are responsible for watching the chain for the funding transaction along
/// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
/// calling [`block_connected`] and [`block_disconnected`] on the monitor.
///
/// [`get_outputs_to_watch`]: channelmonitor/struct.ChannelMonitor.html#method.get_outputs_to_watch
/// [`block_connected`]: channelmonitor/struct.ChannelMonitor.html#method.block_connected
/// [`block_disconnected`]: channelmonitor/struct.ChannelMonitor.html#method.block_disconnected
fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<Self::Keys>) -> Result<(), ChannelMonitorUpdateErr>;
/// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
///
/// Implementations must call [`update_monitor`] with the given update. See
/// [`ChannelMonitorUpdateErr`] for invariants around returning an error.
///
/// [`update_monitor`]: channelmonitor/struct.ChannelMonitor.html#method.update_monitor
/// [`ChannelMonitorUpdateErr`]: channelmonitor/enum.ChannelMonitorUpdateErr.html
fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
/// Returns any monitor events since the last call. Subsequent calls must only return new
/// events.
fn release_pending_monitor_events(&self) -> Vec<MonitorEvent>;
}
/// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
/// channels.
///
/// This is useful in order to have a [`Watch`] implementation convey to a chain source which
/// transactions to be notified of. Notification may take the form of pre-filtering blocks or, in
/// the case of [BIP 157]/[BIP 158], only fetching a block if the compact filter matches. If
/// receiving full blocks from a chain source, any further filtering is unnecessary.
///
/// After an output has been registered, subsequent block retrievals from the chain source must not
/// exclude any transactions matching the new criteria nor any in-block descendants of such
/// transactions.
///
/// Note that use as part of a [`Watch`] implementation involves reentrancy. Therefore, the `Filter`
/// should not block on I/O. Implementations should instead queue the newly monitored data to be
/// processed later. Then, in order to block until the data has been processed, any `Watch`
/// invocation that has called the `Filter` must return [`TemporaryFailure`].
///
/// [`Watch`]: trait.Watch.html
/// [`TemporaryFailure`]: channelmonitor/enum.ChannelMonitorUpdateErr.html#variant.TemporaryFailure
/// [BIP 157]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki
/// [BIP 158]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki
pub trait Filter: Send + Sync {
/// Registers interest in a transaction with `txid` and having an output with `script_pubkey` as
/// a spending condition.
fn register_tx(&self, txid: &Txid, script_pubkey: &Script);
/// Registers interest in spends of a transaction output identified by `outpoint` having
/// `script_pubkey` as the spending condition.
fn register_output(&self, outpoint: &OutPoint, script_pubkey: &Script);
}
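// Sketch of a no-op `Filter` (hypothetical), suitable when the chain source
// already delivers full blocks and no pre-filtering is needed:
//
//     struct FullBlockSource;
//     impl Filter for FullBlockSource {
//         fn register_tx(&self, _txid: &Txid, _script_pubkey: &Script) {}
//         fn register_output(&self, _outpoint: &OutPoint, _script_pubkey: &Script) {}
//     }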
|
AccessError
|
feature.go
|
// Copyright 2017 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pgsql
import (
"database/sql"
"sort"
"github.com/lib/pq"
log "github.com/sirupsen/logrus"
"github.com/coreos/clair/database"
"github.com/coreos/clair/ext/versionfmt"
"github.com/coreos/clair/pkg/commonerr"
)
const (
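	// soiNamespacedFeature ("select or insert") inserts the (feature_id,
	// namespace_id) pair only when it does not already exist, then returns
	// the row id either way via the UNION of the existing and the new row.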
soiNamespacedFeature = `
WITH new_feature_ns AS (
INSERT INTO namespaced_feature(feature_id, namespace_id)
SELECT CAST ($1 AS INTEGER), CAST ($2 AS INTEGER)
WHERE NOT EXISTS ( SELECT id FROM namespaced_feature WHERE namespaced_feature.feature_id = $1 AND namespaced_feature.namespace_id = $2)
RETURNING id
)
SELECT id FROM namespaced_feature WHERE namespaced_feature.feature_id = $1 AND namespaced_feature.namespace_id = $2
UNION
SELECT id FROM new_feature_ns`
searchPotentialAffectingVulnerabilities = `
SELECT nf.id, v.id, vaf.affected_version, vaf.id
FROM vulnerability_affected_feature AS vaf, vulnerability AS v,
namespaced_feature AS nf, feature AS f
WHERE nf.id = ANY($1)
AND nf.feature_id = f.id
AND nf.namespace_id = v.namespace_id
AND vaf.feature_name = f.name
AND vaf.vulnerability_id = v.id
AND v.deleted_at IS NULL`
searchNamespacedFeaturesVulnerabilities = `
SELECT vanf.namespaced_feature_id, v.name, v.description, v.link,
v.severity, v.metadata, vaf.fixedin, n.name, n.version_format
FROM vulnerability_affected_namespaced_feature AS vanf,
vulnerability AS v,
vulnerability_affected_feature AS vaf,
namespace AS n
WHERE vanf.namespaced_feature_id = ANY($1)
AND vaf.id = vanf.added_by
AND v.id = vanf.vulnerability_id
AND n.id = v.namespace_id
AND v.deleted_at IS NULL`
)
func (tx *pgSession) PersistFeatures(features []database.Feature) error {
if len(features) == 0 {
return nil
}
// Sorting is needed before inserting into database to prevent deadlock.
sort.Slice(features, func(i, j int) bool {
	// Compare key by key so the comparator is a valid strict weak ordering.
	if features[i].Name != features[j].Name {
		return features[i].Name < features[j].Name
	}
	if features[i].Version != features[j].Version {
		return features[i].Version < features[j].Version
	}
	return features[i].VersionFormat < features[j].VersionFormat
})
// TODO(Sida): A better interface for bulk insertion is needed.
keys := make([]interface{}, 0, len(features)*3)
for _, f := range features {
	if f.Name == "" || f.Version == "" || f.VersionFormat == "" {
		return commonerr.NewBadRequestError("Empty feature name, version or version format is not allowed")
	}
	keys = append(keys, f.Name, f.Version, f.VersionFormat)
}
_, err := tx.Exec(queryPersistFeature(len(features)), keys...)
return handleError("queryPersistFeature", err)
}
type namespacedFeatureWithID struct {
database.NamespacedFeature
ID int64
}
type vulnerabilityCache struct {
nsFeatureID int64
vulnID int64
vulnAffectingID int64
}
func (tx *pgSession) searchAffectingVulnerabilities(features []database.NamespacedFeature) ([]vulnerabilityCache, error) {
if len(features) == 0 {
return nil, nil
}
ids, err := tx.findNamespacedFeatureIDs(features)
if err != nil {
return nil, err
}
fMap := map[int64]database.NamespacedFeature{}
for i, f := range features {
if !ids[i].Valid {
return nil, database.ErrMissingEntities
}
fMap[ids[i].Int64] = f
}
cacheTable := []vulnerabilityCache{}
rows, err := tx.Query(searchPotentialAffectingVulnerabilities, pq.Array(ids))
if err != nil {
return nil, handleError("searchPotentialAffectingVulneraibilities", err)
}
defer rows.Close()
for rows.Next() {
var (
cache vulnerabilityCache
affected string
)
err := rows.Scan(&cache.nsFeatureID, &cache.vulnID, &affected, &cache.vulnAffectingID)
if err != nil {
return nil, err
}
if ok, err := versionfmt.InRange(fMap[cache.nsFeatureID].VersionFormat, fMap[cache.nsFeatureID].Version, affected); err != nil {
return nil, err
} else if ok
|
}
return cacheTable, nil
}
func (tx *pgSession) CacheAffectedNamespacedFeatures(features []database.NamespacedFeature) error {
if len(features) == 0 {
return nil
}
_, err := tx.Exec(lockVulnerabilityAffects)
if err != nil {
return handleError("lockVulnerabilityAffects", err)
}
cache, err := tx.searchAffectingVulnerabilities(features)
if err != nil {
	return err
}
keys := make([]interface{}, 0, len(cache)*3)
for _, c := range cache {
keys = append(keys, c.vulnID, c.nsFeatureID, c.vulnAffectingID)
}
if len(cache) == 0 {
return nil
}
affected, err := tx.Exec(queryPersistVulnerabilityAffectedNamespacedFeature(len(cache)), keys...)
if err != nil {
return handleError("persistVulnerabilityAffectedNamespacedFeature", err)
}
if count, err := affected.RowsAffected(); err == nil {
	log.Debugf("Cached %d features in vulnerability_affected_namespaced_feature", count)
}
return nil
}
func (tx *pgSession) PersistNamespacedFeatures(features []database.NamespacedFeature) error {
if len(features) == 0 {
return nil
}
nsIDs := map[database.Namespace]sql.NullInt64{}
fIDs := map[database.Feature]sql.NullInt64{}
for _, f := range features {
nsIDs[f.Namespace] = sql.NullInt64{}
fIDs[f.Feature] = sql.NullInt64{}
}
fToFind := []database.Feature{}
for f := range fIDs {
fToFind = append(fToFind, f)
}
sort.Slice(fToFind, func(i, j int) bool {
	if fToFind[i].Name != fToFind[j].Name {
		return fToFind[i].Name < fToFind[j].Name
	}
	if fToFind[i].Version != fToFind[j].Version {
		return fToFind[i].Version < fToFind[j].Version
	}
	return fToFind[i].VersionFormat < fToFind[j].VersionFormat
})
if ids, err := tx.findFeatureIDs(fToFind); err == nil {
for i, id := range ids {
if !id.Valid {
return database.ErrMissingEntities
}
fIDs[fToFind[i]] = id
}
} else {
return err
}
nsToFind := []database.Namespace{}
for ns := range nsIDs {
nsToFind = append(nsToFind, ns)
}
if ids, err := tx.findNamespaceIDs(nsToFind); err == nil {
for i, id := range ids {
if !id.Valid {
return database.ErrMissingEntities
}
nsIDs[nsToFind[i]] = id
}
} else {
return err
}
keys := make([]interface{}, 0, len(features)*2)
for _, f := range features {
keys = append(keys, fIDs[f.Feature], nsIDs[f.Namespace])
}
_, err := tx.Exec(queryPersistNamespacedFeature(len(features)), keys...)
if err != nil {
return err
}
return nil
}
// FindAffectedNamespacedFeatures looks up cache table and retrieves all
// vulnerabilities associated with the features.
func (tx *pgSession) FindAffectedNamespacedFeatures(features []database.NamespacedFeature) ([]database.NullableAffectedNamespacedFeature, error) {
if len(features) == 0 {
return nil, nil
}
returnFeatures := make([]database.NullableAffectedNamespacedFeature, len(features))
// featureMap is used to keep track of duplicated features.
featureMap := map[database.NamespacedFeature][]*database.NullableAffectedNamespacedFeature{}
// initialize return value and generate unique feature request queries.
for i, f := range features {
returnFeatures[i] = database.NullableAffectedNamespacedFeature{
AffectedNamespacedFeature: database.AffectedNamespacedFeature{
NamespacedFeature: f,
},
}
featureMap[f] = append(featureMap[f], &returnFeatures[i])
}
// query unique namespaced features
distinctFeatures := []database.NamespacedFeature{}
for f := range featureMap {
distinctFeatures = append(distinctFeatures, f)
}
nsFeatureIDs, err := tx.findNamespacedFeatureIDs(distinctFeatures)
if err != nil {
return nil, err
}
toQuery := []int64{}
featureIDMap := map[int64][]*database.NullableAffectedNamespacedFeature{}
for i, id := range nsFeatureIDs {
if id.Valid {
toQuery = append(toQuery, id.Int64)
for _, f := range featureMap[distinctFeatures[i]] {
f.Valid = id.Valid
featureIDMap[id.Int64] = append(featureIDMap[id.Int64], f)
}
}
}
rows, err := tx.Query(searchNamespacedFeaturesVulnerabilities, pq.Array(toQuery))
if err != nil {
return nil, handleError("searchNamespacedFeaturesVulnerabilities", err)
}
defer rows.Close()
for rows.Next() {
var (
featureID int64
vuln database.VulnerabilityWithFixedIn
)
err := rows.Scan(&featureID,
&vuln.Name,
&vuln.Description,
&vuln.Link,
&vuln.Severity,
&vuln.Metadata,
&vuln.FixedInVersion,
&vuln.Namespace.Name,
&vuln.Namespace.VersionFormat,
)
if err != nil {
return nil, handleError("searchNamespacedFeaturesVulnerabilities", err)
}
for _, f := range featureIDMap[featureID] {
f.AffectedBy = append(f.AffectedBy, vuln)
}
}
return returnFeatures, nil
}
func (tx *pgSession) findNamespacedFeatureIDs(nfs []database.NamespacedFeature) ([]sql.NullInt64, error) {
if len(nfs) == 0 {
return nil, nil
}
nfsMap := map[database.NamespacedFeature]sql.NullInt64{}
keys := make([]interface{}, 0, len(nfs)*4)
for _, nf := range nfs {
keys = append(keys, nf.Name, nf.Version, nf.VersionFormat, nf.Namespace.Name)
nfsMap[nf] = sql.NullInt64{}
}
rows, err := tx.Query(querySearchNamespacedFeature(len(nfs)), keys...)
if err != nil {
return nil, handleError("searchNamespacedFeature", err)
}
defer rows.Close()
var (
id sql.NullInt64
nf database.NamespacedFeature
)
for rows.Next() {
err := rows.Scan(&id, &nf.Name, &nf.Version, &nf.VersionFormat, &nf.Namespace.Name)
nf.Namespace.VersionFormat = nf.VersionFormat
if err != nil {
return nil, handleError("searchNamespacedFeature", err)
}
nfsMap[nf] = id
}
ids := make([]sql.NullInt64, len(nfs))
for i, nf := range nfs {
ids[i] = nfsMap[nf]
}
return ids, nil
}
func (tx *pgSession) findFeatureIDs(fs []database.Feature) ([]sql.NullInt64, error) {
if len(fs) == 0 {
return nil, nil
}
fMap := map[database.Feature]sql.NullInt64{}
keys := make([]interface{}, 0, len(fs)*3)
for _, f := range fs {
keys = append(keys, f.Name, f.Version, f.VersionFormat)
fMap[f] = sql.NullInt64{}
}
rows, err := tx.Query(querySearchFeatureID(len(fs)), keys...)
if err != nil {
return nil, handleError("querySearchFeatureID", err)
}
defer rows.Close()
var (
id sql.NullInt64
f database.Feature
)
for rows.Next() {
err := rows.Scan(&id, &f.Name, &f.Version, &f.VersionFormat)
if err != nil {
return nil, handleError("querySearchFeatureID", err)
}
fMap[f] = id
}
ids := make([]sql.NullInt64, len(fs))
for i, f := range fs {
ids[i] = fMap[f]
}
return ids, nil
}
|
{
cacheTable = append(cacheTable, cache)
}
|
UserForm.js
|
import React, {Component} from 'react';
import { CSSTransition, TransitionGroup } from 'react-transition-group';
import { Input } from '../bootstrap';
import TaggedInput from '../TaggedInput';
import { Link } from 'react-router';
import { Time } from '../Time';
const Fade = ({ children, ...props }) => (
<CSSTransition
{...props}
timeout={{
enter: 300,
exit: 500,
}}
classNames="fade"
>
{children}
</CSSTransition>
);
/**
 * The UserForm component displays a user and allows editing of the user data.
 */
export class
|
extends Component {
constructor (props) {
super(props);
this.addRole = this.addRole.bind(this);
this.handleChange = this.handleChange.bind(this);
this.handleRoleChange = this.handleRoleChange.bind(this);
this.handleResetUserPasswordManually = this.handleResetUserPasswordManually.bind(this);
this.state = UserForm.getInitialState();
}
addRole(e) {
e.preventDefault();
const role = e.target.getAttribute('data-role');
// User already has this role
if (this.props.user.roles.includes(role)) {
return;
}
this.handleRoleChange(role, this.props.user.roles.concat(role));
}
/**
* Update changes to the user role. We need a custom handler for
* the TaggedInput Component.
*/
handleRoleChange (role, roles) {
const update = {
isDirty: true,
roles: roles
};
// trigger state update
this.props.onChange(update);
}
handleUnblock (event) {
event.preventDefault();
this.props.unblockUser(this.props.user);
}
/**
* Handles all input changes and triggers a redux state change using the onChange handler
* from the parent.
*/
handleChange (event) {
let value;
const update = {
isDirty: true
};
switch (event.target.type) {
case 'checkbox':
value = event.target.checked;
break;
default:
value = event.target.value;
}
// nested data structures are special cases
if (event.target.name.startsWith('verification')) {
const verificationValue = this.props.user.verification;
switch (event.target.name) {
case 'verification.isCompleted':
verificationValue.isCompleted = value;
break;
case 'verification.token':
verificationValue.token = value;
break;
}
update.verification = verificationValue;
} else {
// normal attributes
update[event.target.name] = value;
}
// trigger state update
this.props.onChange(update);
}
/**
* Saves the user.
*/
handleSave (event) {
event.preventDefault();
this.props.save(this.props.user);
}
handleDelete (event) {
event.preventDefault();
this.setState({
showConfirmDelete: true
});
}
handleDeleteConfirm (event) {
event.preventDefault();
this.props.delete(this.props.user);
this.setState({
showConfirmDelete: false
});
}
handleDeleteDismiss (event) {
event.preventDefault();
this.setState({
showConfirmDelete: false
});
}
handleResendConfirmationEmail (event) {
event.preventDefault();
this.props.resendConfirmationEmail(this.props.user);
}
handleResetUserPasswordManually (event) {
event.preventDefault();
this.props.resetUserPasswordManually(this.props.user);
}
handleConfirmUser (event) {
event.preventDefault();
this.props.confirmUser(this.props.user);
}
handleSendMail (event) {
// ToDo: push state
this.props.changeMailData({
email: this.props.user.email,
subject: '',
message: ''
});
}
/**
* Renders the confirm dialog when a user should be deleted
*/
renderConfirmDelete () {
return (
<TransitionGroup className="form-footer">
<Fade>
<div className="form-inline">
<p className="form-control-plaintext">Soll der Benutzer wirklich gelöscht werden?</p>
<button onClick={this.handleDeleteConfirm.bind(this)} className="btn btn-danger btn-sm">Ja</button>
<button onClick={this.handleDeleteDismiss.bind(this)} className="btn btn-secondary btn-sm">Nein</button>
</div>
</Fade>
</TransitionGroup>
);
}
/**
* Renders the Save and Delete Buttons
*/
renderFormButtons (isDirty) {
return (
<div className="form-footer">
<button type="submit" onClick={this.handleSave.bind(this)} disabled={!isDirty} className="btn btn-success btn-sm">Speichern</button>
<button type="submit" onClick={this.handleDelete.bind(this)} className="btn btn-danger btn-sm">Löschen</button>
<button type="submit" onClick={this.handleResendConfirmationEmail.bind(this)} className="btn btn-warning btn-sm">Aktivierungs-E-Mail erneut schicken</button>
<button type="submit" onClick={this.handleUnblock.bind(this)} className="btn btn-secondary btn-sm" title="Setzt die Anmeldesperre zurück, sodass sich der Benutzer wieder anmelden kann.">Anmeldung wieder erlauben</button>
<button type="submit" onClick={this.handleResetUserPasswordManually} className="btn btn-secondary btn-sm" title="Setzt ein neues Passwort und schickt dieses in einer E-Mail dem Benutzer zu.">Neues Passwort zuschicken</button>
<button type="submit" onClick={this.handleConfirmUser.bind(this)} className="btn btn-secondary btn-sm" title="Schaltet den Benutzer frei, ohne das dieser den Aktivierungslink anklicken muss.">Benutzer freischalten</button>
<Link to="/admin/mail" onClick={this.handleSendMail.bind(this)} className="btn btn-info btn-sm" title="Neue E-Mail an den Benutzer schicken">Mail schicken</Link>
</div>
);
}
renderForm () {
const isDirty = this.props.user.isDirty === true;
const deleteContent = this.state.showConfirmDelete ? this.renderConfirmDelete() : null;
const formButtons = this.state.showConfirmDelete ? null : this.renderFormButtons(isDirty);
return (
<form>
<div className="form-group">
<label>Interne ID</label>
<p className="form-control-static"><strong>{this.props.user.id}</strong></p>
<small className="text-muted">Eindeutige interne ID des Benutzers. Diese wird für die interne Datenhaltung verwendet.</small>
</div>
<Input onChange={this.handleChange} name="email" type="text" label="E-Mail-Adresse" muted="Vorsicht: Benutzer werden anhand ihrer E-Mail-Adresse identifiziert. Diese muss eindeutig sein." value={this.props.user.email} />
<Input onChange={this.handleChange} name="username" type="text" label="Benutzername" muted="Dieser wird aus der E-Mail-Adresse automatisch generiert." value={this.props.user.username} />
<Input onChange={this.handleChange} name="isActive" type="checkbox" label="Aktiviert: Hiermit könnten Benutzer aktiviert und deaktiviert werden." checked={this.props.user.isActive} />
<Input onChange={this.handleChange} name="semester" type="text" label="Semester" muted="Semester in dem sich der Benutzer registriert hat." value={this.props.user.semester} />
<Input onChange={this.handleChange} name="createdAt" type="text" label="Erstellt am" readOnly="readonly" value={this.props.user.createdAt} /> <span><Time value={this.props.user.createdAt} locale="de" relative={true} invalidDateString="kein Datum"/></span>
<Input onChange={this.handleChange} name="verification.isCompleted" type="checkbox" label="Registrierung abgeschlossen" checked={this.props.user.verification.isCompleted} />
<Input onChange={this.handleChange} name="verification.token" muted="Mit diesem Token kann der Benutzer den Account aktivieren." disabled type="text" label="Aktivierungs-Token" value={this.props.user.verification.token || ''} />
<div className="form-group">
<label>Rollen</label>
<TaggedInput onAddTag={this.handleRoleChange} onRemoveTag={this.handleRoleChange} name="roles" placeholder="Benutzerrollen" tags={this.props.user.roles} />
<small className="text-muted">Hier können die Benutzerrollen verändert werden. Diese entscheiden über die Zugriffsrechte auf der Seite.</small>
</div>
<div className="form-group">
<button className="btn btn-info btn-sm" onClick={this.addRole} data-role="admin">Admin</button>
<button className="btn btn-info btn-sm" onClick={this.addRole} data-role="author">Autor (Darf Beispiele und Dokumente anlegen)</button>
<small className="text-muted">Klicken Sie auf einen der Buttons um dem Nutzer die Rolle zuzuweisen.</small>
</div>
{formButtons}
{deleteContent}
</form>
);
}
render () {
const form = this.renderForm();
return form;
}
}
UserForm.getInitialState = function getInitialState() {
return {
showConfirmDelete: false
};
};
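// Usage sketch (hypothetical handlers supplied by a parent container):
//   <UserForm user={user} onChange={onChange} save={save} delete={del}
//             unblockUser={unblock} resendConfirmationEmail={resend}
//             resetUserPasswordManually={reset} confirmUser={confirm}
//             changeMailData={mail} />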
|
UserForm
|
app.py
|
import os
from flask import (
Flask, flash, render_template, redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
mongo = PyMongo(app)
# all recipes
@app.route("/")
@app.route("/get_recipes")
def get_recipes():
recipes = list(mongo.db.recipes.find())
return render_template(
"recipes.html", recipes=recipes, page_title="All Recipes")
@app.route("/view_recipe/<recipe_id>")
def view_recipe(recipe_id):
the_recipe = mongo.db.recipes.find_one_or_404({"_id": ObjectId(recipe_id)})
if 'user' not in session:
return redirect(url_for("login"))
return render_template(
"view_recipe.html", recipes=the_recipe, page_title="View Recipe")
@app.route("/search", methods=["GET", "POST"])
def search():
query = request.form.get("query")
recipes = list(mongo.db.recipes.find({"$text": {"$search": query}}))
return render_template(
"recipes.html", recipes=recipes, page_title="Search Recipe")
# register
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "POST":
# check if username already exists in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
flash("Username already exists")
return redirect(url_for("register"))
register = {
"username": request.form.get("username").lower(),
"password": generate_password_hash(request.form.get("password"))
}
mongo.db.users.insert_one(register)
# put the new user into 'session' cookie
session["user"] = request.form.get("username").lower()
flash("Registration Successful!")
return redirect(url_for("profile", username=session["user"]))
return render_template("register.html", page_title="Register")
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
# check if username exists in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
# ensure hashed password matches user input
if check_password_hash(
existing_user["password"], request.form.get("password")):
session["user"] = request.form.get("username").lower()
flash("Welcome, {}".format(request.form.get("username")))
return redirect(url_for("profile", username=session["user"]))
else:
# invalid password match
flash("Incorrect Username and/or Password")
return redirect(url_for("login"))
else:
# username doesn't exist
flash("Incorrect Username and/or Password")
return redirect(url_for("login"))
return render_template("login.html", page_title="Login")
@app.route("/profile/", methods=["GET", "POST"])
def profile():
if "user" not in session:
return redirect(url_for("login"))
recipes = mongo.db.recipes.find(
{"created_by": session["user"]}).sort("_id", -1)
return render_template(
"profile.html", username=session["user"],
recipes=recipes, page_title="Profile")
@app.route("/logout")
def logout():
# remove user from session cookies
flash("You have been logged out")
session.pop("user")
return redirect(url_for("login"))
@app.route("/add_recipe", methods=["Get", "POST"])
def add_recipe():
if "user" not in session:
return redirect(url_for("login"))
if request.method == "POST":
submit = {
"category_name": request.form.get("category_name"),
"recipe_name": request.form.get("recipe_name"),
"ingredients_list": request.form.get(
"ingredients_list").splitlines(),
"recipe_img": request.form.get("recipe_img"),
"prep_time": request.form.get("prep_time"),
"cook_time": request.form.get("cook_time"),
"serves": request.form.get("serves"),
"instructions": request.form.get("instructions").splitlines(),
"created_by": session["user"]
}
# print(submit)
recipe = mongo.db.recipes.insert_one(submit)
recipe_id = recipe.inserted_id
flash("Recipe Successfully Added")
return redirect(url_for("view_recipe", recipe_id=recipe_id))
categories = mongo.db.categories.find().sort("category_name")
return render_template(
"add_recipe.html", categories=categories, page_title="Insert Recipe")
@app.route("/edit_recipe/<recipe_id>", methods=["GET", "POST"])
def edit_recipe(recipe_id):
if "user" not in session:
return redirect(url_for("login"))
if request.method == "POST":
submit = {
"category_name": request.form.get("category_name"),
"recipe_name": request.form.get("recipe_name"),
"ingredients_list": request.form.get(
"ingredients_list").splitlines(),
"recipe_img": request.form.get("recipe_img"),
"prep_time": request.form.get("prep_time"),
"cook_time": request.form.get("cook_time"),
"serves": request.form.get("serves"),
"instructions": request.form.get("instructions").splitlines(),
"created_by": session["user"]
}
# print(submit["ingredients_list"])
for ingredient in submit["ingredients_list"]:
ingredient = ingredient.strip()
mongo.db.recipes.update({"_id": ObjectId(recipe_id)}, submit)
flash("Recipe Successfully Updated")
        return redirect(url_for("view_recipe", recipe_id=recipe_id))
recipe = mongo.db.recipes.find_one_or_404({"_id": ObjectId(recipe_id)})
categories = mongo.db.categories.find().sort("category_name")
return render_template(
"edit_recipe.html", recipe=recipe, categories=categories,
page_title="Edit Recipe")
@app.route("/delete_recipe/<recipe_id>")
def delete_recipe(recipe_id):
    if "user" not in session:
        return redirect(url_for("login"))
    mongo.db.recipes.remove({"_id": ObjectId(recipe_id)})
flash("Recipe Successfully Deleted")
return redirect(url_for("profile"))
# only admin has access to this page
@app.route("/get_categories")
def get_categories():
if "user" not in session:
return redirect(url_for("login"))
categories = list(mongo.db.categories.find().sort("category_name", 1))
if session['user'] == "admin":
return render_template(
"categories.html", categories=categories, page_title="Categories")
flash("You do not have permission")
return redirect(url_for('login'))
@app.route("/add_category", methods=["GET", "POST"])
def add_category():
|
@app.route("/edit_category/<category_id>", methods=["GET", "POST"])
def edit_category(category_id):
if "user" not in session:
return redirect(url_for("login"))
if session['user'] == "admin":
if request.method == "POST":
submit = {
"category_name": request.form.get("category_name")
}
mongo.db.categories.update({"_id": ObjectId(category_id)}, submit)
flash("Category Successfully Updated")
return redirect(url_for("get_categories"))
category = mongo.db.categories.find_one({"_id": ObjectId(category_id)})
return render_template(
"edit_category.html", category=category,
page_title="Edit Category")
flash("You do not have permission")
return redirect(url_for('login'))
@app.route("/delete_category/<category_id>")
def delete_category(category_id):
    if "user" not in session or session["user"] != "admin":
        return redirect(url_for("login"))
    mongo.db.categories.remove({"_id": ObjectId(category_id)})
flash("Category Successfully Deleted")
return redirect(url_for("get_categories"))
if __name__ == "__main__":
app.run(host=os.environ.get("IP"),
port=int(os.environ.get("PORT")),
debug=False)
|
if "user" not in session:
return redirect(url_for("login"))
if session['user'] == "admin":
if request.method == "POST":
category = {
"category_name": request.form.get("category_name")
}
mongo.db.categories.insert_one(category)
flash("New Category Added")
return redirect(url_for("get_categories"))
return render_template(
"add_category.html", page_title="Create Category")
flash("You do not have permission")
return redirect(url_for('login'))
|
runtime.go
|
/*
Copyright 2022 HAProxy Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import "github.com/haproxytech/client-native/v3/runtime"
type rntm struct {
|
p.Runtime = o.cfg
return nil
}
func Runtime(runtime runtime.Runtime) Option {
return rntm{
cfg: runtime,
}
}
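// Usage sketch (assumed surrounding API): Runtime(rt) yields an Option whose
// Set method stores rt in Options.Runtime, so a constructor accepting
// ...Option would typically call opt.Set(&opts) for each option passed.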
|
cfg runtime.Runtime
}
func (o rntm) Set(p *Options) error {
|
windows_persistence_test.py
|
#!/usr/bin/env python
"""Tests for grr.parsers.windows_persistence."""
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.parsers import windows_persistence
class WindowsPersistenceMechanismsParserTest(test_lib.FlowTestsBaseclass):
def testParse(self):
|
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
parser = windows_persistence.WindowsPersistenceMechanismsParser()
path = (r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion"
r"\Run\test")
pathspec = rdf_paths.PathSpec(
path=path, pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
reg_data = "C:\\blah\\some.exe /v"
reg_type = rdf_client.StatEntry.RegistryType.REG_SZ
stat = rdf_client.StatEntry(
aff4path="aff4:/asdfasdf/",
pathspec=pathspec,
registry_type=reg_type,
registry_data=rdf_protodict.DataBlob(string=reg_data))
persistence = [stat]
image_paths = [
"system32\\drivers\\ACPI.sys",
"%systemroot%\\system32\\svchost.exe -k netsvcs",
"\\SystemRoot\\system32\\drivers\\acpipmi.sys"
]
reg_key = rdfvalue.RDFURN("aff4:/C.1000000000000000/registry"
"/HKEY_LOCAL_MACHINE/SYSTEM/ControlSet001"
"/services/AcpiPmi")
for path in image_paths:
serv_info = rdf_client.WindowsServiceInformation(
name="blah",
display_name="GRRservice",
image_path=path,
registry_key=reg_key)
persistence.append(serv_info)
knowledge_base = rdf_client.KnowledgeBase()
knowledge_base.environ_systemroot = "C:\\Windows"
expected = [
"C:\\blah\\some.exe", "C:\\Windows\\system32\\drivers\\ACPI.sys",
"C:\\Windows\\system32\\svchost.exe",
"C:\\Windows\\system32\\drivers\\acpipmi.sys"
]
for index, item in enumerate(persistence):
results = list(
parser.Parse(item, knowledge_base, rdf_paths.PathSpec.PathType.OS))
self.assertEqual(results[0].pathspec.path, expected[index])
self.assertEqual(len(results), 1)
|
regex_parser.rs
|
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TS;
use syn::{DeriveInput, Data};
use quote::quote;
//
// #[cfg(not(feature = "no_use_parsers"))]
// fn crate_regex_parser() -> (syn::Item, TS) {
// let use_code: syn::Item = syn::parse_quote!{use regex_parsers::{Cap, FromMatch};};
// let crate_path: TS = quote!{regex_parsers::};
// (use_code, crate_path)
// }
//
// #[cfg(feature = "no_use_parsers")]
// fn crate_regex_parser() -> (Option<syn::Item>, Option<TS>) {
// (None, None)
// }
//
mod data_struct;
mod data_enum;
pub fn run(input: TokenStream) -> TokenStream {
let obj: DeriveInput = syn::parse(input).unwrap();
match &obj.data {
Data::Struct(_) => {
data_struct::process(obj)
}
Data::Enum(_) => {
data_enum::process(obj)
}
_ => unreachable!("Only structs and enums are supported"),
}
}
fn
|
(obj: &DeriveInput) -> (Option<TS>, Option<TS>) {
if obj.generics.params.is_empty() {
(None, None)
} else {
let generics = obj.generics.clone();
let params = generics.params.iter().cloned();
let where_clause = &obj.generics.where_clause;
(
Some(quote! { <#(#params),*>}),
Some(quote! { #where_clause }),
)
}
}
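// Example (hypothetical input): for `struct Foo<T> where T: Default`, this
// returns roughly (Some(quote!{ <T> }), Some(quote!{ where T: Default })),
// ready to be spliced into a generated `impl` block.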
|
get_generics
|
dataset.rs
|
// Copyright (c) 2018 Blackfynn, Inc. All Rights Reserved.
use std::borrow::Borrow;
use std::ops::Deref;
use serde_derive::Deserialize;
use crate::bf::api::response::package::Package;
use crate::bf::api::BFChildren;
use crate::bf::model;
/// A response wrapping a `model::Dataset`, along with any related metadata.
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Dataset {
organization: String,
owner: String,
children: Option<Vec<Package>>,
content: model::Dataset,
}
impl BFChildren for Dataset {
type Child = Package;
fn children(&self) -> Option<&Vec<Self::Child>> {
self.children.as_ref()
}
}
impl Borrow<model::Dataset> for Dataset {
fn
|
(&self) -> &model::Dataset {
&self.content
}
}
impl Deref for Dataset {
type Target = model::Dataset;
fn deref(&self) -> &Self::Target {
&self.content
}
}
impl Dataset {
/// Get the organization associated with this dataset.
pub fn organization(&self) -> &String {
&self.organization
}
/// Get the owner of the dataset.
pub fn owner(&self) -> &String {
&self.owner
}
/// Get the child packages contained in this dataset.
pub fn children(&self) -> Option<&Vec<Package>> {
self.children.as_ref()
}
/// Take ownership of the dataset wrapped by this response object.
pub fn take(self) -> model::Dataset {
self.content
}
/// Fetch a package from a dataset by package ID.
pub fn get_package_by_id(&self, package_id: model::PackageId) -> Option<model::Package> {
self.get_child_by_id(package_id).map(|p| p.clone().take())
}
/// Fetch a package from a dataset by package name.
pub fn get_package_by_name<N: Into<String>>(&self, package_name: N) -> Option<model::Package> {
self.get_child_by_name(package_name)
.map(|p| p.clone().take())
}
}
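// Usage sketch (hypothetical `ds: Dataset`): via `Deref` and `Borrow`, `ds`
// can be used wherever a `model::Dataset` is expected (e.g. `&*ds`), while
// `ds.take()` consumes the wrapper and `ds.get_package_by_name("raw")` looks
// up a child package by a hypothetical name.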
/// A response wrapping a `model::Collaborators`, along with any related metadata.
#[derive(Debug, Clone, Eq, Hash, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Collaborators {
users: Vec<model::User>,
organizations: Vec<model::Organization>,
teams: Vec<model::Team>,
}
impl Collaborators {
/// Get the user collaborators.
#[allow(dead_code)]
pub fn users(&self) -> &Vec<model::User> {
&self.users
}
/// Get the number of user collaborators.
#[allow(dead_code)]
pub fn user_count(&self) -> usize {
self.users.len()
}
/// Get the organization collaborators.
#[allow(dead_code)]
pub fn organizations(&self) -> &Vec<model::Organization> {
&self.organizations
}
/// Get the number of organization collaborators.
#[allow(dead_code)]
pub fn organization_count(&self) -> usize {
self.organizations.len()
}
/// Get the team collaborators.
#[allow(dead_code)]
pub fn teams(&self) -> &Vec<model::Team> {
&self.teams
}
/// Get the number of team collaborators.
#[allow(dead_code)]
pub fn team_count(&self) -> usize {
self.teams.len()
}
}
#[derive(Debug, Clone, Eq, Hash, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CollaboratorCounts {
users: u32,
organizations: u32,
teams: u32,
}
impl CollaboratorCounts {
/// Get the number of user collaborators.
#[allow(dead_code)]
pub fn users(&self) -> u32 {
self.users
}
/// Get the number of organization collaborators.
#[allow(dead_code)]
pub fn organizations(&self) -> u32 {
self.organizations
}
/// Get the number of team collaborators.
#[allow(dead_code)]
pub fn teams(&self) -> u32 {
self.teams
}
}
#[derive(Debug, Clone, Eq, Hash, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChangeResponse {
success: bool,
message: Option<String>,
}
impl ChangeResponse {
/// Test if the operation was successful.
#[allow(dead_code)]
pub fn success(&self) -> bool {
self.success
}
/// Get a message associated with the change.
#[allow(dead_code)]
pub fn message(&self) -> Option<&String> {
self.message.as_ref()
}
}
|
borrow
|
logistic_fit.py
|
from autograd import grad
import autograd.numpy as np
from scipy.stats import logistic, norm
from scipy.optimize import minimize
def logistic_pdf(x, loc, scale):
y = (x - loc)/scale
return np.exp(-y)/(scale * (1 + np.exp(-y))**2)
def logistic_cdf(x, loc, scale):
y = (x-loc)/scale
if y < -100:
return 0
elif y > 100:
return 1
else:
return 1/(1 + np.exp(-y))
def logistic_logpdf(x, loc, scale):
y = (x - loc)/scale
if y < -250:
return y - np.log(scale)
elif y > 250:
return -y - np.log(scale)
else:
return -y - np.log(scale) - 2 * np.log(1 + np.exp(-y))
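# Derivation note for the branches above: with y = (x - loc)/scale, the exact
# log-pdf is -y - log(scale) - 2*log(1 + exp(-y)). For y << 0,
# log(1 + exp(-y)) ~ -y, giving y - log(scale); for y >> 0 the log term
# vanishes, giving -y - log(scale). The +/-250 cutoffs avoid overflow in
# exp(-y) while keeping the function autograd-friendly.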
def square_dist(a1, a2):
s = 0
for k in range(len(a1)):
s += (a1[k] - a2[k])**2
return s
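# Parameter layout shared by the losses below (length 3*c - 1 for a
# c-component mixture): params[2*j] and params[2*j+1] hold the location and
# log-scale of component j, and params[2*c + k] holds the log-weight of
# component k+1 relative to component 0, whose weight is fixed at 1.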
def log_likelihood_logistic(data, params):
n = len(data)
c = (len(params) + 1)//3
r = 0
if (len(params) + 1) % 3 != 0:
print("Parameters specified incorrectly!")
return None
else:
weights = [1]
for k in range(c-1):
weights.append(np.exp(params[2*c + k]))
s = np.sum(weights)
for x in data:
pdf_list = [logistic_logpdf(x, params[2*j], np.exp(params[2*j+1])) for j in range(c)]
pdf_list_avg = np.sum(pdf_list)/c
pdf_list_n = [weights[j] * np.exp(pdf_list[j] - pdf_list_avg) for j in range(c)]
r += (pdf_list_avg + np.log(np.sum(pdf_list_n)/s))/n
return r
def cdf_loss(percentiles, params):
|
def estimate(data, bins=20, num = 1, tol = 0.01, maxiter = 100):
fit_params = np.zeros(3*num - 1)
a = np.average(data)
s = np.log(np.std(data))
percentiles = [np.percentile(data, k) for k in range(100//bins, 100, 100//bins)]
for i in range(num):
fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)
fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)
def training_loss(params):
return cdf_loss(percentiles, params) + 0.0001 * np.dot(params[2*num:], params[2*num:])
training_loss_jac = grad(training_loss)
res = minimize(training_loss, jac=training_loss_jac, x0=fit_params, method="BFGS", options = {"maxiter": maxiter, "gtol": tol})
print(res)
final_params = res.x
for i in range(num):
final_params[2*i+1] = np.exp(final_params[2*i+1])
results = []
for i in range(num):
results.append(final_params[2*i])
results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])
for i in range(num-1):
results.append(final_params[2*num + i])
return results
def estimate_log(data, num = 1, tol = 0.01, maxiter = 100):
fit_params = np.zeros(3*num - 1)
a = np.average(data)
s = np.log(np.std(data))
for i in range(num):
fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)
fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)
    def training_loss(params):
        return -log_likelihood_logistic(data, params)
    training_loss_jac = grad(training_loss)
res = minimize(training_loss, jac=training_loss_jac, x0=fit_params, method="BFGS", options = {"maxiter": maxiter, "gtol": tol})
print(res)
final_params = res.x
for i in range(num):
final_params[2*i+1] = np.exp(final_params[2*i+1])
results = []
for i in range(num):
results.append(final_params[2*i])
results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])
for i in range(num-1):
results.append(final_params[2*num + i])
return results
def estimate_powell(data, num = 1, tol = 0.01, maxiter = 100):
fit_params = np.zeros(3*num - 1)
a = np.average(data)
s = np.log(np.std(data))
for i in range(num):
fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)
fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)
    def training_loss(params):
        return -log_likelihood_logistic(data, params)
res = minimize(training_loss, x0=fit_params, method="Powell", tol=tol, options = {"maxiter": maxiter})
print(res)
final_params = res.x
for i in range(num):
final_params[2*i+1] = np.exp(final_params[2*i+1])
results = []
for i in range(num):
results.append(final_params[2*i])
results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])
for i in range(num-1):
results.append(final_params[2*num + i])
return results
|
n = len(percentiles)
c = (len(params) + 1)//3
r = 0
if (len(params) + 1) % 3 != 0:
print("Parameters specified incorrectly!")
return None
else:
weights = [1]
for k in range(c-1):
weights.append(np.exp(params[2*c + k]))
s = np.sum(weights)
for q in range(1, n):
cdf_list = [logistic_cdf(percentiles[q-1], params[2*j], np.exp(params[2*j+1])) for j in range(c)]
cdf_list_n = [weights[j] * cdf_list[j] for j in range(c)]
r += (np.sum(cdf_list_n)/s - q/n)**2/n
return r
|
chat.js
|
"use strict";
$(document).ready(function() {
var chatbg = $(window).height()-57;
$('.chat-bg').css('min-height', chatbg);
var a = $(window).height() - 70;
$(".user-box").slimScroll({
height: a,
|
color: '#000'
});
// search
$(".search-text").on("keyup", function() {
var g = $(this).val().toLowerCase();
$(".userlist-box .media-body .chat-header").each(function() {
var s = $(this).text().toLowerCase();
$(this).closest('.userlist-box')[s.indexOf(g) !== -1 ? 'show' : 'hide']();
});
});
});
|
allowPageScroll: false,
|
contig_filter_util.py
|
from Bio import SeqIO
def
|
(input_path, filtered_path, min_length):
# Inside {username}ContigFilterImpl#run_{username}ContigFilter_max, after you have fetched the fasta file:
# Parse the downloaded file in FASTA format
parsed_assembly = SeqIO.parse(input_path, 'fasta')
# Keep a list of contigs greater than min_length
good_contigs = []
# total contigs regardless of length
n_total = 0
# total contigs over the min_length
n_remaining = 0
    for record in parsed_assembly:
n_total += 1
if len(record.seq) >= min_length:
good_contigs.append(record)
n_remaining += 1
output = {
'n_total': n_total,
'n_remaining': n_remaining
}
SeqIO.write(good_contigs, filtered_path, 'fasta')
return output
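# Usage sketch (hypothetical paths):
#   stats = contig_filter('assembly.fasta', 'filtered.fasta', min_length=1000)
#   print(stats)  # e.g. {'n_total': 12, 'n_remaining': 7}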
|
contig_filter
|
add_embed_field.js
|
module.exports = {
name: 'Add Embed Field',
section: 'Embed Message',
subtitle (data) {
return `${data.name} - ${data.message}`
},
fields: ['storage', 'varName', 'fieldName', 'message', 'inline'],
html (isEvent, data) {
return `
<div><p>This action has been modified by DBM Mods. Use [Title](Link) to mask links here.</p></div><br>
<div>
<div style="float: left; width: 35%;">
Source Embed Object:<br>
<select id="storage" class="round" onchange="glob.refreshVariableList(this)">
${data.variables[1]}
</select>
</div>
<div id="varNameContainer" style="float: right; width: 60%;">
Variable Name:<br>
<input id="varName" class="round varSearcher" type="text" list="variableList"><br>
</div>
</div><br><br><br>
<div style="padding-top: 8px;">
<div style="float: left; width: 50%;">
Field Name:<br>
<input id="fieldName" placeholder="Optional" class="round" type="text">
</div>
<div style="float: left; width: 50%;">
Display Inline:<br>
<select id="inline" class="round">
<option value="0">Yes</option>
<option value="1" selected>No</option>
</select>
</div>
</div><br><br><br>
<div style="padding-top: 8px;">
Field Description:<br>
<textarea id="message" rows="7.5" placeholder="Insert message here... (Optional)" style="width: 99%; font-family: monospace; white-space: nowrap; resize: none;"></textarea>
</div>`
},
init () {},
action (cache) {
|
const varName = this.evalMessage(data.varName, cache)
const embed = this.getVariable(storage, varName, cache)
const name = this.evalMessage(data.fieldName, cache)
const message = this.evalMessage(data.message, cache)
const inline = data.inline === '0'
if (embed && embed.addField) {
embed.addField(name || '\u200B', message || '\u200B', inline)
}
this.callNextAction(cache)
},
mod () {}
}
|
error.rs
|
use std::error::Error as StdError;
use std::fmt;
use std::io;
use std::result;
use crate::byte_record::{ByteRecord, Position};
use crate::deserializer::DeserializeError;
/// A type alias for `Result<T, csv::Error>`.
pub type Result<T> = result::Result<T, Error>;
/// An error that can occur when processing CSV data.
///
/// This error can happen when writing or reading CSV data.
///
/// There are some important scenarios in which an error cannot occur.
/// For example, if a CSV reader is used on an in-memory buffer with the
/// `flexible` option enabled and one is reading records as raw byte strings,
/// then no error can occur.
#[derive(Debug)]
pub struct Error(Box<ErrorKind>);
impl Error {
/// A crate private constructor for `Error`.
pub(crate) fn new(kind: ErrorKind) -> Error {
Error(Box::new(kind))
}
/// Return the specific type of this error.
pub fn kind(&self) -> &ErrorKind {
&self.0
}
/// Unwrap this error into its underlying type.
pub fn into_kind(self) -> ErrorKind {
*self.0
}
/// Returns true if this is an I/O error.
///
/// If this is true, the underlying `ErrorKind` is guaranteed to be
/// `ErrorKind::Io`.
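    ///
    /// A minimal sketch of branching on this (assuming a `result:
    /// Result<()>` produced by some read loop):
    ///
    /// ```ignore
    /// if let Err(err) = result {
    ///     if err.is_io_error() {
    ///         // surface or retry the underlying I/O failure
    ///     }
    /// }
    /// ```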
pub fn is_io_error(&self) -> bool {
match *self.0 {
ErrorKind::Io(_) => true,
_ => false,
}
}
/// Return the position for this error, if one exists.
///
/// This is a convenience function that permits callers to easily access
/// the position on an error without doing case analysis on `ErrorKind`.
pub fn position(&self) -> Option<&Position> {
self.0.position()
}
}
/// The specific type of an error.
#[derive(Debug)]
pub enum ErrorKind {
/// An I/O error that occurred while reading CSV data.
Io(io::Error),
    /// A UTF-8 decoding error that occurred while reading CSV data into Rust
/// `String`s.
Utf8 {
/// The position of the record in which this error occurred, if
/// available.
pos: Option<Position>,
/// The corresponding UTF-8 error.
err: Utf8Error,
},
/// This error occurs when two records with an unequal number of fields
/// are found. This error only occurs when the `flexible` option in a
/// CSV reader/writer is disabled.
UnequalLengths {
/// The position of the first record with an unequal number of fields
/// to the previous record, if available.
pos: Option<Position>,
/// The expected number of fields in a record. This is the number of
/// fields in the record read prior to the record indicated by
/// `pos`.
expected_len: u64,
/// The number of fields in the bad record.
len: u64,
},
/// This error occurs when either the `byte_headers` or `headers` methods
/// are called on a CSV reader that was asked to `seek` before it parsed
/// the first record.
Seek,
/// An error of this kind occurs only when using the Serde serializer.
Serialize(String),
/// An error of this kind occurs only when performing automatic
/// deserialization with serde.
Deserialize {
/// The position of this error, if available.
pos: Option<Position>,
/// The deserialization error.
err: DeserializeError,
},
/// Hints that destructuring should not be exhaustive.
///
/// This enum may grow additional variants, so this makes sure clients
/// don't count on exhaustive matching. (Otherwise, adding a new variant
/// could break existing code.)
#[doc(hidden)]
__Nonexhaustive,
}
impl ErrorKind {
/// Return the position for this error, if one exists.
///
/// This is a convenience function that permits callers to easily access
/// the position on an error without doing case analysis on `ErrorKind`.
pub fn position(&self) -> Option<&Position> {
match *self {
ErrorKind::Utf8 { ref pos, .. } => pos.as_ref(),
ErrorKind::UnequalLengths { ref pos, .. } => pos.as_ref(),
ErrorKind::Deserialize { ref pos, .. } => pos.as_ref(),
_ => None,
}
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::new(ErrorKind::Io(err))
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> io::Error {
io::Error::new(io::ErrorKind::Other, err)
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match *self.0 {
ErrorKind::Io(ref err) => Some(err),
ErrorKind::Utf8 { ref err, .. } => Some(err),
ErrorKind::UnequalLengths { .. } => None,
ErrorKind::Seek => None,
ErrorKind::Serialize(_) => None,
ErrorKind::Deserialize { ref err, .. } => Some(err),
_ => unreachable!(),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self.0 {
ErrorKind::Io(ref err) => err.fmt(f),
ErrorKind::Utf8 { pos: None, ref err } => {
write!(f, "CSV parse error: field {}: {}", err.field(), err)
}
ErrorKind::Utf8 { pos: Some(ref pos), ref err } => write!(
f,
"CSV parse error: record {} \
(line {}, field: {}, byte: {}): {}",
pos.record(),
pos.line(),
err.field(),
pos.byte(),
err
),
ErrorKind::UnequalLengths { pos: None, expected_len, len } => {
write!(
f,
"CSV error: \
found record with {} fields, but the previous record \
has {} fields",
len, expected_len
)
}
ErrorKind::UnequalLengths {
pos: Some(ref pos),
expected_len,
len,
} => write!(
f,
"CSV error: record {} (line: {}, byte: {}): \
found record with {} fields, but the previous record \
has {} fields",
pos.record(),
pos.line(),
pos.byte(),
len,
expected_len
),
ErrorKind::Seek => write!(
f,
"CSV error: cannot access headers of CSV data \
when the parser was seeked before the first record \
could be read"
),
ErrorKind::Serialize(ref err) => {
write!(f, "CSV write error: {}", err)
}
ErrorKind::Deserialize { pos: None, ref err } => {
write!(f, "CSV deserialize error: {}", err)
}
ErrorKind::Deserialize { pos: Some(ref pos), ref err } => write!(
f,
"CSV deserialize error: record {} \
(line: {}, byte: {}): {}",
pos.record(),
pos.line(),
pos.byte(),
err
),
_ => unreachable!(),
}
}
}
/// A UTF-8 validation error during record conversion.
///
/// This occurs when attempting to convert a `ByteRecord` into a
/// `StringRecord`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FromUtf8Error {
record: ByteRecord,
err: Utf8Error,
}
impl FromUtf8Error {
/// Create a new FromUtf8Error.
pub(crate) fn new(rec: ByteRecord, err: Utf8Error) -> FromUtf8Error {
FromUtf8Error { record: rec, err: err }
}
/// Access the underlying `ByteRecord` that failed UTF-8 validation.
pub fn into_byte_record(self) -> ByteRecord {
self.record
}
/// Access the underlying UTF-8 validation error.
pub fn utf8_error(&self) -> &Utf8Error {
&self.err
}
}
impl fmt::Display for FromUtf8Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.err.fmt(f)
}
}
impl StdError for FromUtf8Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
Some(&self.err)
}
}
/// A UTF-8 validation error.
///
/// This occurs when attempting to convert a `ByteRecord` into a
/// `StringRecord`.
///
/// The error includes the index of the field that failed validation, and the
/// last byte at which valid UTF-8 was verified.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Utf8Error {
/// The field index of a byte record in which UTF-8 validation failed.
field: usize,
/// The index into the given field up to which valid UTF-8 was verified.
valid_up_to: usize,
}
/// Create a new UTF-8 error.
pub fn new_utf8_error(field: usize, valid_up_to: usize) -> Utf8Error {
Utf8Error { field: field, valid_up_to: valid_up_to }
}
impl Utf8Error {
/// The field index of a byte record in which UTF-8 validation failed.
pub fn field(&self) -> usize {
self.field
}
/// The index into the given field up to which valid UTF-8 was verified.
pub fn valid_up_to(&self) -> usize {
self.valid_up_to
}
}
impl StdError for Utf8Error {}
impl fmt::Display for Utf8Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"invalid utf-8: invalid UTF-8 in field {} near byte index {}",
self.field, self.valid_up_to
)
}
}
/// `IntoInnerError` occurs when consuming a `Writer` fails.
///
/// Consuming the `Writer` causes a flush to happen. If the flush fails, then
/// this error is returned, which contains both the original `Writer` and
/// the error that occurred.
///
/// The type parameter `W` is the unconsumed writer.
pub struct IntoInnerError<W> {
wtr: W,
err: io::Error,
}
impl<W> IntoInnerError<W> {
/// Creates a new `IntoInnerError`.
///
/// (This is a visibility hack. It's public in this module, but not in the
/// crate.)
pub(crate) fn new(wtr: W, err: io::Error) -> IntoInnerError<W> {
IntoInnerError { wtr: wtr, err: err }
}
/// Returns the error which caused the call to `into_inner` to fail.
///
/// This error was returned when attempting to flush the internal buffer.
pub fn error(&self) -> &io::Error {
&self.err
}
/// Returns the underlying writer which generated the error.
///
/// The returned value can be used for error recovery, such as
/// re-inspecting the buffer.
pub fn into_inner(self) -> W {
self.wtr
}
}
impl<W: std::any::Any> StdError for IntoInnerError<W> {
fn source(&self) -> Option<&(dyn StdError + 'static)>
    {
        self.err.source()
    }
}
impl<W> fmt::Display for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.err.fmt(f)
}
}
impl<W> fmt::Debug for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.err.fmt(f)
}
}
|
worker_test.go
|
package work_test
import (
"fmt"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.e13.dev/golib/work"
)
func Example() {
// create a worker with 3 goroutines that can process jobs in parallel
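	// (the final boolean argument presumably toggles publishing results on the
	// Completions() channel; it is false here because the example never reads
	// completions)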
worker := work.NewWorker(3, func(p work.Payload) interface{} {
// this is our worker function that is called for every new job
fmt.Println(p.Data)
time.Sleep(200 * time.Millisecond)
return nil
}, false)
// create 100 jobs and dispatch them to the worker.
for i := 0; i < 100; i++ {
// this call will block when all 3 goroutines are currently busy.
err := worker.Dispatch(work.Payload{Data: strconv.Itoa(i)})
if err != nil {
return // the worker has already been shut down
}
}
// this call makes sure that the worker stops all goroutines as soon as
// they have processed all remaining jobs.
worker.Quit()
}
func TestDispatchShouldReturnErrorWhenWorkerIsShutDown(t *testing.T) {
worker := work.NewWorker(1, func(p work.Payload) interface{} {
return fmt.Sprintf("%s.", p.Data)
}, true)
worker.Quit()
err := worker.Dispatch(work.Payload{})
assert.Error(t, err, "Dispatch returned no error")
}
func TestCallingQuitMultipleTimesShouldNotBlock(t *testing.T) {
worker := work.NewWorker(1, func(p work.Payload) interface{} {
return fmt.Sprintf("%s.", p.Data)
}, true)
worker.Quit()
worker.Quit()
}
func TestQuitShouldCloseCompletionsChannel(t *testing.T) {
worker := work.NewWorker(1, func(p work.Payload) interface{} {
return fmt.Sprintf("%s.", p.Data)
}, true)
worker.Quit()
_, ok := <-worker.Completions()
assert.Equal(t, false, ok, "The completions channel was not closed")
}
func TestQuitShouldAscertainThatAllJobsHaveCompleted(t *testing.T) {
resCh := make(chan string, 100)
worker := work.NewWorker(1, func(p work.Payload) interface{} {
resCh <- fmt.Sprintf("%s.", p.Data)
return nil
}, false)
for i := 0; i < 100; i++ {
err := worker.Dispatch(work.Payload{Data: strconv.Itoa(i)})
if err != nil {
t.Fail()
return
}
}
worker.Quit()
assert.Equal(t, 100, len(resCh), "Not all jobs were completed")
}
func TestJobCountShouldReturnZeroWithNoJobsDispatched(t *testing.T) {
worker := work.NewWorker(99, func(p work.Payload) interface{} { return nil }, false)
assert.Equal(t, 0, worker.JobCount(), "Job count is wrong")
}
func TestJobCountShouldReturnCorrectValue(t *testing.T) {
startCh := make(chan struct{})
quitCh := make(chan struct{})
workers := 5
// start a worker with max 5 concurrently running jobs
worker := work.NewWorker(workers, func(p work.Payload) interface{} {
startCh <- struct{}{}
<-quitCh
return nil
}, true)
// iteratively start jobs and check job count
for i := 0; i < workers; i++ {
err := worker.Dispatch(work.Payload{})
if err != nil {
t.Fail()
}
<-startCh
assert.Equal(t, i+1, worker.JobCount(), "Job count is wrong")
}
// iteratively stop jobs and check job count
for i := workers; i > 0; i-- {
quitCh <- struct{}{}
<-worker.Completions()
assert.Equal(t, i-1, worker.JobCount(), "Job count is wrong")
}
}
func TestWorkerShouldWorkSequentiallyWithOnlyOneGoroutine(t *testing.T) {
resultCh := make(chan string)
worker := work.NewWorker(1, func(p work.Payload) interface{} {
return fmt.Sprintf("%s.", p.Data)
}, true)
go func() {
var result string
for v := range worker.Completions() {
result += v.Output.(string)
}
resultCh <- result
}()
for i := 0; i < 100; i++ {
err := worker.Dispatch(work.Payload{Data: strconv.Itoa(i)})
if err != nil {
t.Fail()
return
}
}
worker.Quit()
assert.Equal(t, "0.1.2.3.4.5.6.7.8.9.10.11.12.13.14.15.16.17.18.19.20.21.22.23.24.25.26.27.28.29.30.31.32.33.34.35."+
"36.37.38.39.40.41.42.43.44.45.46.47.48.49.50.51.52.53.54.55.56.57.58.59.60.61.62.63.64.65.66.67.68.69.70.71.72.73.74.75.76.77.78."+
"79.80.81.82.83.84.85.86.87.88.89.90.91.92.93.94.95.96.97.98.99.", <-resultCh, "Jobs were completed in wrong order or incompletely")
}
|
n_continue.go
|
package stmt
import (
"github.com/setpill/noverify/src/php/parser/freefloating"
"github.com/setpill/noverify/src/php/parser/node"
"github.com/setpill/noverify/src/php/parser/position"
"github.com/setpill/noverify/src/php/parser/walker"
)
// Continue node
type Continue struct {
FreeFloating freefloating.Collection
Position *position.Position
Expr node.Node
}
// NewContinue node constructor
func NewContinue(Expr node.Node) *Continue {
return &Continue{
FreeFloating: nil,
Expr: Expr,
}
}
// SetPosition sets node position
func (n *Continue) SetPosition(p *position.Position) {
n.Position = p
}
// GetPosition returns node positions
func (n *Continue) GetPosition() *position.Position {
return n.Position
}
func (n *Continue) GetFreeFloating() *freefloating.Collection {
return &n.FreeFloating
}
// Walk traverses nodes
// Walk is invoked recursively while v.EnterNode returns true
func (n *Continue) Walk(v walker.Visitor) {
if !v.EnterNode(n) {
return
}
if n.Expr != nil {
n.Expr.Walk(v)
}
v.LeaveNode(n)
}
|
authentication_policy.go
|
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
// API Gateway API
//
// API for the API Gateway service. Use this API to manage gateways, deployments, and related items.
// For more information, see
// Overview of API Gateway (https://docs.cloud.oracle.com/iaas/Content/APIGateway/Concepts/apigatewayoverview.htm).
//
package apigateway
import (
"encoding/json"
"github.com/oracle/oci-go-sdk/v36/common"
)
// AuthenticationPolicy Information on how to authenticate incoming requests.
type AuthenticationPolicy interface {
// Whether an unauthenticated user may access the API. Must be "true" to enable ANONYMOUS
// route authorization.
GetIsAnonymousAccessAllowed() *bool
}
type authenticationpolicy struct {
JsonData []byte
IsAnonymousAccessAllowed *bool `mandatory:"false" json:"isAnonymousAccessAllowed"`
Type string `json:"type"`
}
// UnmarshalJSON unmarshals json
func (m *authenticationpolicy) UnmarshalJSON(data []byte) error {
m.JsonData = data
type Unmarshalerauthenticationpolicy authenticationpolicy
s := struct {
Model Unmarshalerauthenticationpolicy
}{}
err := json.Unmarshal(data, &s.Model)
if err != nil {
return err
}
m.IsAnonymousAccessAllowed = s.Model.IsAnonymousAccessAllowed
m.Type = s.Model.Type
	return err
}
// UnmarshalPolymorphicJSON unmarshals polymorphic json
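// The concrete policy type is selected by the Type discriminator captured in
// UnmarshalJSON above; unrecognized values fall back to the base struct.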
func (m *authenticationpolicy) UnmarshalPolymorphicJSON(data []byte) (interface{}, error) {
if data == nil || string(data) == "null" {
return nil, nil
}
var err error
switch m.Type {
case "JWT_AUTHENTICATION":
mm := JwtAuthenticationPolicy{}
err = json.Unmarshal(data, &mm)
return mm, err
case "CUSTOM_AUTHENTICATION":
mm := CustomAuthenticationPolicy{}
err = json.Unmarshal(data, &mm)
return mm, err
default:
return *m, nil
}
}
//GetIsAnonymousAccessAllowed returns IsAnonymousAccessAllowed
func (m authenticationpolicy) GetIsAnonymousAccessAllowed() *bool {
return m.IsAnonymousAccessAllowed
}
func (m authenticationpolicy) String() string {
return common.PointerString(m)
}
// AuthenticationPolicyTypeEnum Enum with underlying type: string
type AuthenticationPolicyTypeEnum string
// Set of constants representing the allowable values for AuthenticationPolicyTypeEnum
const (
AuthenticationPolicyTypeCustomAuthentication AuthenticationPolicyTypeEnum = "CUSTOM_AUTHENTICATION"
AuthenticationPolicyTypeJwtAuthentication AuthenticationPolicyTypeEnum = "JWT_AUTHENTICATION"
)
var mappingAuthenticationPolicyType = map[string]AuthenticationPolicyTypeEnum{
"CUSTOM_AUTHENTICATION": AuthenticationPolicyTypeCustomAuthentication,
"JWT_AUTHENTICATION": AuthenticationPolicyTypeJwtAuthentication,
}
// GetAuthenticationPolicyTypeEnumValues Enumerates the set of values for AuthenticationPolicyTypeEnum
func GetAuthenticationPolicyTypeEnumValues() []AuthenticationPolicyTypeEnum {
values := make([]AuthenticationPolicyTypeEnum, 0)
for _, v := range mappingAuthenticationPolicyType {
values = append(values, v)
}
return values
}
|
optimize.rs
|
use std::fs;
use std::io::Error;
use std::process;
use crate::css;
pub fn css(file: &str) -> Result<(), Error> {
    let contents = fs::read_to_string(file)?;
let optimized = match css::optimize(contents) {
Ok(opt) => opt,
Err(e) => {
eprintln!("Error parsing file {}!", file);
eprintln!("{}", e);
process::exit(1);
}
};
// Add min to file
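    // e.g. "style.css" becomes "style.min.css" (assumes the path contains a '.')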
    let mut split: Vec<&str> = file.split('.').collect();
    split.insert(split.len() - 1, "min");
let out_file = split.join(".");
fs::write(out_file, optimized)
}
|
init_theano_settings.py
|
# -*- coding: utf-8 -*-
"""
References:
http://deeplearning.net/software/theano/library/config.html
Check Settings:
    python -c 'import theano; print(theano.config)' | less
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import os
from os.path import join
(print, rrr, profile) = ut.inject2(__name__)
"""
CommandLine:
cd %CODE_DIR%/wbia_cnn/code
cd $CODE_DIR/wbia_cnn/code
code
cd wbia_cnn/code
python train.py
Purge from system and environ:
cd
python -c "import utool as ut; ut.total_purge_developed_repo('~/code/pylearn2')"
python -c "import utool as ut; ut.total_purge_developed_repo('~/code/Theano')"
python -c "import utool as ut; ut.total_purge_developed_repo('~/code/Lasagne')"
# Remove pylearn2 scripts
sudo rm /home/joncrall/venv/bin/pylearn2-*
sudo rm /usr/local/bin/pylearn2-*
locate pylearn2 | grep -v /home/joncrall/code/pylearn2 | grep -v /home/jason/code/pylearn2
pip uninstall theano
pip uninstall lasagne
pip uninstall pylearn2
sudo -H pip uninstall theano
sudo -H pip uninstall lasagne
sudo -H pip uninstall pylearn2
sudo pip uninstall theano
sudo pip uninstall lasagne
sudo pip uninstall pylearn2
# If they do try chowning to current user
sudo chown -R $USER:$USER ~/code/pylearn2
sudo chown -R $USER:$USER ~/code/Theano
sudo chown -R $USER:$USER ~/code/Lasagne
export GLOBAL_SITE_PKGS=$(python -c "import utool as ut; print(ut.get_global_dist_packages_dir())")
export LOCAL_SITE_PKGS=$(python -c "import utool as ut; print(ut.get_local_dist_packages_dir())")
export VENV_SITE_PKGS=$(python -c "import utool as ut; print(ut.get_site_packages_dir())")
    # Test that they don't exist
python -c "import pylearn2; print(pylearn2.__file__)"
python -c "import theano; print(theano.__version__)"
python -c "import lasagne; print(lasagne.__version__)"
PythonPrereqs:
co
git clone git://github.com/lisa-lab/pylearn2.git
git clone https://github.com/Theano/Theano.git
git clone https://github.com/Erotemic/Lasagne.git
cd ~/code/pylearn2 && git pull && python setup.py develop
cd ~/code/Theano && git pull && python setup.py develop
cd ~/code/Lasagne && git pull && python setup.py develop
python -c "import pylearn2; print(pylearn2.__file__)"
python -c "import theano; print(theano.__version__)"
python -c "import lasagne; print(lasagne.__version__)"
git checkout 8758ac1434175159e5c1f30123041799c2b6098a
OLD:
git clone https://github.com/Lasagne/Lasagne.git
pip install theano
pip install git+https://github.com/Lasagne/Lasagne.git
pip install git+git://github.com/lisa-lab/pylearn2.git
#pip install lasagne
#pip install pylearn2
Ensure CuDNN is installed
http://lasagne.readthedocs.io/en/latest/user/installation.html#cudnn
# Test if Theano Works with CUDNN
python -c "from theano.sandbox.cuda.dnn import dnn_available as d; print(d() or d.msg)"
# Need to register with nvidia
https://developer.nvidia.com/rdp/cudnn-download
# Check cuda version
nvcc --version
# Check if cuda is globally installed
ls -al /usr/local/cuda
# Check if CUDNN is globally installed
ls -al /usr/local/cuda/include/cudnn.h
ls -al /usr/local/cuda/lib64/cudnn*
    # Download the appropriate version
    cd ~/Downloads
    # doesn't work if you don't sign in
# wget https://developer.nvidia.com/compute/machine-learning/cudnn/secure/v5.1/rc/7.5/cudnn-7.5-linux-x64-v5.1-rc-tgz
# Unpack appropriate version
cd ~/Downloads
7z x cudnn-7.5-linux-x64-v5.1-rc.tgz && 7z x -ocudnn5.1 cudnn-7.5-linux-x64-v5.1-rc.tar
7z x cudnn-7.5-linux-x64-v5.0-ga.tgz && 7z x -ocudnn5.0 cudnn-7.5-linux-x64-v5.0-ga.tar
7z x cudnn-7.0-linux-x64-v4.0-prod.tgz && 7z x -ocudnn4.0 cudnn-7.0-linux-x64-v4.0-prod.tar
tree ~/Downloads/cudnn5.1/
tree ~/Downloads/cudnn4/
# DEFINE WHERE CUDA LIVES
export CUDADIR=/usr/local/cuda
export TARGET_CUDNN_VERSION=5.1
MAIN_CUDNN_VERSION="$(echo $TARGET_CUDNN_VERSION | head -c 1)"
# Check CUDNN Install
ls -al $CUDADIR/include/cudnn.h
ls -al $CUDADIR/lib64/libcudnn*
    # Look at other cuda install permissions
ls -al $CUDADIR/include/cublas.h
ls -al $CUDADIR/lib64/libcublas*
# REMOVE / UNINSTALL OLD CUDNN
sudo rm -rf $CUDADIR/include/cudnn.h
sudo rm -rf $CUDADIR/lib64/libcudnn*
# Extract into folder called cuda, need to move it to wherever cuda is installed
# cudnn consists of one header and 4 libraries
sudo cp -rv ~/Downloads/cudnn$TARGET_CUDNN_VERSION/cuda/include/cudnn.h $CUDADIR/include/cudnn.h
sudo cp -rv ~/Downloads/cudnn$TARGET_CUDNN_VERSION/cuda/lib64/libcudnn.so.$TARGET_CUDNN_VERSION* $CUDADIR/lib64/
sudo cp -rv ~/Downloads/cudnn$TARGET_CUDNN_VERSION/cuda/lib64/libcudnn_static.a $CUDADIR/lib64/
# Manually make symlinks (ones nvidia ships are broken)
sudo ln -s $CUDADIR/lib64/libcudnn.so.$TARGET_CUDNN_VERSION* $CUDADIR/lib64/libcudnn.so.$MAIN_CUDNN_VERSION
sudo ln -s $CUDADIR/lib64/libcudnn.so.$MAIN_CUDNN_VERSION $CUDADIR/lib64/libcudnn.so
# Set permissions to reflect cuda install
sudo chmod 755 /usr/local/cuda/lib64/libcudnn.so.$TARGET_CUDNN_VERSION*
# Check CUDNN Install
ls -al $CUDADIR/include/cudnn.h
ls -al $CUDADIR/lib64/libcudnn*
# Test if Theano Works with CUDNN
python -c "from theano.sandbox.cuda.dnn import dnn_available as d; print(d() or d.msg)"
"""
def init_theanorc():
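    """Write a default ~/.theanorc enabling float32, device gpu0, and OpenMP,
    prompting before overwriting an existing file."""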
theanorc_fpath = join(os.getenv('HOME'), '.theanorc')
theanorc_text = ut.codeblock(
"""
[global]
floatX = float32
device = gpu0
openmp = True
[nvcc]
fastmath = True
"""
)
if ut.checkpath(theanorc_fpath, verbose=True):
if not ut.arg_you_sure('overwrite?'):
            return
ut.write_to(theanorc_fpath, theanorc_text)
if __name__ == '__main__':
init_theanorc()
|
math-cordic.js
|
/*
* Copyright (C) Rich Moore. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
///// Start CORDIC
var AG_CONST = 0.6072529350;
function FIXED(X)
{
return X * 65536.0;
}
function FLOAT(X)
{
return X / 65536.0;
}
function DEG2RAD(X)
{
return 0.017453 * (X);
}
var Angles = [
FIXED(45.0), FIXED(26.565), FIXED(14.0362), FIXED(7.12502),
FIXED(3.57633), FIXED(1.78991), FIXED(0.895174), FIXED(0.447614),
FIXED(0.223811), FIXED(0.111906), FIXED(0.055953),
FIXED(0.027977)
];
var Target = 28.027;
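// CORDIC evaluates sin/cos using only shifts and adds: the vector (X, Y) is
// rotated through the fixed table of angles above, picking the rotation
// direction at each step so that CurrAngle converges on TargetAngle.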
function cordicsincos(Target) {
var X;
var Y;
var TargetAngle;
var CurrAngle;
var Step;
X = FIXED(AG_CONST); /* AG_CONST * cos(0) */
Y = 0; /* AG_CONST * sin(0) */
TargetAngle = FIXED(Target);
CurrAngle = 0;
for (Step = 0; Step < 12; Step++) {
var NewX;
if (TargetAngle > CurrAngle) {
NewX = X - (Y >> Step);
Y = (X >> Step) + Y;
X = NewX;
CurrAngle += Angles[Step];
} else {
NewX = X + (Y >> Step);
Y = -(X >> Step) + Y;
X = NewX;
CurrAngle -= Angles[Step];
}
}
return FLOAT(X) * FLOAT(Y);
}
///// End CORDIC
var total = 0;
function cordic( runs ) {
var start = new Date();
for ( var i = 0 ; i < runs ; i++ ) {
total += cordicsincos(Target);
}
var end = new Date();
return end.getTime() - start.getTime();
}
cordic(25000);
var expected = 10362.570468755888;
if (total != expected)
throw "ERROR: bad result: expected " + expected + " but got " + total;
|
machine.entity.ts
|
import { Entity, Column, ObjectIdColumn, ObjectID } from 'typeorm';
import { BaseEntity } from '../../utils/base.entity';
import { ApiProperty } from '@nestjs/swagger';
@Entity('machines')
export class Machines extends BaseEntity {
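  // MongoDB document id (ObjectID primary column)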
@ObjectIdColumn()
_id: ObjectID;
@Column()
@ApiProperty()
public Name: string
@Column()
@ApiProperty()
public Location: string
@Column()
@ApiProperty()
public WarrantyExpiryDate: Date
@Column()
@ApiProperty()
public SerialNumber: string
@Column()
@ApiProperty()
  public InService: boolean
@Column()
@ApiProperty()
public ModelId: number
@Column()
@ApiProperty()
public ModelType: string
@Column()
@ApiProperty()
public SupplierId: number
@Column()
@ApiProperty()
public SupplierManufacturer: string
@Column()
@ApiProperty()
public DealerId: number
@Column()
@ApiProperty()
public SupplierDealer: string
@Column()
@ApiProperty()
public AMCVendorId: number
@Column()
@ApiProperty()
public SupplierAMCVendor: string
constructor(o: Object) {
super(o);
}
}
|
direct_link_v1_integration_test.go
|
/**
* (C) Copyright IBM Corp. 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package directlinkv1_test
/*
How to run this test:
go test -v ./directlinkv1
*/
import (
"bytes"
"io/ioutil"
"os"
"strconv"
"time"
"github.com/IBM/go-sdk-core/v4/core"
"github.com/IBM/networking-go-sdk/directlinkv1"
"github.com/joho/godotenv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var configLoaded = false
func shouldSkipTest() {
if !configLoaded {
Skip("External configuration is not available, skipping...")
}
}
var _ = Describe(`DirectLinkV1`, func() {
err := godotenv.Load("../directlink.env")
It(`Successfully loading .env file`, func() {
if err == nil {
serviceURL := os.Getenv("SERVICE_URL")
if serviceURL != "" {
configLoaded = true
}
}
if !configLoaded {
Skip("External configuration could not be loaded, skipping...")
}
})
authenticator := &core.IamAuthenticator{
ApiKey: os.Getenv("IAMAPIKEY"),
URL: "https://iam.test.cloud.ibm.com/identity/token",
}
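	// Direct Link requests carry a date-based API version; use today's date.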
version := time.Now().Format("2006-01-02")
serviceURL := os.Getenv("SERVICE_URL")
options := &directlinkv1.DirectLinkV1Options{
ServiceName: "DirectLinkV1_Mocking",
Authenticator: authenticator,
URL: serviceURL,
Version: &version,
}
service, err := directlinkv1.NewDirectLinkV1UsingExternalConfig(options)
It(`Successfully created DirectLinkV1 service instance`, func() {
shouldSkipTest()
Expect(err).To(BeNil())
})
Describe("Direct Link Gateways", func() {
timestamp := time.Now().Unix()
gatewayName := "GO-INT-SDK-" + strconv.FormatInt(timestamp, 10)
updatedGatewayName := "GO-INT-SDK-PATCH-" + strconv.FormatInt(timestamp, 10)
bgpAsn := int64(64999)
crossConnectRouter := "LAB-xcr01.dal09"
global := true
locationName := os.Getenv("LOCATION_NAME")
speedMbps := int64(1000)
metered := false
carrierName := "carrier1"
customerName := "customer1"
gatewayType := "dedicated"
invalidGatewayId := "000000000000000000000000000000000000"
Context("Get non existing gateway", func() {
getGatewayOptions := service.NewGetGatewayOptions(invalidGatewayId)
It(`Returns the http response with error code 404`, func() {
shouldSkipTest()
result, detailedResponse, err := service.GetGateway(getGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Gateway"))
Expect(detailedResponse.StatusCode).To(Equal(404))
})
})
Context("Create gateway", func() {
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
It("Fails when Invalid BGP is provided", func() {
shouldSkipTest()
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(65500, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("BGP AS Number is invalid."))
Expect(detailedResponse.StatusCode).To(Equal(400))
})
It("Fails when invalid speed_mbps is provided", func() {
shouldSkipTest()
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, 10000000000, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Location with provided 'linkSpeed' and 'OfferingType'."))
Expect(detailedResponse.StatusCode).To(Equal(400))
})
It("Fails when invalid locations is provided", func() {
shouldSkipTest()
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, "InvalidCity")
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Location with provided 'linkSpeed' and 'OfferingType'."))
Expect(detailedResponse.StatusCode).To(Equal(400))
})
It("Successfully Creates a gateway", func() {
shouldSkipTest()
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
os.Setenv("GATEWAY_ID", *result.ID)
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(global))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
It("Successfully fetches the created Gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
getGatewayOptions := service.NewGetGatewayOptions(gatewayId)
result, detailedResponse, err := service.GetGateway(getGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(gatewayId))
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(global))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
It("Throws an Error when creating a gateway with same name", func() {
shouldSkipTest()
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("A gateway with the same name already exists"))
Expect(detailedResponse.StatusCode).To(Equal(409))
})
})
Context("Successfully fetch the gateways list", func() {
listGatewaysOptions := service.NewListGatewaysOptions()
It(`Successfully list all gateways`, func() {
shouldSkipTest()
result, detailedResponse, err := service.ListGateways(listGatewaysOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
gateways := result.Gateways
Expect(len(gateways)).Should(BeNumerically(">", 0))
found := false
// find the created gateway and verify the attributes
gatewayId := os.Getenv("GATEWAY_ID")
for _, gw := range gateways {
if *gw.ID == gatewayId {
found = true
Expect(*gw.Name).To(Equal(gatewayName))
Expect(*gw.BgpAsn).To(Equal(bgpAsn))
Expect(*gw.Global).To(Equal(global))
Expect(*gw.Metered).To(Equal(metered))
Expect(*gw.SpeedMbps).To(Equal(speedMbps))
Expect(*gw.Type).To(Equal(gatewayType))
Expect(*gw.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*gw.LocationName).To(Equal(locationName))
Expect(*gw.LocationDisplayName).NotTo(Equal(""))
Expect(*gw.BgpCerCidr).NotTo(BeEmpty())
Expect(*gw.BgpIbmCidr).NotTo(Equal(""))
Expect(*gw.BgpIbmAsn).NotTo(Equal(""))
Expect(*gw.BgpStatus).To(Equal("idle"))
Expect(*gw.CreatedAt).NotTo(Equal(""))
Expect(*gw.Crn).To(HavePrefix("crn:v1"))
Expect(*gw.LinkStatus).To(Equal("down"))
Expect(*gw.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*gw.ResourceGroup.ID).NotTo(Equal(""))
break
}
}
// expect the created gateway to have been found. If not found, throw an error
Expect(found).To(Equal(true))
})
})
Context("Fail update Gateway", func() {
It("Fails if an invalid GatewayID is provided", func() {
shouldSkipTest()
patchGatewayOptions := service.NewUpdateGatewayOptions(invalidGatewayId).SetOperationalStatus("loa_accepted")
result, detailedResponse, err := service.UpdateGateway(patchGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Gateway"))
Expect(detailedResponse.StatusCode).To(Equal(404))
})
It("Successfully Updates the Gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
patchGatewayOptions := service.NewUpdateGatewayOptions(gatewayId)
result, detailedResponse, err := service.UpdateGateway(patchGatewayOptions.SetGlobal(false).SetSpeedMbps(int64(1000)).SetName(updatedGatewayName))
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(gatewayId))
Expect(*result.Name).To(Equal(updatedGatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(false))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
It("Successfully fetches the updated Gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
getGatewayOptions := service.NewGetGatewayOptions(gatewayId)
result, detailedResponse, err := service.GetGateway(getGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(gatewayId))
Expect(*result.Name).To(Equal(updatedGatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(false))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
})
Context("Delete a gateway", func() {
It("Fails if an invalid GatewayID is provided", func() {
shouldSkipTest()
				deleteGatewayOptions := service.NewDeleteGatewayOptions(invalidGatewayId)
				detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Gateway"))
Expect(detailedResponse.StatusCode).To(Equal(404))
})
It("Successfully deletes a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
				deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
				detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
Context("DirectLink connect gateway", func() {
// to create a connect gateway, we need to have a port. List the ports and save the id of the 1st one found
portId := ""
portLocationDisplayName := ""
portLocationName := ""
timestamp := time.Now().Unix()
It("List ports and save the id of the first port", func() {
shouldSkipTest()
listPortsOptions := service.NewListPortsOptions()
result, detailedResponse, err := service.ListPorts(listPortsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
portId = *result.Ports[0].ID
portLocationDisplayName = *result.Ports[0].LocationDisplayName
portLocationName = *result.Ports[0].LocationName
})
It("create connect gateway", func() {
shouldSkipTest()
gatewayName = "GO-INT-SDK-CONNECT-" + strconv.FormatInt(timestamp, 10)
portIdentity, _ := service.NewGatewayPortIdentity(portId)
gateway, _ := service.NewGatewayTemplateGatewayTypeConnectTemplate(bgpAsn, global, metered, gatewayName, speedMbps, "connect", portIdentity)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
// Save the gateway id for deletion
os.Setenv("GATEWAY_ID", *result.ID)
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(true))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.LocationName).To(Equal(portLocationName))
Expect(*result.LocationDisplayName).To(Equal(portLocationDisplayName))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(0))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.OperationalStatus).To(Equal("create_pending"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
Expect(*result.Type).To(Equal("connect"))
Expect(*result.Port.ID).To(Equal(portId))
Expect(*result.ProviderApiManaged).To(Equal(false))
})
It("Successfully waits for connect gateway to be provisioned state", func() {
shouldSkipTest()
getGatewayOptions := service.NewGetGatewayOptions(os.Getenv("GATEWAY_ID"))
// before a connect gateway can be deleted, it needs to have operational_status of provisioned. We need to wait for
// the new gateway to go to provisioned so we can delete it.
timer := 0
for {
// Get the current status for the gateway
result, detailedResponse, err := service.GetGateway(getGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(true))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.LocationName).To(Equal(portLocationName))
Expect(*result.LocationDisplayName).To(Equal(portLocationDisplayName))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(0))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
Expect(*result.Type).To(Equal("connect"))
Expect(*result.Port.ID).To(Equal(portId))
Expect(*result.ProviderApiManaged).To(Equal(false))
// if operational status is "provisioned" then we are done
if *result.OperationalStatus == "provisioned" {
Expect(*result.OperationalStatus).To(Equal("provisioned"))
break
}
// not provisioned yet, see if we have reached the timeout value. If so, exit with failure
if timer > 24 { // 2 min timer (24x5sec)
Expect(*result.OperationalStatus).To(Equal("provisioned")) // timed out fail if status is not provisioned
break
} else {
// Still exists, wait 5 sec
time.Sleep(time.Duration(5) * time.Second)
timer = timer + 1
}
}
})
It("Successfully deletes connect gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
				deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
				detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
// Context("DirectLink MACsec Enabled Gateway", func() {
// timestamp := time.Now().Unix()
// gatewayName := "GO-INT-SDK-MACSEC" + strconv.FormatInt(timestamp, 10)
// updatedGatewayName := "GO-INT-SDK-MACSEC-PATCH-" + strconv.FormatInt(timestamp, 10)
// bgpAsn := int64(64999)
// crossConnectRouter := "LAB-xcr01.dal09"
// global := true
// locationName := os.Getenv("LOCATION_NAME")
// speedMbps := int64(1000)
// metered := false
// carrierName := "carrier1"
// customerName := "customer1"
// gatewayType := "dedicated"
// macsecCak := os.Getenv("MACSEC_CAK")
// macsecSakExpiryTime := int64(86400)
// macsecWindowSize := int64(64)
// It("Create a macsec enabled dedicated gateway", func() {
// shouldSkipTest()
// // Construct an instance of the GatewayMacsecCak model
// gatewayMacsecCak := new(directlinkv1.GatewayMacsecConfigTemplatePrimaryCak)
// gatewayMacsecCak.Crn = core.StringPtr(macsecCak)
// // Construct an instance of the GatewayMacsecConfigTemplate model
// gatewayMacsecConfigTemplate := new(directlinkv1.GatewayMacsecConfigTemplate)
// gatewayMacsecConfigTemplate.Active = core.BoolPtr(true)
// gatewayMacsecConfigTemplate.PrimaryCak = gatewayMacsecCak
// gatewayMacsecConfigTemplate.WindowSize = core.Int64Ptr(macsecWindowSize)
// gatewayTemplate := new(directlinkv1.GatewayTemplateGatewayTypeDedicatedTemplate)
// gatewayTemplate.BgpAsn = core.Int64Ptr(bgpAsn)
// gatewayTemplate.Global = core.BoolPtr(global)
// gatewayTemplate.Metered = core.BoolPtr(metered)
// gatewayTemplate.Name = core.StringPtr(gatewayName)
// gatewayTemplate.SpeedMbps = core.Int64Ptr(int64(1000))
// gatewayTemplate.Type = core.StringPtr(gatewayType)
// gatewayTemplate.CarrierName = core.StringPtr(carrierName)
// gatewayTemplate.CrossConnectRouter = core.StringPtr(crossConnectRouter)
// gatewayTemplate.CustomerName = core.StringPtr(customerName)
// gatewayTemplate.LocationName = core.StringPtr(locationName)
// gatewayTemplate.MacsecConfig = gatewayMacsecConfigTemplate
// createGatewayOptions := service.NewCreateGatewayOptions(gatewayTemplate)
// result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
// Expect(err).To(BeNil())
// Expect(detailedResponse.StatusCode).To(Equal(201))
// os.Setenv("GATEWAY_ID", *result.ID)
// Expect(*result.Name).To(Equal(gatewayName))
// Expect(*result.BgpAsn).To(Equal(bgpAsn))
// Expect(*result.Global).To(Equal(global))
// Expect(*result.Metered).To(Equal(metered))
// Expect(*result.SpeedMbps).To(Equal(speedMbps))
// Expect(*result.Type).To(Equal(gatewayType))
// Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
// Expect(*result.LocationName).To(Equal(locationName))
// Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
// Expect(*result.MacsecConfig.Active).To(Equal(true))
// Expect(*result.MacsecConfig.PrimaryCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.SakExpiryTime).To(Equal(macsecSakExpiryTime))
// Expect(*result.MacsecConfig.WindowSize).To(Equal(macsecWindowSize))
// })
// It("Should successfully update the macsec enabled gateway", func() {
// shouldSkipTest()
// // Construct an instance of the GatewayMacsecCak model
// gatewayMacsecCak := new(directlinkv1.GatewayMacsecConfigPatchTemplateFallbackCak)
// gatewayMacsecCak.Crn = core.StringPtr(macsecCak)
// // Construct an instance of the GatewayMacsecConfigTemplate model
// gatewayMacsecConfigPatchTemplate := new(directlinkv1.GatewayMacsecConfigPatchTemplate)
// gatewayMacsecConfigPatchTemplate.FallbackCak = gatewayMacsecCak
// gatewayId := os.Getenv("GATEWAY_ID")
// patchGatewayOptions := service.NewUpdateGatewayOptions(gatewayId)
// result, detailedResponse, err := service.UpdateGateway(patchGatewayOptions.SetName(updatedGatewayName).SetMacsecConfig(gatewayMacsecConfigPatchTemplate))
// Expect(err).To(BeNil())
// Expect(detailedResponse.StatusCode).To(Equal(200))
// Expect(*result.ID).To(Equal(gatewayId))
// Expect(*result.Name).To(Equal(updatedGatewayName))
// Expect(*result.MacsecConfig.Active).To(Equal(true))
// Expect(*result.MacsecConfig.PrimaryCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.FallbackCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.SakExpiryTime).To(Equal(macsecSakExpiryTime))
// Expect(*result.MacsecConfig.WindowSize).To(Equal(macsecWindowSize))
// })
// It("Successfully waits for macsec enabled gateway to be provisioned state", func() {
// shouldSkipTest()
// getGatewayOptions := service.NewGetGatewayOptions(os.Getenv("GATEWAY_ID"))
// // before a dedicated gateway can be deleted, it needs to have operational_status of provisioned. We need to wait for
// // the new gateway to go to provisioned so we can delete it.
// timer := 0
// for {
// // Get the current status for the gateway
// result, detailedResponse, err := service.GetGateway(getGatewayOptions)
// Expect(err).To(BeNil())
// Expect(detailedResponse.StatusCode).To(Equal(200))
// Expect(*result.Name).To(Equal(updatedGatewayName))
// Expect(*result.BgpAsn).To(Equal(bgpAsn))
// Expect(*result.Global).To(Equal(true))
// Expect(*result.Metered).To(Equal(metered))
// Expect(*result.SpeedMbps).To(Equal(speedMbps))
// Expect(*result.BgpCerCidr).NotTo(BeEmpty())
// Expect(*result.BgpIbmCidr).NotTo(Equal(""))
// Expect(*result.BgpIbmAsn).NotTo(Equal(0))
// Expect(*result.BgpStatus).To(Equal("idle"))
// Expect(*result.CreatedAt).NotTo(Equal(""))
// Expect(*result.Crn).To(HavePrefix("crn:v1"))
// Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
// Expect(*result.Type).To(Equal("dedicated"))
// Expect(*result.ProviderApiManaged).To(Equal(false))
// Expect(*result.MacsecConfig.Active).To(Equal(true))
// Expect(*result.MacsecConfig.PrimaryCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.FallbackCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.SakExpiryTime).To(Equal(macsecSakExpiryTime))
// Expect(*result.MacsecConfig.WindowSize).To(Equal(macsecWindowSize))
// // if operational status is "provisioned" then we are done
// if *result.OperationalStatus == "provisioned" {
// Expect(*result.OperationalStatus).To(Equal("provisioned"))
// break
// }
// // not provisioned yet, see if we have reached the timeout value. If so, exit with failure
// if timer > 24 { // 2 min timer (24x5sec)
// Expect(*result.OperationalStatus).To(Equal("provisioned")) // timed out fail if status is not provisioned
// break
// } else {
// // Still exists, wait 5 sec
// time.Sleep(time.Duration(5) * time.Second)
// timer = timer + 1
// }
// }
// })
// It("Successfully deletes macsec enabled gateway gateway", func() {
// shouldSkipTest()
// gatewayId := os.Getenv("GATEWAY_ID")
// deteleGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
// detailedResponse, err := service.DeleteGateway(deteleGatewayOptions)
// Expect(err).To(BeNil())
// Expect(detailedResponse.StatusCode).To(Equal(204))
// })
// })
})
Describe("Offering Types", func() {
Context("Locations", func() {
It("should fetch the locations for the type dedicated", func() {
shouldSkipTest()
listOfferingTypeLocationsOptions := service.NewListOfferingTypeLocationsOptions("dedicated")
result, detailedResponse, err := service.ListOfferingTypeLocations(listOfferingTypeLocationsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Locations)).Should(BeNumerically(">", 0))
os.Setenv("OT_DEDICATED_LOCATION_DISPLAY_NAME", *result.Locations[0].DisplayName)
os.Setenv("OT_DEDICATED_LOCATION_NAME", *result.Locations[0].Name)
Expect(*result.Locations[0].BillingLocation).NotTo(Equal(""))
Expect(*result.Locations[0].BuildingColocationOwner).NotTo(Equal(""))
Expect(*result.Locations[0].LocationType).NotTo(Equal(""))
// Expect(*result.Locations[0].Market).NotTo(Equal(""))
Expect(*result.Locations[0].MarketGeography).NotTo(Equal(""))
Expect(*result.Locations[0].Mzr).NotTo(Equal(""))
Expect(*result.Locations[0].OfferingType).To(Equal("dedicated"))
Expect(*result.Locations[0].ProvisionEnabled).NotTo(BeNil())
Expect(*result.Locations[0].VpcRegion).NotTo(Equal(""))
})
It("should fetch the locations for the type connect", func() {
shouldSkipTest()
listOfferingTypeLocationsOptions := service.NewListOfferingTypeLocationsOptions("connect")
result, detailedResponse, err := service.ListOfferingTypeLocations(listOfferingTypeLocationsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Locations)).Should(BeNumerically(">", 0))
os.Setenv("OT_CONNECT_LOCATION_DISPLAY_NAME", *result.Locations[0].DisplayName)
os.Setenv("OT_CONNECT_LOCATION_NAME", *result.Locations[0].Name)
Expect(*result.Locations[0].BillingLocation).NotTo(Equal(""))
Expect(*result.Locations[0].LocationType).NotTo(Equal(""))
// Expect(*result.Locations[0].Market).NotTo(Equal(""))
Expect(*result.Locations[0].MarketGeography).NotTo(Equal(""))
Expect(*result.Locations[0].Mzr).NotTo(Equal(""))
Expect(*result.Locations[0].OfferingType).To(Equal("connect"))
Expect(*result.Locations[0].ProvisionEnabled).NotTo(BeNil())
Expect(*result.Locations[0].VpcRegion).NotTo(Equal(""))
})
It("should return an error for invalid location type", func() {
shouldSkipTest()
listOfferingTypeLocationsOptions := service.NewListOfferingTypeLocationsOptions("RANDOM")
result, detailedResponse, err := service.ListOfferingTypeLocations(listOfferingTypeLocationsOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("offering_type_location: RANDOM"))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
})
Context("Cross Connect Routers", func() {
It("should list the location info for type dedicated and location short name", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("dedicated", os.Getenv("OT_DEDICATED_LOCATION_NAME"))
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.CrossConnectRouters)).Should(BeNumerically(">", 0))
Expect(*result.CrossConnectRouters[0].RouterName).NotTo(Equal(""))
Expect(*result.CrossConnectRouters[0].TotalConnections).Should(BeNumerically(">=", 0))
})
It("should list the location info for type dedicated and location display name", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("dedicated", os.Getenv("OT_DEDICATED_LOCATION_DISPLAY_NAME"))
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.CrossConnectRouters)).Should(BeNumerically(">", 0))
Expect(*result.CrossConnectRouters[0].RouterName).NotTo(Equal(""))
Expect(*result.CrossConnectRouters[0].TotalConnections).Should(BeNumerically(">=", 0))
})
It("should return proper error when unsupported offering type CONNECT is provided", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("connect", os.Getenv("OT_CONNECT_LOCATION_NAME"))
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("The supplied OfferingType is not supported for this call"))
Expect(detailedResponse.StatusCode).To(Equal(400))
Expect(result).To(BeNil())
})
It("should return proper error when incorrect offering type is provided", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("random", os.Getenv("OT_CONNECT_LOCATION_DISPLAY_NAME"))
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Invalid Direct Link Offering Type."))
Expect(detailedResponse.StatusCode).To(Equal(400))
Expect(result).To(BeNil())
})
It("should return proper error when incorrect location is provided", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("dedicated", "florida")
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Classic Location not found: florida"))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
})
Context("Offering Speeds", func() {
It("should fetch the offering speeds for the type dedicated", func() {
shouldSkipTest()
listOfferingTypeSpeedsOptions := service.NewListOfferingTypeSpeedsOptions("dedicated")
result, detailedResponse, err := service.ListOfferingTypeSpeeds(listOfferingTypeSpeedsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Speeds)).Should(BeNumerically(">", 0))
})
It("should fetch the offering speeds for the type connect", func() {
shouldSkipTest()
listOfferingTypeSpeedsOptions := service.NewListOfferingTypeSpeedsOptions("connect")
result, detailedResponse, err := service.ListOfferingTypeSpeeds(listOfferingTypeSpeedsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Speeds)).Should(BeNumerically(">", 0))
})
It("should proper error for invalid offering type", func() {
shouldSkipTest()
listOfferingTypeSpeedsOptions := service.NewListOfferingTypeSpeedsOptions("random")
result, detailedResponse, err := service.ListOfferingTypeSpeeds(listOfferingTypeSpeedsOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find OfferingType"))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
})
})
Describe("Ports", func() {
It("should fetch the ports", func() {
shouldSkipTest()
listPortsOptions := service.NewListPortsOptions()
result, detailedResponse, err := service.ListPorts(listPortsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Ports)).Should(BeNumerically(">", 0))
Expect(*result.Ports[0].ID).NotTo(Equal(""))
Expect(*result.Ports[0].DirectLinkCount).Should(BeNumerically(">=", 0))
Expect(*result.Ports[0].Label).NotTo(Equal(""))
Expect(*result.Ports[0].LocationDisplayName).NotTo(Equal(""))
Expect(*result.Ports[0].LocationName).NotTo(Equal(""))
Expect(*result.Ports[0].ProviderName).NotTo(Equal(""))
Expect(len(result.Ports[0].SupportedLinkSpeeds)).Should(BeNumerically(">=", 0))
os.Setenv("PORT_ID", *result.Ports[0].ID)
os.Setenv("PORT_LOCATION_DISPLAY_NAME", *result.Ports[0].LocationDisplayName)
os.Setenv("PORT_LOCATION_NAME", *result.Ports[0].LocationName)
os.Setenv("PORT_LABEL", *result.Ports[0].Label)
})
It("should fetch the port by ID", func() {
shouldSkipTest()
portId := os.Getenv("PORT_ID")
locationDisplayName := os.Getenv("PORT_LOCATION_DISPLAY_NAME")
locationName := os.Getenv("PORT_LOCATION_NAME")
label := os.Getenv("PORT_LABEL")
getPortOptions := service.NewGetPortOptions(portId)
result, detailedResponse, err := service.GetPort(getPortOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(portId))
Expect(*result.LocationDisplayName).To(Equal(locationDisplayName))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.Label).To(Equal(label))
Expect(*result.DirectLinkCount).Should(BeNumerically(">=", 0))
Expect(*result.ProviderName).NotTo(Equal(""))
Expect(len(result.SupportedLinkSpeeds)).Should(BeNumerically(">=", 0))
})
})
Describe("Direct Link Virtual Connections", func() {
timestamp := time.Now().Unix()
gatewayName := "GO-INT-VC-SDK-" + strconv.FormatInt(timestamp, 10)
bgpAsn := int64(64999)
crossConnectRouter := "LAB-xcr01.dal09"
global := true
locationName := os.Getenv("LOCATION_NAME")
speedMbps := int64(1000)
metered := false
carrierName := "carrier1"
customerName := "customer1"
gatewayType := "dedicated"
Context("Create gateway", func() {
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
It("Successfully created a gateway", func() {
shouldSkipTest()
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
os.Setenv("GATEWAY_ID", *result.ID)
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(global))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
})
It("Successfully create a CLASSIC virtual connection", func() {
shouldSkipTest()
vcName := "GO-INT-CLASSIC-VC-SDK-" + strconv.FormatInt(timestamp, 10)
createGatewayVCOptions := service.NewCreateGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), vcName, directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Classic)
result, detailedResponse, err := service.CreateGatewayVirtualConnection(createGatewayVCOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
os.Setenv("CLASSIC_VC_ID", *result.ID)
Expect(*result.ID).NotTo(Equal(""))
Expect(*result.Name).To(Equal(vcName))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Classic))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
})
It("Successfully get a CLASSIC virtual connection", func() {
shouldSkipTest()
vcName := "GO-INT-CLASSIC-VC-SDK-" + strconv.FormatInt(timestamp, 10)
getGatewayVCOptions := service.NewGetGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), os.Getenv("CLASSIC_VC_ID"))
result, detailedResponse, err := service.GetGatewayVirtualConnection(getGatewayVCOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(os.Getenv("CLASSIC_VC_ID")))
Expect(*result.Name).To(Equal(vcName))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Classic))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
})
It("Successfully create a Gen 2 VPC virtual connection", func() {
shouldSkipTest()
vcName := "GO-INT-GEN2-VPC-VC-SDK-" + strconv.FormatInt(timestamp, 10)
vpcCrn := os.Getenv("GEN2_VPC_CRN")
createGatewayVCOptions := service.NewCreateGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), vcName, directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc)
createGatewayVCOptionsWithNetworkID := createGatewayVCOptions.SetNetworkID(vpcCrn)
result, detailedResponse, err := service.CreateGatewayVirtualConnection(createGatewayVCOptionsWithNetworkID)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
// save the id so it can be deleted later
os.Setenv("GEN2_VPC_VC_ID", *result.ID)
Expect(*result.ID).NotTo(Equal(""))
Expect(*result.Name).To(Equal(vcName))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc))
Expect(*result.NetworkID).To(Equal(vpcCrn))
})
It("Successfully get a Gen 2 VPC virtual connection", func() {
shouldSkipTest()
getGatewayVCOptions := service.NewGetGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), os.Getenv("GEN2_VPC_VC_ID"))
result, detailedResponse, err := service.GetGatewayVirtualConnection(getGatewayVCOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(os.Getenv("GEN2_VPC_VC_ID")))
Expect(*result.Name).To(Equal("GO-INT-GEN2-VPC-VC-SDK-" + strconv.FormatInt(timestamp, 10)))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc))
Expect(*result.NetworkID).To(Equal(os.Getenv("GEN2_VPC_CRN")))
})
It("Successfully list the virtual connections for a gateway", func() {
shouldSkipTest()
listVcOptions := service.NewListGatewayVirtualConnectionsOptions(os.Getenv("GATEWAY_ID"))
result, detailedResponse, err := service.ListGatewayVirtualConnections(listVcOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
vcs := result.VirtualConnections
// two VCs were created for the GW, so we should expect 2
Expect(len(vcs)).Should(BeNumerically("==", 2))
for _, vc := range vcs {
if *vc.ID == os.Getenv("GEN2_VPC_VC_ID") {
Expect(*vc.Name).To(Equal("GO-INT-GEN2-VPC-VC-SDK-" + strconv.FormatInt(timestamp, 10)))
Expect(*vc.CreatedAt).NotTo(Equal(""))
Expect(*vc.Status).To(Equal("pending"))
Expect(*vc.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc))
Expect(*vc.NetworkID).To(Equal(os.Getenv("GEN2_VPC_CRN")))
} else {
Expect(*vc.ID).To(Equal(os.Getenv("CLASSIC_VC_ID")))
Expect(*vc.Name).To(Equal("GO-INT-CLASSIC-VC-SDK-" + strconv.FormatInt(timestamp, 10)))
Expect(*vc.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Classic))
Expect(*vc.CreatedAt).NotTo(Equal(""))
Expect(*vc.Status).To(Equal("pending"))
}
}
})
It("Successfully Update a virtual connection name", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
vcId := os.Getenv("GEN2_VPC_VC_ID")
vcName := "GO-INT-GEN2-VPC-VC-PATCH-SDK-" + strconv.FormatInt(timestamp, 10)
patchGatewayOptions := service.NewUpdateGatewayVirtualConnectionOptions(gatewayId, vcId)
patchGatewayOptions = patchGatewayOptions.SetName(vcName)
result, detailedResponse, err := service.UpdateGatewayVirtualConnection(patchGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(vcId))
Expect(*result.Name).To(Equal(vcName))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc))
Expect(*result.NetworkID).To(Equal(os.Getenv("GEN2_VPC_CRN")))
})
It("Fail to Update a virtual connection status", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
vcId := os.Getenv("GEN2_VPC_VC_ID")
patchGatewayOptions := service.NewUpdateGatewayVirtualConnectionOptions(gatewayId, vcId)
patchGatewayOptions = patchGatewayOptions.SetStatus(directlinkv1.UpdateGatewayVirtualConnectionOptions_Status_Rejected)
result, detailedResponse, err := service.UpdateGatewayVirtualConnection(patchGatewayOptions)
// The GW owner is not allowed to change the status; the test calls the API with the status parameter to verify it is rejected.
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("gateway owner can't patch vc status."))
Expect(detailedResponse.StatusCode).To(Equal(400))
})
It("Successfully delete a CLASSIC virtual connection for a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
vcId := os.Getenv("CLASSIC_VC_ID")
deleteClassicVCOptions := service.NewDeleteGatewayVirtualConnectionOptions(gatewayId, vcId)
detailedResponse, err := service.DeleteGatewayVirtualConnection(deleteClassicVCOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
It("Successfully waits for CLASSIC virtual connection to report as deleted", func() {
shouldSkipTest()
getGatewayVCOptions := service.NewGetGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), os.Getenv("CLASSIC_VC_ID"))
// VC delete might not be instantaneous. Poll the VC looking for a not found. Fail after 2 min
timer := 0
for {
// Get the current rc for the VC
_, detailedResponse, _ := service.GetGatewayVirtualConnection(getGatewayVCOptions)
// if 404 then we are done
if detailedResponse.StatusCode == 404 {
Expect(detailedResponse.StatusCode).To(Equal(404)) // response is 404, exit success
break
}
// other than 404, see if we have reached the timeout value. If so, exit with failure
if timer > 24 { // 2 min timer (24x5sec)
Expect(detailedResponse.StatusCode).To(Equal(404)) // timed out fail if code is not 404
break
} else {
// Still exists, wait 5 sec
time.Sleep(time.Duration(5) * time.Second)
timer = timer + 1
}
}
})
It("Successfully deletes GEN 2 VPC virtual connection for a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
vcId := os.Getenv("GEN2_VPC_VC_ID")
deleteVpcVcOptions := service.NewDeleteGatewayVirtualConnectionOptions(gatewayId, vcId)
detailedResponse, err := service.DeleteGatewayVirtualConnection(deleteVpcVcOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
It("Successfully waits for GEN 2 VPC virtual connection to report as deleted", func() {
shouldSkipTest()
getGatewayVCOptions := service.NewGetGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), os.Getenv("GEN2_VPC_VC_ID"))
// VC delete might not be instantaneous. Poll the VC looking for a not found. Fail after 2 min
timer := 0
for {
// Get the current rc for the VC
_, detailedResponse, _ := service.GetGatewayVirtualConnection(getGatewayVCOptions)
// if 404 then we are done
if detailedResponse.StatusCode == 404 {
Expect(detailedResponse.StatusCode).To(Equal(404)) // response is 404, exit success
break
}
// other than 404, see if we have reached the timeout value. If so, exit with failure
if timer > 24 { // 2 min timer (24x5 sec)
Expect(detailedResponse.StatusCode).To(Equal(404)) // timed out fail if code is not 404
break
} else {
// Still exists, wait 5 sec
time.Sleep(time.Duration(5) * time.Second)
timer = timer + 1
}
}
})
It("Successfully deletes a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
})
Describe("LOA and Completion Notice", func() {
timestamp := time.Now().Unix()
gatewayName := "GO-INT-LOA-SDK-" + strconv.FormatInt(timestamp, 10)
bgpAsn := int64(64999)
crossConnectRouter := "LAB-xcr01.dal09"
global := true
locationName := os.Getenv("LOCATION_NAME")
speedMbps := int64(1000)
metered := false
carrierName := "carrier1"
customerName := "customer1"
gatewayType := "dedicated"
// Notes about LOA and CN testing: when a GW is created, a GitHub issue is also created by dl-rest. The issue is used for managing the LOA and CN. In normal operation,
// an LOA is added to the issue via manual GH interaction. After that occurs and the GH label is changed, CN upload is allowed. Since we cannot perform the manual
// steps during integration testing, the test only does the following:
// - GET the LOA for a gateway. It expects a 404 error since no one has added the LOA to the GH issue.
// - PUT a completion notice to the GW. It fails with a 412 error because the GH issue and GW status are in the wrong state due to no manual interaction.
// - GET the CN for a GW. It expects a 404 since the CN could not be uploaded.
//
Context("Create gateway", func() {
It("Successfully created a gateway", func() {
shouldSkipTest()
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
os.Setenv("GATEWAY_ID", *result.ID)
})
It("Successfully call loa", func() {
shouldSkipTest()
listLOAOptions := service.NewListGatewayLetterOfAuthorizationOptions(os.Getenv("GATEWAY_ID"))
result, detailedResponse, err := service.ListGatewayLetterOfAuthorization(listLOAOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Please check whether the resource you are requesting exists."))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
It("Successfully call PUT completion notice", func() {
shouldSkipTest()
buffer, err := ioutil.ReadFile("completion_notice.pdf")
Expect(err).To(BeNil())
r := ioutil.NopCloser(bytes.NewReader(buffer))
createCNOptions := service.NewCreateGatewayCompletionNoticeOptions(os.Getenv("GATEWAY_ID"))
createCNOptions.SetUpload(r)
detailedResponse, err := service.CreateGatewayCompletionNotice(createCNOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Invalid gateway status to upload completion notice."))
Expect(detailedResponse.StatusCode).To(Equal(412))
})
It("Successfully call completion notice", func() {
shouldSkipTest()
listCNOptions := service.NewListGatewayCompletionNoticeOptions(os.Getenv("GATEWAY_ID"))
result, detailedResponse, err := service.ListGatewayCompletionNotice(listCNOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Please check whether the resource you are requesting exists."))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
It("Successfully deletes a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
})
Describe("BGP MD5", func() {
timestamp := time.Now().Unix()
gatewayName := "GO-INT-MD5-SDK-" + strconv.FormatInt(timestamp, 10)
bgpAsn := int64(64999)
crossConnectRouter := "LAB-xcr01.dal09"
global := true
locationName := os.Getenv("LOCATION_NAME")
speedMbps := int64(1000)
metered := false
carrierName := "carrier1"
customerName := "customer1"
gatewayType := "dedicated"
authCrn := os.Getenv("AUTHENTICATION_KEY")
Context("Create a Gateway with Authentication Key", func() {
It("should successfully create a gateway", func() {
shouldSkipTest()
// gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
authenticationKey, _ := service.NewGatewayTemplateAuthenticationKey(authCrn)
gatewayTemplateModel := new(directlinkv1.GatewayTemplateGatewayTypeDedicatedTemplate)
gatewayTemplateModel.AuthenticationKey = authenticationKey
gatewayTemplateModel.BgpAsn = core.Int64Ptr(int64(64999))
gatewayTemplateModel.Global = core.BoolPtr(true)
gatewayTemplateModel.Metered = core.BoolPtr(false)
gatewayTemplateModel.Name = core.StringPtr(gatewayName)
gatewayTemplateModel.SpeedMbps = core.Int64Ptr(int64(1000))
gatewayTemplateModel.Type = core.StringPtr(gatewayType)
gatewayTemplateModel.CarrierName = core.StringPtr(carrierName)
gatewayTemplateModel.CrossConnectRouter = core.StringPtr(crossConnectRouter)
gatewayTemplateModel.CustomerName = core.StringPtr(customerName)
gatewayTemplateModel.LocationName = core.StringPtr(locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gatewayTemplateModel)
result, resp, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(resp.StatusCode).To(Equal(201))
os.Setenv("GATEWAY_ID", *result.ID)
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.AuthenticationKey.Crn).To(Equal(authCrn))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(global))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
})
Context("Update the Authentication key for the gateway", func() {
It("should successfully clear the auth key", func() {
shouldSkipTest()
authKey, _ := service.NewGatewayPatchTemplateAuthenticationKey("")
gatewayId := os.Getenv("GATEWAY_ID")
updateGatewayOptions := service.NewUpdateGatewayOptions(gatewayId).SetAuthenticationKey(authKey)
res, resp, err := service.UpdateGateway(updateGatewayOptions)
Expect(err).To(BeNil())
Expect(resp.StatusCode).To(Equal(200))
Expect(*res.ID).To(Equal(gatewayId))
Expect(res.AuthenticationKey).To(BeNil())
Expect(*res.Name).To(Equal(gatewayName))
})
})
Context("Delete a gateway", func() {
It("Successfully deletes a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
})
})
|
shouldSkipTest
|
test_qudit.py
|
import pytest
from pytest_mock import mocker
from hamcrest import *
import numpy as np
from src.objects.quantum_system import SystemType
from src.objects.qudit import Qudit
from src.dirac_notation.constants import *
@pytest.mark.parametrize('input', [
(
comp_ket_x(0, 4)
)
])
def test_init(input):
system = Qudit(input)
assert_that(system.children_systems, equal_to(None))
assert_that(system.system_type, equal_to(SystemType.simple))
@pytest.mark.parametrize('input', [
(
ket_0
)
])
def test_init_fail(input):
with pytest.raises(AssertionError):
Qudit(input)
@pytest.mark.parametrize('input_1,input_2', [
(
comp_ket_x(0, 8), comp_ket_x(0, 4)
)
])
def test_children_systems_1(input_1, input_2):
system = Qudit(input_1)
child_system = Qudit(input_2)
system.children_systems = [child_system]
assert_that(system.children_systems, equal_to([child_system]))
assert_that(system.system_type, equal_to(SystemType.product))
@pytest.mark.parametrize('input', [
(
comp_ket_x(0, 8)
)
])
def
|
(input):
system = Qudit(input)
system.children_systems = []
system.children_systems = None
assert_that(system.children_systems, equal_to(None))
assert_that(system.system_type, equal_to(SystemType.simple))
|
test_children_systems_2
|
linker_namespace.py
|
#!/usr/bin/env python2.7
import os
import sys
sys.path.append(os.path.realpath(__file__ + '/../../../../lib'))
sys.path.append(os.path.realpath(__file__ + '/../../../linker-namespace-sanity'))
import udf
import linker_namespace_base_test
'''
This test verifies that certain libraries are not part of the global linker namespace,
which is the normal behavior (we create a new linker namespace in the UDF client for those dependencies)
'''
class LinkerNamespaceTest(linker_namespace_base_test.LinkerNamespaceBaseTest):
def test_linker_namespace_udf(self):
|
if __name__ == '__main__':
udf.main()
|
rows = self._execute_linker_namespace_udf(['proto', 'zmq'])
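# None is expected: the library must not be visible in the global linker namespace.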
self.assertGreater(len(rows), 0)
for item in rows:
self.assertEqual(None, item[0])
|
dut_gen.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018, Kevin Laeufer <[email protected]>
# Generates the `dut.hpp` file which contains dut specific interface code
|
import toml
template = """
// This file was generated from {conf_toml} using the dut_gen.py script.
// It contains DUT specific interface code for the verilator C++ test harness.
#ifndef DUT_CONF_HPP
#define DUT_CONF_HPP
#if defined(E2E)
#include <V{toplevel}_E2EHarness.h>
#define TOP_TYPE V{toplevel}_E2EHarness
#else
#include <V{toplevel}_VHarness.h>
#define TOP_TYPE V{toplevel}_VHarness
#endif
#define TOPLEVEL_STR "{toplevel}"
static constexpr size_t CoverageSize = {cov_size};
static constexpr size_t InputSize = {input_size};
static inline void apply_input(TOP_TYPE* top, const uint8_t* input) {{
{apply_input}
}}
static inline void read_coverage(TOP_TYPE* top, uint8_t* coverage) {{
{read_coverage}
}}
#endif // DUT_CONF_HPP
"""
align = 8
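# Round a bit count up to whole bytes, then up to the next multiple of `align` bytes.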
def bits_to_size(bits):
bytes = (bits + 7) // 8
words = (bytes + align - 1) // align
return words * align
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='generate DUT specific verilator code')
parser.add_argument('-o', '--output', help='dut header file name', required=True)
parser.add_argument('-i', '--input', help='toml dut description', required=True)
args = parser.parse_args()
conf_toml = args.input
if not os.path.isfile(conf_toml):
sys.stderr.write("dur config file `{}` not found\n".format(conf_toml))
sys.exit(1)
header = args.output
header_dir = os.path.dirname(os.path.abspath(header))
if not os.path.isdir(header_dir):
sys.stderr.write("output directory `{}` does not exist\n".format(header_dir))
sys.exit(1)
conf = toml.loads(open(conf_toml).read())
input_bits = sum(ii['width'] for ii in conf['input'])
input_size = bits_to_size(input_bits)
cov_bits = sum(counter['width'] for counter in conf['counter'])
# the cycle count in front of the coverage feedback takes 16 bits
cov_size = bits_to_size(cov_bits + 2 * 8) - 2
i_line = "\ttop->io_input_bytes_{0: <3} = input[{0: >3}];"
c_line = "\tcoverage[{0: >3}] = top->io_coverage_bytes_{0};"
dd = { 'conf_toml': conf_toml, 'toplevel': conf['general']['top'],
'cov_size': cov_size, 'input_size': input_size,
'apply_input': "\n".join(i_line.format(ii) for ii in range(input_size)),
'read_coverage': "\n".join(c_line.format(ii) for ii in range(cov_size))
}
output = template.format(**dd)
open(header, 'w').write(output)
|
# from the TOML dut description file.
import os, sys, argparse
|
crawler.py
|
#!/usr/bin/env python3
# This code is heavily inspired by http://aosabook.org/en/500L/a-web-crawler-with-asyncio-coroutines.html
import cgi
from collections import namedtuple
import os
import re
import logging
import urllib
import asyncio
import aiohttp
from asyncio import Queue
import time
LOGGER = logging.getLogger(__name__)
FetchStatistic = namedtuple(
'FetchStatistic', [
'url',
'next_url',
'status',
'exception',
'size',
'content_type',
'encoding',
'num_urls',
'num_new_urls'
]
)
class Crawler(object):
""" Crawls a set of urls.
"""
def __init__(self, roots, exclude=None, strict=True, max_redirect=10, max_tries=3, max_tasks=10, *, loop=None,
max_size=1024**2, file_type=None):
self.loop = loop or asyncio.get_event_loop()
self.roots = roots
self.exclude = exclude
self.strict = strict
self.max_redirect = max_redirect
self.max_tries = max_tries
self.max_tasks = max_tasks
self.queue = Queue(loop=self.loop)
self.seen_urls = set()
self.done = []
self.session = aiohttp.ClientSession(loop=self.loop)
self.root_domains = set()
self.max_file_size = max_size
# file_type may be None, meaning no file-type restriction
if file_type is None or file_type.startswith("."):
self.file_type = file_type
else:
self.file_type = "." + file_type
for root in roots:
parts = urllib.parse.urlparse(root)
host, port = urllib.parse.splitport(parts.netloc)
if not host:
continue
if re.match(r'\A[\d\.]*\Z', host):
self.root_domains.add(host)
else:
host = host.lower()
if self.strict:
|
else:
self.root_domains.add(self.lenient_host(host))
for root in roots:
self.add_url(root)
self.t0 = time.time()
self.t1 = None
@staticmethod
def lenient_host(host):
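"""Collapse a host to its last two labels, e.g. 'www.example.com' -> 'examplecom'."""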
parts = host.split('.')[-2:]
return ''.join(parts)
@staticmethod
def is_redirect(response):
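"""Return True for the 3xx statuses this crawler follows (300, 301, 302, 303, 307)."""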
return response.status in (300, 301, 302, 303, 307)
def close(self):
""" Close resources
:return: None
"""
self.session.close()
def host_ok(self, host):
""" Can this host be crawled?
:param host:
:return:
"""
host = host.lower()
if host in self.root_domains:
return True
if re.match(r'\A[\d\.]*\Z', host):
return False
if self.strict:
return self.host_ok_strict(host)
else:
return self.host_ok_lenient(host)
def host_ok_strict(self, host):
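# Accept the host if its "www."-toggled form is one of the root domains.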
if host.startswith("www."):
host = host[4:]
else:
host = "www." + host
return host in self.root_domains
def host_ok_lenient(self, host):
return self.lenient_host(host) in self.root_domains
def record_statistic(self, fetch_statistic):
self.done.append(fetch_statistic)
@asyncio.coroutine
def parse_links(self, response):
""" Return a FetchStatistic and list of links.
:param response:
:return: FetchStatistic and links.
"""
links = set()
content_type = None
encoding = None
body = yield from response.read()
if response.status == 200:
content_type = response.headers.get("content-type")
pdict = {}
if content_type:
content_type, pdict = cgi.parse_header(content_type)
encoding = pdict.get("charset", "utf-8")
if content_type in ("text/html", "application/xml"):
text = yield from response.text()
# get all urls links
urls = set(re.findall(r'''(?i)href=["']([^\s"'<>]+)''', text))
if urls:
LOGGER.info("got {} distinct urls from {}".format(len(urls), response.url))
for url in urls:
normalized = urllib.parse.urljoin(response.url, url)
defragmented, frag = urllib.parse.urldefrag(normalized)
if self.url_allowed(defragmented):
links.add(defragmented)
stat = FetchStatistic(
url=response.url,
next_url=None,
status=response.status,
exception=None,
size=len(body),
content_type=content_type,
encoding=encoding,
num_urls=len(links),
num_new_urls=len(links - self.seen_urls)
)
return stat, links
@asyncio.coroutine
def fetch(self, url, max_redirect):
""" Fetch one url.
:param url:
:param max_redirect:
:return:
"""
tries = 0
exception = None
while tries < self.max_tries:
try:
response = yield from self.session.get(url, allow_redirects=False)
if tries > 1:
LOGGER.info("try {} for {} success".format(tries, url))
break
except aiohttp.ClientError as client_error:
LOGGER.info("try {} for {} raised {}".format(tries, url, client_error))
exception = client_error
tries += 1
else:
# we never broke out of the loop: all tries failed
LOGGER.error("{} failed after {} tries".format(url, self.max_tries))
self.record_statistic(
FetchStatistic(
url=url,
next_url=None,
status=None,
exception=exception,
size=0,
content_type=None,
encoding=None,
num_urls=0,
num_new_urls=0
)
)
return
try:
if self.is_redirect(response):
location = response.headers['location']
next_url = urllib.parse.urljoin(url, location)
self.record_statistic(
FetchStatistic(
url=url,
next_url=next_url,
status=response.status,
exception=None,
size=0,
content_type=None,
encoding=None,
num_urls=0,
num_new_urls=0
)
)
if next_url in self.seen_urls:
return
if max_redirect > 0:
LOGGER.info("redirect to {} from {}".format(next_url, url))
self.add_url(next_url, max_redirect - 1)
else:
LOGGER.error("redirect limit reached for {} from {}".format(next_url, url))
else:
stat, links = yield from self.parse_links(response)
self.record_statistic(stat)
for link in links.difference(self.seen_urls):
self.queue.put_nowait((link, self.max_redirect))
self.seen_urls.update(links)
finally:
yield from response.release()
@asyncio.coroutine
def work(self):
""" Process Queue items forever.
:return: None
"""
try:
while True:
url, max_redirect = yield from self.queue.get()
assert url in self.seen_urls
yield from self.fetch(url, max_redirect)
self.queue.task_done()
except asyncio.CancelledError:
pass
def url_allowed(self, url):
""" Is url http or https format. Also checks the pointed url file type and size.
:param url: given url
:return: True if all conditions are met. False otherwise.
"""
if self.exclude and re.search(self.exclude, url):
return False
parts = urllib.parse.urlparse(url)
if parts.scheme not in ("http", "https"):
LOGGER.debug("skipping non-http scheme in {}".format(url))
return False
host, port = urllib.parse.splitport(parts.netloc)
if not self.host_ok(host):
LOGGER.debug("skipping non-root host in {}".format(url))
return False
# check file type
if not self.file_ok(url):
LOGGER.debug("skipping non {} files".format(self.file_type))
return False
return True
def add_url(self, url, max_redirect=None):
""" Adds url to the queue if not seen before.
:param url:
:param max_redirect:
:return: None
"""
if max_redirect is None:
max_redirect = self.max_redirect
LOGGER.debug("adding {} {}".format(url, max_redirect))
self.seen_urls.add(url)
self.queue.put_nowait((url, max_redirect))
@asyncio.coroutine
def crawl(self):
""" Run the crawler until all finished.
:return: None
"""
workers = [asyncio.Task(self.work(), loop=self.loop) for _ in range(self.max_tasks)]
self.t0 = time.time()
yield from self.queue.join()
self.t1 = time.time()
for w in workers:
w.cancel()
def file_ok(self, url):
""" Is the url pointing to the correct file type? Is its size OK?
:param url:
:return: True if the file is of a type the user requested. False otherwise.
"""
# A file_type of None means the caller did not restrict the file type.
if self.file_type is None:
return True
href_path = urllib.parse.urlparse(url).path
extension = os.path.splitext(href_path)[1]
return extension == self.file_type
def size_ok(self, response):
""" Check if file size <= MAX_SIZE before downloading.
:param response:
:return:
"""
raise NotImplementedError
|
self.root_domains.add(host)
|
test_stream.py
|
import unittest
from cupy._creation import from_data
from cupy import cuda
from cupy import testing
from cupy.testing import attr
class TestStream(unittest.TestCase):
@attr.gpu
def test_eq(self):
null0 = cuda.Stream.null
null1 = cuda.Stream(True)
null2 = cuda.Stream(True)
null3 = cuda.Stream()
self.assertEqual(null0, null1)
self.assertEqual(null1, null2)
self.assertNotEqual(null2, null3)
def check_del(self, null):
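"""Create and use a stream, delete it, and verify the current stream reverts to the null stream."""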
stream = cuda.Stream(null=null).use()
stream_ptr = stream.ptr
x = from_data.array([1, 2, 3])
del stream
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
# Want to test that cudaStreamDestroy is issued, but
# runtime.streamQuery(stream_ptr) causes SEGV. We cannot test...
del stream_ptr
del x
@attr.gpu
def test_del(self):
|
self.check_del(null=True)
@attr.gpu
def test_get_and_add_callback(self):
N = 100
cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
if not cuda.runtime.is_hip:
stream = cuda.Stream.null
else:
# adding callbacks to the null stream in HIP would segfault...
stream = cuda.Stream()
out = []
for i in range(N):
numpy_array = cupy_arrays[i].get(stream=stream)
stream.add_callback(
lambda _, __, t: out.append(t[0]),
(i, numpy_array))
stream.synchronize()
self.assertEqual(out, list(range(N)))
@attr.gpu
def test_with_statement(self):
stream1 = cuda.Stream()
stream2 = cuda.Stream()
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
with stream1:
self.assertEqual(stream1, cuda.get_current_stream())
with stream2:
self.assertEqual(stream2, cuda.get_current_stream())
self.assertEqual(stream1, cuda.get_current_stream())
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
@attr.gpu
def test_use(self):
stream1 = cuda.Stream().use()
self.assertEqual(stream1, cuda.get_current_stream())
cuda.Stream.null.use()
self.assertEqual(cuda.Stream.null, cuda.get_current_stream())
class TestExternalStream(unittest.TestCase):
def setUp(self):
self.stream_ptr = cuda.runtime.streamCreate()
self.stream = cuda.ExternalStream(self.stream_ptr)
def tearDown(self):
cuda.runtime.streamDestroy(self.stream_ptr)
@attr.gpu
def test_get_and_add_callback(self):
N = 100
cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
if not cuda.runtime.is_hip:
stream = cuda.Stream.null
else:
# adding callbacks to the null stream in HIP would segfault...
stream = cuda.Stream()
out = []
for i in range(N):
numpy_array = cupy_arrays[i].get(stream=stream)
stream.add_callback(
lambda _, __, t: out.append(t[0]),
(i, numpy_array))
stream.synchronize()
self.assertEqual(out, list(range(N)))
|
self.check_del(null=False)
@attr.gpu
def test_del_null(self):
|
error.rs
|
// Copyright 2018 Osspial
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Program and shader errors.
use crate::geometry::TypeTag;
use std::{
error::Error,
fmt::{self, Display},
io,
};
/// Error reported by driver that occurred during shader compilation.
#[derive(Debug, Clone)]
pub struct ShaderError(pub String);
/// Error reported by driver that occurred during program linking.
// Link could not be created; Ganon wins big.
#[derive(Debug, Clone)]
pub struct LinkError(pub String);
/// A Rust type was mapped to a mismatched GLSL type.
#[derive(Debug, Clone)]
pub struct MismatchedTypeError {
pub ident: String,
pub shader_ty: TypeTag,
pub rust_ty: TypeTag,
}
/// Error that occurred during program compilation.
#[derive(Debug, Clone)]
pub enum ProgramError {
/// Error reported by driver that occurred during program linking.
LinkError(LinkError),
/// A mismatch exists between a Rust type and a GLSL type.
///
/// Technically, OpenGL's API allows this to compile successfully. However, it's also undefined
/// behavior, so a well-formed program *should* never do this.
MismatchedTypeError(Vec<MismatchedTypeError>),
}
/// Error detected by Gullery that could indicate a misbehaved program.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ProgramWarning {
/// A uniform was specified, but is unused by OpenGL.
///
/// Includes the uniform's identifier.
UnusedUniform(String),
/// A vertex attribute was specified, but is unused by OpenGL.
///
/// Includes the attribute's identifier.
UnusedVertexAttribute(String),
/// A color attachment was specified, but is unused by OpenGL.
///
/// Includes the attachment's identifier.
UnusedColorAttachment(String),
}
impl Display for ShaderError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.0.fmt(f)
}
}
impl Display for LinkError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.0.fmt(f)
}
}
impl Display for MismatchedTypeError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
f,
"Mismatched type in {}; shader has {}, but Rust repr has {}",
self.ident, self.shader_ty, self.rust_ty
)
}
}
impl Error for ProgramError {}
impl Display for ProgramError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use self::ProgramError::*;
match *self {
LinkError(ref e) => write!(f, "{}", e),
MismatchedTypeError(ref errs) => {
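// Print the collected errors one per line, with no trailing newline.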
let mut errs = errs.iter();
if let Some(e) = errs.next() {
write!(f, "{}", e)?;
}
for e in errs {
write!(f, "\n{}", e)?;
}
Ok(())
}
}
}
}
impl Display for ProgramWarning {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
use self::ProgramWarning::*;
match *self {
UnusedUniform(ref ident) => write!(f, "Unused uniform `{}`", ident),
UnusedVertexAttribute(ref ident) => write!(f, "Unused vertex attribute `{}`", ident),
UnusedColorAttachment(ref ident) => write!(f, "Unused color attachment `{}`", ident),
}
}
}
impl Error for ShaderError {
fn description(&self) -> &str {
&self.0
}
}
impl Error for LinkError {
fn description(&self) -> &str {
&self.0
}
}
impl From<ShaderError> for io::Error {
fn from(e: ShaderError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, e)
}
}
impl From<LinkError> for io::Error {
fn from(e: LinkError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, e)
}
}
impl From<ProgramError> for io::Error {
fn
|
(e: ProgramError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, e)
}
}
|
from
|
generate.ts
|
import test from 'tape'
import { Project } from 'ts-morph'
import { minify, MinifyOptions } from 'uglify-js'
import { IProcessOptions, processProject } from '../src'
function createProject(): Project {
return new Project({
skipAddingFilesFromTsConfig: true,
compilerOptions: { strict: true },
useInMemoryFileSystem: true,
})
}
interface ITestOptions {
skip?: boolean
only?: boolean
minifyOptions?: MinifyOptions
options?: IProcessOptions
throws?: RegExp | typeof Error
}
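// Test helper: builds an in-memory project from `input`, runs processProject
// over it, and asserts the resulting files match `output` (null means the
// file must remain unchanged).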
function testProcessProject(
typeDescription: string,
input: { readonly [filename: string]: string },
output: { readonly [filename: string]: string | null },
{ skip, only, options, minifyOptions, throws }: ITestOptions = {}
) {
const fn = skip ? test.skip : only ? test.only : test
fn(typeDescription, t => {
const project = createProject()
Object.entries(input).forEach(([filePath, content]) => {
project.createSourceFile(filePath, content)
})
project.saveSync()
const expectedFilenames = new Set(Object.keys(output))
if (throws) {
t.throws(() => {
processProject(project, options)
}, throws)
t.end()
return
}
t.doesNotThrow(() => {
processProject(project, options)
})
for (const sourceFile of project.getSourceFiles()) {
const filePath = sourceFile.getFilePath().slice(1)
const expectedRaw = output[filePath]
if (expectedRaw === undefined) {
t.fail(`unexpected file ${filePath}`)
} else if (expectedRaw === null) {
// This file is expected, but must not have been changed
expectedFilenames.delete(filePath)
const sourceText = sourceFile.getFullText()
t.equal(sourceText, input[filePath], `${filePath} should not change`)
} else {
// This is a new file
expectedFilenames.delete(filePath)
const expectedFile = project.createSourceFile(
`${filePath}.expected`,
expectedRaw
)
let sourceText: string
if (minifyOptions !== undefined) {
const emitOutput = sourceFile.getEmitOutput()
const result = minify(
emitOutput.getOutputFiles()[0].getText(),
minifyOptions
)
t.error(result.error, 'UglifyJS should succeed')
sourceText = result.code
} else {
expectedFile.formatText()
sourceText = sourceFile.getText()
}
const expectedText = expectedFile.getText()
t.equal(sourceText, expectedText, `${filePath} should match`)
}
}
for (const filePath of expectedFilenames) {
t.fail(`${filePath} not found`)
}
t.end()
})
}
testProcessProject(
'removes existing .guard.ts files',
{
'test.guard.ts': `/* WARNING: Do not manually change this file. */ alert("hello")`,
},
{}
)
testProcessProject(
'does not touch .guard.ts files that are not autogenerated',
{ 'test.guard.ts': `alert("hello")` },
{ 'test.guard.ts': null }
)
testProcessProject(
'removes correct .guard.ts files when guardFileName is set',
{
'test.foo.ts': `/* WARNING: Do not manually change this file. */alert("hello")`,
'test.guard.ts': `/* WARNING: Do not manually change this file. */alert("hello")`,
},
{ 'test.guard.ts': null },
{ options: { guardFileName: 'foo' } }
)
testProcessProject(
'rejects invalid guardFileNames: *',
{},
{},
{ options: { guardFileName: 'f*o' }, throws: /guardFileName/ }
)
testProcessProject(
'rejects invalid guardFileNames: /',
{},
{},
{ options: { guardFileName: 'f/o' }, throws: /guardFileName/ }
)
testProcessProject(
'generates type guards for empty object',
{
'test.ts': `
/** @see {isEmpty} ts-auto-guard:type-guard */
export interface Empty {}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Empty } from "./test";
export function isEmpty(obj: any, _argumentName?: string): obj is Empty {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function")
)
}`,
}
)
testProcessProject(
'generates type guards for empty object if exportAll is true',
{
'test.ts': `
export interface Empty {}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Empty } from "./test";
export function isEmpty(obj: any, _argumentName?: string): obj is Empty {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function")
)
}`,
},
{ options: { exportAll: true, debug: false } }
)
testProcessProject(
'generates type guards for boolean',
{
'test.ts': `
/** @see {isBool} ts-auto-guard:type-guard */
export type Bool = boolean`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Bool } from "./test";
export function isBool(obj: any, _argumentName?: string): obj is Bool {
return (
typeof obj === "boolean"
)
}`,
}
)
testProcessProject(
'allows the name of the guard file to be specified',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
foo: number,
bar: string
}`,
},
{
'test.ts': null,
'test.debug.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.foo === "number" &&
typeof obj.bar === "string"
)
}`,
},
{
options: {
guardFileName: 'debug',
},
}
)
const PATH_PREFIX = process.cwd().slice(1) // Remove / from the beginning
testProcessProject(
'show debug info',
{
[`${PATH_PREFIX}/foo/bar/test.ts`]: `
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
foo: number,
bar: Bar,
bars: Array<Bar>
}
/** @see {isBar} ts-auto-guard:type-guard */
export interface Bar {
bar: number,
}
`,
},
{
[`${PATH_PREFIX}/foo/bar/test.ts`]: null,
[`${PATH_PREFIX}/foo/bar/test.guard.ts`]: `
import { Foo, Bar } from "./test";
function evaluate(
isCorrect: boolean,
varName: string,
expected: string,
actual: any
): boolean {
if (!isCorrect) {
console.error(
\`\${varName} type mismatch, expected: \${expected}, found:\`,
actual
)
}
return isCorrect
}
export function isFoo(obj: any, argumentName: string = "foo"): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
evaluate(typeof obj.foo === "number", \`\${argumentName}.foo\`, "number", obj.foo) &&
evaluate(isBar(obj.bar) as boolean, \`\${argumentName}.bar\`, "import(\\"./foo/bar/test\\").Bar", obj.bar) &&
evaluate(Array.isArray(obj.bars) &&
obj.bars.every((e: any) =>
isBar(e) as boolean
), \`\${argumentName}.bars\`, "import(\\"./foo/bar/test\\").Bar[]", obj.bars)
)
}
export function isBar(obj: any, argumentName: string = "bar"): obj is Bar {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
evaluate(typeof obj.bar === "number", \`\${argumentName}.bar\`, "number", obj.bar)
)
}
`,
},
{
options: {
debug: true,
},
}
)
testProcessProject(
'uses correct import file name if guard file is renamed',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
foo: number,
bar: string
}`,
},
{
'test.ts': null,
'test.debug.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.foo === "number" &&
typeof obj.bar === "string"
)
}`,
},
{
options: {
guardFileName: 'debug',
importGuards: 'CustomGuardAlias',
},
skip: true,
}
)
|
testProcessProject(
'generates type guards for simple interface',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
foo: number,
bar: string
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.foo === "number" &&
typeof obj.bar === "string"
)
}`,
}
)
testProcessProject(
'generates type guards for interface properties with spaces',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
"foo 1": number,
"bar 2": string
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj["foo 1"] === "number" &&
typeof obj["bar 2"] === "string"
)
}`,
}
)
testProcessProject(
'generates type guards for type properties with spaces',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export type Foo = {
"foo 1": number,
"bar 2": string
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj["foo 1"] === "number" &&
typeof obj["bar 2"] === "string"
)
}`,
}
)
testProcessProject(
'generates type guards for interface properties with dashes',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
"foo-1": number,
"bar-2": string
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj["foo-1"] === "number" &&
typeof obj["bar-2"] === "string"
)
}`,
}
)
// Commented out since this is a bug that should be fixed.
// testProcessProject(
// 'generates type guards for type properties with dashes',
// {
// 'test.ts': `
// /** @see {isFoo} ts-auto-guard:type-guard */
// export type Foo = {
// "foo-1": number,
// "bar-2": string
// }`,
// },
// {
// 'test.guard.ts': `
// import { Foo } from "./test";
// export function isFoo(obj: any, _argumentName?: string): obj is Foo {
// return (
// (obj !== null &&
// typeof obj === "object" ||
// typeof obj === "function") &&
// typeof obj["foo-1"] === "number" &&
// typeof obj["bar-2"] === "string"
// )
// }`,
// }
// )
testProcessProject(
'generates type guards for properties with spaces in types instead of interfaces',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export type Foo = {
"foo 1": number,
"bar 2": string
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj["foo 1"] === "number" &&
typeof obj["bar 2"] === "string"
)
}`,
}
)
testProcessProject(
'correctly handles default export',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
interface Foo {
foo: number,
bar: string
}
export default Foo`,
},
{
'test.ts': null,
'test.guard.ts': `
import Foo from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.foo === "number" &&
typeof obj.bar === "string"
)
}`,
}
)
testProcessProject(
'generates type guards for interface with optional field',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
foo?: number,
bar: number | undefined,
baz?: number | undefined
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
( typeof obj.foo === "undefined" ||
typeof obj.foo === "number" ) &&
( typeof obj.bar === "undefined" ||
typeof obj.bar === "number" ) &&
( typeof obj.baz === "undefined" ||
typeof obj.baz === "number" )
)
}`,
}
)
testProcessProject(
'generates type guards for nested interface',
{
'test.ts': `
interface Bar {
bar: number
}
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
foo: Bar,
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
(obj.foo !== null &&
typeof obj.foo === "object" ||
typeof obj.foo === "function") &&
typeof obj.foo.bar === "number"
)
}`,
}
)
testProcessProject(
'generates type guards for nested interface with type guard',
{
'test.ts': `
/** @see {isBar} ts-auto-guard:type-guard */
export interface Bar {
bar: number
}
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo {
foo: Bar,
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Bar, Foo } from "./test";
export function isBar(obj: any, _argumentName?: string): obj is Bar {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.bar === "number"
)
}
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
isBar(obj.foo) as boolean
)
}`,
}
)
testProcessProject(
'generates type guards for interface extending other interface',
{
'test.ts': `
interface Bar {
bar: number
}
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo extends Bar {
foo: number,
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.bar === "number" &&
typeof obj.foo === "number"
)
}`,
}
)
testProcessProject(
'generates type guards for interface extending other interface with type guard',
{
'test.ts': `
/** @see {isBar} ts-auto-guard:type-guard */
export interface Bar {
bar: number
}
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo extends Bar {
foo: number
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Bar, Foo } from "./test";
export function isBar(obj: any, _argumentName?: string): obj is Bar {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.bar === "number"
)
}
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
isBar(obj) as boolean &&
typeof obj.foo === "number"
)
}`,
}
)
testProcessProject(
'generates type guards for interface extending object type',
{
'test.ts': `
export type Bar = {
bar: number
}
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo extends Bar {
foo: number
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.bar === "number" &&
typeof obj.foo === "number"
)
}`,
}
)
testProcessProject(
'generates type guards for interface extending object type with type guard',
{
'test.ts': `
/** @see {isBar} ts-auto-guard:type-guard */
export type Bar = {
bar: number
}
/** @see {isFoo} ts-auto-guard:type-guard */
export interface Foo extends Bar {
foo: number
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Bar, Foo } from "./test";
export function isBar(obj: any, _argumentName?: string): obj is Bar {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.bar === "number"
)
}
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
isBar(obj) as boolean &&
typeof obj.foo === "number"
)
}`,
}
)
testProcessProject(
'generates type guards for an object literal type',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export type Foo = {
foo: number
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.foo === "number"
)
}`,
}
)
testProcessProject(
'generates type guards for a Pick<> type',
{
'test.ts': `
interface Bar {
foo: number,
bar: number
}
/** @see {isFoo} ts-auto-guard:type-guard */
export type Foo = Pick<Bar, "foo">`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.foo === "number"
)
}`,
}
)
testProcessProject(
'generates type guards with a short circuit',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export type Foo = {
foo: number
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
if (DEBUG) return true
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.foo === "number"
)
}`,
},
{
options: { shortCircuitCondition: 'DEBUG', debug: false },
}
)
testProcessProject(
'generated type guards with a short circuit are correctly stripped by UglifyJS',
{
'test.ts': `
/** @see {isFoo} ts-auto-guard:type-guard */
export type Foo = {
foo: number,
bar: Foo | string | () => void,
baz: "foo" | "bar"
}`,
},
{
'test.ts': null,
'test.guard.ts': `"use strict";function isFoo(o,s){return!0}exports.__esModule=!0,exports.isFoo=void 0,exports.isFoo=isFoo;`,
},
{
minifyOptions: {
compress: { global_defs: { DEBUG: true } },
},
options: { shortCircuitCondition: 'DEBUG', debug: false },
}
)
testProcessProject(
'generates type guards for mapped types',
{
'test.ts': `
/** @see {isPropertyValueType} ts-auto-guard:type-guard */
export type PropertyValueType = {value: string};
/** @see {isPropertyName} ts-auto-guard:type-guard */
export type PropertyName = 'name' | 'value';
/** @see {isFoo} ts-auto-guard:type-guard */
export type Foo = {
[key in PropertyName]: PropertyValueType
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { PropertyValueType, PropertyName, Foo } from "./test";
export function isPropertyValueType(obj: any, _argumentName?: string): obj is PropertyValueType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.value === "string"
)
}
export function isPropertyName(obj: any, _argumentName?: string): obj is PropertyName {
return (
(obj === "name" ||
obj === "value")
)
}
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
isPropertyValueType(obj.name) as boolean &&
isPropertyValueType(obj.value) as boolean
)
}
`,
}
)
testProcessProject(
'generates type guards for recursive types',
{
'test.ts': `
/** @see {isBranch1} ts-auto-guard:type-guard */
export type Branch1 = Branch1[] | string;
/** @see {isBranch2} ts-auto-guard:type-guard */
export type Branch2 = { branches: Branch2[] } | string;
/** @see {isBranch3} ts-auto-guard:type-guard */
export type Branch3 = { branches: Branch3[] } | {branches: Branch3 }[] | string;
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Branch1, Branch2, Branch3 } from "./test";
export function isBranch1(obj: any, _argumentName?: string): obj is Branch1 {
return (
(typeof obj === "string" ||
Array.isArray(obj) &&
obj.every((e: any) =>
isBranch1(e) as boolean
))
)
}
export function isBranch2(obj: any, _argumentName?: string): obj is Branch2 {
return (
(typeof obj === "string" ||
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Array.isArray(obj.branches) &&
obj.branches.every((e: any) =>
isBranch2(e) as boolean
))
)
}
export function isBranch3(obj: any, _argumentName?: string): obj is Branch3 {
return (
(typeof obj === "string" ||
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Array.isArray(obj.branches) &&
obj.branches.every((e: any) =>
isBranch3(e) as boolean
) ||
Array.isArray(obj) &&
obj.every((e: any) =>
(e !== null &&
typeof e === "object" ||
typeof e === "function") &&
isBranch3(e.branches) as boolean
))
)
}`,
}
)
testProcessProject(
'generated type guards for discriminated unions',
{
'test.ts': `
export type X = { type: 'a', value: number } | { type: 'b', value: string }
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { X } from "./test";
export function isX(obj: any, _argumentName?: string): obj is X {
return (
((obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
obj.type === "a" &&
typeof obj.value === "number" ||
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
obj.type === "b" &&
typeof obj.value === "string")
)
}`,
},
{ options: { exportAll: true } }
)
testProcessProject(
'generated type guards for enums',
{
'test.ts': `
export enum Types{
TheGood,
TheBad,
TheTypeSafe
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Types } from "./test";
export function isTypes(obj: any, _argumentName?: string): obj is Types {
return (
(obj === Types.TheGood ||
obj === Types.TheBad ||
obj === Types.TheTypeSafe)
)
}`,
},
{ options: { exportAll: true } }
)
testProcessProject(
'generated type guards for numeric enums in optional records',
{
'test.ts': `
export enum Types{
TheGood = 1,
TheBad,
TheTypeSafe
}
export interface TestItem {
        room: Partial<Record<Types, string>>;
}`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Types, TestItem } from "./test";
export function isTypes(obj: any, _argumentName?: string): obj is Types {
return (
(obj === Types.TheGood ||
obj === Types.TheBad ||
obj === Types.TheTypeSafe)
)
}
export function isTestItem(obj: any, _argumentName?: string): obj is TestItem {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
(obj.room !== null &&
typeof obj.room === "object" ||
typeof obj.room === "function") &&
(typeof obj.room["1"] === "undefined" ||
typeof obj.room["1"] === "string") &&
(typeof obj.room["2"] === "undefined" ||
typeof obj.room["2"] === "string") &&
(typeof obj.room["3"] === "undefined" ||
typeof obj.room["3"] === "string")
)
}`,
},
{ options: { exportAll: true } }
)
testProcessProject(
'no type guards for primitive alias types',
{
'test.ts': `
export type Days = number
export type UUID = string
export enum Types { TheGood }
export type Blank = undefined
export type AlwaysNull = null`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Types } from "./test";
export function isTypes(obj: any, _argumentName?: string): obj is Types {
return (
obj === Types.TheGood
)
}`,
},
{ options: { exportAll: true } }
)
testProcessProject(
'generated type guards for arrays of any',
{
'test.ts': `
export interface Foo {
value: any[]
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Array.isArray(obj.value)
)
}`,
},
{ options: { exportAll: true } }
)
testProcessProject(
'generated type guards for nested arrays',
{
'test.ts': `
export type Foo = {
value: Array<{
value: Array<number>
}>
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { Foo } from "./test";
export function isFoo(obj: any, _argumentName?: string): obj is Foo {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Array.isArray(obj.value) &&
obj.value.every((e: any) =>
(e !== null &&
typeof e === "object" ||
typeof e === "function") &&
Array.isArray(e.value) &&
e.value.every((e: any) =>
typeof e === "number"
)
)
)
}`,
},
{ options: { exportAll: true } }
)
testProcessProject(
'type that is an alias to an interface has a different typeguard name',
{
'test.ts': `
export interface TestType {
[index: any]: string
}
export type SecondaryTestType = TestType
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType, SecondaryTestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Object.entries<any>(obj)
.every(([_key, value]) => (typeof value === "string"))
)
}
export function isSecondaryTestType(obj: any, _argumentName?: string): obj is SecondaryTestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Object.entries<any>(obj)
.every(([_key, value]) => (typeof value === "string"))
)
}
`,
},
{ options: { exportAll: true } }
)
testProcessProject(
'adds type guard import to source file and also exports',
{
    // NOTE: This file is not automatically cleaned up with `formatText` after
    // being modified, so it requires this funky indentation to ensure that it
    // conforms to ts-morph's formatting.
'test.ts': `
/** @see {isEmpty} ts-auto-guard:type-guard */
export interface Empty { }
`,
},
{
'test.ts': `
import * as CustomGuardAlias from "./test.guard";
/** @see {isEmpty} ts-auto-guard:type-guard */
export interface Empty {}
export { CustomGuardAlias };`,
'test.guard.ts': `
import { Empty } from "./test";
export function isEmpty(obj: any, _argumentName?: string): obj is Empty {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function")
)
}`,
},
{ options: { importGuards: 'CustomGuardAlias' } }
)
testProcessProject(
'imports and uses generated type guard if the type is used in another file',
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export interface TestType {
someKey: string | number
}
`,
'test-list.ts': `
import { TestType } from './test'
/** @see {isTestTypeList} ts-auto-guard:type-guard */
export type TestTypeList = Array<TestType>
`,
},
{
'test.ts': null,
'test-list.ts': null,
'test-list.guard.ts': `
import { isTestType } from "./test.guard";
import { TestTypeList } from "./test-list";
export function isTestTypeList(obj: any, _argumentName?: string): obj is TestTypeList {
return (
Array.isArray(obj) &&
obj.every((e: any) =>
isTestType(e) as boolean
)
)
}
`,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
(typeof obj.someKey === "string" ||
typeof obj.someKey === "number")
)
}
`,
}
)
testProcessProject(
'generates type guards for dynamic object keys, including when mixed with static keys',
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export interface TestType {
someKey: "some" | "key"
[index: string]: "dynamic" | "string"
[index: number]: "also-dynamic" | "number"
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
(obj.someKey === "some" ||
obj.someKey === "key") &&
Object.entries<any>(obj)
.filter(([key]) => !["someKey"].includes(key))
.every(([key, value]) => ((value === "string" ||
value === "dynamic") &&
typeof key === "string" ||
(value === "number" ||
value === "also-dynamic") &&
typeof key === "number"))
)
}
`,
}
)
testProcessProject(
'generates type guards for Record types',
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export type TestType = Record<string, "dynamic" | "string">
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Object.entries<any>(obj)
.every(([key, value]) => ((value === "string" ||
value === "dynamic") &&
typeof key === "string"))
)
}
`,
}
)
testProcessProject(
'prefixes value with underscore if it goes unused',
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export interface TestType {
[index: string]: any
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Object.entries<any>(obj)
.every(([key, _value]) => (typeof key === "string"))
)
}
`,
}
)
testProcessProject(
'prefixes key with underscore if it goes unused',
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export interface TestType {
[index: any]: string
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Object.entries<any>(obj)
.every(([_key, value]) => (typeof value === "string"))
)
}
`,
}
)
testProcessProject(
'Does not generate empty guard files',
{
'test.ts': '',
},
{ 'test.ts': null }
)
testProcessProject(
'Deals with unknown type as it would any',
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export interface TestType {
[index: string]: unknown
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
Object.entries<any>(obj)
.every(([key, _value]) => (typeof key === "string"))
)
}
`,
}
)
testProcessProject(
'Deals with unknown type as it would any',
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export interface TestType {
test: unknown
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function")
)
}
`,
}
)
testProcessProject(
'Check if any callable properties is a function',
// should also emit a warning about how it is not possible to check function type at runtime.
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export interface TestType {
test: (() => void)
// ts-auto-guard-suppress function-type
test2(someArg: number): boolean
// some other comments
test3: {
(someArg: string): number
test3Arg: number;
}
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
(obj !== null &&
typeof obj === "object" ||
typeof obj === "function") &&
typeof obj.test === "function" &&
typeof obj.test3 === "function" &&
typeof obj.test3.test3Arg === "number" &&
typeof obj.test2 === "function"
)
}
`,
}
)
testProcessProject(
'Check if callable interface is a function',
// should also emit a warning about how it is not possible to check function type at runtime.
{
'test.ts': `
/** @see {isTestType} ts-auto-guard:type-guard */
export interface TestType {
(someArg: string): number
arg: number;
}
`,
},
{
'test.ts': null,
'test.guard.ts': `
import { TestType } from "./test";
export function isTestType(obj: any, _argumentName?: string): obj is TestType {
return (
typeof obj === "function" &&
typeof obj.arg === "number"
)
}
`,
}
)
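// A brief usage sketch (hypothetical consumer code, not part of the test
// fixtures above): a generated guard narrows `unknown`/`any` input at runtime.
//
//   import { isTestType } from './test.guard'
//
//   const data: unknown = JSON.parse(raw)
//   if (isTestType(data)) {
//     // `data` is now typed as TestType
//   }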
| |
tls.py
|
# -*- coding: utf-8 -*-
"""
Enforce state for SSL/TLS
=========================
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import logging
import time
__virtualname__ = "tls"
log = logging.getLogger(__name__)
def __virtual__():
if "tls.cert_info" not in __salt__:
return False
return __virtualname__
def
|
(name, weeks=0, days=0, hours=0, minutes=0, seconds=0):
"""
Verify that a TLS certificate is valid now and (optionally) will be valid
for the time specified through weeks, days, hours, minutes, and seconds.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
now = time.time()
try:
cert_info = __salt__["tls.cert_info"](name)
except IOError as exc:
ret["comment"] = "{}".format(exc)
ret["result"] = False
log.error(ret["comment"])
return ret
# verify that the cert is valid *now*
if now < cert_info["not_before"]:
ret["comment"] = "Certificate is not yet valid"
return ret
if now > cert_info["not_after"]:
ret["comment"] = "Certificate is expired"
return ret
# verify the cert will be valid for defined time
delta_remaining = datetime.timedelta(seconds=cert_info["not_after"] - now)
delta_kind_map = {
"weeks": weeks,
"days": days,
"hours": hours,
"minutes": minutes,
"seconds": seconds,
}
delta_min = datetime.timedelta(**delta_kind_map)
    # if there isn't enough time remaining, we consider it a failure
if delta_remaining < delta_min:
ret[
"comment"
] = "Certificate will expire in {0}, which is less than {1}".format(
delta_remaining, delta_min
)
return ret
ret["result"] = True
ret["comment"] = "Certificate is valid for {0}".format(delta_remaining)
return ret
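# A minimal SLS usage sketch (the certificate path and thresholds below are
# illustrative only):
#
#   /etc/ssl/example.pem:
#     tls.valid_certificate:
#       - weeks: 2
#       - days: 3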
|
valid_certificate
|
solution_test.go
|
package solution
import (
"testing"
)
func
|
(t *testing.T) {
n1 := 2
r1 := 2
n2 := 3
r2 := 3
n3 := 7
r3 := 21
resu1 := climbStairs(n1)
resu2 := climbStairs(n2)
resu3 := climbStairs(n3)
if resu1 != r1 || resu2 != r2 || resu3 != r3 {
t.Fail()
}
}
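// A minimal reference sketch of climbStairs (hypothetical -- the real
// solution file is not shown here), using the Fibonacci recurrence that the
// expected values above follow:
//
//	func climbStairs(n int) int {
//		a, b := 1, 1
//		for i := 2; i <= n; i++ {
//			a, b = b, a+b
//		}
//		return b
//	}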
|
TestClimbStairs
|
arrays.rs
|
/* Fixed-size list where all elements share the same data type */
pub fn
|
(){
let mut numbers: [i32; 5] = [1,2,3,4,5];
println!("Numbers: {:?}", numbers);
    // We can re-assign values, but we CANNOT add new elements
    numbers[1] = 20;
    println!("Numbers: {:?}", numbers);
// Get value based on index of array
println!("First element: {}", numbers[0]);
// Get length of array
println!("Length of: {}", numbers.len());
// Get slice
let slice: &[i32] = &numbers[0..2];
println!("Slice: {:?}", slice);
}
|
run
|
blake2.rs
|
use evm::{Context, ExitError};
use crate::precompiles::{Precompile, PrecompileOutput, PrecompileResult};
use crate::prelude::{mem, Borrowed, PhantomData, TryInto};
use crate::AuroraState;
/// Blake2 costs.
mod costs {
/// Cost per round of Blake2 F.
pub(super) const F_ROUND: u64 = 1;
}
/// Blake2 constants.
mod consts {
pub(super) const INPUT_LENGTH: usize = 213;
}
pub(super) struct Blake2F<S>(PhantomData<S>);
impl<S> Blake2F<S> {
pub(super) const ADDRESS: [u8; 20] = super::make_address(0, 9);
}
impl<S: AuroraState> Precompile<S> for Blake2F<S> {
fn required_gas(input: &[u8]) -> Result<u64, ExitError> {
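        // Gas scales linearly with the round count: the first 4 bytes of the
        // input encode the number of rounds as a big-endian u32.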
let (int_bytes, _) = input.split_at(mem::size_of::<u32>());
Ok(u64::from(u32::from_be_bytes(
int_bytes.try_into().expect("cannot fail"),
)) * costs::F_ROUND)
}
/// The compression function of the blake2 algorithm.
///
/// Takes as an argument the state vector `h`, message block vector `m` (the last block is padded
/// with zeros to full block size, if required), 2w-bit offset counter `t`, and final block
/// indicator flag `f`. Local vector v[0..15] is used in processing. F returns a new state vector.
/// The number of rounds, `r`, is 12 for BLAKE2b and 10 for BLAKE2s. Rounds are numbered from 0 to
/// r - 1.
///
/// See: https://eips.ethereum.org/EIPS/eip-152
/// See: https://etherscan.io/address/0000000000000000000000000000000000000009
fn run(
input: &[u8],
target_gas: u64,
_context: &Context,
_state: &mut S,
_is_static: bool,
) -> PrecompileResult
|
}
#[cfg(test)]
mod tests {
use crate::prelude::Vec;
use crate::test_utils::{new_context, new_state};
use super::*;
// [4 bytes for rounds]
// [64 bytes for h]
// [128 bytes for m]
// [8 bytes for t_0]
// [8 bytes for t_1]
// [1 byte for f]
const INPUT: &str = "\
0000000c\
48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5\
d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b\
6162630000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0300000000000000\
0000000000000000\
01";
fn test_blake2f_out_of_gas() -> PrecompileResult {
let input = hex::decode(INPUT).unwrap();
Blake2F::run(&input, 11, &new_context(), &mut new_state(), false)
}
fn test_blake2f_empty() -> PrecompileResult {
let input = [0u8; 0];
Blake2F::run(&input, 0, &new_context(), &mut new_state(), false)
}
fn test_blake2f_invalid_len_1() -> PrecompileResult {
let input = hex::decode(
"\
00000c\
48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5\
d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b\
6162630000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0300000000000000\
0000000000000000\
01",
)
.unwrap();
Blake2F::run(&input, 12, &new_context(), &mut new_state(), false)
}
fn test_blake2f_invalid_len_2() -> PrecompileResult {
let input = hex::decode(
"\
000000000c\
48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5\
d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b\
6162630000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0300000000000000\
0000000000000000\
01",
)
.unwrap();
Blake2F::run(&input, 12, &new_context(), &mut new_state(), false)
}
fn test_blake2f_invalid_flag() -> PrecompileResult {
let input = hex::decode(
"\
0000000c\
48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5\
d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b\
6162630000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0300000000000000\
0000000000000000\
02",
)
.unwrap();
Blake2F::run(&input, 12, &new_context(), &mut new_state(), false)
}
fn test_blake2f_r_0() -> Vec<u8> {
let input = hex::decode(
"\
00000000\
48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5\
d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b\
6162630000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0300000000000000\
0000000000000000\
01",
)
.unwrap();
Blake2F::run(&input, 12, &new_context(), &mut new_state(), false)
.unwrap()
.output
}
fn test_blake2f_r_12() -> Vec<u8> {
let input = hex::decode(INPUT).unwrap();
Blake2F::run(&input, 12, &new_context(), &mut new_state(), false)
.unwrap()
.output
}
fn test_blake2f_final_block_false() -> Vec<u8> {
let input = hex::decode(
"\
0000000c\
48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5\
d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b\
6162630000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0000000000000000000000000000000000000000000000000000000000000000\
0300000000000000\
0000000000000000\
00",
)
.unwrap();
Blake2F::run(&input, 12, &new_context(), &mut new_state(), false)
.unwrap()
.output
}
#[test]
fn test_blake2f() {
assert!(matches!(
test_blake2f_out_of_gas(),
Err(ExitError::OutOfGas)
));
assert!(matches!(
test_blake2f_empty(),
Err(ExitError::Other(Borrowed("ERR_BLAKE2F_INVALID_LEN")))
));
assert!(matches!(
test_blake2f_invalid_len_1(),
Err(ExitError::Other(Borrowed("ERR_BLAKE2F_INVALID_LEN")))
));
assert!(matches!(
test_blake2f_invalid_len_2(),
Err(ExitError::Other(Borrowed("ERR_BLAKE2F_INVALID_LEN")))
));
assert!(matches!(
test_blake2f_invalid_flag(),
Err(ExitError::Other(Borrowed("ERR_BLAKE2F_FINAL_FLAG",)))
));
let expected = hex::decode(
"08c9bcf367e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d\
282e6ad7f520e511f6c3e2b8c68059b9442be0454267ce079217e1319cde05b",
)
.unwrap();
assert_eq!(test_blake2f_r_0(), expected);
let expected = hex::decode(
"ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d1\
7d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923",
)
.unwrap();
assert_eq!(test_blake2f_r_12(), expected);
let expected = hex::decode(
"75ab69d3190a562c51aef8d88f1c2775876944407270c42c9844252c26d28752\
98743e7f6d5ea2f2d3e8d226039cd31b4e426ac4f2d3d666a610c2116fde4735",
)
.unwrap();
assert_eq!(test_blake2f_final_block_false(), expected);
}
}
|
{
if input.len() != consts::INPUT_LENGTH {
return Err(ExitError::Other(Borrowed("ERR_BLAKE2F_INVALID_LEN")));
}
let cost = Self::required_gas(input)?;
if cost > target_gas {
return Err(ExitError::OutOfGas);
}
let mut rounds_bytes = [0u8; 4];
rounds_bytes.copy_from_slice(&input[0..4]);
let rounds = u32::from_be_bytes(rounds_bytes);
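        // Parse h: the 8-word (u64, little-endian) state vector at byte offset 4.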
let mut h = [0u64; 8];
for (mut x, value) in h.iter_mut().enumerate() {
let mut word: [u8; 8] = [0u8; 8];
x = x * 8 + 4;
word.copy_from_slice(&input[x..(x + 8)]);
*value = u64::from_le_bytes(word);
}
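        // Parse m: the 16-word (u64, little-endian) message block at byte offset 68.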
let mut m = [0u64; 16];
for (mut x, value) in m.iter_mut().enumerate() {
let mut word: [u8; 8] = [0u8; 8];
x = x * 8 + 68;
word.copy_from_slice(&input[x..(x + 8)]);
*value = u64::from_le_bytes(word);
}
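        // Parse t: the two u64 (little-endian) offset counters at byte offset 196.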
let mut t: [u64; 2] = [0u64; 2];
for (mut x, value) in t.iter_mut().enumerate() {
let mut word: [u8; 8] = [0u8; 8];
x = x * 8 + 196;
word.copy_from_slice(&input[x..(x + 8)]);
*value = u64::from_le_bytes(word);
}
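        // The final block indicator flag (byte 212) must be 0 or 1.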
if input[212] != 0 && input[212] != 1 {
return Err(ExitError::Other(Borrowed("ERR_BLAKE2F_FINAL_FLAG")));
}
let finished = input[212] != 0;
let output = blake2::blake2b_f(rounds, h, m, t, finished).to_vec();
Ok(PrecompileOutput::without_logs(cost, output))
}
|
config.go
|
package config
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"reflect"
"strings"
"time"
"github.com/golang/glog"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/spf13/viper"
)
// Configuration specifies the static application config.
type Configuration struct {
ExternalURL string `mapstructure:"external_url"`
Host string `mapstructure:"host"`
Port int `mapstructure:"port"`
Client HTTPClient `mapstructure:"http_client"`
CacheClient HTTPClient `mapstructure:"http_client_cache"`
AdminPort int `mapstructure:"admin_port"`
EnableGzip bool `mapstructure:"enable_gzip"`
// StatusResponse is the string which will be returned by the /status endpoint when things are OK.
// If empty, it will return a 204 with no content.
StatusResponse string `mapstructure:"status_response"`
AuctionTimeouts AuctionTimeouts `mapstructure:"auction_timeouts_ms"`
CacheURL Cache `mapstructure:"cache"`
ExtCacheURL ExternalCache `mapstructure:"external_cache"`
RecaptchaSecret string `mapstructure:"recaptcha_secret"`
HostCookie HostCookie `mapstructure:"host_cookie"`
Metrics Metrics `mapstructure:"metrics"`
DataCache DataCache `mapstructure:"datacache"`
StoredRequests StoredRequests `mapstructure:"stored_requests"`
StoredRequestsAMP StoredRequests `mapstructure:"stored_amp_req"`
CategoryMapping StoredRequests `mapstructure:"category_mapping"`
VTrack VTrack `mapstructure:"vtrack"`
Event Event `mapstructure:"event"`
Accounts StoredRequests `mapstructure:"accounts"`
// Note that StoredVideo refers to stored video requests, and has nothing to do with caching video creatives.
StoredVideo StoredRequests `mapstructure:"stored_video_req"`
// Adapters should have a key for every openrtb_ext.BidderName, converted to lower-case.
	// See also: https://github.com/spf13/viper/issues/371#issuecomment-335388559
Adapters map[string]Adapter `mapstructure:"adapters"`
MaxRequestSize int64 `mapstructure:"max_request_size"`
Analytics Analytics `mapstructure:"analytics"`
AMPTimeoutAdjustment int64 `mapstructure:"amp_timeout_adjustment_ms"`
GDPR GDPR `mapstructure:"gdpr"`
CCPA CCPA `mapstructure:"ccpa"`
LMT LMT `mapstructure:"lmt"`
CurrencyConverter CurrencyConverter `mapstructure:"currency_converter"`
DefReqConfig DefReqConfig `mapstructure:"default_request"`
VideoStoredRequestRequired bool `mapstructure:"video_stored_request_required"`
// Array of blacklisted apps that is used to create the hash table BlacklistedAppMap so App.ID's can be instantly accessed.
BlacklistedApps []string `mapstructure:"blacklisted_apps,flow"`
BlacklistedAppMap map[string]bool
// Array of blacklisted accounts that is used to create the hash table BlacklistedAcctMap so Account.ID's can be instantly accessed.
BlacklistedAccts []string `mapstructure:"blacklisted_accts,flow"`
BlacklistedAcctMap map[string]bool
// Is publisher/account ID required to be submitted in the OpenRTB2 request
AccountRequired bool `mapstructure:"account_required"`
// AccountDefaults defines default settings for valid accounts that are partially defined
// and provides a way to set global settings that can be overridden at account level.
AccountDefaults Account `mapstructure:"account_defaults"`
// accountDefaultsJSON is the internal serialized form of AccountDefaults used for json merge
accountDefaultsJSON json.RawMessage
// Local private file containing SSL certificates
PemCertsFile string `mapstructure:"certificates_file"`
// Custom headers to handle request timeouts from queueing infrastructure
RequestTimeoutHeaders RequestTimeoutHeaders `mapstructure:"request_timeout_headers"`
// Debug/logging flags go here
Debug Debug `mapstructure:"debug"`
// RequestValidation specifies the request validation options.
RequestValidation RequestValidation `mapstructure:"request_validation"`
// When true, PBS will assign a randomly generated UUID to req.Source.TID if it is empty
AutoGenSourceTID bool `mapstructure:"auto_gen_source_tid"`
}
const MIN_COOKIE_SIZE_BYTES = 500
type HTTPClient struct {
MaxConnsPerHost int `mapstructure:"max_connections_per_host"`
MaxIdleConns int `mapstructure:"max_idle_connections"`
MaxIdleConnsPerHost int `mapstructure:"max_idle_connections_per_host"`
IdleConnTimeout int `mapstructure:"idle_connection_timeout_seconds"`
}
func (cfg *Configuration) validate() []error {
var errs []error
errs = cfg.AuctionTimeouts.validate(errs)
errs = cfg.StoredRequests.validate(errs)
errs = cfg.StoredRequestsAMP.validate(errs)
errs = cfg.Accounts.validate(errs)
errs = cfg.CategoryMapping.validate(errs)
errs = cfg.StoredVideo.validate(errs)
errs = cfg.Metrics.validate(errs)
if cfg.MaxRequestSize < 0 {
errs = append(errs, fmt.Errorf("cfg.max_request_size must be >= 0. Got %d", cfg.MaxRequestSize))
}
errs = cfg.GDPR.validate(errs)
errs = cfg.CurrencyConverter.validate(errs)
errs = validateAdapters(cfg.Adapters, errs)
errs = cfg.Debug.validate(errs)
errs = cfg.ExtCacheURL.validate(errs)
if cfg.AccountDefaults.Disabled {
glog.Warning(`With account_defaults.disabled=true, host-defined accounts must exist and have "disabled":false. All other requests will be rejected.`)
}
return errs
}
type AuctionTimeouts struct {
// The default timeout is used if the user's request didn't define one. Use 0 if there's no default.
Default uint64 `mapstructure:"default"`
// The max timeout is used as an absolute cap, to prevent excessively long ones. Use 0 for no cap
Max uint64 `mapstructure:"max"`
}
func (cfg *AuctionTimeouts) validate(errs []error) []error {
if cfg.Max < cfg.Default {
errs = append(errs, fmt.Errorf("auction_timeouts_ms.max cannot be less than auction_timeouts_ms.default. max=%d, default=%d", cfg.Max, cfg.Default))
}
return errs
}
func (data *ExternalCache) validate(errs []error) []error {
if data.Host == "" && data.Path == "" {
// Both host and path can be blank. No further validation needed
return errs
}
if data.Scheme != "" && data.Scheme != "http" && data.Scheme != "https" {
return append(errs, errors.New("External cache Scheme must be http or https if specified"))
}
	// At this point host, path, or both are non-empty; both must be specified together.
if data.Host == "" && data.Path != "" || data.Host != "" && data.Path == "" {
return append(errs, errors.New("External cache Host and Path must both be specified"))
}
if strings.HasSuffix(data.Host, "/") {
return append(errs, errors.New(fmt.Sprintf("External cache Host '%s' must not end with a path separator", data.Host)))
}
if strings.ContainsAny(data.Host, "://") {
return append(errs, errors.New(fmt.Sprintf("External cache Host must not specify a protocol. '%s'", data.Host)))
}
if !strings.HasPrefix(data.Path, "/") {
return append(errs, errors.New(fmt.Sprintf("External cache Path '%s' must begin with a path separator", data.Path)))
}
urlObj, err := url.Parse("https://" + data.Host + data.Path)
if err != nil {
return append(errs, errors.New(fmt.Sprintf("External cache Path validation error: %s ", err.Error())))
}
if urlObj.Host != data.Host {
return append(errs, errors.New(fmt.Sprintf("External cache Host '%s' is invalid", data.Host)))
}
if urlObj.Path != data.Path {
return append(errs, errors.New("External cache Path is invalid"))
}
return errs
}
// LimitAuctionTimeout returns the min of the requested timeout and cfg.Max.
// Both values treat "0" as "infinite".
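// For example, with Default=200 and Max=1000 (milliseconds):
//   requested 0     -> 200ms (default applied)
//   requested 500ms -> 500ms (within the cap)
//   requested 5s    -> 1s    (capped at Max)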
func (cfg *AuctionTimeouts) LimitAuctionTimeout(requested time.Duration) time.Duration {
if requested == 0 && cfg.Default != 0 {
return time.Duration(cfg.Default) * time.Millisecond
}
if cfg.Max > 0 {
maxTimeout := time.Duration(cfg.Max) * time.Millisecond
if requested == 0 || requested > maxTimeout {
return maxTimeout
}
}
return requested
}
// Privacy is a grouping of privacy related configs to assist in dependency injection.
type Privacy struct {
|
CCPA CCPA
GDPR GDPR
LMT LMT
}
type GDPR struct {
Enabled bool `mapstructure:"enabled"`
HostVendorID int `mapstructure:"host_vendor_id"`
UsersyncIfAmbiguous bool `mapstructure:"usersync_if_ambiguous"`
Timeouts GDPRTimeouts `mapstructure:"timeouts_ms"`
NonStandardPublishers []string `mapstructure:"non_standard_publishers,flow"`
NonStandardPublisherMap map[string]struct{}
TCF1 TCF1 `mapstructure:"tcf1"`
TCF2 TCF2 `mapstructure:"tcf2"`
AMPException bool `mapstructure:"amp_exception"` // Deprecated: Use account-level GDPR settings (gdpr.integration_enabled.amp) instead
// EEACountries (EEA = European Economic Area) are a list of countries where we should assume GDPR applies.
// If the gdpr flag is unset in a request, but geo.country is set, we will assume GDPR applies if and only
// if the country matches one on this list. If both the GDPR flag and country are not set, we default
// to UsersyncIfAmbiguous
EEACountries []string `mapstructure:"eea_countries"`
EEACountriesMap map[string]struct{}
}
func (cfg *GDPR) validate(errs []error) []error {
if cfg.HostVendorID < 0 || cfg.HostVendorID > 0xffff {
errs = append(errs, fmt.Errorf("gdpr.host_vendor_id must be in the range [0, %d]. Got %d", 0xffff, cfg.HostVendorID))
}
if cfg.HostVendorID == 0 {
glog.Warning("gdpr.host_vendor_id was not specified. Host company GDPR checks will be skipped.")
}
if cfg.AMPException == true {
errs = append(errs, fmt.Errorf("gdpr.amp_exception has been discontinued and must be removed from your config. If you need to disable GDPR for AMP, you may do so per-account (gdpr.integration_enabled.amp) or at the host level for the default account (account_defaults.gdpr.integration_enabled.amp)"))
}
return errs
}
type GDPRTimeouts struct {
InitVendorlistFetch int `mapstructure:"init_vendorlist_fetches"`
ActiveVendorlistFetch int `mapstructure:"active_vendorlist_fetch"`
}
func (t *GDPRTimeouts) InitTimeout() time.Duration {
return time.Duration(t.InitVendorlistFetch) * time.Millisecond
}
func (t *GDPRTimeouts) ActiveTimeout() time.Duration {
return time.Duration(t.ActiveVendorlistFetch) * time.Millisecond
}
// TCF1 defines the TCF1 specific configurations for GDPR
type TCF1 struct {
FetchGVL bool `mapstructure:"fetch_gvl"`
FallbackGVLPath string `mapstructure:"fallback_gvl_path"`
}
// TCF2 defines the TCF2 specific configurations for GDPR
type TCF2 struct {
Enabled bool `mapstructure:"enabled"`
Purpose1 PurposeDetail `mapstructure:"purpose1"`
Purpose2 PurposeDetail `mapstructure:"purpose2"`
Purpose7 PurposeDetail `mapstructure:"purpose7"`
SpecialPurpose1 PurposeDetail `mapstructure:"special_purpose1"`
PurposeOneTreatment PurposeOneTreatement `mapstructure:"purpose_one_treatement"`
}
// Making a purpose struct so purpose specific details can be added later.
type PurposeDetail struct {
Enabled bool `mapstructure:"enabled"`
}
type PurposeOneTreatement struct {
Enabled bool `mapstructure:"enabled"`
AccessAllowed bool `mapstructure:"access_allowed"`
}
type CCPA struct {
Enforce bool `mapstructure:"enforce"`
}
type LMT struct {
Enforce bool `mapstructure:"enforce"`
}
type Analytics struct {
File FileLogs `mapstructure:"file"`
Pubstack Pubstack `mapstructure:"pubstack"`
}
type CurrencyConverter struct {
FetchURL string `mapstructure:"fetch_url"`
FetchIntervalSeconds int `mapstructure:"fetch_interval_seconds"`
StaleRatesSeconds int `mapstructure:"stale_rates_seconds"`
}
func (cfg *CurrencyConverter) validate(errs []error) []error {
if cfg.FetchIntervalSeconds < 0 {
errs = append(errs, fmt.Errorf("currency_converter.fetch_interval_seconds must be in the range [0, %d]. Got %d", 0xffff, cfg.FetchIntervalSeconds))
}
return errs
}
// FileLogs Corresponding config for FileLogger as a PBS Analytics Module
type FileLogs struct {
Filename string `mapstructure:"filename"`
}
type Pubstack struct {
Enabled bool `mapstructure:"enabled"`
ScopeId string `mapstructure:"scopeid"`
IntakeUrl string `mapstructure:"endpoint"`
Buffers PubstackBuffer `mapstructure:"buffers"`
ConfRefresh string `mapstructure:"configuration_refresh_delay"`
}
type PubstackBuffer struct {
BufferSize string `mapstructure:"size"`
EventCount int `mapstructure:"count"`
Timeout string `mapstructure:"timeout"`
}
type VTrack struct {
TimeoutMS int64 `mapstructure:"timeout_ms"`
AllowUnknownBidder bool `mapstructure:"allow_unknown_bidder"`
Enabled bool `mapstructure:"enabled"`
}
type Event struct {
TimeoutMS int64 `mapstructure:"timeout_ms"`
}
type HostCookie struct {
Domain string `mapstructure:"domain"`
Family string `mapstructure:"family"`
CookieName string `mapstructure:"cookie_name"`
OptOutURL string `mapstructure:"opt_out_url"`
OptInURL string `mapstructure:"opt_in_url"`
MaxCookieSizeBytes int `mapstructure:"max_cookie_size_bytes"`
OptOutCookie Cookie `mapstructure:"optout_cookie"`
// Cookie timeout in days
TTL int64 `mapstructure:"ttl_days"`
}
func (cfg *HostCookie) TTLDuration() time.Duration {
return time.Duration(cfg.TTL) * time.Hour * 24
}
type RequestTimeoutHeaders struct {
RequestTimeInQueue string `mapstructure:"request_time_in_queue"`
RequestTimeoutInQueue string `mapstructure:"request_timeout_in_queue"`
}
type Metrics struct {
Influxdb InfluxMetrics `mapstructure:"influxdb"`
Prometheus PrometheusMetrics `mapstructure:"prometheus"`
Disabled DisabledMetrics `mapstructure:"disabled_metrics"`
}
type DisabledMetrics struct {
// True if we want to stop collecting account-to-adapter metrics
AccountAdapterDetails bool `mapstructure:"account_adapter_details"`
// True if we don't want to collect metrics about the connections prebid
// server establishes with bidder servers such as the number of connections
// that were created or reused.
AdapterConnectionMetrics bool `mapstructure:"adapter_connections_metrics"`
}
func (cfg *Metrics) validate(errs []error) []error {
return cfg.Prometheus.validate(errs)
}
type InfluxMetrics struct {
Host string `mapstructure:"host"`
Database string `mapstructure:"database"`
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
MetricSendInterval int `mapstructure:"metric_send_interval"`
}
type PrometheusMetrics struct {
Port int `mapstructure:"port"`
Namespace string `mapstructure:"namespace"`
Subsystem string `mapstructure:"subsystem"`
TimeoutMillisRaw int `mapstructure:"timeout_ms"`
}
func (cfg *PrometheusMetrics) validate(errs []error) []error {
if cfg.Port > 0 && cfg.TimeoutMillisRaw <= 0 {
errs = append(errs, fmt.Errorf("metrics.prometheus.timeout_ms must be positive if metrics.prometheus.port is defined. Got timeout=%d and port=%d", cfg.TimeoutMillisRaw, cfg.Port))
}
return errs
}
func (m *PrometheusMetrics) Timeout() time.Duration {
return time.Duration(m.TimeoutMillisRaw) * time.Millisecond
}
type DataCache struct {
Type string `mapstructure:"type"`
Filename string `mapstructure:"filename"`
CacheSize int `mapstructure:"cache_size"`
TTLSeconds int `mapstructure:"ttl_seconds"`
}
// ExternalCache configures the externally accessible cache url.
type ExternalCache struct {
Scheme string `mapstructure:"scheme"`
Host string `mapstructure:"host"`
Path string `mapstructure:"path"`
}
// Cache configures the url used internally by Prebid Server to communicate with Prebid Cache.
type Cache struct {
Scheme string `mapstructure:"scheme"`
Host string `mapstructure:"host"`
Query string `mapstructure:"query"`
// A static timeout here is not ideal. This is a hack because we have some aggressive timelines for OpenRTB support.
// This value specifies how much time the prebid server host expects a call to prebid cache to take.
//
// OpenRTB allows the caller to specify the auction timeout. Prebid Server will subtract _this_ amount of time
// from the timeout it gives demand sources to respond.
//
// In reality, the cache response time will probably fluctuate with the traffic over time. Someday,
// this should be replaced by code which tracks the response time of recent cache calls and
// adjusts the time dynamically.
ExpectedTimeMillis int `mapstructure:"expected_millis"`
DefaultTTLs DefaultTTLs `mapstructure:"default_ttl_seconds"`
}
// Default TTLs to use to cache bids for different types of imps.
type DefaultTTLs struct {
Banner int `mapstructure:"banner"`
Video int `mapstructure:"video"`
Native int `mapstructure:"native"`
Audio int `mapstructure:"audio"`
}
type Cookie struct {
Name string `mapstructure:"name"`
Value string `mapstructure:"value"`
}
// DefReqConfig defines the source(s) of the default request aliases.
// Currently only the filesystem is supported, but the config structure is kept for future sources.
type DefReqConfig struct {
Type string `mapstructure:"type"`
FileSystem DefReqFiles `mapstructure:"file"`
AliasInfo bool `mapstructure:"alias_info"`
}
type DefReqFiles struct {
FileName string `mapstructure:"name"`
}
type Debug struct {
TimeoutNotification TimeoutNotification `mapstructure:"timeout_notification"`
}
func (cfg *Debug) validate(errs []error) []error {
return cfg.TimeoutNotification.validate(errs)
}
type TimeoutNotification struct {
// Log timeout notifications in the application log
Log bool `mapstructure:"log"`
// Fraction of notifications to log
SamplingRate float32 `mapstructure:"sampling_rate"`
// Only log failures
FailOnly bool `mapstructure:"fail_only"`
}
func (cfg *TimeoutNotification) validate(errs []error) []error {
if cfg.SamplingRate < 0.0 || cfg.SamplingRate > 1.0 {
errs = append(errs, fmt.Errorf("debug.timeout_notification.sampling_rate must be positive and not greater than 1.0. Got %f", cfg.SamplingRate))
}
return errs
}
// New uses viper to get our server configurations.
func New(v *viper.Viper) (*Configuration, error) {
var c Configuration
if err := v.Unmarshal(&c); err != nil {
return nil, fmt.Errorf("viper failed to unmarshal app config: %v", err)
}
c.setDerivedDefaults()
if err := c.RequestValidation.Parse(); err != nil {
return nil, err
}
if err := isValidCookieSize(c.HostCookie.MaxCookieSizeBytes); err != nil {
		glog.Fatal(fmt.Sprintf("Max cookie size %d cannot be less than %d", c.HostCookie.MaxCookieSizeBytes, MIN_COOKIE_SIZE_BYTES))
return nil, err
}
// Update account defaults and generate base json for patch
c.AccountDefaults.CacheTTL = c.CacheURL.DefaultTTLs // comment this out to set explicitly in config
if err := c.MarshalAccountDefaults(); err != nil {
return nil, err
}
// To look for a request's publisher_id in the NonStandardPublishers list in
// O(1) time, we fill this hash table located in the NonStandardPublisherMap field of GDPR
var s struct{}
c.GDPR.NonStandardPublisherMap = make(map[string]struct{})
for i := 0; i < len(c.GDPR.NonStandardPublishers); i++ {
c.GDPR.NonStandardPublisherMap[c.GDPR.NonStandardPublishers[i]] = s
}
	// Populate the EEA countries hash table for O(1) lookup.
	c.GDPR.EEACountriesMap = make(map[string]struct{})
	for i := 0; i < len(c.GDPR.EEACountries); i++ {
		c.GDPR.EEACountriesMap[c.GDPR.EEACountries[i]] = s
	}
	// To look for a request's app_id in O(1) time, we fill this hash table located
	// in the BlacklistedAppMap field of the Configuration struct defined in this file
c.BlacklistedAppMap = make(map[string]bool)
for i := 0; i < len(c.BlacklistedApps); i++ {
c.BlacklistedAppMap[c.BlacklistedApps[i]] = true
}
	// To look for a request's account id in O(1) time, we fill this hash table located
	// in the BlacklistedAcctMap field of the Configuration struct defined in this file
c.BlacklistedAcctMap = make(map[string]bool)
for i := 0; i < len(c.BlacklistedAccts); i++ {
c.BlacklistedAcctMap[c.BlacklistedAccts[i]] = true
}
// Migrate combo stored request config to separate stored_reqs and amp stored_reqs configs.
resolvedStoredRequestsConfig(&c)
glog.Info("Logging the resolved configuration:")
logGeneral(reflect.ValueOf(c), " \t")
if errs := c.validate(); len(errs) > 0 {
return &c, errortypes.NewAggregateErrors("validation errors", errs)
}
return &c, nil
}
// MarshalAccountDefaults compiles AccountDefaults into the JSON format used for merge patch
func (cfg *Configuration) MarshalAccountDefaults() error {
var err error
if cfg.accountDefaultsJSON, err = json.Marshal(cfg.AccountDefaults); err != nil {
glog.Warningf("converting %+v to json: %v", cfg.AccountDefaults, err)
}
return err
}
// AccountDefaultsJSON returns the precompiled JSON form of account_defaults
func (cfg *Configuration) AccountDefaultsJSON() json.RawMessage {
return cfg.accountDefaultsJSON
}
// Allows for a protocol-relative URL if the scheme is empty
func (cfg *Cache) GetBaseURL() string {
cfg.Scheme = strings.ToLower(cfg.Scheme)
if strings.Contains(cfg.Scheme, "https") {
return fmt.Sprintf("https://%s", cfg.Host)
}
if strings.Contains(cfg.Scheme, "http") {
return fmt.Sprintf("http://%s", cfg.Host)
}
return fmt.Sprintf("//%s", cfg.Host)
}
func (cfg *Configuration) GetCachedAssetURL(uuid string) string {
return fmt.Sprintf("%s/cache?%s", cfg.CacheURL.GetBaseURL(), strings.Replace(cfg.CacheURL.Query, "%PBS_CACHE_UUID%", uuid, 1))
}
// Initialize any default config values which have sensible defaults, but those defaults depend on other config values.
//
// For example, the typical Bidder's usersync URL includes the PBS config.external_url, because it redirects to the `external_url/setuid` endpoint.
//
func (cfg *Configuration) setDerivedDefaults() {
externalURL := cfg.ExternalURL
setDefaultUsersync(cfg.Adapters, openrtb_ext.Bidder33Across, "https://ic.tynt.com/r/d?m=xch&rt=html&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&ru="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3D33across%26uid%3D33XUSERID33X&id=zzz000000000002zzz")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAcuityAds, "https://cs.admanmedia.com/sync/prebid?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dacuityads%26uid%3D%5BUID%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdform, "https://cm.adform.net/cookie?redirect_url="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadform%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
// openrtb_ext.BidderAdgeneration doesn't have a good default.
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdkernel, "https://sync.adkernel.com/user-sync?t=image&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadkernel%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7BUID%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdkernelAdn, "https://tag.adkernel.com/syncr?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3DadkernelAdn%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7BUID%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdpone, "https://usersync.adpone.com/csync?redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadpone%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7Buid%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdtarget, "https://sync.console.adtarget.com.tr/csync?t=p&ep=0&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadtarget%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7Buid%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdtelligent, "https://sync.adtelligent.com/csync?t=p&ep=0&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadtelligent%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7Buid%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdmixer, "https://inv-nets.admixer.net/adxcm.aspx?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir=1&rurl="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadmixer%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%24visitor_cookie%24%24")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdman, "https://sync.admanmedia.com/pbs.gif?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadman%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%5BUID%5D")
// openrtb_ext.BidderAdOcean doesn't have a good default.
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAdvangelists, "https://nep.advangelists.com/xp/user-sync?acctid={aid}&&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadvangelists%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAJA, "https://ad.as.amanad.adtdp.com/v1/sync/ssp?ssp=4&gdpr={{.GDPR}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Daja%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%25s")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAMX, "https://prebid.a-mo.net/cchain/0?gdpr={{.GDPR}}&us_privacy={{.USPrivacy}}&cb="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Damx%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAppnexus, "https://ib.adnxs.com/getuid?"+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dadnxs%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderAvocet, "https://ads.avct.cloud/getuid?&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&url="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Davocet%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7B%7BUUID%7D%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderBeachfront, "https://sync.bfmio.com/sync_s2s?gdpr={{.GDPR}}&us_privacy={{.USPrivacy}}&url="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dbeachfront%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%5Bio_cid%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderBeintoo, "https://ib.beintoo.com/um?ssp=pbs&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dbeintoo%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderBrightroll, "https://pr-bh.ybp.yahoo.com/sync/appnexusprebidserver/?gdpr={{.GDPR}}&euconsent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&url="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dbrightroll%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderColossus, "https://sync.colossusssp.com/pbs.gif?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dcolossus%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%5BUID%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderConnectAd, "https://cdn.connectad.io/connectmyusers.php?gdpr={{.GDPR}}&consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&cb="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dconnectad%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderConsumable, "https://e.serverbid.com/udb/9969/match?gdpr={{.GDPR}}&euconsent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dconsumable%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderConversant, "https://prebid-match.dotomi.com/match/bounce/current?version=1&networkId=72582&rurl="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dconversant%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderCpmstar, "https://server.cpmstar.com/usersync.aspx?gdpr={{.GDPR}}&consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dcpmstar%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderDatablocks, "https://sync.v5prebid.datablocks.net/s2ssync?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Ddatablocks%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7Buid%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderDmx, "https://dmx.districtm.io/s/v1/img/s/10007?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Ddatablocks%26gdpr%3D%24%7Bgdpr%7D%26gdpr_consent%3D%24%7Bgdpr_consent%7D%26uid%3D%24%7Buid%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderDeepintent, "https://match.deepintent.com/usersync/136?id=unk&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Ddeepintent%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%5BUID%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderEmxDigital, "https://cs.emxdgt.com/um?ssp=pbs&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Demx_digital%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderEngageBDR, "https://match.bnmla.com/usersync/s2s_sync?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dengagebdr%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7BUUID%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderEPlanning, "https://ads.us.e-planning.net/uspd/1/?du="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Deplanning%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
// openrtb_ext.BidderFacebook doesn't have a good default.
// openrtb_ext.BidderGamma doesn't have a good default.
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderGamoshi, "https://rtb.gamoshi.io/user_sync_prebid?gdpr={{.GDPR}}&consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&rurl="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dgamoshi%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%5Bgusr%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderGrid, "https://x.bidswitch.net/check_uuid/"+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dgrid%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7BBSW_UUID%7D?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderGumGum, "https://rtb.gumgum.com/usync/prbds2s?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dgumgum%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderImprovedigital, "https://ad.360yield.com/server_match?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dimprovedigital%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7BPUB_USER_ID%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderIx, "https://ssum.casalemedia.com/usermatchredir?s=184932&cb="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dix%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D")
// openrtb_ext.BidderInvibes doesn't have a good default.
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderKrushmedia, "https://cs.krushmedia.com/4e4abdd5ecc661643458a730b1aa927d.gif?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dkrushmedia%26uid%3D%5BUID%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderLifestreet, "https://ads.lfstmedia.com/idsync/137062?synced=1&ttl=1s&rurl="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dlifestreet%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%24visitor_cookie%24%24")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderLockerDome, "https://lockerdome.com/usync/prebidserver?pid="+cfg.Adapters["lockerdome"].PlatformID+"&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dlockerdome%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7B%7Buid%7D%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderLogicad, "https://cr-p31.ladsp.jp/cookiesender/31?r=true&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&ru="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dlogicad%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderLunaMedia, "https://api.lunamedia.io/xp/user-sync?redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dlunamedia%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderMarsmedia, "https://dmp.rtbsrv.com/dmp/profiles/cm?p_id=179&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dmarsmedia%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7BUUID%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderMgid, "https://cm.mgid.com/m?cdsp=363893&adu="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dmgid%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%7Bmuidn%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderNanoInteractive, "https://ad.audiencemanager.de/hbs/cookie_sync?gdpr={{.GDPR}}&consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirectUri="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dnanointeractive%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderNinthDecimal, "https://rtb.ninthdecimal.com/xp/user-sync?acctid={aid}&&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dninthdecimal%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderNoBid, "https://ads.servenobid.com/getsync?tek=pbs&ver=1&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dnobid%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderOpenx, "https://rtb.openx.net/sync/prebid?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dopenx%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7BUID%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderPubmatic, "https://ads.pubmatic.com/AdServer/js/user_sync.html?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&predirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dpubmatic%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderPulsepoint, "https://bh.contextweb.com/rtset?pid=561205&ev=1&rurl="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dpulsepoint%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%25%25VGUID%25%25")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderRhythmone, "https://sync.1rx.io/usersync2/rmphb?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Drhythmone%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%5BRX_UUID%5D")
// openrtb_ext.BidderRTBHouse doesn't have a good default.
// openrtb_ext.BidderRubicon doesn't have a good default.
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderSharethrough, "https://match.sharethrough.com/FGMrCMMc/v1?redirectUri="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dsharethrough%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderSmartAdserver, "https://ssbsync-global.smartadserver.com/api/sync?callerId=5&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirectUri="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dsmartadserver%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%5Bssb_sync_pid%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderSmartRTB, "https://market-global.smrtb.com/sync/all?nid=smartrtb&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&rr="+url.QueryEscape(externalURL)+"%252Fsetuid%253Fbidder%253Dsmartrtb%2526gdpr%253D{{.GDPR}}%2526gdpr_consent%253D{{.GDPRConsent}}%2526uid%253D%257BXID%257D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderSmartyAds, "https://as.ck-ie.com/prebid.gif?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dsmartyads%26uid%3D%5BUID%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderSomoaudience, "https://publisher-east.mobileadtrading.com/usersync?ru="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dsomoaudience%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7BUID%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderSonobi, "https://sync.go.sonobi.com/us.gif?loc="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dsonobi%26consent_string%3D{{.GDPR}}%26gdpr%3D{{.GDPRConsent}}%26uid%3D%5BUID%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderSovrn, "https://ap.lijit.com/pixel?redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dsovrn%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderSynacormedia, "https://sync.technoratimedia.com/services?srv=cs&pid=70&cb="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dsynacormedia%26uid%3D%5BUSER_ID%5D")
// openrtb_ext.BidderTappx doesn't have a good default.
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderTelaria, "https://pbs.publishers.tremorhub.com/pubsync?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dtelaria%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%5Btvid%5D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderTriplelift, "https://eb2.3lift.com/getuid?gdpr={{.GDPR}}&cmp_cs={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dtriplelift%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderTripleliftNative, "https://eb2.3lift.com/getuid?gdpr={{.GDPR}}&cmp_cs={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dtriplelift_native%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderUcfunnel, "https://sync.aralego.com/idsync?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&usprivacy={{.USPrivacy}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Ducfunnel%26uid%3DSspCookieUserId")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderUnruly, "https://usermatch.targeting.unrulymedia.com/pbsync?gdpr={{.GDPR}}&consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&rurl="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dunruly%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderValueImpression, "https://rtb.valueimpression.com/usersync?gdpr={{.GDPR}}&consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirect="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dvalueimpression%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderVisx, "https://t.visx.net/s2s_sync?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redir="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dvisx%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7BUUID%7D")
// openrtb_ext.BidderVrtcal doesn't have a good default.
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderYieldlab, "https://ad.yieldlab.net/mr?t=2&pid=9140838&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dyieldlab%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%25%25YL_UID%25%25")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderYieldmo, "https://ads.yieldmo.com/pbsync?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirectUri="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dyieldmo%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderYieldone, "https://y.one.impact-ad.jp/hbs_cs?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&redirectUri="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dyieldone%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24UID")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderZeroClickFraud, "https://s.0cf.io/sync?gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&r="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dzeroclickfraud%26gdpr%3D{{.GDPR}}%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7Buid%7D")
setDefaultUsersync(cfg.Adapters, openrtb_ext.BidderBetween, "https://ads.betweendigital.com/match?bidder_id=pbs&gdpr={{.GDPR}}&gdpr_consent={{.GDPRConsent}}&us_privacy={{.USPrivacy}}&callback_url="+url.QueryEscape(externalURL)+"%2Fsetuid%3Fbidder%3Dbetween%26gdpr%3D0%26gdpr_consent%3D{{.GDPRConsent}}%26uid%3D%24%7BUSER_ID%7D")
}
func setDefaultUsersync(m map[string]Adapter, bidder openrtb_ext.BidderName, defaultValue string) {
lowercased := strings.ToLower(string(bidder))
if m[lowercased].UserSyncURL == "" {
// Go doesn't let us edit the properties of a value inside a map directly.
editable := m[lowercased]
editable.UserSyncURL = defaultValue
m[lowercased] = editable
}
}
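// Illustrative sketch (not part of the original config code): struct values
// stored in a map are not addressable in Go, so assigning to a field through
// the map index fails to compile. The copy-modify-store pattern above is the
// idiomatic workaround.
//
//	adapters := map[string]Adapter{"demo": {}}
//	// adapters["demo"].UserSyncURL = "https://example.com" // compile error
//	entry := adapters["demo"]
//	entry.UserSyncURL = "https://example.com"
//	adapters["demo"] = entry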
// Set the default config values for the viper object we are using.
func SetupViper(v *viper.Viper, filename string) {
if filename != "" {
v.SetConfigName(filename)
v.AddConfigPath(".")
v.AddConfigPath("/etc/config")
}
// Fixes #475: Some defaults will be set just so they are accessible via environment variables
// (basically so viper knows they exist)
v.SetDefault("external_url", "http://localhost:8000")
v.SetDefault("host", "")
v.SetDefault("port", 8000)
v.SetDefault("admin_port", 6060)
v.SetDefault("enable_gzip", false)
v.SetDefault("status_response", "")
v.SetDefault("auction_timeouts_ms.default", 0)
v.SetDefault("auction_timeouts_ms.max", 0)
v.SetDefault("cache.scheme", "")
v.SetDefault("cache.host", "")
v.SetDefault("cache.query", "")
v.SetDefault("cache.expected_millis", 10)
v.SetDefault("cache.default_ttl_seconds.banner", 0)
v.SetDefault("cache.default_ttl_seconds.video", 0)
v.SetDefault("cache.default_ttl_seconds.native", 0)
v.SetDefault("cache.default_ttl_seconds.audio", 0)
v.SetDefault("external_cache.scheme", "")
v.SetDefault("external_cache.host", "")
v.SetDefault("external_cache.path", "")
v.SetDefault("recaptcha_secret", "")
v.SetDefault("host_cookie.domain", "")
v.SetDefault("host_cookie.family", "")
v.SetDefault("host_cookie.cookie_name", "")
v.SetDefault("host_cookie.opt_out_url", "")
v.SetDefault("host_cookie.opt_in_url", "")
v.SetDefault("host_cookie.optout_cookie.name", "")
v.SetDefault("host_cookie.value", "")
v.SetDefault("host_cookie.ttl_days", 90)
v.SetDefault("host_cookie.max_cookie_size_bytes", 0)
v.SetDefault("http_client.max_connections_per_host", 0) // unlimited
v.SetDefault("http_client.max_idle_connections", 400)
v.SetDefault("http_client.max_idle_connections_per_host", 10)
v.SetDefault("http_client.idle_connection_timeout_seconds", 60)
v.SetDefault("http_client_cache.max_connections_per_host", 0) // unlimited
v.SetDefault("http_client_cache.max_idle_connections", 10)
v.SetDefault("http_client_cache.max_idle_connections_per_host", 2)
v.SetDefault("http_client_cache.idle_connection_timeout_seconds", 60)
// no metrics configured by default (metrics{host|database|username|password})
v.SetDefault("metrics.disabled_metrics.account_adapter_details", false)
v.SetDefault("metrics.disabled_metrics.adapter_connections_metrics", true)
v.SetDefault("metrics.influxdb.host", "")
v.SetDefault("metrics.influxdb.database", "")
v.SetDefault("metrics.influxdb.username", "")
v.SetDefault("metrics.influxdb.password", "")
v.SetDefault("metrics.influxdb.metric_send_interval", 20)
v.SetDefault("metrics.prometheus.port", 0)
v.SetDefault("metrics.prometheus.namespace", "")
v.SetDefault("metrics.prometheus.subsystem", "")
v.SetDefault("metrics.prometheus.timeout_ms", 10000)
v.SetDefault("datacache.type", "dummy")
v.SetDefault("datacache.filename", "")
v.SetDefault("datacache.cache_size", 0)
v.SetDefault("datacache.ttl_seconds", 0)
v.SetDefault("category_mapping.filesystem.enabled", true)
v.SetDefault("category_mapping.filesystem.directorypath", "./static/category-mapping")
v.SetDefault("category_mapping.http.endpoint", "")
v.SetDefault("stored_requests.filesystem.enabled", false)
v.SetDefault("stored_requests.filesystem.directorypath", "./stored_requests/data/by_id")
v.SetDefault("stored_requests.directorypath", "./stored_requests/data/by_id")
v.SetDefault("stored_requests.postgres.connection.dbname", "")
v.SetDefault("stored_requests.postgres.connection.host", "")
v.SetDefault("stored_requests.postgres.connection.port", 0)
v.SetDefault("stored_requests.postgres.connection.user", "")
v.SetDefault("stored_requests.postgres.connection.password", "")
v.SetDefault("stored_requests.postgres.fetcher.query", "")
v.SetDefault("stored_requests.postgres.fetcher.amp_query", "")
v.SetDefault("stored_requests.postgres.initialize_caches.timeout_ms", 0)
v.SetDefault("stored_requests.postgres.initialize_caches.query", "")
v.SetDefault("stored_requests.postgres.initialize_caches.amp_query", "")
v.SetDefault("stored_requests.postgres.poll_for_updates.refresh_rate_seconds", 0)
v.SetDefault("stored_requests.postgres.poll_for_updates.timeout_ms", 0)
v.SetDefault("stored_requests.postgres.poll_for_updates.query", "")
v.SetDefault("stored_requests.postgres.poll_for_updates.amp_query", "")
v.SetDefault("stored_requests.http.endpoint", "")
v.SetDefault("stored_requests.http.amp_endpoint", "")
v.SetDefault("stored_requests.in_memory_cache.type", "none")
v.SetDefault("stored_requests.in_memory_cache.ttl_seconds", 0)
v.SetDefault("stored_requests.in_memory_cache.request_cache_size_bytes", 0)
v.SetDefault("stored_requests.in_memory_cache.imp_cache_size_bytes", 0)
v.SetDefault("stored_requests.cache_events_api", false)
v.SetDefault("stored_requests.http_events.endpoint", "")
v.SetDefault("stored_requests.http_events.amp_endpoint", "")
v.SetDefault("stored_requests.http_events.refresh_rate_seconds", 0)
v.SetDefault("stored_requests.http_events.timeout_ms", 0)
// stored_video is short for stored_video_requests.
// PBS is not in the business of storing video content beyond the normal prebid cache system.
v.SetDefault("stored_video_req.filesystem.enabled", false)
v.SetDefault("stored_video_req.filesystem.directorypath", "")
v.SetDefault("stored_video_req.postgres.connection.dbname", "")
v.SetDefault("stored_video_req.postgres.connection.host", "")
v.SetDefault("stored_video_req.postgres.connection.port", 0)
v.SetDefault("stored_video_req.postgres.connection.user", "")
v.SetDefault("stored_video_req.postgres.connection.password", "")
v.SetDefault("stored_video_req.postgres.fetcher.query", "")
v.SetDefault("stored_video_req.postgres.initialize_caches.timeout_ms", 0)
v.SetDefault("stored_video_req.postgres.initialize_caches.query", "")
v.SetDefault("stored_video_req.postgres.poll_for_updates.refresh_rate_seconds", 0)
v.SetDefault("stored_video_req.postgres.poll_for_updates.timeout_ms", 0)
v.SetDefault("stored_video_req.postgres.poll_for_updates.query", "")
v.SetDefault("stored_video_req.http.endpoint", "")
v.SetDefault("stored_video_req.in_memory_cache.type", "none")
v.SetDefault("stored_video_req.in_memory_cache.ttl_seconds", 0)
v.SetDefault("stored_video_req.in_memory_cache.request_cache_size_bytes", 0)
v.SetDefault("stored_video_req.in_memory_cache.imp_cache_size_bytes", 0)
v.SetDefault("stored_video_req.cache_events.enabled", false)
v.SetDefault("stored_video_req.cache_events.endpoint", "")
v.SetDefault("stored_video_req.http_events.endpoint", "")
v.SetDefault("stored_video_req.http_events.refresh_rate_seconds", 0)
v.SetDefault("stored_video_req.http_events.timeout_ms", 0)
v.SetDefault("vtrack.timeout_ms", 2000)
v.SetDefault("vtrack.allow_unknown_bidder", true)
v.SetDefault("vtrack.enabled", true)
v.SetDefault("event.timeout_ms", 1000)
v.SetDefault("accounts.filesystem.enabled", false)
v.SetDefault("accounts.filesystem.directorypath", "./stored_requests/data/by_id")
v.SetDefault("accounts.in_memory_cache.type", "none")
for _, bidder := range openrtb_ext.CoreBidderNames() {
setBidderDefaults(v, strings.ToLower(string(bidder)))
}
// Disabling adapters by default that require some specific config params.
// If you're using one of these, make sure you check out the documentation (https://github.com/prebid/prebid-server/tree/master/docs/bidders)
// for them and specify all the parameters they need for them to work correctly.
v.SetDefault("adapters.33across.endpoint", "http://ssc.33across.com/api/v1/hb")
v.SetDefault("adapters.33across.partner_id", "")
v.SetDefault("adapters.acuityads.endpoint", "http://{{.Host}}.admanmedia.com/bid?token={{.AccountID}}")
v.SetDefault("adapters.adform.endpoint", "http://adx.adform.net/adx")
v.SetDefault("adapters.adgeneration.endpoint", "https://d.socdm.com/adsv/v1")
v.SetDefault("adapters.adhese.endpoint", "https://ads-{{.AccountID}}.adhese.com/json")
v.SetDefault("adapters.adkernel.endpoint", "http://{{.Host}}/hb?zone={{.ZoneID}}")
v.SetDefault("adapters.adkerneladn.endpoint", "http://{{.Host}}/rtbpub?account={{.PublisherID}}")
v.SetDefault("adapters.adman.endpoint", "http://pub.admanmedia.com/?c=o&m=ortb")
v.SetDefault("adapters.admixer.endpoint", "http://inv-nets.admixer.net/pbs.aspx")
v.SetDefault("adapters.adocean.endpoint", "https://{{.Host}}")
v.SetDefault("adapters.adoppler.endpoint", "http://{{.AccountID}}.trustedmarketplace.io/ads/processHeaderBid/{{.AdUnit}}")
v.SetDefault("adapters.adpone.endpoint", "http://rtb.adpone.com/bid-request?src=prebid_server")
v.SetDefault("adapters.adprime.endpoint", "http://delta.adprime.com/?c=o&m=ortb")
v.SetDefault("adapters.adtarget.endpoint", "http://ghb.console.adtarget.com.tr/pbs/ortb")
v.SetDefault("adapters.adtelligent.endpoint", "http://ghb.adtelligent.com/pbs/ortb")
v.SetDefault("adapters.advangelists.endpoint", "http://nep.advangelists.com/xp/get?pubid={{.PublisherID}}")
v.SetDefault("adapters.aja.endpoint", "https://ad.as.amanad.adtdp.com/v1/bid/4")
v.SetDefault("adapters.amx.endpoint", "http://pbs.amxrtb.com/auction/openrtb")
v.SetDefault("adapters.applogy.endpoint", "http://rtb.applogy.com/v1/prebid")
v.SetDefault("adapters.appnexus.endpoint", "http://ib.adnxs.com/openrtb2") // Docs: https://wiki.appnexus.com/display/supply/Incoming+Bid+Request+from+SSPs
v.SetDefault("adapters.appnexus.platform_id", "5")
v.SetDefault("adapters.audiencenetwork.disabled", true)
v.SetDefault("adapters.audiencenetwork.endpoint", "https://an.facebook.com/placementbid.ortb")
v.SetDefault("adapters.avocet.disabled", true)
v.SetDefault("adapters.beachfront.endpoint", "https://display.bfmio.com/prebid_display")
v.SetDefault("adapters.beachfront.extra_info", "{\"video_endpoint\":\"https://reachms.bfmio.com/bid.json?exchange_id\"}")
v.SetDefault("adapters.beintoo.endpoint", "https://ib.beintoo.com/um")
v.SetDefault("adapters.between.endpoint", "http://{{.Host}}.betweendigital.com/openrtb_bid?sspId={{.PublisherID}}")
v.SetDefault("adapters.brightroll.endpoint", "http://east-bid.ybp.yahoo.com/bid/appnexuspbs")
v.SetDefault("adapters.colossus.endpoint", "http://colossusssp.com/?c=o&m=rtb")
v.SetDefault("adapters.connectad.endpoint", "http://bidder.connectad.io/API?src=pbs")
v.SetDefault("adapters.consumable.endpoint", "https://e.serverbid.com/api/v2")
v.SetDefault("adapters.conversant.endpoint", "http://api.hb.ad.cpe.dotomi.com/cvx/server/hb/ortb/25")
v.SetDefault("adapters.cpmstar.endpoint", "https://server.cpmstar.com/openrtbbidrq.aspx")
v.SetDefault("adapters.datablocks.endpoint", "http://{{.Host}}/openrtb2?sid={{.SourceId}}")
v.SetDefault("adapters.deepintent.endpoint", "https://prebid.deepintent.com/prebid")
v.SetDefault("adapters.dmx.endpoint", "https://dmx-direct.districtm.io/b/v2")
v.SetDefault("adapters.emx_digital.endpoint", "https://hb.emxdgt.com")
v.SetDefault("adapters.engagebdr.endpoint", "http://dsp.bnmla.com/hb")
v.SetDefault("adapters.eplanning.endpoint", "http://rtb.e-planning.net/pbs/1")
v.SetDefault("adapters.gamma.endpoint", "https://hb.gammaplatform.com/adx/request/")
v.SetDefault("adapters.gamoshi.endpoint", "https://rtb.gamoshi.io")
v.SetDefault("adapters.grid.endpoint", "https://grid.bidswitch.net/sp_bid?sp=prebid")
v.SetDefault("adapters.gumgum.endpoint", "https://g2.gumgum.com/providers/prbds2s/bid")
v.SetDefault("adapters.improvedigital.endpoint", "http://ad.360yield.com/pbs")
v.SetDefault("adapters.inmobi.endpoint", "https://api.w.inmobi.com/showad/openrtb/bidder/prebid")
v.SetDefault("adapters.ix.endpoint", "http://exchange.indexww.com/pbs?p=192919")
v.SetDefault("adapters.krushmedia.endpoint", "http://ads4.krushmedia.com/?c=rtb&m=req&key={{.AccountID}}")
v.SetDefault("adapters.invibes.endpoint", "https://{{.Host}}/bid/ServerBidAdContent")
v.SetDefault("adapters.kidoz.endpoint", "http://prebid-adapter.kidoz.net/openrtb2/auction?src=prebid-server")
v.SetDefault("adapters.kubient.endpoint", "https://kssp.kbntx.ch/prebid")
v.SetDefault("adapters.lifestreet.endpoint", "https://prebid.s2s.lfstmedia.com/adrequest")
v.SetDefault("adapters.lockerdome.endpoint", "https://lockerdome.com/ladbid/prebidserver/openrtb2")
v.SetDefault("adapters.logicad.endpoint", "https://pbs.ladsp.com/adrequest/prebidserver")
v.SetDefault("adapters.lunamedia.endpoint", "http://api.lunamedia.io/xp/get?pubid={{.PublisherID}}")
v.SetDefault("adapters.marsmedia.endpoint", "https://bid306.rtbsrv.com/bidder/?bid=f3xtet")
v.SetDefault("adapters.mgid.endpoint", "https://prebid.mgid.com/prebid/")
v.SetDefault("adapters.mobilefuse.endpoint", "http://mfx.mobilefuse.com/openrtb?pub_id={{.PublisherID}}")
v.SetDefault("adapters.mobfoxpb.endpoint", "http://bes.mobfox.com/?c=o&m=ortb")
v.SetDefault("adapters.nanointeractive.endpoint", "https://ad.audiencemanager.de/hbs")
v.SetDefault("adapters.ninthdecimal.endpoint", "http://rtb.ninthdecimal.com/xp/get?pubid={{.PublisherID}}")
v.SetDefault("adapters.nobid.endpoint", "https://ads.servenobid.com/ortb_adreq?tek=pbs&ver=1")
v.SetDefault("adapters.openx.endpoint", "http://rtb.openx.net/prebid")
v.SetDefault("adapters.orbidder.endpoint", "https://orbidder.otto.de/openrtb2")
v.SetDefault("adapters.pubmatic.endpoint", "https://hbopenbid.pubmatic.com/translator?source=prebid-server")
v.SetDefault("adapters.pubnative.endpoint", "http://dsp.pubnative.net/bid/v1/request")
v.SetDefault("adapters.pulsepoint.endpoint", "http://bid.contextweb.com/header/s/ortb/prebid-s2s")
v.SetDefault("adapters.rhythmone.endpoint", "http://tag.1rx.io/rmp")
v.SetDefault("adapters.rtbhouse.endpoint", "http://prebidserver-s2s-ams.creativecdn.com/bidder/prebidserver/bids")
v.SetDefault("adapters.rubicon.disabled", true)
v.SetDefault("adapters.rubicon.endpoint", "http://exapi-us-east.rubiconproject.com/a/api/exchange.json")
v.SetDefault("adapters.sharethrough.endpoint", "http://btlr.sharethrough.com/FGMrCMMc/v1")
v.SetDefault("adapters.silvermob.endpoint", "http://{{.Host}}.silvermob.com/marketplace/api/dsp/bid/{{.ZoneID}}")
v.SetDefault("adapters.smaato.endpoint", "https://prebid.ad.smaato.net/oapi/prebid")
v.SetDefault("adapters.smartadserver.endpoint", "https://ssb-global.smartadserver.com")
v.SetDefault("adapters.smartrtb.endpoint", "http://market-east.smrtb.com/json/publisher/rtb?pubid={{.PublisherID}}")
v.SetDefault("adapters.smartyads.endpoint", "http://{{.Host}}.smartyads.com/bid?rtb_seat_id={{.SourceId}}&secret_key={{.AccountID}}")
v.SetDefault("adapters.somoaudience.endpoint", "http://publisher-east.mobileadtrading.com/rtb/bid")
v.SetDefault("adapters.sonobi.endpoint", "https://apex.go.sonobi.com/prebid?partnerid=71d9d3d8af")
v.SetDefault("adapters.sovrn.endpoint", "http://ap.lijit.com/rtb/bid?src=prebid_server")
v.SetDefault("adapters.synacormedia.endpoint", "http://{{.Host}}.technoratimedia.com/openrtb/bids/{{.Host}}")
v.SetDefault("adapters.tappx.endpoint", "https://{{.Host}}")
v.SetDefault("adapters.telaria.endpoint", "https://ads.tremorhub.com/ad/rtb/prebid")
v.SetDefault("adapters.triplelift_native.disabled", true)
v.SetDefault("adapters.triplelift_native.extra_info", "{\"publisher_whitelist\":[]}")
v.SetDefault("adapters.triplelift.endpoint", "https://tlx.3lift.com/s2s/auction?sra=1&supplier_id=20")
v.SetDefault("adapters.ucfunnel.endpoint", "https://pbs.aralego.com/prebid")
v.SetDefault("adapters.unruly.endpoint", "http://targeting.unrulymedia.com/openrtb/2.2")
v.SetDefault("adapters.valueimpression.endpoint", "https://rtb.valueimpression.com/endpoint")
v.SetDefault("adapters.verizonmedia.disabled", true)
v.SetDefault("adapters.visx.endpoint", "https://t.visx.net/s2s_bid?wrapperType=s2s_prebid_standard")
v.SetDefault("adapters.vrtcal.endpoint", "http://rtb.vrtcal.com/bidder_prebid.vap?ssp=1804")
v.SetDefault("adapters.yeahmobi.endpoint", "https://{{.Host}}/prebid/bid")
v.SetDefault("adapters.yieldlab.endpoint", "https://ad.yieldlab.net/yp/")
v.SetDefault("adapters.yieldmo.endpoint", "https://ads.yieldmo.com/exchange/prebid-server")
v.SetDefault("adapters.yieldone.endpoint", "https://y.one.impact-ad.jp/hbs_imp")
v.SetDefault("adapters.zeroclickfraud.endpoint", "http://{{.Host}}/openrtb2?sid={{.SourceId}}")
v.SetDefault("max_request_size", 1024*256)
v.SetDefault("analytics.file.filename", "")
v.SetDefault("analytics.pubstack.endpoint", "https://s2s.pbstck.com/v1")
v.SetDefault("analytics.pubstack.scopeid", "change-me")
v.SetDefault("analytics.pubstack.enabled", false)
v.SetDefault("analytics.pubstack.configuration_refresh_delay", "2h")
v.SetDefault("analytics.pubstack.buffers.size", "2MB")
v.SetDefault("analytics.pubstack.buffers.count", 100)
v.SetDefault("analytics.pubstack.buffers.timeout", "900s")
v.SetDefault("amp_timeout_adjustment_ms", 0)
v.SetDefault("gdpr.enabled", true)
v.SetDefault("gdpr.host_vendor_id", 0)
v.SetDefault("gdpr.usersync_if_ambiguous", false)
v.SetDefault("gdpr.timeouts_ms.init_vendorlist_fetches", 0)
v.SetDefault("gdpr.timeouts_ms.active_vendorlist_fetch", 0)
v.SetDefault("gdpr.non_standard_publishers", []string{""})
v.SetDefault("gdpr.tcf1.fetch_gvl", true)
v.SetDefault("gdpr.tcf1.fallback_gvl_path", "./static/tcf1/fallback_gvl.json")
v.SetDefault("gdpr.tcf2.enabled", true)
v.SetDefault("gdpr.tcf2.purpose1.enabled", true)
v.SetDefault("gdpr.tcf2.purpose2.enabled", true)
v.SetDefault("gdpr.tcf2.purpose4.enabled", true)
v.SetDefault("gdpr.tcf2.purpose7.enabled", true)
v.SetDefault("gdpr.tcf2.special_purpose1.enabled", true)
v.SetDefault("gdpr.tcf2.purpose_one_treatement.enabled", true)
v.SetDefault("gdpr.tcf2.purpose_one_treatement.access_allowed", true)
v.SetDefault("gdpr.amp_exception", false)
v.SetDefault("gdpr.eea_countries", []string{"ALA", "AUT", "BEL", "BGR", "HRV", "CYP", "CZE", "DNK", "EST",
"FIN", "FRA", "GUF", "DEU", "GIB", "GRC", "GLP", "GGY", "HUN", "ISL", "IRL", "IMN", "ITA", "JEY", "LVA",
"LIE", "LTU", "LUX", "MLT", "MTQ", "MYT", "NLD", "NOR", "POL", "PRT", "REU", "ROU", "BLM", "MAF", "SPM",
"SVK", "SVN", "ESP", "SWE", "GBR"})
v.SetDefault("ccpa.enforce", false)
v.SetDefault("lmt.enforce", true)
v.SetDefault("currency_converter.fetch_url", "https://cdn.jsdelivr.net/gh/prebid/currency-file@1/latest.json")
v.SetDefault("currency_converter.fetch_interval_seconds", 1800) // fetch currency rates every 30 minutes
v.SetDefault("currency_converter.stale_rates_seconds", 0)
v.SetDefault("default_request.type", "")
v.SetDefault("default_request.file.name", "")
v.SetDefault("default_request.alias_info", false)
v.SetDefault("blacklisted_apps", []string{""})
v.SetDefault("blacklisted_accts", []string{""})
v.SetDefault("account_required", false)
v.SetDefault("account_defaults.disabled", false)
v.SetDefault("certificates_file", "")
v.SetDefault("auto_gen_source_tid", true)
v.SetDefault("request_timeout_headers.request_time_in_queue", "")
v.SetDefault("request_timeout_headers.request_timeout_in_queue", "")
v.SetDefault("debug.timeout_notification.log", false)
v.SetDefault("debug.timeout_notification.sampling_rate", 0.0)
v.SetDefault("debug.timeout_notification.fail_only", false)
/* IPv4
 * Site Local: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16
 * Link Local: 169.254.0.0/16
 * Loopback: 127.0.0.0/8
 *
 * IPv6
 * Loopback: ::1/128
 * Documentation: 2001:db8::/32
 * Unique Local: fc00::/7
 * Link Local: fe80::/10
 * Multicast: ff00::/8
 */
v.SetDefault("request_validation.ipv4_private_networks", []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "169.254.0.0/16", "127.0.0.0/8"})
v.SetDefault("request_validation.ipv6_private_networks", []string{"::1/128", "fc00::/7", "fe80::/10", "ff00::/8", "2001:db8::/32"})
// Set environment variable support:
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
v.SetTypeByDefaultValue(true)
v.SetEnvPrefix("PBS")
v.AutomaticEnv()
v.ReadInConfig()
// Migrate config settings to maintain compatibility with old configs
migrateConfig(v)
}
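// Illustrative note (not in the original source): with the "PBS" prefix, the
// "." -> "_" key replacer, and AutomaticEnv set above, every default can be
// overridden from the environment. For example:
//
//	external_url               <- PBS_EXTERNAL_URL
//	adapters.appnexus.endpoint <- PBS_ADAPTERS_APPNEXUS_ENDPOINT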
func migrateConfig(v *viper.Viper) {
// if stored_requests.filesystem is not a map in conf file as expected from defaults,
// means we have old-style settings; migrate them to new filesystem map to avoid breaking viper
if _, ok := v.Get("stored_requests.filesystem").(map[string]interface{}); !ok {
glog.Warning("stored_requests.filesystem should be changed to stored_requests.filesystem.enabled")
glog.Warning("stored_requests.directorypath should be changed to stored_requests.filesystem.directorypath")
m := v.GetStringMap("stored_requests.filesystem")
m["enabled"] = v.GetBool("stored_requests.filesystem")
m["directorypath"] = v.GetString("stored_requests.directorypath")
v.Set("stored_requests.filesystem", m)
}
}
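// Illustrative sketch (hypothetical YAML, not in the original source): the
// migration above rewrites old-style scalar settings into the nested map
// viper now expects.
//
//	# old style                      # migrated equivalent
//	stored_requests:                 stored_requests:
//	  filesystem: true                 filesystem:
//	  directorypath: ./data/by_id        enabled: true
//	                                     directorypath: ./data/by_id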
func setBidderDefaults(v *viper.Viper, bidder string) {
adapterCfgPrefix := "adapters."
v.SetDefault(adapterCfgPrefix+bidder+".endpoint", "")
v.SetDefault(adapterCfgPrefix+bidder+".usersync_url", "")
v.SetDefault(adapterCfgPrefix+bidder+".platform_id", "")
v.SetDefault(adapterCfgPrefix+bidder+".app_secret", "")
v.SetDefault(adapterCfgPrefix+bidder+".xapi.username", "")
v.SetDefault(adapterCfgPrefix+bidder+".xapi.password", "")
v.SetDefault(adapterCfgPrefix+bidder+".xapi.tracker", "")
v.SetDefault(adapterCfgPrefix+bidder+".disabled", false)
v.SetDefault(adapterCfgPrefix+bidder+".partner_id", "")
v.SetDefault(adapterCfgPrefix+bidder+".extra_info", "")
}
func isValidCookieSize(maxCookieSize int) error {
// If a non-zero "host_cookie.max_cookie_size_bytes" value below the allowed
// minimum was specified in the prebid-server configuration, reject it with an error
if maxCookieSize != 0 && maxCookieSize < MIN_COOKIE_SIZE_BYTES {
return fmt.Errorf("configured cookie size is less than allowed minimum size of %d", MIN_COOKIE_SIZE_BYTES)
}
return nil
}
| |
secrets.rs
|
// Copyright 2021 Databricks, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use ansi_term::Colour::Yellow;
use clap::{App, Arg};
use k8s_openapi::api::core::v1 as api;
use crate::{
command::command_def::{exec_match, show_arg, sort_arg, start_clap, Cmd},
command::{run_list_command, Extractor},
completer,
env::Env,
kobj::{KObj, ObjType},
output::ClickWriter,
table::CellSpec,
};
use std::array::IntoIter;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io::Write;
lazy_static! {
static ref SECRET_EXTRACTORS: HashMap<String, Extractor<api::Secret>> = {
let mut m: HashMap<String, Extractor<api::Secret>> = HashMap::new();
m.insert("Data".to_owned(), secret_data);
m.insert("Type".to_owned(), secret_type);
m
};
}
const COL_MAP: &[(&str, &str)] = &[
("name", "Name"),
("type", "Type"),
("data", "Data"),
("age", "Age"),
];
const COL_FLAGS: &[&str] = &{ extract_first!(COL_MAP) };
const EXTRA_COL_MAP: &[(&str, &str)] = &[("namespace", "Namespace"), ("labels", "Labels")];
const EXTRA_COL_FLAGS: &[&str] = &{ extract_first!(EXTRA_COL_MAP) };
fn secret_to_kobj(secret: &api::Secret) -> KObj {
let meta = &secret.metadata;
KObj {
name: meta.name.clone().unwrap_or_else(|| "<Unknown>".into()),
namespace: meta.namespace.clone(),
typ: ObjType::Secret,
}
}
fn secret_type(secret: &api::Secret) -> Option<CellSpec<'_>> {
secret.type_.as_deref().map(|t| t.into())
}
fn secret_data(secret: &api::Secret) -> Option<CellSpec<'_>> {
Some(secret.data.len().into())
}
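// Illustrative sketch (hypothetical column, not in the original source): an
// extractor is just a fn(&api::Secret) -> Option<CellSpec>, so adding a column
// means writing one more function and registering it in SECRET_EXTRACTORS:
//
// fn secret_kind(_secret: &api::Secret) -> Option<CellSpec<'_>> {
//     Some("Secret".into())
// }
// // in the lazy_static block: m.insert("Kind".to_owned(), secret_kind);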
list_command!(
Secrets,
"secrets",
"Get secrets (in current namespace if set)",
super::COL_FLAGS,
super::EXTRA_COL_FLAGS,
|clap: App<'static, 'static>| clap
.arg(
Arg::with_name("show_label")
.short("L")
.long("labels")
.help("Show secrets labels (deprecated, use --show labels)")
.takes_value(false)
)
.arg(
Arg::with_name("regex")
.short("r")
.long("regex")
.help("Filter secrets by the specified regex")
.takes_value(true)
)
.arg(show_arg(EXTRA_COL_FLAGS, true))
.arg(sort_arg(COL_FLAGS, Some(EXTRA_COL_FLAGS)))
.arg(
Arg::with_name("reverse")
|
),
vec!["secrets"],
noop_complete!(),
IntoIter::new([]),
|matches, env, writer| {
let (request, _response_body) = match &env.namespace {
Some(ns) => api::Secret::list_namespaced_secret(ns, Default::default())?,
None => api::Secret::list_secret_for_all_namespaces(Default::default())?,
};
let cols: Vec<&str> = COL_MAP.iter().map(|(_, col)| *col).collect();
run_list_command(
matches,
env,
writer,
cols,
request,
COL_MAP,
Some(EXTRA_COL_MAP),
Some(&SECRET_EXTRACTORS),
secret_to_kobj,
)
}
);
|
.short("R")
.long("reverse")
.help("Reverse the order of the returned list")
.takes_value(false),
|
healthcheck.go
|
package healthcheck
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"time"
"github.com/linkerd/linkerd2/controller/api/public"
spclient "github.com/linkerd/linkerd2/controller/gen/client/clientset/versioned"
healthcheckPb "github.com/linkerd/linkerd2/controller/gen/common/healthcheck"
pb "github.com/linkerd/linkerd2/controller/gen/public"
"github.com/linkerd/linkerd2/pkg/k8s"
"github.com/linkerd/linkerd2/pkg/profiles"
"github.com/linkerd/linkerd2/pkg/tls"
"github.com/linkerd/linkerd2/pkg/version"
log "github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
k8sVersion "k8s.io/apimachinery/pkg/version"
)
// CategoryID is an identifier for the types of health checks.
type CategoryID string
const (
// KubernetesAPIChecks adds a series of checks to validate that the caller is
// configured to interact with a working Kubernetes cluster.
KubernetesAPIChecks CategoryID = "kubernetes-api"
// KubernetesVersionChecks validate that the cluster meets the minimum version
// requirements.
KubernetesVersionChecks CategoryID = "kubernetes-version"
// LinkerdPreInstall* checks enabled by `linkerd check --pre`
// LinkerdPreInstallChecks adds checks to validate that the control plane
// namespace does not already exist, and that the user can create cluster-wide
// resources, including ClusterRole, ClusterRoleBinding, and
// CustomResourceDefinition, as well as namespace-wide resources, including
// Service, Deployment, and ConfigMap. This check only runs as part of the set
// of pre-install checks.
// This check is dependent on the output of KubernetesAPIChecks, so those
// checks must be added first.
LinkerdPreInstallChecks CategoryID = "pre-kubernetes-setup"
// LinkerdPreInstallCapabilityChecks adds a check to validate the user has the
// capabilities necessary to deploy Linkerd. For example, the NET_ADMIN
// capability is required by the `linkerd-init` container to modify IP tables.
// These checks are not run when the `--linkerd-cni-enabled` flag is set.
LinkerdPreInstallCapabilityChecks CategoryID = "pre-kubernetes-capability"
// LinkerdConfigChecks enabled by `linkerd check config`
// LinkerdConfigChecks adds a series of checks to validate that the Linkerd
// namespace, RBAC, ServiceAccounts, and CRDs were successfully created.
// These checks specifically validate that the `linkerd install config`
// command succeeded in a multi-stage install, but also applies to a default
// `linkerd install`.
// These checks are dependent on the output of KubernetesAPIChecks, so those
// checks must be added first.
LinkerdConfigChecks CategoryID = "linkerd-config"
// LinkerdControlPlaneExistenceChecks adds a series of checks to validate that
// the control plane namespace and controller pod exist.
// These checks are dependent on the output of KubernetesAPIChecks, so those
// checks must be added first.
LinkerdControlPlaneExistenceChecks CategoryID = "linkerd-existence"
// LinkerdAPIChecks adds a series of checks to validate that the control plane
// is successfully serving the public API.
// These checks are dependent on the output of KubernetesAPIChecks, so those
// checks must be added first.
LinkerdAPIChecks CategoryID = "linkerd-api"
// LinkerdVersionChecks adds a series of checks to query for the latest
// version, and validate that the CLI is up to date.
LinkerdVersionChecks CategoryID = "linkerd-version"
// LinkerdControlPlaneVersionChecks adds a series of checks to validate that
// the control plane is running the latest available version.
// These checks are dependent on the following:
// 1) `apiClient` from LinkerdControlPlaneExistenceChecks
// 2) `latestVersions` from LinkerdVersionChecks
// 3) `serverVersion` from `LinkerdControlPlaneExistenceChecks`
LinkerdControlPlaneVersionChecks CategoryID = "control-plane-version"
// LinkerdDataPlaneChecks adds data plane checks to validate that the data
// plane namespace exists, and that the proxy containers are in a ready
// state and running the latest available version.
// These checks are dependent on the output of KubernetesAPIChecks,
// `apiClient` from LinkerdControlPlaneExistenceChecks, and `latestVersions`
// from LinkerdVersionChecks, so those checks must be added first.
LinkerdDataPlaneChecks CategoryID = "linkerd-data-plane"
)
// HintBaseURL is the base URL on the linkerd.io website that all check hints
// point to. Each check adds its own `hintAnchor` to specify a location on the
// page.
const HintBaseURL = "https://linkerd.io/checks/#"
// AllowedClockSkew sets the allowed clock skew between the system running the
// inject command and the node(s), based on the assumed node heartbeat
// interval (<= 60 seconds) plus the default TLS clock skew allowance.
//
// TODO: Make this default value overridable, e.g. by CLI flag
const AllowedClockSkew = time.Minute + tls.DefaultClockSkewAllowance
var (
retryWindow = 5 * time.Second
requestTimeout = 30 * time.Second
)
type checker struct {
// description is the short description that's printed to the command line
// when the check is executed
description string
// hintAnchor, when appended to `HintBaseURL`, provides a URL to more
// information about the check
hintAnchor string
// fatal indicates that all remaining checks should be aborted if this check
// fails; it should only be used if subsequent checks cannot possibly succeed
// (default false)
fatal bool
// warning indicates that if this check fails, it should be reported, but it
// should not impact the overall outcome of the health check (default false)
warning bool
// retryDeadline establishes a deadline before which this check should be
// retried; if the deadline has passed, the check fails (default: no retries)
retryDeadline time.Time
// check is the function that's called to execute the check; if the function
// returns an error, the check fails
check func(context.Context) error
// checkRPC is an alternative to check that can be used to perform a remote
// check using the SelfCheck gRPC endpoint; check status is based on the value
// of the gRPC response
checkRPC func(context.Context) (*healthcheckPb.SelfCheckResponse, error)
}
// CheckResult encapsulates a check's identifying information and output
// Note there exists an analogous user-facing type, `cmd.check`, for output via
// `linkerd check -o json`.
type CheckResult struct {
Category CategoryID
Description string
HintAnchor string
Retry bool
Warning bool
Err error
}
type checkObserver func(*CheckResult)
type category struct {
id CategoryID
checkers []checker
enabled bool
}
// Options specifies configuration for a HealthChecker.
type Options struct {
ControlPlaneNamespace string
DataPlaneNamespace string
KubeConfig string
KubeContext string
APIAddr string
VersionOverride string
RetryDeadline time.Time
}
// HealthChecker encapsulates all health check checkers, and clients required to
// perform those checks.
type HealthChecker struct {
categories []category
*Options
// these fields are set in the process of running checks
kubeAPI *k8s.KubernetesAPI
kubeVersion *k8sVersion.Info
controlPlanePods []corev1.Pod
apiClient public.APIClient
latestVersions version.Channels
serverVersion string
}
// NewHealthChecker returns an initialized HealthChecker
func NewHealthChecker(categoryIDs []CategoryID, options *Options) *HealthChecker {
hc := &HealthChecker{
Options: options,
}
hc.categories = hc.allCategories()
checkMap := map[CategoryID]struct{}{}
for _, category := range categoryIDs {
checkMap[category] = struct{}{}
}
for i := range hc.categories {
if _, ok := checkMap[hc.categories[i].id]; ok {
hc.categories[i].enabled = true
}
}
return hc
}
// allCategories is the global, ordered list of all checkers, grouped by
// category. This method is attached to the HealthChecker struct because the
// checkers directly reference other members of the struct, such as kubeAPI,
// controlPlanePods, etc.
//
// Ordering is important because checks rely on specific `HealthChecker` members
// getting populated by earlier checks, such as kubeAPI, controlPlanePods, etc.
//
// Note that all checks should include a `hintAnchor` with a corresponding section
// in the linkerd check faq:
// https://linkerd.io/checks/#
func (hc *HealthChecker) allCategories() []category {
return []category{
{
id: KubernetesAPIChecks,
checkers: []checker{
{
description: "can initialize the client",
hintAnchor: "k8s-api",
fatal: true,
check: func(context.Context) (err error) {
hc.kubeAPI, err = k8s.NewAPI(hc.KubeConfig, hc.KubeContext, requestTimeout)
return
},
},
{
description: "can query the Kubernetes API",
hintAnchor: "k8s-api",
fatal: true,
check: func(ctx context.Context) (err error) {
hc.kubeVersion, err = hc.kubeAPI.GetVersionInfo()
return
},
},
},
},
{
id: KubernetesVersionChecks,
checkers: []checker{
{
description: "is running the minimum Kubernetes API version",
hintAnchor: "k8s-version",
check: func(context.Context) error {
return hc.kubeAPI.CheckVersion(hc.kubeVersion)
},
},
{
description: "is running the minimum kubectl version",
hintAnchor: "kubectl-version",
check: func(context.Context) error {
return k8s.CheckKubectlVersion()
},
},
},
},
{
id: LinkerdPreInstallChecks,
checkers: []checker{
{
description: "control plane namespace does not already exist",
hintAnchor: "pre-ns",
check: func(context.Context) error {
return hc.CheckNamespace(hc.ControlPlaneNamespace, false)
},
},
{
description: "can create Namespaces",
hintAnchor: "pre-k8s-cluster-k8s",
check: func(context.Context) error {
return hc.checkCanCreate("", "", "v1", "namespaces")
},
},
{
description: "can create ClusterRoles",
hintAnchor: "pre-k8s-cluster-k8s",
check: func(context.Context) error {
return hc.checkCanCreate("", "rbac.authorization.k8s.io", "v1beta1", "clusterroles")
},
},
{
description: "can create ClusterRoleBindings",
hintAnchor: "pre-k8s-cluster-k8s",
check: func(context.Context) error {
return hc.checkCanCreate("", "rbac.authorization.k8s.io", "v1beta1", "clusterrolebindings")
},
},
{
description: "can create CustomResourceDefinitions",
hintAnchor: "pre-k8s-cluster-k8s",
check: func(context.Context) error {
return hc.checkCanCreate("", "apiextensions.k8s.io", "v1beta1", "customresourcedefinitions")
},
},
{
description: "can create PodSecurityPolicies",
hintAnchor: "pre-k8s",
check: func(context.Context) error {
return hc.checkCanCreate(hc.ControlPlaneNamespace, "policy", "v1beta1", "podsecuritypolicies")
},
},
{
description: "can create ServiceAccounts",
hintAnchor: "pre-k8s",
check: func(context.Context) error {
return hc.checkCanCreate(hc.ControlPlaneNamespace, "", "v1", "serviceaccounts")
},
},
{
description: "can create Services",
hintAnchor: "pre-k8s",
check: func(context.Context) error {
return hc.checkCanCreate(hc.ControlPlaneNamespace, "", "v1", "services")
},
},
{
description: "can create Deployments",
hintAnchor: "pre-k8s",
check: func(context.Context) error {
return hc.checkCanCreate(hc.ControlPlaneNamespace, "extensions", "v1beta1", "deployments")
},
},
{
description: "can create ConfigMaps",
hintAnchor: "pre-k8s",
check: func(context.Context) error {
return hc.checkCanCreate(hc.ControlPlaneNamespace, "", "v1", "configmaps")
},
},
{
description: "no clock skew detected",
hintAnchor: "pre-k8s-clock-skew",
check: func(context.Context) error {
return hc.checkClockSkew()
},
},
},
},
{
id: LinkerdPreInstallCapabilityChecks,
checkers: []checker{
{
description: "has NET_ADMIN capability",
hintAnchor: "pre-k8s-cluster-net-admin",
warning: true,
check: func(context.Context) error {
return hc.checkNetAdmin()
},
},
},
},
{
id: LinkerdConfigChecks,
checkers: []checker{
{
description: "control plane Namespace exists",
hintAnchor: "l5d-existence-ns",
fatal: true,
check: func(context.Context) error {
return hc.CheckNamespace(hc.ControlPlaneNamespace, true)
},
},
{
description: "control plane ClusterRoles exist",
hintAnchor: "l5d-existence-cr",
fatal: true,
check: func(context.Context) error {
return hc.checkClusterRoles()
},
},
{
description: "control plane ClusterRoleBindings exist",
hintAnchor: "l5d-existence-crb",
fatal: true,
check: func(context.Context) error {
return hc.checkClusterRoleBindings()
},
},
{
description: "control plane ServiceAccounts exist",
hintAnchor: "l5d-existence-sa",
fatal: true,
check: func(context.Context) error {
return hc.checkServiceAccounts()
|
},
},
{
description: "control plane CustomResourceDefinitions exist",
hintAnchor: "l5d-existence-crd",
fatal: true,
check: func(context.Context) error {
return hc.checkCustomResourceDefinitions()
},
},
},
},
{
id: LinkerdControlPlaneExistenceChecks,
checkers: []checker{
{
description: "control plane components ready",
hintAnchor: "l5d-existence-psp", // needs https://github.com/linkerd/website/issues/272
fatal: true,
check: func(context.Context) error {
controlPlaneReplicaSet, err := hc.kubeAPI.GetReplicaSets(hc.ControlPlaneNamespace)
if err != nil {
return err
}
return checkControlPlaneReplicaSets(controlPlaneReplicaSet)
},
},
{
description: "no unschedulable pods",
hintAnchor: "l5d-existence-unschedulable-pods", // needs https://github.com/linkerd/website/issues/272
fatal: true,
check: func(context.Context) error {
// do not save this into hc.controlPlanePods, as this check may
// succeed prior to all expected control plane pods being up
controlPlanePods, err := hc.kubeAPI.GetPodsByNamespace(hc.ControlPlaneNamespace)
if err != nil {
return err
}
return checkUnschedulablePods(controlPlanePods)
},
},
{
description: "controller pod is running",
hintAnchor: "l5d-existence-controller",
retryDeadline: hc.RetryDeadline,
fatal: true,
check: func(ctx context.Context) error {
// save this into hc.controlPlanePods, since this check only
// succeeds when all pods are up
var err error
hc.controlPlanePods, err = hc.kubeAPI.GetPodsByNamespace(hc.ControlPlaneNamespace)
if err != nil {
return err
}
return checkControllerRunning(hc.controlPlanePods)
},
},
{
description: "can initialize the client",
hintAnchor: "l5d-existence-client",
fatal: true,
check: func(context.Context) (err error) {
if hc.APIAddr != "" {
hc.apiClient, err = public.NewInternalClient(hc.ControlPlaneNamespace, hc.APIAddr)
} else {
hc.apiClient, err = public.NewExternalClient(hc.ControlPlaneNamespace, hc.kubeAPI)
}
return
},
},
{
description: "can query the control plane API",
hintAnchor: "l5d-existence-api",
retryDeadline: hc.RetryDeadline,
fatal: true,
check: func(ctx context.Context) (err error) {
hc.serverVersion, err = GetServerVersion(ctx, hc.apiClient)
return
},
},
},
},
{
id: LinkerdAPIChecks,
checkers: []checker{
{
description: "control plane pods are ready",
hintAnchor: "l5d-api-control-ready",
retryDeadline: hc.RetryDeadline,
fatal: true,
check: func(context.Context) error {
var err error
hc.controlPlanePods, err = hc.kubeAPI.GetPodsByNamespace(hc.ControlPlaneNamespace)
if err != nil {
return err
}
return validateControlPlanePods(hc.controlPlanePods)
},
},
{
description: "control plane self-check",
hintAnchor: "l5d-api-control-api",
fatal: true,
retryDeadline: hc.RetryDeadline,
checkRPC: func(ctx context.Context) (*healthcheckPb.SelfCheckResponse, error) {
return hc.apiClient.SelfCheck(ctx, &healthcheckPb.SelfCheckRequest{})
},
},
{
description: "no invalid service profiles",
hintAnchor: "l5d-sp",
warning: true,
check: func(context.Context) error {
return hc.validateServiceProfiles()
},
},
},
},
{
id: LinkerdVersionChecks,
checkers: []checker{
{
description: "can determine the latest version",
hintAnchor: "l5d-version-latest",
check: func(ctx context.Context) (err error) {
if hc.VersionOverride != "" {
hc.latestVersions, err = version.NewChannels(hc.VersionOverride)
} else {
// The UUID is only known to the web process. At some point we may want
// to consider providing it in the Public API.
uuid := "unknown"
for _, pod := range hc.controlPlanePods {
if strings.Split(pod.Name, "-")[0] == "web" {
for _, container := range pod.Spec.Containers {
if container.Name == "web" {
for _, arg := range container.Args {
if strings.HasPrefix(arg, "-uuid=") {
uuid = strings.TrimPrefix(arg, "-uuid=")
}
}
}
}
}
}
hc.latestVersions, err = version.GetLatestVersions(ctx, uuid, "cli")
}
return
},
},
{
description: "cli is up-to-date",
hintAnchor: "l5d-version-cli",
warning: true,
check: func(context.Context) error {
return hc.latestVersions.Match(version.Version)
},
},
},
},
{
id: LinkerdControlPlaneVersionChecks,
checkers: []checker{
{
description: "control plane is up-to-date",
hintAnchor: "l5d-version-control",
warning: true,
check: func(context.Context) error {
return hc.latestVersions.Match(hc.serverVersion)
},
},
{
description: "control plane and cli versions match",
hintAnchor: "l5d-version-control",
warning: true,
check: func(context.Context) error {
if hc.serverVersion != version.Version {
return fmt.Errorf("control plane running %s but cli running %s", hc.serverVersion, version.Version)
}
return nil
},
},
},
},
{
id: LinkerdDataPlaneChecks,
checkers: []checker{
{
description: "data plane namespace exists",
hintAnchor: "l5d-data-plane-exists",
fatal: true,
check: func(context.Context) error {
if hc.DataPlaneNamespace == "" {
// when checking proxies in all namespaces, this check is a no-op
return nil
}
return hc.CheckNamespace(hc.DataPlaneNamespace, true)
},
},
{
description: "data plane proxies are ready",
hintAnchor: "l5d-data-plane-ready",
retryDeadline: hc.RetryDeadline,
fatal: true,
check: func(ctx context.Context) error {
pods, err := hc.getDataPlanePods(ctx)
if err != nil {
return err
}
return validateDataPlanePods(pods, hc.DataPlaneNamespace)
},
},
{
description: "data plane proxy metrics are present in Prometheus",
hintAnchor: "l5d-data-plane-prom",
retryDeadline: hc.RetryDeadline,
check: func(ctx context.Context) error {
pods, err := hc.getDataPlanePods(ctx)
if err != nil {
return err
}
return validateDataPlanePodReporting(pods)
},
},
{
description: "data plane is up-to-date",
hintAnchor: "l5d-data-plane-version",
warning: true,
check: func(ctx context.Context) error {
pods, err := hc.getDataPlanePods(ctx)
if err != nil {
return err
}
for _, pod := range pods {
err = hc.latestVersions.Match(pod.ProxyVersion)
if err != nil {
return fmt.Errorf("%s: %s", pod.Name, err)
}
}
return nil
},
},
{
description: "data plane and cli versions match",
hintAnchor: "l5d-data-plane-cli-version",
warning: true,
check: func(ctx context.Context) error {
pods, err := hc.getDataPlanePods(ctx)
if err != nil {
return err
}
for _, pod := range pods {
if pod.ProxyVersion != version.Version {
return fmt.Errorf("%s running %s but cli running %s", pod.Name, pod.ProxyVersion, version.Version)
}
}
return nil
},
},
},
},
}
}
// Add adds an arbitrary checker. This should only be used for testing. For
// production code, pass in the desired set of checks when calling
// NewHealthChecker.
func (hc *HealthChecker) Add(categoryID CategoryID, description string, hintAnchor string, check func(context.Context) error) {
hc.addCategory(
category{
id: categoryID,
checkers: []checker{
{
description: description,
check: check,
hintAnchor: hintAnchor,
},
},
},
)
}
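// Illustrative usage sketch (test-only, per the note above; not in the
// original source):
//
//	hc := NewHealthChecker([]CategoryID{}, &Options{})
//	hc.Add("my-category", "my check passes", "my-hint", func(context.Context) error {
//	    return nil
//	})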
// addCategory is also for testing
func (hc *HealthChecker) addCategory(c category) {
c.enabled = true
hc.categories = append(hc.categories, c)
}
// RunChecks runs all configured checkers, and passes the results of each
// check to the observer. If a check fails and is marked as fatal, then all
// remaining checks are skipped. If at least one check fails, RunChecks returns
// false; if all checks passed, RunChecks returns true. Checks which are
// designated as warnings will not cause RunChecks to return false, however.
func (hc *HealthChecker) RunChecks(observer checkObserver) bool {
success := true
for _, c := range hc.categories {
if c.enabled {
for _, checker := range c.checkers {
checker := checker // pin
if checker.check != nil {
if !hc.runCheck(c.id, &checker, observer) {
if !checker.warning {
success = false
}
if checker.fatal {
return success
}
}
}
if checker.checkRPC != nil {
if !hc.runCheckRPC(c.id, &checker, observer) {
if !checker.warning {
success = false
}
if checker.fatal {
return success
}
}
}
}
}
}
return success
}
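// Illustrative usage sketch (not in the original source): stream results to
// stdout as they arrive and report overall success.
//
//	ok := hc.RunChecks(func(result *CheckResult) {
//	    fmt.Printf("[%s] %s: %v\n", result.Category, result.Description, result.Err)
//	})
//	if !ok {
//	    fmt.Println("one or more checks failed")
//	}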
func (hc *HealthChecker) runCheck(categoryID CategoryID, c *checker, observer checkObserver) bool {
for {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
defer cancel()
err := c.check(ctx)
checkResult := &CheckResult{
Category: categoryID,
Description: c.description,
HintAnchor: c.hintAnchor,
Warning: c.warning,
Err: err,
}
if err != nil && time.Now().Before(c.retryDeadline) {
checkResult.Retry = true
checkResult.Err = errors.New("waiting for check to complete")
log.Debugf("Retrying on error: %s", err)
observer(checkResult)
time.Sleep(retryWindow)
continue
}
observer(checkResult)
return err == nil
}
}
func (hc *HealthChecker) runCheckRPC(categoryID CategoryID, c *checker, observer checkObserver) bool {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
defer cancel()
checkRsp, err := c.checkRPC(ctx)
observer(&CheckResult{
Category: categoryID,
Description: c.description,
HintAnchor: c.hintAnchor,
Warning: c.warning,
Err: err,
})
if err != nil {
return false
}
for _, check := range checkRsp.Results {
var err error
if check.Status != healthcheckPb.CheckStatus_OK {
err = errors.New(check.FriendlyMessageToUser)
}
observer(&CheckResult{
Category: categoryID,
Description: fmt.Sprintf("[%s] %s", check.SubsystemName, check.CheckDescription),
HintAnchor: c.hintAnchor,
Warning: c.warning,
Err: err,
})
if err != nil {
return false
}
}
return true
}
// PublicAPIClient returns a fully configured public API client. This client is
// only configured if the KubernetesAPIChecks and LinkerdAPIChecks are
// configured and run first.
func (hc *HealthChecker) PublicAPIClient() public.APIClient {
return hc.apiClient
}
// CheckNamespace checks whether the given namespace exists, and returns an
// error if it does not match `shouldExist`.
func (hc *HealthChecker) CheckNamespace(namespace string, shouldExist bool) error {
exists, err := hc.kubeAPI.NamespaceExists(namespace)
if err != nil {
return err
}
if shouldExist && !exists {
return fmt.Errorf("The \"%s\" namespace does not exist", namespace)
}
if !shouldExist && exists {
return fmt.Errorf("The \"%s\" namespace already exists", namespace)
}
return nil
}
func (hc *HealthChecker) expectedRBACNames() []string {
return []string{
fmt.Sprintf("linkerd-%s-controller", hc.ControlPlaneNamespace),
fmt.Sprintf("linkerd-%s-identity", hc.ControlPlaneNamespace),
fmt.Sprintf("linkerd-%s-prometheus", hc.ControlPlaneNamespace),
fmt.Sprintf("linkerd-%s-proxy-injector", hc.ControlPlaneNamespace),
fmt.Sprintf("linkerd-%s-sp-validator", hc.ControlPlaneNamespace),
fmt.Sprintf("linkerd-%s-tap", hc.ControlPlaneNamespace),
}
}
func expectedServiceAccountNames() []string {
return []string{
"linkerd-controller",
"linkerd-grafana",
"linkerd-identity",
"linkerd-prometheus",
"linkerd-proxy-injector",
"linkerd-sp-validator",
"linkerd-web",
"linkerd-tap",
}
}
func (hc *HealthChecker) checkClusterRoles() error {
crList, err := hc.kubeAPI.RbacV1().ClusterRoles().List(metav1.ListOptions{})
if err != nil {
return err
}
objects := []runtime.Object{}
for _, item := range crList.Items {
item := item // pin
objects = append(objects, &item)
}
return checkResources("ClusterRoles", objects, hc.expectedRBACNames())
}
func (hc *HealthChecker) checkClusterRoleBindings() error {
crbList, err := hc.kubeAPI.RbacV1().ClusterRoleBindings().List(metav1.ListOptions{})
if err != nil {
return err
}
objects := []runtime.Object{}
for _, item := range crbList.Items {
item := item // pin
objects = append(objects, &item)
}
return checkResources("ClusterRoleBindings", objects, hc.expectedRBACNames())
}
func (hc *HealthChecker) checkServiceAccounts() error {
saList, err := hc.kubeAPI.CoreV1().ServiceAccounts(hc.ControlPlaneNamespace).List(metav1.ListOptions{})
if err != nil {
return err
}
objects := []runtime.Object{}
for _, item := range saList.Items {
item := item // pin
objects = append(objects, &item)
}
return checkResources("ServiceAccounts", objects, expectedServiceAccountNames())
}
func (hc *HealthChecker) checkCustomResourceDefinitions() error {
crdList, err := hc.kubeAPI.Apiextensions.ApiextensionsV1beta1().CustomResourceDefinitions().List(metav1.ListOptions{})
if err != nil {
return err
}
objects := []runtime.Object{}
for _, item := range crdList.Items {
item := item // pin
objects = append(objects, &item)
}
return checkResources("CustomResourceDefinitions", objects, []string{"serviceprofiles.linkerd.io"})
}
func checkResources(resourceName string, objects []runtime.Object, expectedNames []string) error {
expected := map[string]bool{}
for _, name := range expectedNames {
expected[name] = false
}
for _, obj := range objects {
metaObj, err := meta.Accessor(obj)
if err != nil {
return err
}
if _, ok := expected[metaObj.GetName()]; ok {
expected[metaObj.GetName()] = true
}
}
missing := []string{}
for name, found := range expected {
if !found {
missing = append(missing, name)
}
}
if len(missing) > 0 {
sort.Strings(missing)
return fmt.Errorf("missing %s: %s", resourceName, strings.Join(missing, ", "))
}
return nil
}
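// Example (illustrative, not in the original source): given expectedNames
// ["a", "b"] and objects named ["a", "c"], checkResources returns
// `missing <resourceName>: b`; the unexpected "c" is ignored.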
func (hc *HealthChecker) getDataPlanePods(ctx context.Context) ([]*pb.Pod, error) {
req := &pb.ListPodsRequest{}
if hc.DataPlaneNamespace != "" {
req.Selector = &pb.ResourceSelection{
Resource: &pb.Resource{
Namespace: hc.DataPlaneNamespace,
},
}
}
resp, err := hc.apiClient.ListPods(ctx, req)
if err != nil {
return nil, err
}
pods := make([]*pb.Pod, 0)
for _, pod := range resp.GetPods() {
if pod.ControllerNamespace == hc.ControlPlaneNamespace {
pods = append(pods, pod)
}
}
return pods, nil
}
func (hc *HealthChecker) checkCanCreate(namespace, group, version, resource string) error {
if hc.kubeAPI == nil {
// we should never get here
return fmt.Errorf("unexpected error: Kubernetes ClientSet not initialized")
}
return k8s.ResourceAuthz(
hc.kubeAPI,
namespace,
"create",
group,
version,
resource,
"",
)
}
func (hc *HealthChecker) checkNetAdmin() error {
if hc.kubeAPI == nil {
// we should never get here
return fmt.Errorf("unexpected error: Kubernetes ClientSet not initialized")
}
pspList, err := hc.kubeAPI.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{})
if err != nil {
return err
}
if len(pspList.Items) == 0 {
// no PodSecurityPolicies found, assume PodSecurityPolicy admission controller is disabled
return nil
}
// if PodSecurityPolicies are found, validate one exists that:
// 1) permits usage
// AND
// 2) provides NET_ADMIN
for _, psp := range pspList.Items {
err := k8s.ResourceAuthz(
hc.kubeAPI,
"",
"use",
"policy",
"v1beta1",
"podsecuritypolicies",
psp.GetName(),
)
if err == nil {
for _, capability := range psp.Spec.AllowedCapabilities {
if capability == "*" || capability == "NET_ADMIN" {
return nil
}
}
}
}
return fmt.Errorf("found %d PodSecurityPolicies, but none provide NET_ADMIN, proxy injection will fail if the PSP admission controller is running", len(pspList.Items))
}
func (hc *HealthChecker) checkClockSkew() error {
if hc.kubeAPI == nil {
// we should never get here
return fmt.Errorf("unexpected error: Kubernetes ClientSet not initialized")
}
var clockSkewNodes []string
nodeList, err := hc.kubeAPI.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return err
}
for _, node := range nodeList.Items {
for _, condition := range node.Status.Conditions {
// we want to check only KubeletReady condition and only execute if the node is ready
if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue {
since := time.Since(condition.LastHeartbeatTime.Time)
if (since > AllowedClockSkew) || (since < -AllowedClockSkew) {
clockSkewNodes = append(clockSkewNodes, node.Name)
}
}
}
}
if len(clockSkewNodes) > 0 {
return fmt.Errorf("clock skew detected for node(s): %s", strings.Join(clockSkewNodes, ", "))
}
return nil
}
func (hc *HealthChecker) validateServiceProfiles() error {
spClientset, err := spclient.NewForConfig(hc.kubeAPI.Config)
if err != nil {
return err
}
svcProfiles, err := spClientset.LinkerdV1alpha1().ServiceProfiles("").List(metav1.ListOptions{})
if err != nil {
return err
}
for _, p := range svcProfiles.Items {
// TODO: remove this check once we implement ServiceProfile validation via a
// ValidatingAdmissionWebhook
result := spClientset.RESTClient().Get().RequestURI(p.GetSelfLink()).Do()
raw, err := result.Raw()
if err != nil {
return err
}
err = profiles.Validate(raw)
if err != nil {
return fmt.Errorf("%s: %s", p.Name, err)
}
}
return nil
}
// getPodStatuses returns a map of all Linkerd container statuses:
// component =>
// pod name =>
// container statuses
// "controller" =>
// "linkerd-controller-6f78cbd47-bc557" =>
// [destination status, public-api status, ...]
func getPodStatuses(pods []corev1.Pod) map[string]map[string][]corev1.ContainerStatus {
statuses := make(map[string]map[string][]corev1.ContainerStatus)
for _, pod := range pods {
if pod.Status.Phase == corev1.PodRunning && strings.HasPrefix(pod.Name, "linkerd-") {
parts := strings.Split(pod.Name, "-")
// All control plane pods should have a name that results in at least 4
// substrings when split on '-' with strings.Split
if len(parts) >= 4 {
name := strings.Join(parts[1:len(parts)-2], "-")
if _, found := statuses[name]; !found {
statuses[name] = make(map[string][]corev1.ContainerStatus)
}
statuses[name][pod.Name] = pod.Status.ContainerStatuses
}
}
}
return statuses
}
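// Worked example of the name extraction above (the pod hash and suffix are
// illustrative): for "linkerd-proxy-injector-5f6d9c7b8d-xk2vp", strings.Split
// yields ["linkerd", "proxy", "injector", "5f6d9c7b8d", "xk2vp"], and joining
// parts[1:len(parts)-2] recovers the component name "proxy-injector".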
func validateControlPlanePods(pods []corev1.Pod) error {
statuses := getPodStatuses(pods)
names := []string{"controller", "grafana", "identity", "prometheus", "sp-validator", "web", "tap"}
// TODO: deprecate this when we drop support for checking pre-default proxy-injector control-planes
if _, found := statuses["proxy-injector"]; found {
names = append(names, "proxy-injector")
}
for _, name := range names {
pods, found := statuses[name]
if !found {
return fmt.Errorf("No running pods for \"linkerd-%s\"", name)
}
var err error
var ready bool
for pod, containers := range pods {
containersReady := true
for _, container := range containers {
if !container.Ready {
// TODO: Save this as a warning, allow check to pass but let the user
// know there is at least one pod not ready. This might imply
// restructuring health checks to allow individual checks to return
// either fatal or warning, rather than setting this property at
// compile time.
err = fmt.Errorf("The \"%s\" pod's \"%s\" container is not ready", pod,
container.Name)
containersReady = false
}
}
if containersReady {
// at least one pod has all containers ready
ready = true
break
}
}
if !ready {
return err
}
}
return nil
}
func checkControllerRunning(pods []corev1.Pod) error {
statuses := getPodStatuses(pods)
if _, ok := statuses["controller"]; !ok {
return errors.New("No running pods for \"linkerd-controller\"")
}
return nil
}
func validateDataPlanePods(pods []*pb.Pod, targetNamespace string) error {
if len(pods) == 0 {
msg := fmt.Sprintf("No \"%s\" containers found", k8s.ProxyContainerName)
if targetNamespace != "" {
msg += fmt.Sprintf(" in the \"%s\" namespace", targetNamespace)
}
return errors.New(msg)
}
for _, pod := range pods {
if pod.Status != "Running" {
return fmt.Errorf("The \"%s\" pod is not running",
pod.Name)
}
if !pod.ProxyReady {
return fmt.Errorf("The \"%s\" container in the \"%s\" pod is not ready",
k8s.ProxyContainerName, pod.Name)
}
}
return nil
}
func validateDataPlanePodReporting(pods []*pb.Pod) error {
notInPrometheus := []string{}
for _, p := range pods {
// the `Added` field indicates the pod was found in Prometheus
if !p.Added {
notInPrometheus = append(notInPrometheus, p.Name)
}
}
errMsg := ""
if len(notInPrometheus) > 0 {
errMsg = fmt.Sprintf("Data plane metrics not found for %s.", strings.Join(notInPrometheus, ", "))
}
if errMsg != "" {
return fmt.Errorf(errMsg)
}
return nil
}
func checkUnschedulablePods(pods []corev1.Pod) error {
var errs []string
for _, pod := range pods {
	for _, condition := range pod.Status.Conditions {
		if condition.Reason == corev1.PodReasonUnschedulable {
			errs = append(errs, fmt.Sprintf("%s: %s", pod.Name, condition.Message))
		}
	}
}
if len(errs) > 0 {
	return fmt.Errorf("%s", strings.Join(errs, "\n "))
}
return nil
}
func checkControlPlaneReplicaSets(rst []appsv1.ReplicaSet) error {
var errs []string
for _, rs := range rst {
	for _, r := range rs.Status.Conditions {
		if r.Type == appsv1.ReplicaSetReplicaFailure && r.Status == corev1.ConditionTrue {
			errs = append(errs, fmt.Sprintf("%s: %s", r.Reason, r.Message))
		}
	}
}
if len(errs) > 0 {
	return fmt.Errorf("%s", strings.Join(errs, "\n "))
}
return nil
}
| |
Attributes.ts
|
namespace Serenity {
export class ColumnsKeyAttribute {
constructor(public value: string) { }
}
export class DialogTypeAttribute {
constructor(public value: Function) { }
}
export class EditorAttribute {
constructor() { }
key: string;
}
export class ElementAttribute {
constructor(public value: string) { }
}
export class EntityTypeAttribute {
constructor(public value: string) { }
}
export class EnumKeyAttribute {
constructor(public value: string) { }
}
export class FlexifyAttribute {
constructor(public value = true) { }
}
export class FormKeyAttribute {
constructor(public value: string) { }
}
export class GeneratedCodeAttribute {
constructor(public origin?: string) { }
}
export class IdPropertyAttribute {
constructor(public value: string) { }
}
export class IsActivePropertyAttribute {
constructor(public value: string) { }
}
export class ItemNameAttribute {
constructor(public value: string) { }
}
export class LocalTextPrefixAttribute {
constructor(public value: string) { }
}
export class MaximizableAttribute {
constructor(public value = true) { }
}
export class NamePropertyAttribute {
constructor(public value: string) { }
}
export class OptionAttribute {
constructor() { }
}
export class OptionsTypeAttribute {
constructor(public value: Function) { }
}
export class PanelAttribute {
constructor(public value = true) { }
}
export class ResizableAttribute {
constructor(public value = true) { }
}
export class ResponsiveAttribute {
constructor(public value = true) { }
}
export class ServiceAttribute {
constructor(public value: string) { }
}
}
declare namespace Serenity {
class CategoryAttribute {
constructor(category: string);
category: string;
}
class CssClassAttribute {
constructor(cssClass: string);
cssClass: string;
}
class DefaultValueAttribute {
constructor(defaultValue: any);
value: any;
}
class EditorOptionAttribute {
constructor(key: string, value: any);
key: string;
value: any;
}
class EditorTypeAttribute extends EditorTypeAttributeBase {
constructor(editorType: string);
}
class EditorTypeAttributeBase {
constructor(type: string);
setParams(editorParams: any): void;
editorType: string;
}
class FilterableAttribute {
constructor(value: boolean);
value: boolean;
}
class HiddenAttribute {
}
class HintAttribute {
constructor(hint: string);
hint: string;
}
class InsertableAttribute {
constructor(insertable?: boolean);
value: boolean;
}
class MaxLengthAttribute {
constructor(maxLength: number);
maxLength: number;
}
class OneWayAttribute {
}
class PlaceholderAttribute {
constructor(value: string);
value: string;
}
class ReadOnlyAttribute {
constructor(readOnly?: boolean);
value: boolean;
}
class RequiredAttribute {
constructor(isRequired: boolean);
isRequired: boolean;
}
class UpdatableAttribute {
constructor(updatable?: boolean);
value: boolean;
}
}
|
dcim_interface_connections_list_parameters.go
|
// Code generated by go-swagger; DO NOT EDIT.
// Copyright (c) 2020 Samuel Mutel <[email protected]>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
package dcim
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NewDcimInterfaceConnectionsListParams creates a new DcimInterfaceConnectionsListParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewDcimInterfaceConnectionsListParams() *DcimInterfaceConnectionsListParams {
return &DcimInterfaceConnectionsListParams{
timeout: cr.DefaultTimeout,
}
}
// NewDcimInterfaceConnectionsListParamsWithTimeout creates a new DcimInterfaceConnectionsListParams object
// with the ability to set a timeout on a request.
func NewDcimInterfaceConnectionsListParamsWithTimeout(timeout time.Duration) *DcimInterfaceConnectionsListParams {
return &DcimInterfaceConnectionsListParams{
timeout: timeout,
}
}
// NewDcimInterfaceConnectionsListParamsWithContext creates a new DcimInterfaceConnectionsListParams object
// with the ability to set a context for a request.
func NewDcimInterfaceConnectionsListParamsWithContext(ctx context.Context) *DcimInterfaceConnectionsListParams {
return &DcimInterfaceConnectionsListParams{
Context: ctx,
}
}
// NewDcimInterfaceConnectionsListParamsWithHTTPClient creates a new DcimInterfaceConnectionsListParams object
// with the ability to set a custom HTTPClient for a request.
func NewDcimInterfaceConnectionsListParamsWithHTTPClient(client *http.Client) *DcimInterfaceConnectionsListParams {
return &DcimInterfaceConnectionsListParams{
HTTPClient: client,
}
}
/* DcimInterfaceConnectionsListParams contains all the parameters to send to the API endpoint
for the dcim interface connections list operation.
Typically these are written to a http.Request.
*/
type DcimInterfaceConnectionsListParams struct {
// Device.
Device *string
// DeviceID.
DeviceID *string
/* Limit.
Number of results to return per page.
*/
Limit *int64
/* Offset.
The initial index from which to return the results.
*/
Offset *int64
// Site.
Site *string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the dcim interface connections list params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DcimInterfaceConnectionsListParams) WithDefaults() *DcimInterfaceConnectionsListParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the dcim interface connections list params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *DcimInterfaceConnectionsListParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) WithTimeout(timeout time.Duration) *DcimInterfaceConnectionsListParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) WithContext(ctx context.Context) *DcimInterfaceConnectionsListParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) WithHTTPClient(client *http.Client) *DcimInterfaceConnectionsListParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithDevice adds the device to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) WithDevice(device *string) *DcimInterfaceConnectionsListParams {
o.SetDevice(device)
return o
}
// SetDevice adds the device to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) SetDevice(device *string) {
o.Device = device
}
// WithDeviceID adds the deviceID to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) WithDeviceID(deviceID *string) *DcimInterfaceConnectionsListParams {
o.SetDeviceID(deviceID)
return o
}
// SetDeviceID adds the deviceId to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) SetDeviceID(deviceID *string) {
o.DeviceID = deviceID
}
// WithLimit adds the limit to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) WithLimit(limit *int64) *DcimInterfaceConnectionsListParams {
o.SetLimit(limit)
return o
}
// SetLimit adds the limit to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) SetLimit(limit *int64) {
o.Limit = limit
}
// WithOffset adds the offset to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) WithOffset(offset *int64) *DcimInterfaceConnectionsListParams {
o.SetOffset(offset)
return o
}
// SetOffset adds the offset to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) SetOffset(offset *int64) {
o.Offset = offset
}
// WithSite adds the site to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) WithSite(site *string) *DcimInterfaceConnectionsListParams {
o.SetSite(site)
return o
}
// SetSite adds the site to the dcim interface connections list params
func (o *DcimInterfaceConnectionsListParams) SetSite(site *string) {
o.Site = site
}
// WriteToRequest writes these params to a swagger request
func (o *DcimInterfaceConnectionsListParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Device != nil {
// query param device
var qrDevice string
if o.Device != nil {
qrDevice = *o.Device
}
qDevice := qrDevice
if qDevice != "" {
if err := r.SetQueryParam("device", qDevice); err != nil {
return err
}
}
}
if o.DeviceID != nil {
// query param device_id
var qrDeviceID string
if o.DeviceID != nil {
qrDeviceID = *o.DeviceID
}
qDeviceID := qrDeviceID
if qDeviceID != ""
|
}
if o.Limit != nil {
// query param limit
var qrLimit int64
if o.Limit != nil {
qrLimit = *o.Limit
}
qLimit := swag.FormatInt64(qrLimit)
if qLimit != "" {
if err := r.SetQueryParam("limit", qLimit); err != nil {
return err
}
}
}
if o.Offset != nil {
// query param offset
var qrOffset int64
if o.Offset != nil {
qrOffset = *o.Offset
}
qOffset := swag.FormatInt64(qrOffset)
if qOffset != "" {
if err := r.SetQueryParam("offset", qOffset); err != nil {
return err
}
}
}
if o.Site != nil {
// query param site
var qrSite string
if o.Site != nil {
qrSite = *o.Site
}
qSite := qrSite
if qSite != "" {
if err := r.SetQueryParam("site", qSite); err != nil {
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
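// Hedged usage sketch (not part of the generated client): parameters are
// normally built fluently with the With* helpers above and handed to the
// generated list operation; the values here are illustrative only.
//
//	device := "edge-router-01"
//	var limit int64 = 50
//	params := NewDcimInterfaceConnectionsListParams().
//		WithTimeout(30 * time.Second).
//		WithDevice(&device).
//		WithLimit(&limit)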
|
errors.go
|
package config
import (
"fmt"
"strings"
)
// ErrMissingCollaborator returns an error when an alias is requested that doesn't exist in the config
func ErrMissingCollaborator(missing []string) error {
if len(missing) == 0 {
return nil
} else if len(missing) == 1 {
return fmt.Errorf("No collaborator exists for the alias '%s'", missing[0])
}
return fmt.Errorf("No collaborators exist for aliases '%s'", strings.Join(missing, "', '"))
}
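// Hedged usage sketch (alias values are illustrative): callers gather every
// unknown alias first so a single error reports them all.
//
//	missing := []string{"mb", "jd"}
//	if err := ErrMissingCollaborator(missing); err != nil {
//		return err // "No collaborators exist for aliases 'mb', 'jd'"
//	}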
|
args.rs
|
use proc_macro2::{Group, Span};
use syn::parse::{Error, Parse, ParseStream, Result};
use syn::token::{Comma, Eq, Pub};
use syn::{parse2, Ident, LitStr, Meta, Visibility};
mod kw {
// NOTE: when adding new keywords update ArgList::next_is_kw
syn::custom_keyword!(doc);
syn::custom_keyword!(merge_fn);
syn::custom_keyword!(rewrap);
syn::custom_keyword!(attrs);
syn::custom_keyword!(field_doc);
syn::custom_keyword!(field_attrs);
syn::custom_keyword!(from);
pub mod attrs_sub {
syn::custom_keyword!(add);
}
}
#[cfg_attr(test, derive(PartialEq))]
pub struct Args {
pub item: GenItem,
pub merge: Option<MergeFn>,
pub rewrap: bool,
pub doc: Option<Doc>,
pub attrs: Option<Attrs>,
pub field_doc: bool,
pub field_attrs: Option<Attrs>,
pub from: bool,
}
enum Arg {
Merge(MergeFn),
Doc(Doc),
Rewrap(bool),
Attrs(Attrs),
FieldDocs(bool),
FieldAttrs(Attrs),
From(bool),
}
#[cfg_attr(test, derive(PartialEq))]
pub struct GenItem {
pub name: Ident,
pub visibility: Option<Visibility>,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub struct MergeFn {
pub visibility: Visibility,
pub name: MergeFnName,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub enum MergeFnName {
Default,
Custom(Ident),
}
#[cfg_attr(test, derive(Debug, PartialEq))]
pub enum Doc {
Same,
Custom(String),
}
#[cfg_attr(test, derive(Debug, PartialEq))]
pub enum Attrs {
/// Keep same attributes.
Keep,
/// Replace with given attributes.
Replace(Vec<Meta>),
/// Keep original attributes and add the given ones.
Add(Vec<Meta>),
}
#[derive(Debug)]
pub struct AttrList(Vec<Meta>);
/// Parser for unordered args.
struct ArgList {
item: GenItem,
merge: Option<Span>,
doc: Option<Span>,
rewrap: Option<Span>,
attrs: Option<Span>,
field_doc: Option<Span>,
field_attrs: Option<Span>,
from: Option<Span>,
list: Vec<Arg>,
}
impl Parse for Args {
fn parse(input: ParseStream) -> Result<Self> {
let arg_list: ArgList = input.parse()?;
Ok(arg_list.into())
}
}
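// Hedged usage sketch (mirrors the tests below; `quote!` comes from the
// `quote` crate): the argument list starts with the opt struct name, followed
// by unordered keyword args.
//
//     let args: Args = syn::parse2(quote! {
//         pub Opt, doc = "custom docs", merge_fn = pub custom_fn, rewrap
//     })
//     .unwrap();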
impl Parse for ArgList {
fn parse(input: ParseStream) -> Result<Self> {
if input.is_empty() {
return Err(input.error("expected opt struct name"));
}
if ArgList::next_is_kw(&input) {
return Err(input.error("first argument must be opt struct name"));
}
let item = input.parse()?;
let mut arg_list = ArgList::new(item);
while !input.is_empty() {
input.parse::<Comma>()?;
// allow trailing commas
if input.is_empty() {
break;
}
let lookahead = input.lookahead1();
if lookahead.peek(kw::doc) {
arg_list.parse_doc(&input)?;
} else if lookahead.peek(kw::merge_fn) {
arg_list.parse_merge(&input)?;
} else if lookahead.peek(kw::rewrap) {
arg_list.parse_rewrap(&input)?;
} else if lookahead.peek(kw::attrs) {
arg_list.parse_attrs(&input)?;
} else if lookahead.peek(kw::field_doc) {
arg_list.parse_field_doc(&input)?;
} else if lookahead.peek(kw::field_attrs) {
arg_list.parse_field_attrs(&input)?;
} else if lookahead.peek(kw::from) {
arg_list.parse_from(&input)?;
} else {
return Err(lookahead.error());
}
}
Ok(arg_list)
}
}
impl Args {
fn new(item: GenItem) -> Self {
Self {
item,
merge: None,
rewrap: false,
doc: None,
attrs: None,
field_doc: false,
field_attrs: None,
from: false,
}
}
}
impl ArgList {
fn new(item: GenItem) -> Self {
Self {
item,
merge: None,
doc: None,
rewrap: None,
attrs: None,
field_doc: None,
field_attrs: None,
from: None,
list: Vec::with_capacity(6),
}
}
fn next_is_kw(input: ParseStream) -> bool {
input.peek(kw::doc)
|| input.peek(kw::merge_fn)
|| input.peek(kw::rewrap)
|| input.peek(kw::field_doc)
|| input.peek(kw::field_attrs)
|| input.peek(kw::attrs)
|| input.peek(kw::from)
}
fn parse_doc(&mut self, input: ParseStream) -> Result<()> {
if let Some(doc_span) = self.doc {
return ArgList::already_defined_error(input, "doc", doc_span);
}
let span = input.span();
let doc: Doc = input.parse()?;
self.doc = Some(span);
self.list.push(Arg::Doc(doc));
Ok(())
}
fn parse_merge(&mut self, input: ParseStream) -> Result<()> {
if let Some(merge_span) = self.merge {
return ArgList::already_defined_error(input, "merge_fn", merge_span);
}
let span = input.span();
let merge: MergeFn = input.parse()?;
self.merge = Some(span);
self.list.push(Arg::Merge(merge));
Ok(())
}
fn parse_rewrap(&mut self, input: ParseStream) -> Result<()> {
if let Some(rewrap_span) = self.rewrap {
return ArgList::already_defined_error(input, "rewrap", rewrap_span);
}
let span = input.span();
input.parse::<kw::rewrap>()?;
self.rewrap = Some(span);
self.list.push(Arg::Rewrap(true));
Ok(())
}
fn parse_attrs(&mut self, input: ParseStream) -> Result<()> {
if let Some(attrs_span) = self.attrs {
return ArgList::already_defined_error(input, "attrs", attrs_span);
}
let span = input.span();
input.parse::<kw::attrs>()?;
let attrs: Attrs = input.parse()?;
self.attrs = Some(span);
self.list.push(Arg::Attrs(attrs));
Ok(())
}
fn parse_field_doc(&mut self, input: ParseStream) -> Result<()> {
if let Some(field_doc_span) = self.field_doc {
return ArgList::already_defined_error(input, "field_doc", field_doc_span);
}
let span = input.span();
input.parse::<kw::field_doc>()?;
self.field_doc = Some(span);
self.list.push(Arg::FieldDocs(true));
Ok(())
}
fn parse_field_attrs(&mut self, input: ParseStream) -> Result<()> {
if let Some(field_attrs_span) = self.field_attrs {
    return ArgList::already_defined_error(input, "field_attrs", field_attrs_span);
}
let span = input.span();
input.parse::<kw::field_attrs>()?;
let field_attrs: Attrs = input.parse()?;
self.field_attrs = Some(span);
self.list.push(Arg::FieldAttrs(field_attrs));
Ok(())
}
fn parse_from(&mut self, input: ParseStream) -> Result<()> {
if let Some(from_span) = self.from {
return ArgList::already_defined_error(input, "from", from_span);
}
let span = input.span();
input.parse::<kw::from>()?;
self.from = Some(span);
self.list.push(Arg::From(true));
Ok(())
}
fn already_defined_error(
input: ParseStream,
arg_name: &'static str,
prev_span: Span,
) -> Result<()> {
let mut e = input.error(&format!("{} already defined", arg_name));
e.combine(Error::new(prev_span, &format!("{} defined here", arg_name)));
Err(e)
}
}
impl GenItem {
pub fn final_visibility(&self) -> Visibility {
match &self.visibility {
None => Visibility::Inherited,
Some(v) => v.clone(),
}
}
}
impl Parse for GenItem {
fn parse(input: ParseStream) -> Result<Self> {
let visibility = if input.peek(Pub) {
Some(input.parse()?)
} else {
None
};
let name = input.parse()?;
Ok(Self { name, visibility })
}
}
impl Parse for Doc {
fn parse(input: ParseStream) -> Result<Self> {
input.parse::<kw::doc>()?;
if input.peek(Eq) {
input.parse::<Eq>()?;
let doc_text: LitStr = input.parse()?;
Ok(Doc::Custom(doc_text.value()))
} else {
Ok(Doc::Same)
}
}
}
impl Parse for MergeFn {
fn parse(input: ParseStream) -> Result<Self> {
input.parse::<kw::merge_fn>()?;
if input.peek(Eq) {
input.parse::<Eq>()?;
let visibility = if input.peek(Pub) {
input.parse()?
} else {
Visibility::Inherited
};
let name = if input.peek(Ident) {
MergeFnName::Custom(input.parse()?)
} else {
MergeFnName::Default
};
Ok(MergeFn { visibility, name })
} else {
Ok(MergeFn::default())
}
}
}
impl Default for MergeFn {
fn default() -> MergeFn {
MergeFn {
visibility: Visibility::Inherited,
name: MergeFnName::Default,
}
}
}
impl Parse for Attrs {
fn parse(input: ParseStream) -> Result<Self> {
use Attrs::*;
if input.peek(Eq) {
input.parse::<Eq>()?;
if input.peek(kw::attrs_sub::add) {
input.parse::<kw::attrs_sub::add>()?;
Ok(Add(Attrs::parse_attr_list(input)?))
} else {
Ok(Replace(Attrs::parse_attr_list(input)?))
}
} else {
Ok(Keep)
}
}
}
impl Attrs {
fn parse_attr_list(input: ParseStream) -> Result<Vec<Meta>> {
let group: Group = input.parse()?;
let attrs: AttrList = parse2(group.stream())?;
Ok(attrs.0)
}
}
impl Parse for AttrList {
fn parse(input: ParseStream) -> Result<Self> {
let mut attrs = Vec::new();
while !input.is_empty() {
attrs.push(input.parse()?);
if input.peek(Comma) {
input.parse::<Comma>()?;
}
}
Ok(Self(attrs))
}
}
impl From<ArgList> for Args {
fn from(arg_list: ArgList) -> Args {
use Arg::*;
let mut args = Args::new(arg_list.item);
for arg in arg_list.list {
match arg {
Merge(merge) => args.merge = Some(merge),
Doc(doc) => args.doc = Some(doc),
Rewrap(rewrap) => args.rewrap = rewrap,
Attrs(attrs) => args.attrs = Some(attrs),
FieldDocs(field_doc) => args.field_doc = field_doc,
FieldAttrs(field_attrs) => args.field_attrs = Some(field_attrs),
From(from) => args.from = from,
}
}
args
}
}
#[cfg(test)]
mod tests {
use super::*;
use proc_macro2::TokenStream;
use quote::quote;
use syn::parse::Parser;
use crate::test_util::*;
macro_rules! duplicate_arg_panics_test {
($attr:meta, $expected:literal) => {
duplicate_arg_panics_test!($attr, $attr, $expected);
};
($attr:meta, $dup:meta, $expected:literal) => {
paste::item! {
#[test]
#[should_panic(expected = $expected)]
fn [<duplicate_ $attr _panics>]() {
parse_args(quote! {
Opt,
$attr,
$dup
});
}
}
};
}
duplicate_arg_panics_test!(doc, doc = "custom", "doc already defined");
duplicate_arg_panics_test!(merge_fn, "merge_fn already defined");
duplicate_arg_panics_test!(rewrap, "rewrap already defined");
duplicate_arg_panics_test!(attrs, "attrs already defined");
duplicate_arg_panics_test!(field_doc, "field_doc already defined");
duplicate_arg_panics_test!(field_attrs, "field_attrs already defined");
duplicate_arg_panics_test!(from, "from already defined");
macro_rules! struct_name_not_first_panics {
($attr:meta) => {
paste::item! {
#[test]
#[should_panic(expected = "first argument must be opt struct name")]
fn [<$attr _first_panics>]() {
parse_args(quote! {
$attr,
Opt
});
}
}
};
}
struct_name_not_first_panics!(doc);
struct_name_not_first_panics!(merge_fn);
struct_name_not_first_panics!(rewrap);
struct_name_not_first_panics!(attrs);
struct_name_not_first_panics!(field_doc);
struct_name_not_first_panics!(field_attrs);
struct_name_not_first_panics!(from);
#[test]
#[should_panic(expected = "expected opt struct name")]
fn empty_args_panics() {
parse_args(TokenStream::new());
}
#[test]
fn parse_name() {
let cases = vec![
quote! {
OptionalFields
},
quote! {
pub OptionalFields
},
quote! {
pub(crate) OptionalFields
},
quote! {
pub(in test::path) OptionalFields
},
];
for case in cases {
let args = parse_args(case);
assert_eq!(args.item.name, "OptionalFields");
}
}
#[test]
fn parse_no_optional_args() {
let args = parse_args(quote! {
Opt
});
assert_eq!(args.item.visibility, None);
assert_eq!(args.item.final_visibility(), Visibility::Inherited);
assert_eq!(args.merge, None);
assert_eq!(args.rewrap, false);
assert_eq!(args.doc, None);
assert_eq!(args.attrs, None);
assert_eq!(args.field_doc, false);
assert_eq!(args.field_attrs, None);
assert_eq!(args.from, false);
}
#[test]
fn parse_visibility() {
let cases = vec![
(quote! {pub Opt}, quote!(pub)),
(quote! {pub(crate) Opt}, quote!(pub(crate))),
(quote! {pub(in test::path) Opt}, quote!(pub(in test::path))),
];
for (args_tokens, vis_tokens) in cases {
let args: Args = syn::parse2(args_tokens).unwrap();
let vis: Visibility = syn::parse2(vis_tokens).unwrap();
assert_eq!(args.item.visibility.as_ref(), Some(&vis));
assert_eq!(args.item.final_visibility(), vis);
}
}
#[test]
fn parse_merge_fn() {
let custom_fn_name = MergeFnName::Custom(syn::parse2(quote!(custom_fn)).unwrap());
let cases = vec![
(
quote! {Opt, merge_fn},
MergeFnName::Default,
Visibility::Inherited,
),
(
quote! {Opt, merge_fn = custom_fn},
custom_fn_name.clone(),
Visibility::Inherited,
),
(
quote! {Opt, merge_fn = pub custom_fn},
custom_fn_name.clone(),
syn::parse2(quote!(pub)).unwrap(),
),
(
quote! {Opt, merge_fn = pub(crate) custom_fn},
custom_fn_name.clone(),
syn::parse2(quote!(pub(crate))).unwrap(),
),
(
quote! {Opt, merge_fn = pub(in test::path) custom_fn},
custom_fn_name,
syn::parse2(quote!(pub(in test::path))).unwrap(),
),
];
for (args_tokens, fn_name, vis) in cases {
let args = parse_args(args_tokens);
assert_eq!(args.merge.clone().unwrap().name, fn_name);
assert_eq!(args.merge.unwrap().visibility, vis);
}
}
#[test]
fn parse_rewrap() {
let args = parse_args(quote! {
Opt,
rewrap
});
assert!(args.rewrap);
}
#[test]
fn parse_doc() {
let cases = vec![
(quote! {Opt, doc}, Doc::Same),
(
quote! {Opt, doc = "custom docs"},
Doc::Custom("custom docs".to_string()),
),
];
for (args_tokens, doc) in cases {
let args: Args = syn::parse2(args_tokens).unwrap();
assert_eq!(args.doc, Some(doc));
}
}
#[test]
fn parse_attr_list() {
let meta_tokens = quote! {
(
derive(Debug, Clone),
serde(rename_all = "camelCase", default)
)
};
let meta = Attrs::parse_attr_list.parse2(meta_tokens).unwrap();
let meta_attrs = parse_attrs(quote! {
#(#[#meta])*
});
let attrs = parse_attrs(quote! {
#[derive(Debug, Clone)]
#[serde(rename_all = "camelCase", default)]
});
assert_eq!(meta_attrs, attrs);
}
#[test]
fn parse_attrs_test() {
    let parser = Attrs::parse_attr_list;
    let cases = vec![
        (quote! {Opt, attrs}, Attrs::Keep),
        (
            quote! {Opt, attrs = (derive(Debug), serde(rename_all = "camelCase"))},
            Attrs::Replace(
                parser
                    .parse2(quote! {(derive(Debug), serde(rename_all = "camelCase"))})
                    .unwrap(),
            ),
        ),
        (
            quote! {Opt, attrs = add(derive(Clone), serde(default))},
            Attrs::Add(
                parser
                    .parse2(quote! {(derive(Clone), serde(default))})
                    .unwrap(),
            ),
        ),
    ];
    for (args_tokens, attrs) in cases {
        let args = parse_args(args_tokens);
        assert_eq!(args.attrs, Some(attrs));
    }
}
#[test]
fn parse_field_doc() {
let args = parse_args(quote! {
Opt,
field_doc
});
assert!(args.field_doc);
}
#[test]
fn parse_field_attrs() {
let parser = Attrs::parse_attr_list;
let cases = vec![
(quote! {Opt, field_attrs}, Attrs::Keep),
(
quote! {Opt, field_attrs = (derive(Debug), serde(transparent))},
Attrs::Replace(
parser
.parse2(quote! {(derive(Debug), serde(transparent))})
.unwrap(),
),
),
(
quote! {Opt, field_attrs = add(derive(Clone), serde(deny_unknown_fields))},
Attrs::Add(
parser
.parse2(quote! {(derive(Clone), serde(deny_unknown_fields))})
.unwrap(),
),
),
];
for (args_tokens, attrs) in cases {
let args = parse_args(args_tokens);
assert_eq!(args.field_attrs, Some(attrs));
}
}
#[test]
fn parse_from() {
let args = parse_args(quote! {
Opt,
from
});
assert!(args.from);
}
}
|
shell-script-action.ts
|
import * as codebuild from '@aws-cdk/aws-codebuild';
import * as codepipeline from '@aws-cdk/aws-codepipeline';
import * as codepipeline_actions from '@aws-cdk/aws-codepipeline-actions';
import * as ec2 from '@aws-cdk/aws-ec2';
import * as events from '@aws-cdk/aws-events';
import * as iam from '@aws-cdk/aws-iam';
import { Construct } from '@aws-cdk/core';
import { StackOutput } from '../stage';
/**
* Properties for ShellScriptAction
*/
export interface ShellScriptActionProps {
/**
* Name of the validation action in the pipeline
*/
readonly actionName: string;
/**
* Stack outputs to make available as environment variables
*
* @default - No outputs used
*/
readonly useOutputs?: Record<string, StackOutput>;
/**
* Commands to run
*/
readonly commands: string[];
/**
* Bash options to set at the start of the script
*
* @default '-eu' (errexit and nounset)
*/
readonly bashOptions?: string;
/**
* Additional artifacts to use as input for the CodeBuild project
*
* You can use these files to load more complex test sets into the
* shellscript build environment.
*
* The files artifact given here will be unpacked into the current
* working directory, the other ones will be unpacked into directories
* which are available through the environment variables
* $CODEBUILD_SRC_DIR_<artifactName>.
*
* The CodeBuild job must have at least one input artifact, so you
* must provide either at least one additional artifact here or one
* stack output using `useOutputs`.
*
* @default - No additional artifacts
*/
readonly additionalArtifacts?: codepipeline.Artifact[];
/**
* The CodeBuild environment where scripts are executed.
*
* @default LinuxBuildImage.STANDARD_4_0
*/
readonly environment?: codebuild.BuildEnvironment;
/**
* Environment variables to send into build
*
* @default - No additional environment variables
*/
readonly environmentVariables?: Record<string, codebuild.BuildEnvironmentVariable>;
/**
* RunOrder for this action
*
* Use this to sequence the shell script after the deployments.
*
* The default value is 100 so you don't have to supply the value if you just
* want to run this after the application stacks have been deployed, and you
* don't have more than 100 stacks.
*
* @default 100
*/
readonly runOrder?: number;
/**
* Additional policy statements to add to the execution role
*
* @default - No policy statements
*/
readonly rolePolicyStatements?: iam.PolicyStatement[];
/**
* The VPC where to execute the specified script.
*
* @default - No VPC
*/
readonly vpc?: ec2.IVpc;
/**
* Which subnets to use.
*
* Only used if 'vpc' is supplied.
*
* @default - All private subnets.
*/
readonly subnetSelection?: ec2.SubnetSelection;
/**
* Which security group to associate with the script's project network interfaces.
* If no security group is identified, one will be created automatically.
*
* Only used if 'vpc' is supplied.
*
* @default - Security group will be automatically created.
*
*/
readonly securityGroups?: ec2.ISecurityGroup[];
}
/**
* Validate a revision using shell commands
*/
export class ShellScriptAction implements codepipeline.IAction, iam.IGrantable {
private _project?: codebuild.IProject;
private _action?: codepipeline_actions.CodeBuildAction;
private _actionProperties: codepipeline.ActionProperties;
constructor(private readonly props: ShellScriptActionProps) {
// A number of actionProperties get read before bind() is even called (so before we
// have made the Project and can construct the actual CodeBuildAction)
//
// - actionName
// - resource
// - region
// - category
// - role
// - owner
this._actionProperties = {
actionName: props.actionName,
category: codepipeline.ActionCategory.BUILD,
provider: 'CodeBuild',
artifactBounds: { minInputs: 0, maxInputs: 5, minOutputs: 0, maxOutputs: 5 },
inputs: [],
outputs: [],
};
if (Object.keys(props.useOutputs ?? {}).length + (props.additionalArtifacts ?? []).length === 0) {
throw new Error('You must supply either \'useOutputs\' or \'additionalArtifacts\', since a CodeBuild Action must always have at least one input artifact.');
}
}
/**
* The CodeBuild Project's principal
*/
public get grantPrincipal(): iam.IPrincipal {
return this.project.grantPrincipal;
}
/**
* Exists to implement IAction
*/
public get actionProperties(): codepipeline.ActionProperties {
return this._actionProperties;
}
/**
* Exists to implement IAction
*/
public bind(scope: Construct, stage: codepipeline.IStage, options: codepipeline.ActionBindOptions): codepipeline.ActionConfig {
const inputs = new Array<codepipeline.Artifact>();
inputs.push(...this.props.additionalArtifacts ?? []);
const envVarCommands = new Array<string>();
const bashOptions = this.props.bashOptions ?? '-eu';
if (bashOptions) {
envVarCommands.push(`set ${bashOptions}`);
}
for (const [varName, output] of Object.entries(this.props.useOutputs ?? {})) {
const outputArtifact = output.artifactFile;
// Add the artifact to the list of inputs, if it's not in there already. Determine
// the location where CodeBuild is going to stick it based on whether it's the first (primary)
// input or an 'extra input', then parse.
let artifactIndex = inputs.findIndex(a => a.artifactName === outputArtifact.artifact.artifactName);
if (artifactIndex === -1) {
artifactIndex = inputs.push(outputArtifact.artifact) - 1;
}
const dirEnv = artifactIndex === 0 ? 'CODEBUILD_SRC_DIR' : `CODEBUILD_SRC_DIR_${outputArtifact.artifact.artifactName}`;
envVarCommands.push(`export ${varName}="$(node -pe 'require(process.env.${dirEnv} + "/${outputArtifact.fileName}")["${output.outputName}"]')"`);
}
this._project = new codebuild.PipelineProject(scope, 'Project', {
environment: this.props.environment || { buildImage: codebuild.LinuxBuildImage.STANDARD_4_0 },
vpc: this.props.vpc,
securityGroups: this.props.securityGroups,
subnetSelection: this.props.subnetSelection,
buildSpec: codebuild.BuildSpec.fromObject({
version: '0.2',
phases: {
build: {
commands: [
...envVarCommands,
...this.props.commands,
],
},
},
}),
});
for (const statement of this.props.rolePolicyStatements ?? []) {
this._project.addToRolePolicy(statement);
}
this._action = new codepipeline_actions.CodeBuildAction({
actionName: this.props.actionName,
input: inputs[0],
extraInputs: inputs.slice(1),
runOrder: this.props.runOrder ?? 100,
project: this._project,
environmentVariables: this.props.environmentVariables,
});
// Replace the placeholder actionProperties at the last minute
this._actionProperties = this._action.actionProperties;
return this._action.bind(scope, stage, options);
}
/**
* Project generated to run the shell script in
*/
public get project(): codebuild.IProject {
if (!this._project) {
throw new Error('Project becomes available after ShellScriptAction has been bound to a stage');
}
return this._project;
}
/**
* Exists to implement IAction
*/
public onStateChange(name: string, target?: events.IRuleTarget, options?: events.RuleProps): events.Rule {
if (!this._action) {
throw new Error('Need bind() first');
}
return this._action.onStateChange(name, target, options);
}
}
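// Hedged usage sketch (`integTestArtifact` is a hypothetical artifact produced
// earlier in the pipeline):
//
//   const validate = new ShellScriptAction({
//     actionName: 'IntegTest',
//     additionalArtifacts: [integTestArtifact],
//     commands: ['npm ci', 'npm test'],
//     runOrder: 110,
//   });
//
// Note that `project` and `onStateChange()` only become usable after the
// action has been bound to a pipeline stage via bind().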
|
pulumiTypes.go
|
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package sourcerepo
import (
"context"
"reflect"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
type RepositoryIamBindingCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
// RepositoryIamBindingConditionInput is an input type that accepts RepositoryIamBindingConditionArgs and RepositoryIamBindingConditionOutput values.
// You can construct a concrete instance of `RepositoryIamBindingConditionInput` via:
//
// RepositoryIamBindingConditionArgs{...}
type RepositoryIamBindingConditionInput interface {
pulumi.Input
ToRepositoryIamBindingConditionOutput() RepositoryIamBindingConditionOutput
ToRepositoryIamBindingConditionOutputWithContext(context.Context) RepositoryIamBindingConditionOutput
}
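// Hedged construction sketch (field values are illustrative only):
//
//	cond := RepositoryIamBindingConditionArgs{
//		Title:      pulumi.String("expires-2030"),
//		Expression: pulumi.String(`request.time < timestamp("2030-01-01T00:00:00Z")`),
//	}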
type RepositoryIamBindingConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (RepositoryIamBindingConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*RepositoryIamBindingCondition)(nil)).Elem()
}
func (i RepositoryIamBindingConditionArgs) ToRepositoryIamBindingConditionOutput() RepositoryIamBindingConditionOutput {
return i.ToRepositoryIamBindingConditionOutputWithContext(context.Background())
}
func (i RepositoryIamBindingConditionArgs) ToRepositoryIamBindingConditionOutputWithContext(ctx context.Context) RepositoryIamBindingConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(RepositoryIamBindingConditionOutput)
}
func (i RepositoryIamBindingConditionArgs) ToRepositoryIamBindingConditionPtrOutput() RepositoryIamBindingConditionPtrOutput {
return i.ToRepositoryIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i RepositoryIamBindingConditionArgs) ToRepositoryIamBindingConditionPtrOutputWithContext(ctx context.Context) RepositoryIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(RepositoryIamBindingConditionOutput).ToRepositoryIamBindingConditionPtrOutputWithContext(ctx)
}
// RepositoryIamBindingConditionPtrInput is an input type that accepts RepositoryIamBindingConditionArgs, RepositoryIamBindingConditionPtr and RepositoryIamBindingConditionPtrOutput values.
// You can construct a concrete instance of `RepositoryIamBindingConditionPtrInput` via:
//
// RepositoryIamBindingConditionArgs{...}
//
// or:
//
// nil
type RepositoryIamBindingConditionPtrInput interface {
pulumi.Input
ToRepositoryIamBindingConditionPtrOutput() RepositoryIamBindingConditionPtrOutput
ToRepositoryIamBindingConditionPtrOutputWithContext(context.Context) RepositoryIamBindingConditionPtrOutput
}
type repositoryIamBindingConditionPtrType RepositoryIamBindingConditionArgs
func RepositoryIamBindingConditionPtr(v *RepositoryIamBindingConditionArgs) RepositoryIamBindingConditionPtrInput {
return (*repositoryIamBindingConditionPtrType)(v)
}
func (*repositoryIamBindingConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**RepositoryIamBindingCondition)(nil)).Elem()
}
func (i *repositoryIamBindingConditionPtrType) ToRepositoryIamBindingConditionPtrOutput() RepositoryIamBindingConditionPtrOutput {
return i.ToRepositoryIamBindingConditionPtrOutputWithContext(context.Background())
}
func (i *repositoryIamBindingConditionPtrType) ToRepositoryIamBindingConditionPtrOutputWithContext(ctx context.Context) RepositoryIamBindingConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(RepositoryIamBindingConditionPtrOutput)
}
type RepositoryIamBindingConditionOutput struct{ *pulumi.OutputState }
func (RepositoryIamBindingConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*RepositoryIamBindingCondition)(nil)).Elem()
}
func (o RepositoryIamBindingConditionOutput) ToRepositoryIamBindingConditionOutput() RepositoryIamBindingConditionOutput {
return o
}
func (o RepositoryIamBindingConditionOutput) ToRepositoryIamBindingConditionOutputWithContext(ctx context.Context) RepositoryIamBindingConditionOutput {
return o
}
func (o RepositoryIamBindingConditionOutput) ToRepositoryIamBindingConditionPtrOutput() RepositoryIamBindingConditionPtrOutput {
return o.ToRepositoryIamBindingConditionPtrOutputWithContext(context.Background())
}
func (o RepositoryIamBindingConditionOutput) ToRepositoryIamBindingConditionPtrOutputWithContext(ctx context.Context) RepositoryIamBindingConditionPtrOutput {
return o.ApplyTWithContext(ctx, func(_ context.Context, v RepositoryIamBindingCondition) *RepositoryIamBindingCondition {
return &v
}).(RepositoryIamBindingConditionPtrOutput)
}
func (o RepositoryIamBindingConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func(v RepositoryIamBindingCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o RepositoryIamBindingConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func(v RepositoryIamBindingCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o RepositoryIamBindingConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func(v RepositoryIamBindingCondition) string { return v.Title }).(pulumi.StringOutput)
}
type RepositoryIamBindingConditionPtrOutput struct{ *pulumi.OutputState }
func (RepositoryIamBindingConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**RepositoryIamBindingCondition)(nil)).Elem()
}
func (o RepositoryIamBindingConditionPtrOutput) ToRepositoryIamBindingConditionPtrOutput() RepositoryIamBindingConditionPtrOutput {
return o
}
func (o RepositoryIamBindingConditionPtrOutput) ToRepositoryIamBindingConditionPtrOutputWithContext(ctx context.Context) RepositoryIamBindingConditionPtrOutput {
return o
}
func (o RepositoryIamBindingConditionPtrOutput) Elem() RepositoryIamBindingConditionOutput {
return o.ApplyT(func(v *RepositoryIamBindingCondition) RepositoryIamBindingCondition {
if v != nil {
return *v
}
var ret RepositoryIamBindingCondition
return ret
}).(RepositoryIamBindingConditionOutput)
}
func (o RepositoryIamBindingConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func(v *RepositoryIamBindingCondition) *string {
if v == nil {
return nil
}
return v.Description
}).(pulumi.StringPtrOutput)
}
func (o RepositoryIamBindingConditionPtrOutput) Expression() pulumi.StringPtrOutput {
return o.ApplyT(func(v *RepositoryIamBindingCondition) *string {
if v == nil {
return nil
}
return &v.Expression
}).(pulumi.StringPtrOutput)
}
func (o RepositoryIamBindingConditionPtrOutput) Title() pulumi.StringPtrOutput {
return o.ApplyT(func(v *RepositoryIamBindingCondition) *string {
if v == nil {
return nil
}
return &v.Title
}).(pulumi.StringPtrOutput)
}
type RepositoryIamMemberCondition struct {
Description *string `pulumi:"description"`
Expression string `pulumi:"expression"`
Title string `pulumi:"title"`
}
// RepositoryIamMemberConditionInput is an input type that accepts RepositoryIamMemberConditionArgs and RepositoryIamMemberConditionOutput values.
// You can construct a concrete instance of `RepositoryIamMemberConditionInput` via:
//
// RepositoryIamMemberConditionArgs{...}
type RepositoryIamMemberConditionInput interface {
pulumi.Input
ToRepositoryIamMemberConditionOutput() RepositoryIamMemberConditionOutput
ToRepositoryIamMemberConditionOutputWithContext(context.Context) RepositoryIamMemberConditionOutput
}
type RepositoryIamMemberConditionArgs struct {
Description pulumi.StringPtrInput `pulumi:"description"`
Expression pulumi.StringInput `pulumi:"expression"`
Title pulumi.StringInput `pulumi:"title"`
}
func (RepositoryIamMemberConditionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*RepositoryIamMemberCondition)(nil)).Elem()
}
func (i RepositoryIamMemberConditionArgs) ToRepositoryIamMemberConditionOutput() RepositoryIamMemberConditionOutput {
return i.ToRepositoryIamMemberConditionOutputWithContext(context.Background())
}
func (i RepositoryIamMemberConditionArgs) ToRepositoryIamMemberConditionOutputWithContext(ctx context.Context) RepositoryIamMemberConditionOutput {
return pulumi.ToOutputWithContext(ctx, i).(RepositoryIamMemberConditionOutput)
}
func (i RepositoryIamMemberConditionArgs) ToRepositoryIamMemberConditionPtrOutput() RepositoryIamMemberConditionPtrOutput {
return i.ToRepositoryIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i RepositoryIamMemberConditionArgs) ToRepositoryIamMemberConditionPtrOutputWithContext(ctx context.Context) RepositoryIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(RepositoryIamMemberConditionOutput).ToRepositoryIamMemberConditionPtrOutputWithContext(ctx)
}
// RepositoryIamMemberConditionPtrInput is an input type that accepts RepositoryIamMemberConditionArgs, RepositoryIamMemberConditionPtr and RepositoryIamMemberConditionPtrOutput values.
// You can construct a concrete instance of `RepositoryIamMemberConditionPtrInput` via:
//
// RepositoryIamMemberConditionArgs{...}
//
// or:
//
// nil
type RepositoryIamMemberConditionPtrInput interface {
pulumi.Input
ToRepositoryIamMemberConditionPtrOutput() RepositoryIamMemberConditionPtrOutput
ToRepositoryIamMemberConditionPtrOutputWithContext(context.Context) RepositoryIamMemberConditionPtrOutput
}
type repositoryIamMemberConditionPtrType RepositoryIamMemberConditionArgs
func RepositoryIamMemberConditionPtr(v *RepositoryIamMemberConditionArgs) RepositoryIamMemberConditionPtrInput {
	return (*repositoryIamMemberConditionPtrType)(v)
}
func (*repositoryIamMemberConditionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**RepositoryIamMemberCondition)(nil)).Elem()
}
func (i *repositoryIamMemberConditionPtrType) ToRepositoryIamMemberConditionPtrOutput() RepositoryIamMemberConditionPtrOutput {
return i.ToRepositoryIamMemberConditionPtrOutputWithContext(context.Background())
}
func (i *repositoryIamMemberConditionPtrType) ToRepositoryIamMemberConditionPtrOutputWithContext(ctx context.Context) RepositoryIamMemberConditionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(RepositoryIamMemberConditionPtrOutput)
}
type RepositoryIamMemberConditionOutput struct{ *pulumi.OutputState }
func (RepositoryIamMemberConditionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*RepositoryIamMemberCondition)(nil)).Elem()
}
func (o RepositoryIamMemberConditionOutput) ToRepositoryIamMemberConditionOutput() RepositoryIamMemberConditionOutput {
return o
}
func (o RepositoryIamMemberConditionOutput) ToRepositoryIamMemberConditionOutputWithContext(ctx context.Context) RepositoryIamMemberConditionOutput {
return o
}
func (o RepositoryIamMemberConditionOutput) ToRepositoryIamMemberConditionPtrOutput() RepositoryIamMemberConditionPtrOutput {
return o.ToRepositoryIamMemberConditionPtrOutputWithContext(context.Background())
}
func (o RepositoryIamMemberConditionOutput) ToRepositoryIamMemberConditionPtrOutputWithContext(ctx context.Context) RepositoryIamMemberConditionPtrOutput {
return o.ApplyTWithContext(ctx, func(_ context.Context, v RepositoryIamMemberCondition) *RepositoryIamMemberCondition {
return &v
}).(RepositoryIamMemberConditionPtrOutput)
}
func (o RepositoryIamMemberConditionOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func(v RepositoryIamMemberCondition) *string { return v.Description }).(pulumi.StringPtrOutput)
}
func (o RepositoryIamMemberConditionOutput) Expression() pulumi.StringOutput {
return o.ApplyT(func(v RepositoryIamMemberCondition) string { return v.Expression }).(pulumi.StringOutput)
}
func (o RepositoryIamMemberConditionOutput) Title() pulumi.StringOutput {
return o.ApplyT(func(v RepositoryIamMemberCondition) string { return v.Title }).(pulumi.StringOutput)
}
type RepositoryIamMemberConditionPtrOutput struct{ *pulumi.OutputState }
func (RepositoryIamMemberConditionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**RepositoryIamMemberCondition)(nil)).Elem()
}
func (o RepositoryIamMemberConditionPtrOutput) ToRepositoryIamMemberConditionPtrOutput() RepositoryIamMemberConditionPtrOutput {
return o
}
func (o RepositoryIamMemberConditionPtrOutput) ToRepositoryIamMemberConditionPtrOutputWithContext(ctx context.Context) RepositoryIamMemberConditionPtrOutput {
return o
}
func (o RepositoryIamMemberConditionPtrOutput) Elem() RepositoryIamMemberConditionOutput {
return o.ApplyT(func(v *RepositoryIamMemberCondition) RepositoryIamMemberCondition {
if v != nil {
return *v
}
var ret RepositoryIamMemberCondition
return ret
}).(RepositoryIamMemberConditionOutput)
}
func (o RepositoryIamMemberConditionPtrOutput) Description() pulumi.StringPtrOutput {
return o.ApplyT(func(v *RepositoryIamMemberCondition) *string {
if v == nil {
return nil
}
return v.Description
}).(pulumi.StringPtrOutput)
}
func (o RepositoryIamMemberConditionPtrOutput) Expression() pulumi.StringPtrOutput {
return o.ApplyT(func(v *RepositoryIamMemberCondition) *string {
if v == nil {
return nil
}
return &v.Expression
}).(pulumi.StringPtrOutput)
}
func (o RepositoryIamMemberConditionPtrOutput) Title() pulumi.StringPtrOutput {
return o.ApplyT(func(v *RepositoryIamMemberCondition) *string {
if v == nil {
return nil
}
return &v.Title
}).(pulumi.StringPtrOutput)
}
type RepositoryPubsubConfig struct {
// The format of the Cloud Pub/Sub messages.
// - PROTOBUF: The message payload is a serialized protocol buffer of SourceRepoEvent.
// - JSON: The message payload is a JSON string of SourceRepoEvent.
// Possible values are `PROTOBUF` and `JSON`.
MessageFormat string `pulumi:"messageFormat"`
// Email address of the service account used for publishing Cloud Pub/Sub messages.
// This service account needs to be in the same project as the PubsubConfig. When added,
// the caller needs to have iam.serviceAccounts.actAs permission on this service account.
// If unspecified, it defaults to the compute engine default service account.
ServiceAccountEmail *string `pulumi:"serviceAccountEmail"`
// The identifier for this object. Format specified above.
Topic string `pulumi:"topic"`
}
// RepositoryPubsubConfigInput is an input type that accepts RepositoryPubsubConfigArgs and RepositoryPubsubConfigOutput values.
// You can construct a concrete instance of `RepositoryPubsubConfigInput` via:
//
// RepositoryPubsubConfigArgs{...}
type RepositoryPubsubConfigInput interface {
pulumi.Input
ToRepositoryPubsubConfigOutput() RepositoryPubsubConfigOutput
ToRepositoryPubsubConfigOutputWithContext(context.Context) RepositoryPubsubConfigOutput
}
type RepositoryPubsubConfigArgs struct {
// The format of the Cloud Pub/Sub messages.
// - PROTOBUF: The message payload is a serialized protocol buffer of SourceRepoEvent.
// - JSON: The message payload is a JSON string of SourceRepoEvent.
// Possible values are `PROTOBUF` and `JSON`.
MessageFormat pulumi.StringInput `pulumi:"messageFormat"`
// Email address of the service account used for publishing Cloud Pub/Sub messages.
// This service account needs to be in the same project as the PubsubConfig. When added,
// the caller needs to have iam.serviceAccounts.actAs permission on this service account.
// If unspecified, it defaults to the compute engine default service account.
ServiceAccountEmail pulumi.StringPtrInput `pulumi:"serviceAccountEmail"`
// The identifier for this object. Format specified above.
Topic pulumi.StringInput `pulumi:"topic"`
}
func (RepositoryPubsubConfigArgs) ElementType() reflect.Type {
return reflect.TypeOf((*RepositoryPubsubConfig)(nil)).Elem()
}
func (i RepositoryPubsubConfigArgs) ToRepositoryPubsubConfigOutput() RepositoryPubsubConfigOutput {
return i.ToRepositoryPubsubConfigOutputWithContext(context.Background())
}
func (i RepositoryPubsubConfigArgs) ToRepositoryPubsubConfigOutputWithContext(ctx context.Context) RepositoryPubsubConfigOutput {
return pulumi.ToOutputWithContext(ctx, i).(RepositoryPubsubConfigOutput)
}
// RepositoryPubsubConfigArrayInput is an input type that accepts RepositoryPubsubConfigArray and RepositoryPubsubConfigArrayOutput values.
// You can construct a concrete instance of `RepositoryPubsubConfigArrayInput` via:
//
// RepositoryPubsubConfigArray{ RepositoryPubsubConfigArgs{...} }
type RepositoryPubsubConfigArrayInput interface {
pulumi.Input
ToRepositoryPubsubConfigArrayOutput() RepositoryPubsubConfigArrayOutput
ToRepositoryPubsubConfigArrayOutputWithContext(context.Context) RepositoryPubsubConfigArrayOutput
}
type RepositoryPubsubConfigArray []RepositoryPubsubConfigInput
func (RepositoryPubsubConfigArray) ElementType() reflect.Type {
return reflect.TypeOf((*[]RepositoryPubsubConfig)(nil)).Elem()
}
func (i RepositoryPubsubConfigArray) ToRepositoryPubsubConfigArrayOutput() RepositoryPubsubConfigArrayOutput {
return i.ToRepositoryPubsubConfigArrayOutputWithContext(context.Background())
}
func (i RepositoryPubsubConfigArray) ToRepositoryPubsubConfigArrayOutputWithContext(ctx context.Context) RepositoryPubsubConfigArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(RepositoryPubsubConfigArrayOutput)
}
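// Example (illustrative sketch only; the project and topic names below are
// hypothetical): constructing the array input from args, as described in the
// comments above.
//
//	cfgs := RepositoryPubsubConfigArray{
//		RepositoryPubsubConfigArgs{
//			Topic:         pulumi.String("projects/my-project/topics/my-topic"),
//			MessageFormat: pulumi.String("JSON"),
//		},
//	}
//	_ = cfgs.ToRepositoryPubsubConfigArrayOutput()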
type RepositoryPubsubConfigOutput struct{ *pulumi.OutputState }
func (RepositoryPubsubConfigOutput) ElementType() reflect.Type {
return reflect.TypeOf((*RepositoryPubsubConfig)(nil)).Elem()
}
func (o RepositoryPubsubConfigOutput) ToRepositoryPubsubConfigOutput() RepositoryPubsubConfigOutput {
return o
}
func (o RepositoryPubsubConfigOutput) ToRepositoryPubsubConfigOutputWithContext(ctx context.Context) RepositoryPubsubConfigOutput {
return o
}
// The format of the Cloud Pub/Sub messages.
// - PROTOBUF: The message payload is a serialized protocol buffer of SourceRepoEvent.
// - JSON: The message payload is a JSON string of SourceRepoEvent.
// Possible values are `PROTOBUF` and `JSON`.
func (o RepositoryPubsubConfigOutput) MessageFormat() pulumi.StringOutput {
return o.ApplyT(func(v RepositoryPubsubConfig) string { return v.MessageFormat }).(pulumi.StringOutput)
}
// Email address of the service account used for publishing Cloud Pub/Sub messages.
// This service account needs to be in the same project as the PubsubConfig. When added,
// the caller needs to have iam.serviceAccounts.actAs permission on this service account.
// If unspecified, it defaults to the compute engine default service account.
func (o RepositoryPubsubConfigOutput) ServiceAccountEmail() pulumi.StringPtrOutput {
return o.ApplyT(func(v RepositoryPubsubConfig) *string { return v.ServiceAccountEmail }).(pulumi.StringPtrOutput)
}
// The identifier for this object. Format specified above.
func (o RepositoryPubsubConfigOutput) Topic() pulumi.StringOutput {
return o.ApplyT(func(v RepositoryPubsubConfig) string { return v.Topic }).(pulumi.StringOutput)
}
type RepositoryPubsubConfigArrayOutput struct{ *pulumi.OutputState }
func (RepositoryPubsubConfigArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]RepositoryPubsubConfig)(nil)).Elem()
}
func (o RepositoryPubsubConfigArrayOutput) ToRepositoryPubsubConfigArrayOutput() RepositoryPubsubConfigArrayOutput {
return o
}
func (o RepositoryPubsubConfigArrayOutput) ToRepositoryPubsubConfigArrayOutputWithContext(ctx context.Context) RepositoryPubsubConfigArrayOutput {
return o
}
func (o RepositoryPubsubConfigArrayOutput) Index(i pulumi.IntInput) RepositoryPubsubConfigOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) RepositoryPubsubConfig {
return vs[0].([]RepositoryPubsubConfig)[vs[1].(int)]
}).(RepositoryPubsubConfigOutput)
}
type GetRepositoryPubsubConfig struct {
MessageFormat string `pulumi:"messageFormat"`
ServiceAccountEmail string `pulumi:"serviceAccountEmail"`
Topic string `pulumi:"topic"`
}
// GetRepositoryPubsubConfigInput is an input type that accepts GetRepositoryPubsubConfigArgs and GetRepositoryPubsubConfigOutput values.
// You can construct a concrete instance of `GetRepositoryPubsubConfigInput` via:
//
// GetRepositoryPubsubConfigArgs{...}
type GetRepositoryPubsubConfigInput interface {
pulumi.Input
ToGetRepositoryPubsubConfigOutput() GetRepositoryPubsubConfigOutput
ToGetRepositoryPubsubConfigOutputWithContext(context.Context) GetRepositoryPubsubConfigOutput
}
type GetRepositoryPubsubConfigArgs struct {
MessageFormat pulumi.StringInput `pulumi:"messageFormat"`
ServiceAccountEmail pulumi.StringInput `pulumi:"serviceAccountEmail"`
Topic pulumi.StringInput `pulumi:"topic"`
}
func (GetRepositoryPubsubConfigArgs) ElementType() reflect.Type {
return reflect.TypeOf((*GetRepositoryPubsubConfig)(nil)).Elem()
}
func (i GetRepositoryPubsubConfigArgs) ToGetRepositoryPubsubConfigOutput() GetRepositoryPubsubConfigOutput {
return i.ToGetRepositoryPubsubConfigOutputWithContext(context.Background())
}
func (i GetRepositoryPubsubConfigArgs) ToGetRepositoryPubsubConfigOutputWithContext(ctx context.Context) GetRepositoryPubsubConfigOutput {
return pulumi.ToOutputWithContext(ctx, i).(GetRepositoryPubsubConfigOutput)
}
// GetRepositoryPubsubConfigArrayInput is an input type that accepts GetRepositoryPubsubConfigArray and GetRepositoryPubsubConfigArrayOutput values.
// You can construct a concrete instance of `GetRepositoryPubsubConfigArrayInput` via:
//
// GetRepositoryPubsubConfigArray{ GetRepositoryPubsubConfigArgs{...} }
type GetRepositoryPubsubConfigArrayInput interface {
pulumi.Input
ToGetRepositoryPubsubConfigArrayOutput() GetRepositoryPubsubConfigArrayOutput
ToGetRepositoryPubsubConfigArrayOutputWithContext(context.Context) GetRepositoryPubsubConfigArrayOutput
}
type GetRepositoryPubsubConfigArray []GetRepositoryPubsubConfigInput
func (GetRepositoryPubsubConfigArray) ElementType() reflect.Type {
return reflect.TypeOf((*[]GetRepositoryPubsubConfig)(nil)).Elem()
}
func (i GetRepositoryPubsubConfigArray) ToGetRepositoryPubsubConfigArrayOutput() GetRepositoryPubsubConfigArrayOutput {
return i.ToGetRepositoryPubsubConfigArrayOutputWithContext(context.Background())
}
func (i GetRepositoryPubsubConfigArray) ToGetRepositoryPubsubConfigArrayOutputWithContext(ctx context.Context) GetRepositoryPubsubConfigArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(GetRepositoryPubsubConfigArrayOutput)
}
type GetRepositoryPubsubConfigOutput struct{ *pulumi.OutputState }
func (GetRepositoryPubsubConfigOutput) ElementType() reflect.Type {
return reflect.TypeOf((*GetRepositoryPubsubConfig)(nil)).Elem()
}
func (o GetRepositoryPubsubConfigOutput) ToGetRepositoryPubsubConfigOutput() GetRepositoryPubsubConfigOutput {
return o
}
func (o GetRepositoryPubsubConfigOutput) ToGetRepositoryPubsubConfigOutputWithContext(ctx context.Context) GetRepositoryPubsubConfigOutput {
return o
}
func (o GetRepositoryPubsubConfigOutput) MessageFormat() pulumi.StringOutput {
return o.ApplyT(func(v GetRepositoryPubsubConfig) string { return v.MessageFormat }).(pulumi.StringOutput)
}
func (o GetRepositoryPubsubConfigOutput) ServiceAccountEmail() pulumi.StringOutput {
return o.ApplyT(func(v GetRepositoryPubsubConfig) string { return v.ServiceAccountEmail }).(pulumi.StringOutput)
}
func (o GetRepositoryPubsubConfigOutput) Topic() pulumi.StringOutput {
return o.ApplyT(func(v GetRepositoryPubsubConfig) string { return v.Topic }).(pulumi.StringOutput)
}
type GetRepositoryPubsubConfigArrayOutput struct{ *pulumi.OutputState }
func (GetRepositoryPubsubConfigArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]GetRepositoryPubsubConfig)(nil)).Elem()
}
func (o GetRepositoryPubsubConfigArrayOutput) ToGetRepositoryPubsubConfigArrayOutput() GetRepositoryPubsubConfigArrayOutput {
return o
}
func (o GetRepositoryPubsubConfigArrayOutput) ToGetRepositoryPubsubConfigArrayOutputWithContext(ctx context.Context) GetRepositoryPubsubConfigArrayOutput {
return o
}
func (o GetRepositoryPubsubConfigArrayOutput) Index(i pulumi.IntInput) GetRepositoryPubsubConfigOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetRepositoryPubsubConfig {
return vs[0].([]GetRepositoryPubsubConfig)[vs[1].(int)]
}).(GetRepositoryPubsubConfigOutput)
}
func init() {
pulumi.RegisterInputType(reflect.TypeOf((*RepositoryIamBindingConditionInput)(nil)).Elem(), RepositoryIamBindingConditionArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*RepositoryIamBindingConditionPtrInput)(nil)).Elem(), RepositoryIamBindingConditionArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*RepositoryIamMemberConditionInput)(nil)).Elem(), RepositoryIamMemberConditionArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*RepositoryIamMemberConditionPtrInput)(nil)).Elem(), RepositoryIamMemberConditionArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*RepositoryPubsubConfigInput)(nil)).Elem(), RepositoryPubsubConfigArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*RepositoryPubsubConfigArrayInput)(nil)).Elem(), RepositoryPubsubConfigArray{})
pulumi.RegisterInputType(reflect.TypeOf((*GetRepositoryPubsubConfigInput)(nil)).Elem(), GetRepositoryPubsubConfigArgs{})
pulumi.RegisterInputType(reflect.TypeOf((*GetRepositoryPubsubConfigArrayInput)(nil)).Elem(), GetRepositoryPubsubConfigArray{})
pulumi.RegisterOutputType(RepositoryIamBindingConditionOutput{})
pulumi.RegisterOutputType(RepositoryIamBindingConditionPtrOutput{})
pulumi.RegisterOutputType(RepositoryIamMemberConditionOutput{})
pulumi.RegisterOutputType(RepositoryIamMemberConditionPtrOutput{})
pulumi.RegisterOutputType(RepositoryPubsubConfigOutput{})
pulumi.RegisterOutputType(RepositoryPubsubConfigArrayOutput{})
pulumi.RegisterOutputType(GetRepositoryPubsubConfigOutput{})
pulumi.RegisterOutputType(GetRepositoryPubsubConfigArrayOutput{})
}
|
{
return (*repositoryIamMemberConditionPtrType)(v)
}
|
visitor_test.go
|
package client
import (
"errors"
"testing"
"github.com/flagship-io/flagship-go-sdk/v2/pkg/bucketing"
"github.com/flagship-io/flagship-go-sdk/v2/pkg/cache"
"github.com/flagship-io/flagship-go-sdk/v2/pkg/model"
"github.com/flagship-io/flagship-go-sdk/v2/pkg/decision"
"github.com/stretchr/testify/assert"
)
var caID = "cid"
var vgID = "vgid"
var testVID = "vid"
func createVisitor(vID string, context model.Context, options ...VisitorOptionBuilder) *Visitor {
client := createClient()
client.decisionClient = createMockClient()
visitor, _ := client.NewVisitor(vID, context, options...)
return visitor
}
func createMockClient() decision.ClientInterface {
modification := model.Modification{
Type: "FLAG",
Value: map[string]interface{}{
"test_string": "string",
"test_bool": true,
"test_number": 35.6,
"test_nil": nil,
"test_object": map[string]interface{}{
"test_key": true,
},
"test_array": []interface{}{true},
},
}
variation := model.ClientVariation{
ID: testVID,
Reference: true,
Modifications: modification,
}
return decision.NewAPIClientMock(testEnvID, &model.APIClientResponse{
VisitorID: "test_vid",
Campaigns: []model.Campaign{
{
ID: caID,
VariationGroupID: vgID,
Variation: variation,
},
},
}, 200)
}
func TestGenerateID(t *testing.T) {
visitor := createVisitor("", nil)
assert.NotEqual(t, "", visitor.ID)
}
func TestUpdateContext(t *testing.T) {
visitor := createVisitor("test", nil)
context := model.Context{}
context["test_string"] = "123"
context["test_number"] = 36.5
context["test_bool"] = true
context["test_wrong"] = errors.New("wrong type")
err := visitor.UpdateContext(context)
if err == nil {
t.Error("Visitor with wrong context variable should raise an error")
}
delete(context, "test_wrong")
err = visitor.UpdateContext(context)
if err != nil {
t.Errorf("Visitor update context raised an error : %v", err)
return
}
if visitor.Context["test_string"] != "123" {
t.Errorf("Visitor update context string failed. Expected %s, got %s", "123", visitor.Context["test_string"])
}
if visitor.Context["test_number"] != 36.5 {
t.Errorf("Visitor update context string failed. Expected %f, got %v", 36.5, visitor.Context["test_number"])
}
if visitor.Context["test_bool"] != true {
t.Errorf("Visitor update context string failed. Expected %v, got %v", true, visitor.Context["test_bool"])
}
}
func TestUpdateContextKey(t *testing.T) {
context := model.Context{}
context["test_string"] = "123"
context["test_number"] = 36.5
context["test_bool"] = true
visitor := createVisitor("test", context)
err := visitor.UpdateContextKey("test_error", errors.New("wrong type"))
if err == nil {
t.Error("Visitor with wrong context variable should raise an error")
}
delete(context, "test_wrong")
err = visitor.UpdateContextKey("test_ok", true)
if err != nil {
t.Errorf("Visitor update context raised an error : %v", err)
}
if visitor.Context["test_ok"] != true {
t.Errorf("Visitor update context string failed. Expected %v, got %v", true, visitor.Context["test_ok"])
}
}
func TestAuthenticate(t *testing.T) {
context := map[string]interface{}{}
visitor := createVisitor("firstID", context)
err := visitor.Authenticate("newID", nil, false)
assert.Nil(t, err)
assert.Equal(t, "newID", visitor.ID)
assert.Equal(t, "firstID", *visitor.AnonymousID)
newContext := model.Context{
"test": "string",
}
visitor.Authenticate("newerID", newContext, false)
assert.Equal(t, "newerID", visitor.ID)
assert.Equal(t, newContext, visitor.Context)
assert.Equal(t, "firstID", *visitor.AnonymousID)
visitor.decisionMode = API
newContext = model.Context{
"test2": "string",
}
err = visitor.Unauthenticate(newContext, false)
assert.Nil(t, err)
assert.Equal(t, "firstID", visitor.ID)
assert.Equal(t, newContext, visitor.Context)
assert.Nil(t, visitor.AnonymousID)
visitor = createVisitor("firstID", context, WithAuthenticated(false))
assert.Nil(t, visitor.AnonymousID)
visitor = createVisitor("firstID", context, WithAuthenticated(true))
assert.NotNil(t, visitor.AnonymousID)
}
func TestSynchronizeModifications(t *testing.T) {
visitor := &Visitor{}
err := visitor.SynchronizeModifications()
if err == nil {
t.Error("Flag synchronization without visitorID should raise an error")
}
visitor = createVisitor("test", nil)
errorMock := decision.NewAPIClientMock(testEnvID, nil, 400)
visitor.decisionClient = errorMock
err = visitor.SynchronizeModifications()
if err == nil {
t.Error("Flag synchronization should have raised the http error")
}
visitor = createVisitor("test", nil)
flag, ok := visitor.GetAllModifications()["test_string"]
if ok {
t.Errorf("Flag should be nil before synchronization. Got %v", flag)
}
err = visitor.SynchronizeModifications()
if err != nil {
t.Errorf("Flag synchronization should not raise error. Got %v", err)
}
_, ok = visitor.GetAllModifications()["test_string"]
if !ok {
t.Errorf("Flag should exist after synchronization")
}
}
func TestGetModification(t *testing.T) {
visitor := createVisitor("test", nil)
// Test before sync
_, err := visitor.getModification("not_exists", true)
assert.NotEqual(t, nil, err, "Should raise an error as modifications are not synced")
// Test infos before sync
_, err = visitor.GetModificationInfo("not_exists")
assert.NotEqual(t, nil, err, "Should raise an error as modifications are not synced")
visitor.SynchronizeModifications()
// Test default value
val, err := visitor.getModification("not_exists", true)
assert.NotEqual(t, nil, err, "Should have an error as key does not exist")
assert.Equal(t, nil, val, "Expected nil value")
// Test infos of missing key
_, err = visitor.GetModificationInfo("not_exists")
assert.NotEqual(t, nil, err, "Should raise an error as modification key does not exist")
// Test response value
val, err = visitor.getModification("test_string", true)
assert.Equal(t, nil, err, "Should not have an error as flag exists")
assert.Equal(t, "string", val, "Expected string value")
// Test modification info response value
infos, err := visitor.GetModificationInfo("test_string")
assert.Equal(t, nil, err, "Should not have an error as flag exists")
assert.Equal(t, caID, infos.CampaignID)
assert.Equal(t, vgID, infos.VariationGroupID)
assert.Equal(t, testVID, infos.VariationID)
assert.Equal(t, true, infos.IsReference)
assert.Equal(t, "string", infos.Value)
}
func TestGetModificationBool(t *testing.T) {
visitor := createVisitor("test", nil)
// Test before sync
_, err := visitor.GetModificationBool("not_exists", false, true)
assert.NotEqual(t, nil, err, "Should have an error as modifications are not synced")
visitor.SynchronizeModifications()
// Test default value
val, err := visitor.GetModificationBool("not_exists", false, true)
assert.NotEqual(t, nil, err, "Should have an error as flag does not exists")
assert.Equal(t, false, val, "Expected default value getting nil flag")
// Test wrong type value
val, err = visitor.GetModificationBool("test_string", false, true)
assert.NotEqual(t, nil, err, "Should have an error as flag test_string is not of type bool")
assert.Equal(t, false, val, "Expected default value getting nil flag")
// Test nil value
val, err = visitor.GetModificationBool("test_nil", false, true)
assert.Equal(t, nil, err, "Did not expect error when getting nil flag")
assert.Equal(t, false, val, "Expected default value getting nil flag")
// Test response value
val, err = visitor.GetModificationBool("test_bool", false, true)
assert.Equal(t, nil, err, "Should not have an error as flag does exists")
assert.Equal(t, true, val, "Expected value true")
}
func TestGetModificationNumber(t *testing.T) {
visitor := createVisitor("test", nil)
// Test before sync
_, err := visitor.GetModificationNumber("not_exists", 10, true)
assert.NotEqual(t, nil, err, "Should have an error as modifications are not synced")
visitor.SynchronizeModifications()
// Test default value
val, err := visitor.GetModificationNumber("not_exists", 10, true)
assert.NotEqual(t, nil, err, "Should have an error as flag does not exists")
assert.Equal(t, 10., val, "Expected default value getting nil flag")
// Test wrong type value
val, err = visitor.GetModificationNumber("test_string", 10, true)
assert.NotEqual(t, nil, err, "Should have an error as flag test_string is not of type float")
assert.Equal(t, 10., val, "Expected default value getting nil flag")
// Test nil value
val, err = visitor.GetModificationNumber("test_nil", 10, true)
assert.Equal(t, nil, err, "Did not expect error when getting nil flag")
assert.Equal(t, 10., val, "Expected default value getting nil flag")
// Test response value
val, err = visitor.GetModificationNumber("test_number", 10, true)
assert.Equal(t, nil, err, "Should not have an error as flag does exists")
	assert.Equal(t, 35.6, val, "Expected value 35.6")
}
func TestGetModificationString(t *testing.T)
|
func TestGetModificationObject(t *testing.T) {
visitor := createVisitor("test", nil)
// Test before sync
_, err := visitor.GetModificationObject("not_exists", nil, true)
assert.NotEqual(t, nil, err, "Should have an error as modifications are not synced")
visitor.SynchronizeModifications()
defaultValue := map[string]interface{}{
"default_key": false,
}
// Test default value
val, err := visitor.GetModificationObject("not_exists", defaultValue, true)
assert.NotEqual(t, nil, err, "Should have an error as flag does not exists")
assert.Equal(t, defaultValue["default_key"], val["default_key"])
// Test wrong type value
val, err = visitor.GetModificationObject("test_bool", defaultValue, true)
assert.NotEqual(t, nil, err, "Should have an error as flag does not exists")
assert.Equal(t, defaultValue["default_key"], val["default_key"])
// Test nil value
val, err = visitor.GetModificationObject("test_nil", defaultValue, true)
assert.Equal(t, nil, err, "Did not expect error when getting nil flag")
assert.Equal(t, defaultValue["default_key"], val["default_key"])
// Test response value
val, err = visitor.GetModificationObject("test_object", defaultValue, true)
assert.Equal(t, nil, err, "Should not have an error as flag exists")
assert.Equal(t, true, val["test_key"])
}
func TestGetModificationArray(t *testing.T) {
visitor := createVisitor("test", nil)
// Test before sync
_, err := visitor.GetModificationArray("not_exists", nil, true)
assert.NotEqual(t, nil, err, "Should have an error as modifications are not synced")
visitor.SynchronizeModifications()
defaultValue := []interface{}{true}
// Test default value
val, err := visitor.GetModificationArray("not_exists", defaultValue, true)
assert.NotEqual(t, nil, err, "Should have an error as flag does not exists")
assert.Equal(t, defaultValue[0], val[0])
// Test wrong type value
val, err = visitor.GetModificationArray("test_bool", defaultValue, true)
assert.NotEqual(t, nil, err, "Should have an error as flag does not exists")
assert.Equal(t, defaultValue[0], val[0])
// Test nil value
val, err = visitor.GetModificationArray("test_nil", defaultValue, true)
assert.Equal(t, nil, err, "Did not expect error when getting nil flag")
assert.Equal(t, defaultValue[0], val[0])
// Test response value
val, err = visitor.GetModificationArray("test_array", defaultValue, true)
assert.Equal(t, nil, err, "Should not have an error as flag exists")
assert.Equal(t, true, val[0])
}
func TestActivateModification(t *testing.T) {
visitor := createVisitor("test", nil)
// Test before sync
err := visitor.ActivateModification("not_exists")
assert.NotEqual(t, nil, err, "Should raise an error as modifications are not synced")
visitor.SynchronizeModifications()
// Test default value
err = visitor.ActivateModification("not_exists")
assert.NotEqual(t, nil, err, "Should have an error as flag does not exists")
// Test response value
err = visitor.ActivateModification("test_string")
assert.Equal(t, nil, err, "Should not have an error as flag exists")
}
func TestActivateModificationCache(t *testing.T) {
// Test engine with cache
cacheCampaignsVisitors := map[string]map[string]*cache.CampaignCache{}
get := func(visitorID string) (map[string]*cache.CampaignCache, error) {
cacheCampaigns := cacheCampaignsVisitors[visitorID]
return cacheCampaigns, nil
}
set := func(visitorID string, cache map[string]*cache.CampaignCache) error {
cacheCampaignsVisitors[visitorID] = cache
return nil
}
cache, _ := cache.InitManager(cache.WithCustomOptions(cache.CustomOptions{
Getter: get,
Setter: set,
}))
client, _ := Create(&Options{
EnvID: testEnvID,
APIKey: testAPIKey,
})
client.cacheManager = cache
engine := bucketing.GetBucketingEngineMock(testEnvID, cache)
client.decisionClient = engine
client.decisionMode = Bucketing
visitor, _ := client.NewVisitor("test", map[string]interface{}{
"test": true,
})
// Test before sync
err := visitor.ActivateCacheModification("not_exists")
if err == nil {
t.Errorf("Should have an error as modifications are not synced")
}
visitor.SynchronizeModifications()
// Test default value
err = visitor.ActivateCacheModification("not_exists")
if err == nil {
t.Errorf("Should have an error as flag does not exists")
}
// Test response value
err = visitor.ActivateCacheModification("test")
if err != nil {
t.Errorf("Should not have an error as flag does exists. Got %v", err)
}
}
func TestSendHitVisitor(t *testing.T) {
visitor := createVisitor("test", nil)
err := visitor.SendHit(&model.EventHit{})
if err == nil {
t.Errorf("Expected error as hit is malformed.")
}
err = visitor.SendHit(&model.EventHit{
Action: "test_action",
})
if err != nil {
t.Errorf("Did not expect error as hit is correct. Got %v", err)
}
}
|
{
visitor := createVisitor("test", nil)
// Test before sync
_, err := visitor.GetModificationString("not_exists", "default", true)
assert.NotEqual(t, nil, err, "Should have an error as modifications are not synced")
visitor.SynchronizeModifications()
// Test default value
val, err := visitor.GetModificationString("not_exists", "default", true)
assert.NotEqual(t, nil, err, "Should have an error as flag does not exists")
assert.Equal(t, "default", val, "Expected default value getting nil flag")
// Test wrong type value
val, err = visitor.GetModificationString("test_bool", "default", true)
assert.NotEqual(t, nil, err, "Should have an error as flag test_string is not of type float")
assert.Equal(t, "default", val, "Expected default value getting nil flag")
// Test nil value
val, err = visitor.GetModificationString("test_nil", "default", true)
assert.Equal(t, nil, err, "Did not expect error when getting nil flag")
assert.Equal(t, "default", val, "Expected default value getting nil flag")
// Test response value
val, err = visitor.GetModificationString("test_string", "default", true)
assert.Equal(t, nil, err, "Did not expect error when getting nil flag")
assert.Equal(t, "string", val, "Expected value string")
}
|
norb_record.py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input utility functions for norb."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import tensorflow.compat.v1 as tf
def _read_and_decode(filename_queue, image_pixel=96, distort=0):
"""Read a norb tf record file."""
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'meta': tf.FixedLenFeature([4], tf.int64),
})
# Convert from a scalar string tensor (whose single string has
# length image_pixels) to a uint8 tensor with shape
# [image_pixels].
image = tf.decode_raw(features['image_raw'], tf.uint8)
height = tf.cast(features['height'], tf.int32)
depth = tf.cast(features['depth'], tf.int32)
image = tf.reshape(image, tf.stack([depth, height, height]))
image = tf.transpose(image, [1, 2, 0])
image = tf.cast(image, tf.float32)
print(image.get_shape()[0].value)
if image_pixel < 96:
print('image resizing to {}'.format(image_pixel))
image = tf.image.resize_images(image, [image_pixel, image_pixel])
orig_images = image
if image_pixel == 48:
new_dim = 32
elif image_pixel == 32:
new_dim = 22
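  # NOTE: new_dim is only defined for image_pixel in {48, 32}; the distortion
  # branches below assume the input was resized to one of these sizes.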
if distort == 1:
image = tf.image.random_brightness(image, max_delta=63)
image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
image = tf.random_crop(image, tf.stack([new_dim, new_dim, depth]))
    # 0.26179938779 is 15 degrees in radians
image = tf.image.per_image_standardization(image)
image_pixel = new_dim
elif distort == 2:
image = tf.image.resize_image_with_crop_or_pad(image, new_dim, new_dim)
image = tf.image.per_image_standardization(image)
image_pixel = new_dim
else:
image = image * (1.0 / 255.0)
image = tf.div(
tf.subtract(image, tf.reduce_min(image)),
tf.subtract(tf.reduce_max(image), tf.reduce_min(image)))
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
return image, label, image_pixel, orig_images
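# Offsets (in pixels from the image center, since extract_glimpse is called
# with centered=True and normalized=False) for the four diagonal glimpse
# positions; scaled by 2x and 3x below for the wider-offset patch sets.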
bxs_m2 = [[1, 1], [1, -1], [-1, 1], [-1, -1]]
def inputs(train_dir,
batch_size,
split,
multi,
image_pixel=96,
distort=False,
patching=False):
"""Reads input data num_epochs times."""
if multi:
filename = os.path.join(train_dir, '{}duo-az.tfrecords'.format(split))
else:
|
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer([filename])
if distort:
d = 1 + (split == 'test')
else:
d = 0
# Even when reading in multiple threads, share the filename
# queue.
image, label, dim, orig_image = _read_and_decode(
filename_queue, image_pixel=image_pixel, distort=d)
orig_image.set_shape([48, 48, 1 + multi])
image.set_shape([dim, dim, 1 + multi])
image = tf.transpose(image, [2, 0, 1])
if split == 'train':
images, sparse_labels = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=2,
capacity=2000 + 3 * batch_size,
# Ensures a minimum amount of shuffling of examples.
min_after_dequeue=2000)
else:
images, sparse_labels, orig_images = tf.train.batch(
[image, label, orig_image],
batch_size=batch_size,
num_threads=1,
capacity=1000 + 3 * batch_size)
if patching:
t_images = tf.tile(orig_images, [4, 1, 1, 1])
c_images = tf.image.extract_glimpse(
t_images, [32, 32], bxs_m2, centered=True, normalized=False)
c2images = tf.image.extract_glimpse(
t_images, [32, 32],
2 * np.array(bxs_m2),
centered=True,
normalized=False)
c3images = tf.image.extract_glimpse(
t_images, [32, 32],
3 * np.array(bxs_m2),
centered=True,
normalized=False)
c_images = tf.map_fn(tf.image.per_image_standardization, c_images)
c2images = tf.map_fn(tf.image.per_image_standardization, c2images)
c3images = tf.map_fn(tf.image.per_image_standardization, c3images)
c_images = tf.transpose(c_images, [0, 3, 1, 2])
c2images = tf.transpose(c2images, [0, 3, 1, 2])
c3images = tf.transpose(c3images, [0, 3, 1, 2])
# cc_images = tf.concat([images, m_images, c_images], axis=0)
# cc_labels = tf.tile(sparse_labels, [9])
cc_images = tf.concat([images, c_images, c2images, c3images], axis=0)
cc_labels = tf.tile(sparse_labels, [13])
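      # 13 = 1 original batch + 3 glimpse scales * 4 offsets each.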
features = {
'images': images,
'labels': tf.one_hot(sparse_labels, 5),
'recons_image': images,
'recons_label': sparse_labels,
'height': dim,
'depth': 1 + multi,
'num_classes': 5,
'cc_images': cc_images,
'cc_recons_label': cc_labels,
'cc_labels': tf.one_hot(cc_labels, 5),
}
return features
|
filename = os.path.join(train_dir, '{}.tfrecords'.format(split))
|
dataToParquet.py
|
"""
@Title: dataToParquet.py
@author: Ashia Lewis
GOAL: Create and update the parquet files for the air and soil data, separately.
"""
import os
import glob
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
#CODE TO BE USED FOR THE BATCH DATA
"""
#file directories for the air and soil files
air_dir = r"D:\sample_biodiversitree\data\export_data\air_data"
soil_dir = r"D:\sample_biodiversitree\scripts\data\export_data\soil_data"
#all_air_files = glob.glob(air_dir + '/**/*.csv', recursive=True)
all_soil_files = glob.glob(soil_dir + '/**/*.csv', recursive=True)
#air_data = pd.concat((pd.read_csv(f) for f in all_air_files ))
#air_data.to_parquet('air_data.parquet')
#need to look at soil's clean up job
soil_data = pd.concat((pd.read_csv(f) for f in all_soil_files ))
soil_data.to_parquet('soil_data.parquet')
"""
#CODE TO BE USED IN THE ACTUAL PIPELINE
# file directories for the air and soil files
air_dir = r"D:\sample_biodiversitree\data\export_data\air_data"
soil_dir = r"D:\sample_biodiversitree\data\export_data\soil_data"
# concatenate all of the files' data
all_air_files = glob.glob(air_dir + '/**/*.csv', recursive=True)
all_soil_files = glob.glob(soil_dir + '/**/*.csv', recursive=True)
#put the data in a dataframe
air_data = pd.concat((pd.read_csv(f) for f in all_air_files))
soil_data = pd.concat((pd.read_csv(f) for f in all_soil_files))
# write the dataframes out to parquet files
air_table = pa.Table.from_pandas(air_data)
soil_table = pa.Table.from_pandas(soil_data)
air_writer = pq.ParquetWriter('air_data.parquet', air_table.schema)
air_writer.write_table(table = air_table)
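# Note: successive write_table calls on the same open ParquetWriter append
# additional row groups to the file; the writer must be closed before the
# file is read back.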
|
soil_writer = pq.ParquetWriter('soil_data.parquet', soil_table.schema)
soil_writer.write_table(table = soil_table)
if soil_writer:
soil_writer.close()
|
if air_writer:
air_writer.close()
|
token_test.go
|
package token_test
import (
"testing"
"github.com/goccy/go-yaml/token"
)
func TestToken(t *testing.T) {
pos := &token.Position{}
tokens := token.Tokens{
token.SequenceEntry("-", pos),
token.MappingKey(pos),
token.MappingValue(pos),
token.CollectEntry(",", pos),
token.SequenceStart("[", pos),
token.SequenceEnd("]", pos),
token.MappingStart("{", pos),
token.MappingEnd("}", pos),
token.Comment("#", "#", pos),
token.Anchor("&", pos),
token.Alias("*", pos),
token.Literal("|", "|", pos),
token.Folded(">", ">", pos),
token.SingleQuote("'", "'", pos),
token.DoubleQuote(`"`, `"`, pos),
token.Directive(pos),
token.Space(pos),
token.MergeKey("<<", pos),
token.DocumentHeader(pos),
token.DocumentEnd(pos),
token.New("1", "1", pos),
token.New("3.14", "3.14", pos),
token.New("true", "true", pos),
token.New("false", "false", pos),
token.New(".nan", ".nan", pos),
token.New(".inf", ".inf", pos),
token.New("-.inf", "-.inf", pos),
token.New("null", "null", pos),
token.Tag("!!null", "!!null", pos),
token.Tag("!!map", "!!map", pos),
token.Tag("!!str", "!!str", pos),
token.Tag("!!seq", "!!seq", pos),
token.Tag("!!binary", "!!binary", pos),
token.Tag("!!omap", "!!omap", pos),
token.Tag("!!set", "!!set", pos),
token.Tag("!!int", "!!int", pos),
token.Tag("!!float", "!!float", pos),
token.Tag("!hoge", "!hoge", pos),
}
tokens.Dump()
tokens.Add(token.New("hoge", "hoge", pos))
if tokens[len(tokens)-1].PreviousType() != token.TagType {
t.Fatal("invalid previous token type")
}
if tokens[0].PreviousType() != token.UnknownType {
t.Fatal("invalid previous token type")
}
if tokens[len(tokens)-2].NextType() != token.StringType {
t.Fatal("invalid next token type")
}
if tokens[len(tokens)-1].NextType() != token.UnknownType {
t.Fatal("invalid next token type")
}
}
func TestIsNeedQuoted(t *testing.T) {
if !token.IsNeedQuoted("true") {
t.Fatal("failed to quoted judge for boolean")
|
t.Fatal("failed to quoted judge for number")
}
if !token.IsNeedQuoted("1:1") {
t.Fatal("failed to quoted judge for time")
}
if !token.IsNeedQuoted("hoge # comment") {
t.Fatal("failed to quoted judge for comment")
}
if !token.IsNeedQuoted("\\0") {
t.Fatal("failed to quoted judge for escaped token")
}
if token.IsNeedQuoted("Hello World") {
t.Fatal("failed to unquoted judge")
}
}
|
}
if !token.IsNeedQuoted("1.234") {
|
NodeList.rs
|
// WARNING: This file was autogenerated by jni-bindgen. Any changes to this file may be lost!!!
#[cfg(any(feature = "all", feature = "org-w3c-dom-NodeList"))]
__jni_bindgen! {
/// public interface [NodeList](https://developer.android.com/reference/org/w3c/dom/NodeList.html)
///
/// Required feature: org-w3c-dom-NodeList
public interface NodeList ("org/w3c/dom/NodeList") extends crate::java::lang::Object {
/// [item](https://developer.android.com/reference/org/w3c/dom/NodeList.html#item(int))
///
/// Required features: "org-w3c-dom-Node"
#[cfg(any(feature = "all", all(feature = "org-w3c-dom-Node")))]
pub fn item<'env>(&'env self, arg0: i32) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::org::w3c::dom::Node>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "org/w3c/dom/NodeList", java.flags == PUBLIC | ABSTRACT, .name == "item", .descriptor == "(I)Lorg/w3c/dom/Node;"
unsafe {
|
__jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [getLength](https://developer.android.com/reference/org/w3c/dom/NodeList.html#getLength())
pub fn getLength<'env>(&'env self) -> __jni_bindgen::std::result::Result<i32, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "org/w3c/dom/NodeList", java.flags == PUBLIC | ABSTRACT, .name == "getLength", .descriptor == "()I"
unsafe {
let __jni_args = [];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("org/w3c/dom/NodeList\0", "getLength\0", "()I\0");
__jni_env.call_int_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
}
}
|
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0)];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("org/w3c/dom/NodeList\0", "item\0", "(I)Lorg/w3c/dom/Node;\0");
|
login.component.ts
|
import { Router } from '@angular/router';
import { AuthAccessService } from '../dbaccess/auth-access.service';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { Component, OnInit, OnDestroy } from '@angular/core';
@Component({
selector: 'app-login',
templateUrl: './login.component.html',
styleUrls: ['./login.component.scss']
})
export class
|
implements OnInit, OnDestroy {
public loginForm: FormGroup;
constructor(
public router: Router,
public formBuilder: FormBuilder,
public authAccessService: AuthAccessService
) { }
/**
   * Called when the component is initialized.
*/
ngOnInit() {
this.initForm();
}
/**
* When the LoginComponent is left.
*/
public ngOnDestroy(): void {
}
/**
* Initialize the form.
*/
public initForm(): void {
this.loginForm = this.formBuilder.group({
email: ['', [Validators.required, Validators.maxLength(255)]],
password: ['', [Validators.required, Validators.maxLength(2048)]]
});
}
/**
   * Log in if verification succeeded.
*
* @param email the user email
* @param password the user password
*/
public async onSubmitButtonClicked(email, password): Promise<void> {
await this.authAccessService.login(email, password);
if (this.authAccessService.userIsLoggedIn) {
this.router.navigate(['/home']);
}
}
}
|
LoginComponent
|
index.js
|
const config = require('./config');
const teleinfo = require('./teleinfo');
const mqtt = require('./mqtt');
const log = require('./log');
async function disconnect() {
await teleinfo.disconnect();
await mqtt.disconnect();
}
async function
|
() {
const configHidden = { ...config, mqttPassword: '<hidden>' };
log.info('Starting teleinfo-mqtt with configuration =', configHidden);
try {
// Connect to MQTT
await mqtt.connect();
// Connect to serial port
const teleinfoEventEmitter = await teleinfo.connect();
// Register to frame events and publish to mqtt
teleinfoEventEmitter.on('frame', (frame) => {
mqtt.publishFrame(frame);
});
// Graceful exit
process.on('SIGTERM', disconnect);
process.on('SIGINT', disconnect);
} catch (e) {
log.error('Unable to run => See errors below');
log.error(e);
process.exit(1);
}
}
run();
|
run
|
ProcessDefinitionListApi.ts
|
/*
* Copyright 2021 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
|
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export interface ProcessDefinitionListApi {
// process definition list api
}
|
* you may not use this file except in compliance with the License.
|
nova_conf.py
|
# Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
SERVICE_TYPE = 'compute'
compute_group = cfg.OptGroup(
'compute',
title='Compute Service Options',
help="Configuration options for connecting to the Nova API service"
)
compute_opts = [
cfg.StrOpt("client_version",
default="2.61",
help="""
Selects the API microversion requested by the novaclient.
"""
),
]
def register_opts(conf):
|
def get_ksa_adapter_opts(default_service_type, deprecated_opts=None):
opts = ks_loading.get_adapter_conf_options(
include_deprecated=False, deprecated_opts=deprecated_opts)
cfg.set_defaults(opts,
valid_interfaces=['internal', 'public'],
service_type=default_service_type)
return opts
|
conf.register_group(compute_group)
conf.register_opts(compute_opts, group=compute_group)
group = getattr(compute_group, 'name', compute_group)
ks_loading.register_session_conf_options(conf, group)
ks_loading.register_auth_conf_options(conf, group)
adapter_opts = get_ksa_adapter_opts(SERVICE_TYPE)
conf.register_opts(adapter_opts, group=group)
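# Usage sketch (hypothetical): once register_opts(CONF) has run, the option is
# readable as CONF.compute.client_version, alongside the keystoneauth session,
# auth and adapter options registered under the same [compute] group.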
|
_compiler.py
|
import distutils.ccompiler
import os
import os.path
import platform
import shutil
import sys
import subprocess
from typing import Optional, List
import setuptools
import setuptools.msvc
from setuptools import Extension
from cupy_builder._context import Context
import cupy_builder.install_build as build
def _nvcc_gencode_options(cuda_version: int) -> List[str]:
"""Returns NVCC GPU code generation options."""
if sys.argv == ['setup.py', 'develop']:
return []
envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None)
if envcfg is not None and envcfg != 'current':
|
if envcfg == 'current' and build.get_compute_capabilities() is not None:
ccs = build.get_compute_capabilities()
arch_list = [
f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}')
for cc in ccs]
else:
# The arch_list specifies virtual architectures, such as 'compute_61',
# and real architectures, such as 'sm_61', for which the CUDA
# input files are to be compiled.
#
# The syntax of an entry of the list is
#
# entry ::= virtual_arch | (virtual_arch, real_arch)
#
# where virtual_arch is a string which means a virtual architecture and
# real_arch is a string which means a real architecture.
#
        # If a virtual architecture is supplied, NVCC generates a PTX code
        # for the virtual architecture. If a pair of a virtual architecture
        # and a real architecture is supplied, NVCC generates a PTX code for
        # the virtual architecture as well as a cubin code for the real one.
        #
        # For example, to make NVCC generate a PTX code for the 'compute_60'
        # virtual architecture, arch_list has an entry of 'compute_60'.
        #
        #   arch_list = ['compute_60']
        #
        # As another example, to make NVCC generate a PTX code for the
        # 'compute_61' virtual architecture and a cubin code for the 'sm_61'
        # real architecture, arch_list has an entry of ('compute_61', 'sm_61').
        #
        #   arch_list = [('compute_61', 'sm_61')]
#
# See the documentation of each CUDA version for the list of supported
# architectures:
#
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-steering-gpu-code-generation
if cuda_version >= 11040:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
('compute_87', 'sm_87'),
'compute_87']
elif cuda_version >= 11010:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
'compute_86']
elif cuda_version >= 11000:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
'compute_80']
elif cuda_version >= 10000:
arch_list = ['compute_30',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
'compute_70']
else:
# This should not happen.
assert False
options = []
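    # Each arch_list entry expands to one --generate-code flag, e.g.
    # ('compute_61', 'sm_61') becomes '--generate-code=arch=compute_61,code=sm_61'
    # and a bare 'compute_80' becomes '--generate-code=arch=compute_80,code=compute_80'.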
for arch in arch_list:
if type(arch) is tuple:
virtual_arch, real_arch = arch
options.append('--generate-code=arch={},code={}'.format(
virtual_arch, real_arch))
else:
options.append('--generate-code=arch={},code={}'.format(
arch, arch))
return options
class DeviceCompilerBase:
"""A class that invokes NVCC or HIPCC."""
def __init__(self, ctx: Context):
self._context = ctx
def _get_preprocess_options(self, ext: Extension) -> List[str]:
# https://setuptools.pypa.io/en/latest/deprecated/distutils/apiref.html#distutils.core.Extension
# https://github.com/pypa/setuptools/blob/v60.0.0/setuptools/_distutils/command/build_ext.py#L524-L526
incdirs = ext.include_dirs[:] # type: ignore
macros = ext.define_macros[:] # type: ignore
for undef in ext.undef_macros: # type: ignore
macros.append((undef,))
return distutils.ccompiler.gen_preprocess_options(macros, incdirs)
def spawn(self, commands: List[str]) -> None:
print('Command:', commands)
subprocess.check_call(commands)
class DeviceCompilerUnix(DeviceCompilerBase):
def compile(self, obj: str, src: str, ext: Extension) -> None:
if self._context.use_hip:
self._compile_unix_hipcc(obj, src, ext)
else:
self._compile_unix_nvcc(obj, src, ext)
def _compile_unix_nvcc(self, obj: str, src: str, ext: Extension) -> None:
cc_args = self._get_preprocess_options(ext) + ['-c']
# For CUDA C source files, compile them with NVCC.
nvcc_path = build.get_nvcc_path()
base_opts = build.get_compiler_base_options(nvcc_path)
compiler_so = nvcc_path
cuda_version = build.get_cuda_version()
postargs = _nvcc_gencode_options(cuda_version) + [
'-O2', '--compiler-options="-fPIC"']
if cuda_version >= 11020:
postargs += ['--std=c++14']
num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
postargs += [f'-t{num_threads}']
else:
postargs += ['--std=c++11']
postargs += ['-Xcompiler=-fno-gnu-unique']
print('NVCC options:', postargs)
self.spawn(compiler_so + base_opts + cc_args + [src, '-o', obj] +
postargs)
def _compile_unix_hipcc(self, obj: str, src: str, ext: Extension) -> None:
cc_args = self._get_preprocess_options(ext) + ['-c']
# For CUDA C source files, compile them with HIPCC.
rocm_path = build.get_hipcc_path()
base_opts = build.get_compiler_base_options(rocm_path)
compiler_so = rocm_path
hip_version = build.get_hip_version()
postargs = ['-O2', '-fPIC', '--include', 'hip_runtime.h']
if hip_version >= 402:
postargs += ['--std=c++14']
else:
postargs += ['--std=c++11']
print('HIPCC options:', postargs)
self.spawn(compiler_so + base_opts + cc_args + [src, '-o', obj] +
postargs)
class DeviceCompilerWin32(DeviceCompilerBase):
def compile(self, obj: str, src: str, ext: Extension) -> None:
if self._context.use_hip:
raise RuntimeError('ROCm is not supported on Windows')
compiler_so = build.get_nvcc_path()
cc_args = self._get_preprocess_options(ext) + ['-c']
cuda_version = build.get_cuda_version()
postargs = _nvcc_gencode_options(cuda_version) + ['-O2']
if cuda_version >= 11020:
# MSVC 14.0 (2015) is deprecated for CUDA 11.2 but we need it
# to build CuPy because some Python versions were built using it.
# REF: https://wiki.python.org/moin/WindowsCompilers
postargs += ['-allow-unsupported-compiler']
postargs += ['-Xcompiler', '/MD', '-D_USE_MATH_DEFINES']
# This is to compile thrust with MSVC2015
if cuda_version >= 11020:
postargs += ['--std=c++14']
num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
postargs += [f'-t{num_threads}']
cl_exe_path = self._find_host_compiler_path()
if cl_exe_path is not None:
print(f'Using host compiler at {cl_exe_path}')
postargs += ['--compiler-bindir', cl_exe_path]
print('NVCC options:', postargs)
self.spawn(compiler_so + cc_args + [src, '-o', obj] + postargs)
def _find_host_compiler_path(self) -> Optional[str]:
# c.f. cupy.cuda.compiler._get_extra_path_for_msvc
cl_exe = shutil.which('cl.exe')
if cl_exe:
# The compiler is already on PATH, no extra path needed.
return None
vctools: List[str] = setuptools.msvc.EnvironmentInfo(
platform.machine()).VCTools
for path in vctools:
cl_exe = os.path.join(path, 'cl.exe')
if os.path.exists(cl_exe):
return path
print(f'Warning: cl.exe could not be found in {vctools}')
return None
|
return ['--generate-code={}'.format(arch)
for arch in envcfg.split(';') if len(arch) > 0]
|
task.rs
|
use crate::*;
use std::ops::DerefMut;
/// This holds all the info for a task to be given to a creep
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct Task(VecDeque<(Action, Target)>);
impl Display for Task {
fn fmt(&self, f: &mut Formatter<'_>) -> Result
|
}
impl Default for Task {
fn default() -> Self {
Task(VecDeque::new())
}
}
impl Deref for Task {
type Target = VecDeque<(Action, Target)>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Task {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl Task {
/// Get all associated actions for this task
pub fn actions(&self) -> HashSet<&Action> {
let mut actions = HashSet::new();
for (a, _) in &self.0 {
actions.insert(a);
}
actions
}
/// get all the associated targets
pub fn targets(&self) -> HashSet<Position> {
let mut targets = HashSet::new();
for (_, t) in &self.0 {
targets.insert(t.pos());
}
targets
}
/// Is the task paved?
pub fn is_paved_from<T: HasPosition>(&self, source: T) -> bool {
todo!("Return if this is paved from {} to all the targets", source.pos())
}
/// Get all the required body parts for a task
pub fn parts_required(&self) -> HashSet<Part> {
let mut parts = HashSet::new();
for (a, _) in &self.0 {
parts = parts.union(&a.req_parts()).cloned().collect();
}
parts
}
}
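// Usage sketch (the `Action`/`Target` variants shown are hypothetical): since
// `Task` derefs to a `VecDeque`, steps can be queued directly and the helper
// methods above summarize them:
//
//     let mut task = Task::default();
//     task.push_back((Action::Harvest, Target::Source(source_pos)));
//     let parts = task.parts_required();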
|
{
for (a, t) in &self.0 {
writeln!(f, "{:?} -> {:?}", a, t)?;
}
Ok(())
}
|
locustfile.py
|
import datetime
from http import HTTPStatus
from locust import HttpUser, task, between
# This test can be run after installing locust through the CLI as "locust --host=http://<deployed_host>:<port>".
# The URL http://localhost:8089/ can then be accessed to start the test.
# It can also be run in no-web mode as "locust --no-web -c <number_of_clients> -r <clients_per_second> --run-time <time e.g. 1h30m> --host=http://<deployed_host>:<port>"
|
class QuickstartUser(HttpUser):
wait_time = between(1, 2)
@task(1)
def get_developers(self):
r = self.client.get("/developers")
assert r.status_code == HTTPStatus.OK, "Unexpected response code: " + str(r.status_code)
@task(1)
def get_developers_search(self):
r = self.client.get("/developers/search/james")
assert r.status_code == HTTPStatus.OK, "Unexpected response code: " + str(r.status_code)
| |
plotmodel.py
|
"""
The base top-level plot model class.
From this all data and plotting flow.
"""
from pageplot.exceptions import PagePlotParserError
from pathlib import Path
from typing import Any, Optional, Dict, List, Union
from pageplot.extensionmodel import PlotExtension
from pageplot.extensions import built_in_extensions
from pageplot.io.spec import IOSpecification
from pageplot.config import GlobalConfig
from pageplot.mask import get_mask
import matplotlib.pyplot as plt
import numpy as np
import unyt
import attr
@attr.s(auto_attribs=True)
class PlotModel:
"""
Model describing an individual plot. De-serializes the input
json describing an individual figure's extension values.
To use this, you'll need to initialise it with the configuration
(for all the extensions!), and then associate the data with
    the appropriate method. The plots can then be created using the
methods in the following order:
``setup_figures`` - creates Figure and Axes objects
``run_extensions`` - runs all of the extensions' ``preprocess`` steps
``perform_blitting`` - runs the extensions' ``blit`` functions
``save`` - writes out the figures to disk
``finalize`` - closes the Figure object
You can also serialize the contents of the whole figure to a dictionary
with the ``serialize`` object.
Parameters
----------
name: str
Plot name. This is the filename of the plot (without file extension).
config: GlobalConfig
Global configuration object.
plot_spec: Dict[str, Any]
Data controlling the behaviour of each extension. The keys should
be the same as the used extensions. Mis-matches will raise a
``PagePlotParserError``.
x, y, z: str, optional
Strings to be passed to the data to load appropriate x, y, and z
data. Here only x is required.
x_units, y_units, z_units: Union[str, None, unyt.unyt_quantity]
Expected output units for the plot, to be parsed.
mask: str, optional
Mask text (see :func:`get_mask`).
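    Example
    -------
    A minimal sketch of the intended call order (the ``config``, ``spec`` and
    ``data`` objects here are hypothetical)::

        model = PlotModel(name="my_plot", config=config, plot_spec=spec, x="gas/mass")
        model.associate_data(data)
        model.setup_figures()
        model.run_extensions()
        model.perform_blitting()
        model.save(Path("my_plot.png"))
        model.finalize()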
"""
name: str
config: GlobalConfig
plot_spec: Dict[str, Any]
x: str
y: Optional[str] = None
z: Optional[str] = None
# Output units for the plot.
x_units: Union[str, None, unyt.unyt_quantity] = None
y_units: Union[str, None, unyt.unyt_quantity] = None
z_units: Union[str, None, unyt.unyt_quantity] = None
mask: Optional[str] = None
data: IOSpecification = attr.ib(init=False)
fig: plt.Figure = attr.ib(init=False)
axes: plt.Axes = attr.ib(init=False)
extensions: Dict[str, PlotExtension] = attr.ib(init=False)
def associate_data(self, data: IOSpecification):
"""
Associates the data file (which conforms to the
``IOSpecification``) with the plot.
data: IOSpecification
Any data file that conforms to the specification.
"""
self.data = data
def setup_figures(self):
"""
Sets up the internal figure and axes.
"""
self.fig, self.axes = plt.subplots()
return
def
|
(
self, additional_extensions: Optional[Dict[str, PlotExtension]] = None
):
"""
Run the figure extensions (these provide all data for the figures,
excluding the plotting). Internal extensions are performed
first, then any additional extensions are executed.
additional_extensions: Dict[str, PlotExtension]
Any additional extensions conforming to the specification.
"""
# First, sort out units and masking
units = {
"x_units": self.x_units,
"y_units": self.y_units,
"z_units": self.z_units,
}
for name, value in units.items():
if value is None:
if (associated_data := getattr(self, name[0])) is None:
units[name] = unyt.unyt_quantity(1.0, None)
else:
units[name] = unyt.unyt_quantity(
1.0, associated_data.split(" ", 1)[1]
)
else:
units[name] = unyt.unyt_quantity(1.0, value)
mask = get_mask(data=self.data, mask_text=self.mask)
self.extensions = {}
if additional_extensions is None:
additional_extensions = {}
combined_extensions = {**built_in_extensions, **additional_extensions}
for name in self.plot_spec.keys():
try:
Extension = combined_extensions[name]
except KeyError:
raise PagePlotParserError(
name, "Unable to find matching extension for configuration value."
)
extension = Extension(
name=name,
config=self.config,
metadata=self.data.metadata,
x=self.data.data_from_string(self.x, mask=mask),
y=self.data.data_from_string(self.y, mask=mask),
z=self.data.data_from_string(self.z, mask=mask),
**units,
**self.plot_spec.get(name, {}),
)
extension.preprocess()
self.extensions[name] = extension
return
def perform_blitting(self):
"""
Performs the blitting (creating the figure).
Without this, the extensions are just 'created' and pre-processed
without affecting or creating the figure.
"""
for extension in self.extensions.values():
extension.blit(fig=self.fig, axes=self.axes)
def save(self, filename: Path):
"""
Saves the figure to file.
filename: Path
Filename that you would like to save the figure to. Can have
any matplotlib-compatible file extension.
Notes
-----
        It's suggested that you run finalize() after this function, otherwise
there will be lots of figures open at one time causing potential slowdowns.
"""
self.fig.savefig(filename)
return
def serialize(self) -> Dict[str, Any]:
"""
Serializes the contents of the extensions to a dictionary.
Note that you do not have to have 'created' the figure to run this,
if you just want the data you should be able to just request
the serialized data.
"""
serialized = {name: ext.serialize() for name, ext in self.extensions.items()}
return serialized
def finalize(self):
"""
Closes figures and cleans up.
"""
plt.close(self.fig)
class Config:
arbitrary_types_allowed = True
|
run_extensions
|
hasura.constants.ts
|
export const HASURA_EVENT_HANDLER = Symbol('HASURA_EVENT_HANDLER');
export const HASURA_MODULE_CONFIG = Symbol('HASURA_MODULE_CONFIG');
|
||
survivalProb.py
|
import numpy as np
import matplotlib.pyplot as plt
"""
As in evAccum.py, the direct simulation for a single agent. Here the code is modified to stop when the agent hits a
boundary and also tracks where the LLR paths are. This allows us to output an array of exit times and compute the
survival probability.
"""
# Parameters for the simulation
length = 100
mean1 = 0.1
mean2 = -0.1
var1 = 1
var2 = 1
bdy_plus = 0.9
bdy_minus = -3
# # Observations are drawn from the Norm(mean1, var1) distribution.
# obs = np.sqrt(var1) * np.random.randn(length) + mean1 # scale and translate draws from the standard distribution
runs = int(1e3)
max_time = 500
exit_times = np.zeros(runs)
paths_plus = np.zeros(max_time) # How many sims have chosen H^+
paths_minus = np.zeros(max_time) # ^^ H^-
paths_pos = np.zeros(max_time) # How many sims have not exited and are positive
paths_neg = np.zeros(max_time) # How many sims have not exited and are negative
correct = 0
class Dist:
"""We define a class for distributions so that we can easily access the truth distributions rather than writing out
the formula for the distribution each time we want to use it."""
def __init__(self, mean, var):
self.mean = mean
self.var = var
def
|
(self, x):
return np.exp(-np.power(x - self.mean, 2) / (2*self.var))/(np.sqrt(2 * np.pi * self.var))
pos = Dist(mean1, var1) # the positive state distribution
neg = Dist(mean2, var2)
def compute_llr(x_array, dist1, dist2):
"""
Computes the log-likelihood ratio for a given array of observations.
:param x_array: an array of observations
:param dist1: the positive truth distribution
:param dist2: the negative truth distribution
:return: an array the size of x_array of LLRs
"""
return np.log(dist1(x_array)/dist2(x_array))
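# Note (derivation, for equal variances s2 = var1 = var2): the Gaussian LLR
# reduces to the closed form
#   log(p1(x)/p2(x)) = (mean1 - mean2) * x / s2 + (mean2**2 - mean1**2) / (2 * s2),
# so each observation shifts the accumulated evidence linearly in x.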
# Compute and store the LLRs as a vector of accumulated evidence.
for r in range(runs):
ev = 0
T = 0
time = 0
while (ev < bdy_plus) and (ev > bdy_minus) and (time < max_time):
if ev >= 0:
paths_pos[time] += 1
else:
paths_neg[time] += 1
time += 1
obs = np.sqrt(var1) * np.random.randn(1) + mean1
ev += compute_llr(obs, pos.prob, neg.prob)
T += 1
if ev >= bdy_plus:
correct += 1
paths_plus[T:] += 1
else:
paths_minus[T:] += 1
exit_times[r] = T
# The last part reports the fraction of correct decisions and plots a histogram of the exit times. After adding
# modifications to the plot we then call it using the show() method.
print("Correct: " + str(100 * correct / runs) + "%")
plt.hist(exit_times, 50, density=True, facecolor='green', alpha=0.75)
np.save('exit_times.npy', exit_times)
path_data = np.vstack((paths_plus, paths_minus, paths_pos, paths_neg))
np.save('path_data.npy', path_data)
plt.xlabel('Time')
plt.ylabel('Probability density')
plt.title('Evidence Accum')
# plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# plt.axis([0, length, 0, 1])
# plt.grid(True)
plt.show()
|
prob
|
delete_crate.rs
|
use crate::{admin::dialoguer, db, models::Crate, schema::crates};
use diesel::prelude::*;
|
after_help = "Please be super sure you want to do this before running this!"
)]
pub struct Opts {
/// Name of the crate
crate_name: String,
}
pub fn run(opts: Opts) {
let conn = db::connect_now().unwrap();
conn.transaction::<_, diesel::result::Error, _>(|| {
delete(opts, &conn);
Ok(())
})
.unwrap()
}
fn delete(opts: Opts, conn: &PgConnection) {
let krate: Crate = Crate::by_name(&opts.crate_name).first(conn).unwrap();
let prompt = format!(
"Are you sure you want to delete {} ({})?",
opts.crate_name, krate.id
);
if !dialoguer::confirm(&prompt) {
return;
}
println!("deleting the crate");
let n = diesel::delete(crates::table.find(krate.id))
.execute(conn)
.unwrap();
println!(" {n} deleted");
if !dialoguer::confirm("commit?") {
panic!("aborting transaction");
}
}
|
#[derive(clap::Parser, Debug)]
#[clap(
name = "delete-crate",
about = "Purge all references to a crate from the database.",
|
init.js
|
$('.modal').modal({
dismissible: true,
opacity: .8,
inDuration: 400,
outDuration: 200,
startingTop: '4%',
|
document.getElementById('to-top').onclick = function () {
scrollTo(document.body, 0, 100);
}
function scrollTo(element, to, duration) {
if (duration < 0) return;
var difference = to - element.scrollTop;
var perTick = difference / duration * 2;
setTimeout(function() {
element.scrollTop = element.scrollTop + perTick;
scrollTo(element, to, duration - 2);
}, 10);
}
|
endingTop: '10%'
}
);
|
reader.go
|
// Package reader parses change sets and provides config values
package reader
import (
"time"
"github.com/dynamicgo/go-config/source"
)
// Reader is an interface for merging changesets
type Reader interface {
Merge(...*source.ChangeSet) (*source.ChangeSet, error)
Values(*source.ChangeSet) (Values, error)
String() string
}
// Values is returned by the reader
type Values interface {
Bytes() []byte
Get(path ...string) Value
Map() map[string]interface{}
Scan(v interface{}) error
}
// Value represents a value of any type
type Value interface {
Bool(def bool) bool
|
Int(def int) int
String(def string) string
Float64(def float64) float64
Duration(def time.Duration) time.Duration
StringSlice(def []string) []string
StringMap(def map[string]string) map[string]string
Scan(val interface{}) error
Bytes() []byte
}
| |
recurrent.py
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import get_new_symbol
from ._op_reqs import *
@register_op(doc_str="")
class gru(Operation):
r"""
Gated recurrent unit (GRU).
    .. math::
       r_t = \rm{recurrent\_activation}(W_{ir} x_t + b_{ir} + W_{hr} h_{t-1} + b_{hr})
    .. math::
       z_t = \rm{recurrent\_activation}(W_{iz} x_t + b_{iz} + W_{hz} h_{t-1} + b_{hz})
    .. math::
       o_t = \rm{activation}(W_{io} x_t + b_{io} + r_t * (W_{ho} h_{t-1} + b_{ho}))
    .. math::
       h_t = (1 - z_t) * o_t + z_t * h_{t-1}
Where:
    * ``W_{ir}``, ``W_{iz}``, and ``W_{io}`` are the input-hidden weights for the
      reset, update, and output gates, respectively.
    * Similarly, ``W_{hr}``, ``W_{hz}``, and ``W_{ho}`` are the hidden-hidden
      (recurrent) weights.
* ``h_t`` is the hidden state at time ``t``.
* ``x_t`` is the input at time ``t``.
    * ``h_{t-1}`` is the hidden state of the layer at time ``t-1`` or the initial
hidden state at time ``0``.
* ``r_t``, ``z_t``, and ``o_t`` are the reset, update, and new gates, respectively.
* ``*`` is elementwise product.
Parameters
----------
x: <s, b, I, T> (Required)
* ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
input dimension.
initial_h: <b, H, T> (Required)
* ``H`` denotes hidden size.
weight: const<I+H, 3*H, T> (Required) - Weight matrix
* ``weight[:I] = [W_{iz} | W_{ir} | W_{io}]`` where ``[a|b]`` denotes column
concatenation and ``[a, b]`` denotes row concatenation. ``W_{iz}``,
``W_{ir}``, and ``W_{io}`` have shape ``(I, H)``.
    * ``weight[I:] = [W_{hz} | W_{hr} | W_{ho}]``: ``W_{hz}``, ``W_{hr}``, and
      ``W_{ho}`` have shape ``(H, H)``.
bias: const<2, 3*H, T> (Optional) [Default all 0s]
* ``bias[0]`` and ``bias[1]`` are input-hidden and hidden-hidden
bias, respectively.
* ``3*H`` are biases for ``[b_{ir} + b_{hr}, b_{iz} + b_{hz}, b_{io} + b_{ho}]``.
direction: const<str> (Optional) [Default=forward]
* Either ``forward`` or ``reverse``.
output_sequence: const<bool> (Optional) [Default=False]
* Outputs every step if ``True``.
recurrent_activation: const<str> (Optional) [Default=sigmoid]
* Activation applied on update and reset gate.
activation: const<str> (Optional) [Default=tanh]
* Activation applied on output gate.
Returns
-------
<s, b, H, T> or <1, b, H, T>
* If ``output_sequence == True`` (hidden states from every step):
``<s, b, H, T>``.
* Else ``<1, b, H, T>`` (hidden states of the final step).
<b, H, T>
* Hidden states of the final step.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=TensorInputType(),
initial_h=TensorInputType(),
weight=TensorInputType(const=True),
bias=TensorInputType(const=True, optional=True, default=None),
direction=StringInputType(const=True, default="forward"),
output_sequence=BoolInputType(const=True, default=False),
recurrent_activation=StringInputType(const=True, default="sigmoid"),
activation=StringInputType(const=True, default="tanh")
)
def __init__(self, **kwargs):
super(gru, self).__init__(**kwargs)
def type_inference(self):
if self.x.rank != 3:
raise ValueError(
"Invalid input shape. Expecting Rank 3 input, got {}".format(
len(self.x.shape)
)
)
sequence_length, batch_size, input_size = self.x.shape
if self.weight.rank != 2:
raise ValueError(
"Invalid weight shape. Expecting Rank 2 input, got {}".format(
len(self.weight.shape)
)
)
input_hidden_size, hidden_dim = self.weight.shape
hidden_size = input_hidden_size - input_size
direction = self.direction.val
valid_directions = {"forward", "reverse"}
if direction not in valid_directions:
raise ValueError(
"Direction {} not supported. Supported directions: {}".format(
direction, valid_directions
)
)
dim_factor = 3
if hidden_size != (hidden_dim // dim_factor):
raise ValueError(
"Incorrect weight matrix: hidden dim size mismatch. \
Provided {}. Expecting <b, 3*H>".format(
self.weight.shape
)
)
out_seq_len = sequence_length if self.output_sequence.val else 1
output_shape = [out_seq_len, batch_size, hidden_size]
output_h_shape = [batch_size, hidden_size]
return (
types.tensor(self.x.dtype, tuple(output_shape)),
types.tensor(self.x.dtype, tuple(output_h_shape)),
)
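# A minimal NumPy sketch (not part of the op definition) of a single GRU step
# implementing the equations documented above. It assumes the documented
# column layout weight = [[W_iz | W_ir | W_io], [W_hz | W_hr | W_ho]] with
# shape (I+H, 3*H), sigmoid/tanh activations, and biases omitted.
def _gru_step_reference(x_t, h_prev, weight, H):
    import numpy as np
    sigmoid = lambda a: 1.0 / (1.0 + np.exp(-a))
    I = x_t.shape[0]
    gates_x = x_t @ weight[:I]     # (3*H,) input-hidden contribution
    gates_h = h_prev @ weight[I:]  # (3*H,) hidden-hidden contribution
    z = sigmoid(gates_x[:H] + gates_h[:H])              # update gate
    r = sigmoid(gates_x[H:2 * H] + gates_h[H:2 * H])    # reset gate
    o = np.tanh(gates_x[2 * H:] + r * gates_h[2 * H:])  # output/new gate
    return (1.0 - z) * o + z * h_prev                   # h_t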
@register_op(doc_str="")
class lstm(Operation):
r"""
Single long short-term memory (LSTM) sequence.
    .. math::
       i_t = \rm{recurrent\_activation}(W_{ii} x_t + B_{ii} + W_{hi} h_{t-1} + B_{hi})
    .. math::
       f_t = \rm{recurrent\_activation}(W_{if} x_t + B_{if} + W_{hf} h_{t-1} + B_{hf})
    .. math::
       z_t = \rm{cell\_activation}(W_{iz} x_t + B_{iz} + W_{hz} h_{t-1} + B_{hz})
    .. math::
       o_t = \rm{recurrent\_activation}(W_{io} x_t + B_{io} + W_{ho} h_{t-1} + B_{ho})
    .. math::
       c_t = f_t * c_{t-1} + i_t * z_t
    .. math::
       h_t = o_t * \rm{activation}(c_t)
Where:
* ``i_t``, ``f_t``, ``o_t``, and ``z_t`` are input, forget, output, and cell gates,
respectively, at time ``t``.
* ``c_t`` is cell state at time ``t``.
* ``h_t`` is the hidden state at time ``t``.
* ``W_{ii}``, ``W_{if}``, ``W_{io}``, and ``W_{iz}`` are input weights for input,
forget, output and cell gate, respectively.
* ``W_{hi}``, ``W_{hf}``, ``W_{ho}``, and ``W_{hz}`` are recurrent weights for input,
forget, output and cell gate, respectively.
Parameters
----------
x: <s, b, I, T> (Required)
* ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
input dimension.
initial_h: <b, DIRECTION*H, T> (Required)
* Initial hidden state. ``DIRECTION = 1`` for uni-directional, ``2`` for
bi-directional LSTM.
* ``H`` denotes hidden size.
* ``[b, :H]`` and ``[b, H:]`` represents forward and reverse direction
values, respectively.
initial_c: <b, DIRECTION*H, T> (Required)
* Initial cell state.
* Format is same as ``initial_h``.
weight: const<I+H, 4*DIRECTION*H, T> (Required) - Weight matrix
* Weight tensor should be in order of
``[input_gate, forget_gate, output_gate, cell_gate]``.
* ``[I+H, :4*H]`` and ``[I+H, 4*H:]`` represent forward and reverse direction
values, respectively.
bias: const<2, 4*DIRECTION*H, T> (Optional) [Default all 0s]
* ``bias[0]`` and ``bias[1]`` are input-hidden and hidden-hidden
bias, respectively.
direction: const<str> (Optional) [Default=forward]
* One of the following: ``forward``, ``reverse``, or ``bidirectional``.
    * Must match ``DIRECTION`` in initial states and weight parameters.
output_sequence: const<bool> (Optional) [Default=False]
* Outputs every step if ``True``.
recurrent_activation: const<str> (Optional) [Default=sigmoid]
* Activation applied on input, forget, and output gates.
    cell_activation: const<str> (Optional) [Default=tanh]
* Activation applied on cell gate.
activation: const<str> (Optional) [Default=tanh]
* Activation applied on output gate.
peephole: const<3*DIRECTION*H, T> (Optional, default to 0)
* Weight tensor for peephole.
* Order is ``[input_gate, forget_gate, output_gate]``.
* Shape of each peephole vector is ``(H,)`` (``H`` is hidden size).
clip: const<fp32> (optional) [Default=None]
* Cell gate is clipped to ``[-clip, +clip]``.
Returns
-------
<s, b, DIRECTION*H, T> or <1, b, DIRECTION*H, T>
* If ``output_sequence == True`` (hidden states from every step):
``<s, b, DIRECTION*H, T>``.
* Else ``<1, b, DIRECTION*H, T>`` (hidden states of the final step).
<b, DIRECTION*H, T>
* Hidden states of the final step.
<b, DIRECTION*H, T>
* Memory state of the final step.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=TensorInputType(),
initial_h=TensorInputType(),
initial_c=TensorInputType(),
weight=TensorInputType(const=True), # ifoz layout
bias=TensorInputType(const=True, optional=True, default=None), # ifoz layout
direction=StringInputType(const=True, default="forward"),
output_sequence=BoolInputType(const=True, default=False),
recurrent_activation=StringInputType(const=True, default="sigmoid"),
cell_activation=StringInputType(const=True, default="tanh"),
activation=StringInputType(const=True, default="tanh"),
peephole=TensorInputType(const=True, optional=True, default=None), # ifo layout
clip=FloatInputType(const=True, optional=True, default=None),
)
def __init__(self, **kwargs):
super(lstm, self).__init__(**kwargs)
def type_inference(self):
if self.x.rank != 3:
raise ValueError(
"Invalid input shape. Expecting Rank 3 input, got {}".format(
len(self.x.shape)
)
)
sequence_length, batch_size, input_size = self.x.shape
if self.weight.rank != 2:
raise ValueError(
"Invalid weight shape. Expecting Rank 2 input, got {}".format(
len(self.weight.shape)
)
)
input_hidden_size, hidden_dim = self.weight.shape
hidden_size = input_hidden_size - input_size
direction = self.direction.val
valid_directions = {"forward", "reverse", "bidirectional"}
if direction not in valid_directions:
raise ValueError(
"Direction {} not supported. Supported directions: {}".format(
direction, valid_directions
)
)
dim_factor = 8 if direction == "bidirectional" else 4
if hidden_size != (hidden_dim // dim_factor):
raise ValueError(
"Incorrect weight matrix: hidden dim size mismatch. \
Provided {}. Expecting <b, 4*DIRECTION*H>".format(
self.weight.shape
)
)
out_seq_len = sequence_length if self.output_sequence.val else 1
num_directions = dim_factor // 4
output_shape = [out_seq_len, batch_size, num_directions * hidden_size]
output_h_shape = [batch_size, num_directions * hidden_size]
output_c_shape = [batch_size, num_directions * hidden_size]
return (
types.tensor(self.x.dtype, tuple(output_shape)),
types.tensor(self.x.dtype, tuple(output_h_shape)),
types.tensor(self.x.dtype, tuple(output_c_shape)),
|
@register_op(doc_str="")
class rnn(Operation):
"""
Recurrent neural network (RNN).
.. math::
       h_t = \rm{activation}(W_{ih} x_t + b_{ih} + W_{hh} h_{t-1} + b_{hh})
Where:
* ``W_{ih}`` is input weight.
* ``W_{hh}`` is hidden/recurrent weight.
* ``h_t`` is the hidden state at time ``t``.
* ``x_t`` is the input at time ``t``.
    * ``h_{t-1}`` is the hidden state of the layer at time ``t-1`` or the initial
hidden state at time ``0``.
Parameters
----------
x: <s, b, I, T> (Required)
* ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
input dimension.
initial_h: <b, H, T> (Required)
* ``H`` denotes hidden size.
    weight: const<I+H, H, T> (Required) - Weight matrix
bias: const<2, H, T> (Optional) [Default all 0s]
* ``bias[0]`` and ``bias[1]`` are input-hidden and hidden-hidden
bias, respectively.
direction: const<str> (Optional) [Default=forward]
* Either ``forward`` or ``reverse``.
output_sequence: const<bool> (Optional) [Default=False]
* Outputs every step if ``True``.
activation: const<str> (Optional) [Default=tanh]
* Supported activation functions: ``relu``, ``tanh``, ``sigmoid``,
``sigmoid_hard``, ``scaled_tanh``, and ``linear``.
Returns
-------
<s, b, H, T> or <1, b, H, T>
* If ``output_sequence == True`` (hidden states from every step):
``<s, b, H, T>``.
* Else ``<1, b, H, T>`` (hidden states of the final step).
<b, H, T>
* Hidden states of the final step.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=TensorInputType(),
initial_h=TensorInputType(),
weight=TensorInputType(const=True),
bias=TensorInputType(const=True, optional=True, default=None),
direction=StringInputType(const=True, default="forward"),
output_sequence=BoolInputType(const=True, default=False),
activation=StringInputType(const=True, default="tanh"),
)
def __init__(self, **kwargs):
super(rnn, self).__init__(**kwargs)
def type_inference(self):
if self.x.rank != 3:
raise ValueError(
"Invalid input shape. Expecting Rank 3 input, got {}".format(
len(self.x.shape)
)
)
sequence_length, batch_size, input_size = self.x.shape
if self.weight.rank != 2:
raise ValueError(
"Invalid weight shape. Expecting Rank 2 input, got {}".format(
len(self.weight.shape)
)
)
_, hidden_size = self.weight.shape
direction = self.direction.val
valid_directions = {"forward", "reverse"}
if direction not in valid_directions:
raise ValueError(
"Direction {} not supported. Supported directions: {}".format(
direction, valid_directions
)
)
out_seq_len = sequence_length if self.output_sequence.val else 1
output_shape = [out_seq_len, batch_size, hidden_size]
output_h_shape = [batch_size, hidden_size]
return (
types.tensor(self.x.dtype, tuple(output_shape)),
types.tensor(self.x.dtype, tuple(output_h_shape)),
)
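# A minimal NumPy sketch (not part of the op definition) of one simple-RNN
# step per the equation above, with biases omitted: weight has shape (I+H, H),
# stacking W_ih over W_hh, and activation is tanh.
def _rnn_step_reference(x_t, h_prev, weight):
    import numpy as np
    I = x_t.shape[0]
    return np.tanh(x_t @ weight[:I] + h_prev @ weight[I:])  # h_t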
|
)
|
timetrigger.go
|
/*
Copyright 2017 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"text/tabwriter"
"github.com/satori/go.uuid"
"github.com/urfave/cli"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fission/fission"
"github.com/fission/fission/crd"
)
func ttCreate(c *cli.Context) error {
client := getClient(c.GlobalString("server"))
name := c.String("name")
if len(name) == 0 {
name = uuid.NewV4().String()
}
fnName := c.String("function")
if len(fnName) == 0 {
fatal("Need a function name to create a trigger, use --function")
}
fnNamespace := c.String("fnNamespace")
cron := c.String("cron")
if len(cron) == 0 {
fatal("Need a cron spec like '0 30 * * *', '@every 1h30m', or '@hourly'; use --cron")
}
tt := &crd.TimeTrigger{
Metadata: metav1.ObjectMeta{
Name: name,
Namespace: fnNamespace,
},
Spec: fission.TimeTriggerSpec{
Cron: cron,
FunctionReference: fission.FunctionReference{
Type: fission.FunctionReferenceTypeFunctionName,
Name: fnName,
},
},
}
// if we're writing a spec, don't call the API
if c.Bool("spec") {
specFile := fmt.Sprintf("timetrigger-%v.yaml", name)
err := specSave(*tt, specFile)
checkErr(err, "create time trigger spec")
return nil
}
_, err := client.TimeTriggerCreate(tt)
checkErr(err, "create Time trigger")
fmt.Printf("trigger '%v' created\n", name)
return err
}
func ttGet(c *cli.Context) error {
return nil
}
func
|
(c *cli.Context) error {
client := getClient(c.GlobalString("server"))
ttName := c.String("name")
if len(ttName) == 0 {
fatal("Need name of trigger, use --name")
}
ttNs := c.String("triggerns")
tt, err := client.TimeTriggerGet(&metav1.ObjectMeta{
Name: ttName,
Namespace: ttNs,
})
checkErr(err, "get time trigger")
updated := false
newCron := c.String("cron")
if len(newCron) != 0 {
tt.Spec.Cron = newCron
updated = true
}
// TODO : During update, function has to be in the same ns as the trigger object
// but since we are not checking this for other triggers too, not sure if we need a check here.
fnName := c.String("function")
if len(fnName) > 0 {
tt.Spec.FunctionReference.Name = fnName
updated = true
}
if !updated {
fatal("Nothing to update. Use --cron or --function.")
}
_, err = client.TimeTriggerUpdate(tt)
checkErr(err, "update Time trigger")
fmt.Printf("trigger '%v' updated\n", ttName)
return nil
}
func ttDelete(c *cli.Context) error {
client := getClient(c.GlobalString("server"))
ttName := c.String("name")
if len(ttName) == 0 {
fatal("Need name of trigger to delete, use --name")
}
ttNs := c.String("triggerns")
err := client.TimeTriggerDelete(&metav1.ObjectMeta{
Name: ttName,
Namespace: ttNs,
})
checkErr(err, "delete trigger")
fmt.Printf("trigger '%v' deleted\n", ttName)
return nil
}
func ttList(c *cli.Context) error {
client := getClient(c.GlobalString("server"))
ttNs := c.String("triggerns")
tts, err := client.TimeTriggerList(ttNs)
checkErr(err, "list Time triggers")
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "%v\t%v\t%v\n", "NAME", "CRON", "FUNCTION_NAME")
for _, tt := range tts {
fmt.Fprintf(w, "%v\t%v\t%v\n",
tt.Metadata.Name, tt.Spec.Cron, tt.Spec.FunctionReference.Name)
}
w.Flush()
return nil
}
|
ttUpdate
|
actions.js
|
import Reflux from 'reflux'
|
export default Reflux.createActions(['getAll','add','remove']);
| |
interface.go
|
/*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package proxy
import (
internalinterfaces "kubeform.dev/provider-aws-api/client/informers/externalversions/internalinterfaces"
v1alpha1 "kubeform.dev/provider-aws-api/client/informers/externalversions/proxy/v1alpha1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
}
type group struct {
|
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}
| |
consistency_tests.rs
|
// Copyright 2017 The UNIC Project Developers.
//
// See the COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[macro_use]
extern crate unic_char_range;
use unic_ucd_category::GeneralCategory;
use unic_ucd_common::{is_alphabetic, is_alphanumeric, is_control, is_numeric, is_white_space};
macro_rules! assert_char {
(
$ch:expr,
if $precondition:expr => [
$( $condition:expr $(,)* ),+
]
) => (
if $precondition {
$(
assert!(
$condition,
"Code-point U+{:04x} meets precondition `{}` but not condition `{}`.",
$ch as u32,
stringify!($precondition),
stringify!($condition)
);
)+
}
);
}
#[test]
fn test_values_internally_and_against_general_category() {
for ch in chars!(..) {
let ch_is_alphabetic = is_alphabetic(ch);
let ch_is_alphanumeric = is_alphanumeric(ch);
let ch_is_control = is_control(ch);
let ch_is_numeric = is_numeric(ch);
let ch_is_white_space = is_white_space(ch);
let gc = GeneralCategory::of(ch);
let gc_is_letter_number = gc == GeneralCategory::LetterNumber;
let gc_is_control = gc == GeneralCategory::Control;
// Alphabetic
assert_char!(ch, if ch_is_alphabetic => [
!ch_is_control,
!ch_is_numeric || gc_is_letter_number,
!ch_is_white_space,
ch_is_alphanumeric,
]);
// Control
assert_char!(ch, if ch_is_control => [
!ch_is_alphabetic,
// Has overlap with ch_is_white_space, like U+0009..U+000D, U+0085
!ch_is_numeric,
!ch_is_alphanumeric,
]);
// Numeric
assert_char!(ch, if ch_is_numeric => [
!ch_is_alphabetic || gc_is_letter_number,
!ch_is_control,
!ch_is_white_space,
ch_is_alphanumeric,
]);
// White Space
assert_char!(ch, if ch_is_white_space => [
!ch_is_alphabetic,
// has overlap with ch_is_control, like U+0009..U+000D, U+0085
!ch_is_numeric,
!ch_is_alphanumeric,
]);
// Alphanumeric
assert_char!(ch, if ch_is_alphanumeric => [
ch_is_alphabetic || ch_is_numeric,
!ch_is_control,
!ch_is_white_space,
]);
// General Category vs common
assert_char!(ch, if ch_is_control => [ gc_is_control ]);
assert_char!(ch, if gc_is_control => [ ch_is_control ]);
assert_char!(ch, if gc.is_letter() => [ ch_is_alphabetic ]);
assert_char!(ch, if gc.is_number() => [ ch_is_numeric ]);
|
}
}
|
|
game.rs
|
use std::collections::HashMap;
use std::sync::{Arc, mpsc, RwLock, TryLockResult};
use std::time::{Duration, Instant};
use actix::*;
use actix_web::web::Data;
use log::{error, info};
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use wsbps::VarInt;
use crate::Connection;
use crate::packets::{ClientPackets, GameState, PlayerDataMode, QuestionData, ServerPackets, StateChange};
use crate::packets::ServerPackets::PlayerData;
use crate::socket::GameData;
use crate::tools::{Identifier, random_identifier};
pub type AnswerIndex = u8;
pub type QuestionIndex = u8;
pub struct GameManager {
pub games: Arc<RwLock<HashMap<Identifier, Game>>>,
}
impl GameManager {
const SLEEP_INTERVAL: Duration = Duration::from_secs(1);
const START_DELAY: Duration = Duration::from_secs(5);
const QUESTION_TIME: Duration = Duration::from_secs(10);
const MARK_TIME: Duration = Duration::from_secs(3);
const BONUS_TIME: Duration = Duration::from_secs(5);
const POINTS: u32 = 100;
const BONUS_POINTS: f32 = 200.0;
pub fn new() -> Data<Addr<GameManager>> {
Data::new(GameManager {
games: Arc::new(RwLock::new(HashMap::new()))
}.start())
}
}
#[derive(Message)]
#[rtype(result = "()")]
#[derive(Debug)]
enum GameChangeType {
Remove,
Started,
SkipQuestion,
NextQuestion,
SyncTime { total: Duration, remaining: Duration },
Continue,
}
impl Actor for GameManager {
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Self::Context) {
ctx.run_interval(GameManager::SLEEP_INTERVAL, |act, _ctx| {
let arc = act.games.clone();
let mut games = arc.write().unwrap();
            games.iter_mut().for_each(|(_id, game)| game.sync());
// games.iter_mut()
// .for_each(|(id, game)|{
// game.timer.sync(game);
//
// });
//
// let changes = games.par_iter()
// .filter_map(|(id, mut game)| {
//
// if game.state == GameState::Stopped {
// Some((id, GameChangeType::Remove))
// } else if game.state != GameState::Waiting {
// let time = Instant::now();
// let elapsed_since_sync = time - game.time.last_sync;
// if game.state == GameState::Starting {
// Some(if elapsed_since_sync >= GameManager::START_DELAY {
// (id, GameChangeType::Started)
// } else {
// let remaining = time - game.time.game_start;
// (id, GameChangeType::SyncTime {
// total: GameManager::QUESTION_TIME,
// remaining,
// })
// })
// } else {
// None
// }
// } else {
// None
// }
// })
// .collect::<Vec<(&Identifier, GameChangeType)>>();
});
}
}
#[derive(Message)]
#[rtype(result = "ClientAction")]
pub enum ServerAction {
Packet {
packet: ClientPackets,
ret: Addr<Connection>,
},
DoStateChange {
state: StateChange,
game_data: GameData,
},
TryKick { id: Identifier, game_data: GameData },
None,
}
#[derive(Message, Clone)]
#[rtype(result = "()")]
pub enum ClientAction {
CreatedGame { id: Identifier, title: String },
NameTakenResult(bool),
Packet(ServerPackets),
Error(&'static str),
JoinedGame { id: Identifier, player_id: Identifier, title: String },
StateChange(StateChange),
BeginKick(Identifier),
Disconnect,
Multiple(Vec<ClientAction>),
None,
}
impl Handler<ServerAction> for GameManager {
type Result = MessageResult<ServerAction>;
fn handle(&mut self, msg: ServerAction, ctx: &mut Self::Context) -> Self::Result {
MessageResult(match msg {
ServerAction::Packet { packet, ret } => match packet {
ClientPackets::CreateGame { title, questions } => {
let mut id: Identifier;
let mut games = self.games.write().unwrap();
loop {
id = random_identifier(Game::ID_LENGTH);
if !games.contains_key(&id) { break; };
};
let mut q = Vec::with_capacity(questions.len());
for que in questions {
q.push(Question {
data: que,
start_time: Instant::now(),
})
}
let game = Game {
host: ret,
id: id.clone(),
title: title.clone(),
questions: q,
players: Arc::new(RwLock::new(HashMap::new())),
state: GameState::Waiting,
timer: GameTimer::new(),
};
games.insert(id.clone(), game);
ClientAction::CreatedGame {
id,
title,
}
}
ClientPackets::CheckNameTaken { id, name } => {
let games = self.games.read().unwrap();
let game = games.get(&id);
match game {
None => ClientAction::Error("That game code doesn't exist"),
Some(game) => ClientAction::NameTakenResult(game.is_name_taken(&name))
}
}
ClientPackets::RequestGameState { id } => {
let games = self.games.read().unwrap();
let game = games.get(&id);
ClientAction::Packet(ServerPackets::GameState {
state: match game {
None => GameState::DoesNotExist,
Some(game) => game.state.clone()
}
})
}
ClientPackets::RequestJoin { id, name } => {
let mut games = self.games.write().unwrap();
let game = games.get_mut(&id);
match game {
None => ClientAction::Error("That game code doesn't exist"),
Some(game) => {
if game.is_name_taken(&name) {
ClientAction::Error("That name is already in use")
} else {
let player_id = game.new_player(name, ret);
ClientAction::JoinedGame { id, player_id, title: game.title.clone() }
}
}
}
}
ClientPackets::StateChange { state } => ClientAction::StateChange(state),
ClientPackets::Kick { id } => ClientAction::BeginKick(id),
_ => ClientAction::None
}
ServerAction::DoStateChange { state, game_data } => {
match state {
StateChange::Start => {
if game_data.game_id.is_none() {
ClientAction::Error("You are not in a game.")
} else {
let mut games = self.games.write().unwrap();
let game = games.get_mut(&game_data.game_id.unwrap());
match game {
None => ClientAction::Multiple(vec![
ClientAction::Error("You are not in a game."),
ClientAction::Disconnect,
]),
Some(game) => {
game.state = GameState::Starting;
game.timer.track(GameManager::START_DELAY);
game.broadcast(ServerPackets::GameState { state: GameState::Starting });
ClientAction::None
}
}
}
}
StateChange::Skip => ClientAction::None,
StateChange::Disconnect => {
if game_data.game_id.is_some() {
let mut games = self.games.write().unwrap();
let game_id = game_data.game_id.unwrap();
let game = games.remove(&game_id);
if game.is_some() {
if game_data.hosting {
let mut game = game.unwrap();
info!("Shutting down game {} ({}) because host left", game.title, game.id);
game.broadcast(ServerPackets::Disconnect { reason: String::from("Game ended.") });
} else if game_data.player_id.is_some() {
game.unwrap().remove_player(game_data.player_id.unwrap())
}
}
}
ClientAction::Disconnect
}
}
}
ServerAction::TryKick { id, game_data } => {
if !game_data.hosting {
ClientAction::Error("You are not the host.")
} else {
if game_data.game_id.is_some() {
let mut games = self.games.write().unwrap();
let game = games.get_mut(&game_data.game_id.unwrap());
match game {
None => ClientAction::Error("You are not in a game."),
Some(game) => {
game.remove_player(id);
ClientAction::None
}
}
} else {
ClientAction::Error("You are not in a game.")
}
}
}
ServerAction::None => ClientAction::None,
})
}
}
#[derive(Debug)]
pub struct Question {
pub data: QuestionData,
pub start_time: Instant,
}
#[derive(Debug)]
pub struct Game {
pub host: Addr<Connection>,
pub id: Identifier,
pub title: String,
pub questions: Vec<Question>,
pub players: Arc<RwLock<HashMap<Identifier, Player>>>,
pub state: GameState,
pub timer: GameTimer,
}
impl Game {
pub fn sync(&mut self)
|
}
#[derive(Debug)]
pub struct GameTime {
pub last_sync: Instant,
pub game_start: Instant,
pub need_sync: bool,
}
#[derive(Debug)]
pub struct GameTimer {
pub last_sync: Instant,
pub start: Instant,
pub duration: Duration,
pub elapsed: Duration,
pub need_sync: bool,
}
impl GameTimer {
const SYNC_DELAY: Duration = Duration::from_secs(2);
pub fn new() -> GameTimer {
GameTimer {
last_sync: Instant::now(),
start: Instant::now(),
duration: Duration::from_secs(0),
elapsed: Duration::from_secs(0),
need_sync: false,
}
}
pub fn track(&mut self, duration: Duration) {
self.duration = duration;
self.start = Instant::now();
self.elapsed = Duration::from_secs(0);
self.need_sync = true;
}
pub fn remaining(&self) -> u32 {
if self.duration < self.elapsed {
0
} else {
(self.duration - self.elapsed).as_millis() as u32
}
}
}
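// A minimal sketch (added for illustration) of the intended GameTimer flow:
// `track` arms a fresh window and `remaining` reports the unelapsed part of
// it in milliseconds.
#[cfg(test)]
mod game_timer_tests {
    use super::*;

    #[test]
    fn track_resets_elapsed_and_remaining() {
        let mut timer = GameTimer::new();
        timer.track(Duration::from_secs(10));
        // track() zeroes `elapsed`, so the whole window is still remaining.
        assert_eq!(timer.remaining(), 10_000);
    }
}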
impl Game {
const ID_LENGTH: usize = 5;
fn is_name_taken(&self, name: &String) -> bool {
let players = self.players.read().unwrap();
players.values().any(|v| v.name.eq_ignore_ascii_case(name))
}
fn remove_player(&mut self, id: Identifier) {
let mut players = self.players.write().unwrap();
let player = players.remove(&id);
match player {
None => {}
Some(player) => {
players.values().for_each(|p| p.ret.do_send(ClientAction::Packet(player.as_data(PlayerDataMode::Remove))));
player.ret.do_send(ClientAction::Multiple(vec![
ClientAction::Packet(ServerPackets::Disconnect { reason: String::from("Removed from game.") }),
ClientAction::Disconnect,
]))
}
}
}
fn new_player(&mut self, name: String, ret: Addr<Connection>) -> Identifier {
let mut players = self.players.write().unwrap();
let mut id: Identifier;
loop {
id = random_identifier(Player::ID_LENGTH);
if !players.contains_key(&id) { break; };
};
let player = Player {
id: id.clone(),
name: name.clone(),
score: 0,
answers: HashMap::new(),
answer_time: None,
ret: ret.clone(),
};
for v in players.values() {
v.ret.do_send(ClientAction::Packet(player.as_data(PlayerDataMode::Add)));
ret.do_send(ClientAction::Packet(v.as_data(PlayerDataMode::Add)));
}
ret.do_send(ClientAction::Packet(player.as_data(PlayerDataMode::Me)));
self.host.do_send(ClientAction::Packet(player.as_data(PlayerDataMode::Add)));
players.insert(id.clone(), player);
id
}
fn broadcast(&self, packet: ServerPackets) {
let action = ClientAction::Packet(packet);
let players = self.players.read().unwrap();
players.values().for_each(|p| p.ret.do_send(action.clone()));
self.host.do_send(action)
}
fn broadcast_excluding(&self, excluding: Identifier, packet: ServerPackets) {
let players = self.players.read().unwrap();
players.values().filter(|p| p.id != excluding).for_each(|p| p.ret.do_send(ClientAction::Packet(packet.clone())))
}
}
#[derive(Debug)]
pub struct Player {
pub id: Identifier,
pub name: String,
pub score: u32,
pub answers: HashMap<QuestionIndex, AnswerIndex>,
pub answer_time: Option<Instant>,
pub ret: Addr<Connection>,
}
impl Player {
const ID_LENGTH: usize = 5;
pub fn as_data(&self, mode: PlayerDataMode) -> ServerPackets {
ServerPackets::PlayerData {
id: self.id.clone(),
name: self.name.clone(),
mode,
}
}
}
|
{
if !self.timer.need_sync { return; }
let now = Instant::now();
self.timer.elapsed = now - self.timer.start;
if self.timer.last_sync + GameTimer::SYNC_DELAY <= now {
self.timer.last_sync = now;
let remaining = self.timer.remaining();
let packet = ServerPackets::TimeSync {
total: VarInt(self.timer.duration.as_millis() as u32),
remaining: VarInt(remaining as u32),
};
self.broadcast(packet);
if remaining == 0 {
self.timer.need_sync = false;
}
}
}
|
realtime.py
|
# -*- coding: utf-8 -*-
"""
module for realtime watch and notification
"""
import datetime as dt
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.utils import formataddr, parseaddr
from re import match
import pandas as pd
from xalpha.cons import today
from xalpha.info import _download, fundinfo
from xalpha.trade import trade
def
|
(s):
"""
    parse the email sender and receiver, with support for Chinese encoding
    :param s: e.g. 'name <[email protected]>, name2 <[email protected]>'
"""
name, addr = parseaddr(s)
return formataddr((Header(name, "utf-8").encode(), addr))
def mail(
title,
content,
sender=None,
receiver=None,
password=None,
server=None,
port=None,
sender_name="sender",
receiver_name=None,
):
"""
send email
:param title: str, title of the email
:param content: str, content of the email, plain text only
    :param conf: all other parameters can be imported as a dictionary, e.g. conf = {'sender': '[email protected]',
'sender_name':'name', 'receiver':['[email protected]','[email protected]'], 'password':'123456',
'server':'smtp.bb.com','port':123, 'receiver_name':['me','guest']}.
The receiver_name and sender_name options can be omitted.
"""
ret = True
try:
if receiver_name is None:
receiver_name = ["receiver" for _ in receiver]
msg = MIMEText(content, "plain", "utf-8")
msg["From"] = _format_addr("%s <%s>" % (sender_name, sender))
        # the arguments are the sender's display name and email address
receivestr = ""
for i, s in enumerate(receiver):
receivestr += receiver_name[i]
receivestr += " <"
receivestr += s
receivestr += ">, "
msg["To"] = _format_addr(receivestr) # 括号里的对应收件人邮箱昵称、收件人邮箱账号
msg["Subject"] = title # 邮件的主题,即标题
server = smtplib.SMTP_SSL(server, port) # 发件人邮箱中的SMTP服务器和端口号
server.login(sender, password) # 括号中对应的是发件人邮箱账号、邮箱密码
server.sendmail(
sender, receiver, msg.as_string()
) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件
server.quit()
except Exception:
ret = False
return ret
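# Example usage (a sketch; the addresses, password, and server below are
# placeholders in the same style as the docstring above):
# conf = {'sender': '[email protected]', 'sender_name': 'watcher',
#         'receiver': ['[email protected]'], 'receiver_name': ['me'],
#         'password': '***', 'server': 'smtp.bb.com', 'port': 465}
# mail('daily report', 'all policies fine', **conf)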
class rtdata:
"""
get real time data of specific funds
:param code: string of six digitals for funds
"""
def __init__(self, code):
url = "http://fundgz.1234567.com.cn/js/" + code + ".js"
page = _download(url)
self.code = code
self.rtvalue = float(match(r'.*"gsz":"(\d*\.\d*)",.*', page.text)[1])
self.name = match(r'.*"name":"([^,]*)",.*', page.text)[1]
self.time = dt.datetime.strptime(
match(r'.*"gztime":"([\d\s\-\:]*)".*', page.text)[1], "%Y-%m-%d %H:%M"
)
def rfundinfo(
code, round_label=0, dividend_label=0, fetch=False, save=False, path="", form="csv"
):
"""
    give a fundinfo object with today's estimated net value at run time
:param code: string of six digitals for funds
    :param fetch: boolean; when the fetch option is enabled, the info class will try fetching from local files first in the init
    :param save: boolean; when the save option is enabled, info classes automatically save the class to files
:param path: string, the file path prefix of IO
:param form: string, the format of IO, options including: 'csv'
:returns: the fundinfo object
"""
fundobj = fundinfo(
code,
round_label=round_label,
dividend_label=dividend_label,
fetch=fetch,
save=save,
path=path,
form=form,
)
rt = rtdata(code)
rtdate = dt.datetime.combine(rt.time, dt.time.min)
rtvalue = rt.rtvalue
if (rtdate - fundobj.price.iloc[-1].date).days > 0:
fundobj.price = fundobj.price.append(
pd.DataFrame(
[[rtdate, rtvalue, fundobj.price.iloc[-1].totvalue, 0]],
columns=["date", "netvalue", "totvalue", "comment"],
),
ignore_index=True,
)
return fundobj
class review:
"""
review policys and give the realtime purchase suggestions
:param policylist: list of policy object
    :param namelist: list of names of the corresponding policies, default 0 to n-1
    :param date: object of datetime, the check date; today is preferred, dates other than today are not guaranteed
"""
def __init__(self, policylist, namelist=None, date=today()):
self.warn = []
self.message = []
self.policylist = policylist
if namelist is None:
self.namelist = [i for i in range(len(policylist))]
else:
self.namelist = namelist
assert len(self.policylist) == len(self.namelist)
for i, policy in enumerate(policylist):
row = policy.status[policy.status["date"] == date]
if len(row) == 1:
warn = (
policy.aim.name,
policy.aim.code,
row.iloc[0].loc[policy.aim.code],
self.namelist[i],
)
self.warn.append(warn)
if warn[2] > 0:
sug = "买入%s元" % warn[2]
elif warn[2] < 0:
ratio = -warn[2] / 0.005 * 100
share = (
trade(fundinfo(warn[1]), policy.status)
.briefdailyreport()
.get("currentshare", 0)
)
share = -warn[2] / 0.005 * share
sug = "卖出%s%%的份额,也即%s份额" % (ratio, share)
self.message.append(
"根据%s计划,建议%s,%s(%s)" % (warn[3], sug, warn[0], warn[1])
)
self.content = "\n".join(map(str, self.message))
def __str__(self):
return self.content
def notification(self, conf):
"""
send email of self.content, at least support for qq email sender
        :param conf: the configuration dictionary for email send settings; no ** before the dict is needed.
eg.conf = {'sender': '[email protected]',
'sender_name':'name', 'receiver':['[email protected]','[email protected]'], 'password':'123456',
'server':'smtp.bb.com','port':123, 'receiver_name':['me','guest']}.
The receiver_name and sender_name options can be omitted.
"""
if self.content:
ret = mail("Notification", self.content, **conf)
if ret:
print("邮件发送成功")
else:
print("邮件发送失败")
else:
print("没有提醒待发送")
|
_format_addr
|
lib.rs
|
//! A library that allows [proc_macro] function-like macros to be parsed using
//! the [combine] parser combinator crate.
//!
//! [proc_macro]: https://doc.rust-lang.org/stable/proc_macro/index.html
//! [combine]: https://docs.rs/crate/combine
//!
//! ## Motivation
//! When writing a `#[proc_macro_derive]` the input is Rust source code which is
//! well supported by the `syn` crate. However, when writing a `#[proc_macro]`
//! macro, it is common to want to define a custom domain specific language.
//!
//! This crate allows you to write a parser for your DSL using the `combine`
//! parser combinator library. It also preserves the source _span_ information
//! in the parsed result such that `rustc` can provide correct source locations
//! for identifiers and literals that are re-used in the output.
//!
//! ## Implementing a parser
//! This is a basic example using base `combine` parsers.
//!
//! ```rust
//! # extern crate proc_macro;
//! use combine::{ParseError, Parser, Stream};
//! use combine_proc_macro::{Token, Literal};
//! use combine_proc_macro::parser::{delim, keyword, literal, punct};
//!
//! /// Parses expressions like `{ hello "world"! }`.
//! fn hello_grammar<I>() -> impl Parser<I, Output = Literal>
//! where
//! I: Stream<Token = Token>,
//! I::Error: ParseError<I::Token, I::Range, I::Position>,
//! {
//! ( delim('{')
//! , keyword("hello")
//! , literal()
//! , punct('!')
//! , delim('}')
//! ).map(|(_, _, greeting, _, _)| greeting)
//! }
//! ```
//!
//! Using the `parser!` macro can help remove boilerplate.
//!
//! ```rust
|
//! use combine_proc_macro::Literal;
//! use combine_proc_macro::parser;
//! use combine_proc_macro::parser::{delim, keyword, literal, punct};
//!
//! parser!(fn hello_grammar() -> Literal {
//! ( delim('{')
//! , keyword("hello")
//! , literal()
//! , punct('!')
//! , delim('}')
//! ).map(|(_, _, greeting, _, _)| greeting)
//! });
//! ```
//!
//! ## Implementing a macro
//! A proc macro must be defined at the crate root within the `lib.rs` file.
//!
//! ```rust,ignore
//! extern crate proc_macro;
//!
//! use combine::parser::Parser;
//! use combine_proc_macro::{Input, Incomplete};
//! use proc_macro::TokenStream;
//!
//! #[proc_macro]
//! pub fn hello_macro(input: TokenStream) -> TokenStream {
//! let input = Input::from(input).with_lookahead(1);
//! let result = hello_grammar().easy_parse(input);
//! let (ast, trailing) = match result {
//! Ok(ok) => ok,
//! Err(err) => panic!("error parsing in `hello_macro` macro: {:#?}", err),
//! };
//! if let Some(diagnostic) = Incomplete::from_stream(trailing) {
//! panic!("unexpected tokens at end of input:\n\n{}", diagnostic);
//! }
//!
//! impl_hello_macro(&ast) // generate rust output; e.g. using the `quote` crate
//! }
//!
//! # use combine::{ParseError, Stream};
//! # use combine_proc_macro::Token;
//! # use combine_proc_macro::parser::literal;
//! # use proc_macro::Literal;
//! #
//! # fn hello_grammar<I>() -> impl Parser<Input = I, Output = Literal>
//! # where
//! # I: Stream<Item = Token>,
//! # I::Error: ParseError<I::Item, I::Range, I::Position> { literal() }
//! #
//! # fn impl_hello_macro(ast: &Literal) -> TokenStream { unimplemented!() }
//! ```
extern crate proc_macro;
extern crate proc_macro2;
mod boilerplate;
pub mod diagnostic;
pub mod input;
pub mod parser;
pub use diagnostic::Incomplete;
pub use input::{Input, Token};
pub use proc_macro2::{Ident, Literal, Punct};
|
//! # extern crate proc_macro;
//! use combine::Parser;
|
main.rs
|
#[macro_use]
extern crate log;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
#[macro_use]
extern crate anyhow;
use crate::config::Config;
use crate::database::DataBase;
use crate::exloli::ExLoli;
use anyhow::Error;
use futures::executor::block_on;
use once_cell::sync::Lazy;
use teloxide::prelude::*;
use tokio::time::sleep;
use std::env;
use std::str::FromStr;
use std::time;
mod bot;
mod config;
mod database;
mod exhentai;
mod exloli;
mod schema;
mod trans;
mod utils;
mod xpath;
static CONFIG: Lazy<Config> = Lazy::new(|| {
let config_file = std::env::var("EXLOLI_CONFIG");
let config_file = config_file.as_deref().unwrap_or("config.toml");
Config::new(config_file).expect("配置文件解析失败")
});
static BOT: Lazy<AutoSend<Bot>> =
Lazy::new(|| teloxide::Bot::new(&CONFIG.telegram.token).auto_send());
static DB: Lazy<DataBase> = Lazy::new(|| DataBase::init().expect("数据库初始化失败"));
static EXLOLI: Lazy<ExLoli> = Lazy::new(|| block_on(ExLoli::new()).expect("登录失败"));
#[tokio::main]
async fn main() {
env_logger::builder()
|
format_timestamp_secs()
.write_style(env_logger::WriteStyle::Auto)
.filter(Some("teloxide"), log::LevelFilter::Error)
.filter(
Some("exloli"),
log::LevelFilter::from_str(&CONFIG.log_level).expect("LOG 等级设置错误"),
)
.init();
env::set_var("DATABASE_URL", &CONFIG.database_url);
if let Err(e) = run().await {
error!("{}", e);
}
}
fn init_args() -> getopts::Matches {
let args = env::args().collect::<Vec<_>>();
let mut opts = getopts::Options::new();
opts.optflag("", "debug", "调试模式,不自动爬本");
opts.optflag("h", "help", "打印帮助");
let matches = match opts.parse(&args[1..]) {
Ok(v) => v,
Err(e) => panic!("{}", e),
};
if matches.opt_present("h") {
let brief = format!("Usage: {} [options]", args[0]);
print!("{}", opts.usage(&brief));
std::process::exit(0);
}
matches
}
async fn run() -> Result<(), Error> {
let matches = init_args();
env::var("DATABASE_URL").expect("请设置 DATABASE_URL");
let debug_mode = matches.opt_present("debug");
tokio::spawn(async move {
loop {
if !debug_mode {
info!("定时更新开始");
let result = EXLOLI.scan_and_upload().await;
if let Err(e) = result {
error!("定时更新出错:{}", e);
} else {
info!("定时更新完成");
}
}
info!("休眠中,预计 {} 分钟后开始工作", CONFIG.interval / 60);
sleep(time::Duration::from_secs(CONFIG.interval)).await;
}
});
crate::bot::start_bot().await;
Ok(())
}
|
.
|
leetcode_210.py
|
""" Leetcode 210 - Course Schedule II
https://leetcode.com/problems/course-schedule-ii/
1. Topological-Sorting & BFS: Time: O(E+V) Space: O(E+V)
"""
from typing import List
class Solution1:
""" 1. Topological Sorting & BFS """
def find_order(self, numCourses: int,
prerequisites: List[List[int]]) -> List[int]:
|
if __name__ == '__main__':
num_courses = 3
prerequisites = [[0, 1], [0, 2], [1, 2]]
res = Solution1().find_order(num_courses, prerequisites)
print(res)
|
if numCourses == 1:
return [0]
out_dict = {}
in_dict = {}
for x in range(numCourses):
in_dict[x] = 0
for pair in prerequisites:
if out_dict.get(pair[0], 0):
out_dict[pair[0]].append(pair[1])
else:
out_dict[pair[0]] = [pair[1]]
in_dict[pair[1]] += 1
courses_without_in = []
order = []
for item in in_dict.items():
if item[1] == 0:
courses_without_in.append(item[0])
while courses_without_in:
course_no_pre = courses_without_in.pop()
order.append(course_no_pre)
for x in out_dict.get(course_no_pre, []):
in_dict[x] -= 1
if in_dict[x] == 0:
courses_without_in.insert(0, x)
return order[::-1] if len(order) == numCourses else []
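        # Note (a sketch of an alternative): list.insert(0, x) above is O(n);
        # a collections.deque with append()/popleft() would make each queue
        # operation O(1) and still yield a valid topological order.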
|
static-files.rs
|
// Copyright (c) 2016 The Rouille developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
#[macro_use]
extern crate rouille;
use rouille::Response;
fn main() {
// This example shows how to serve static files with rouille.
// Note that like all examples we only listen on `localhost`, so you can't access this server
// from another machine than your own.
println!("Now listening on localhost:8000");
rouille::start_server("localhost:8000", move |request| {
|
// This point of the code is reached only if no static file matched the request URL.
// In a real website you probably want to serve non-static files here (with the `router!`
// macro for example), but here we just return a 404 response.
Response::html("404 error. Try <a href=\"/README.md\"`>README.md</a> or \
<a href=\"/src/lib.rs\">src/lib.rs</a> for example.")
.with_status_code(404)
});
}
|
{
// The `match_assets` function tries to find a file whose name corresponds to the URL
// of the request. The second parameter (`"."`) tells where the files to look for are
// located.
// In order to avoid potential security threats, `match_assets` will never return any
// file outside of this directory even if the URL is for example `/../../foo.txt`.
let response = rouille::match_assets(&request, ".");
// If a file is found, the `match_assets` function will return a response with a 200
// status code and the content of the file. If no file is found, it will instead return
// an empty 404 response.
// Here we check whether if a file is found, and if so we return the response.
if response.is_success() {
return response;
}
}
|
InjectableStore.ts
|
import { applyMiddleware, createStore, combineReducers } from "redux";
|
import { initReducers, initEpics } from "./init/InitRedux";
const epicMiddleware = createEpicMiddleware<AnyAction, AnyAction, Success<any, any>, any>();
function createReducer(asyncReducers: any) {
return combineReducers({
...initReducers,
...asyncReducers
});
}
function createInjectableStore() {
const injectableStore = createStore(
createReducer({}),
applyMiddleware(epicMiddleware)
);
(injectableStore as any).asyncReducers = {};
(injectableStore as any).injectReducer = (key: any, asyncReducer: any) => {
(injectableStore as any).asyncReducers[key] = asyncReducer;
injectableStore.replaceReducer(createReducer((injectableStore as any).asyncReducers));
}
(injectableStore as any).injectEpic = (epic: any) => {
epicMiddleware.run(epic);
}
return injectableStore;
}
const injectableStore = createInjectableStore();
export const epic$ = new BehaviorSubject(initEpics);
const rootEpic = (action$: any, state$: any, deps: any) => epic$.pipe(
mergeMap(epic => epic(action$, state$, deps))
);
epicMiddleware.run(rootEpic);
export default injectableStore;
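// Example (a sketch; the reducer and epic names are hypothetical): a feature
// module can mount its state slice and side effects lazily at route load:
//   (injectableStore as any).injectReducer("todos", todosReducer);
//   (injectableStore as any).injectEpic(todosEpic);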
|
import { createEpicMiddleware } from "redux-observable";
import { AnyAction, Success } from "typescript-fsa";
import { BehaviorSubject } from 'rxjs';
import { mergeMap } from 'rxjs/operators';
|
service.go
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
// Package svc provides everything required to build a Windows service.
//
package svc
import (
"errors"
"runtime"
"syscall"
"unsafe"
"github.com/mjgrzybek/sysinternal/unsafeheader"
"github.com/mjgrzybek/syswindows"
)
// State describes service execution state (Stopped, Running and so on).
type State uint32
const (
Stopped = State(windows.SERVICE_STOPPED)
StartPending = State(windows.SERVICE_START_PENDING)
StopPending = State(windows.SERVICE_STOP_PENDING)
Running = State(windows.SERVICE_RUNNING)
ContinuePending = State(windows.SERVICE_CONTINUE_PENDING)
PausePending = State(windows.SERVICE_PAUSE_PENDING)
Paused = State(windows.SERVICE_PAUSED)
)
// Cmd represents service state change request. It is sent to a service
// by the service manager, and should be actioned upon by the service.
type Cmd uint32
const (
Stop = Cmd(windows.SERVICE_CONTROL_STOP)
Pause = Cmd(windows.SERVICE_CONTROL_PAUSE)
Continue = Cmd(windows.SERVICE_CONTROL_CONTINUE)
Interrogate = Cmd(windows.SERVICE_CONTROL_INTERROGATE)
Shutdown = Cmd(windows.SERVICE_CONTROL_SHUTDOWN)
ParamChange = Cmd(windows.SERVICE_CONTROL_PARAMCHANGE)
NetBindAdd = Cmd(windows.SERVICE_CONTROL_NETBINDADD)
NetBindRemove = Cmd(windows.SERVICE_CONTROL_NETBINDREMOVE)
NetBindEnable = Cmd(windows.SERVICE_CONTROL_NETBINDENABLE)
NetBindDisable = Cmd(windows.SERVICE_CONTROL_NETBINDDISABLE)
DeviceEvent = Cmd(windows.SERVICE_CONTROL_DEVICEEVENT)
HardwareProfileChange = Cmd(windows.SERVICE_CONTROL_HARDWAREPROFILECHANGE)
PowerEvent = Cmd(windows.SERVICE_CONTROL_POWEREVENT)
SessionChange = Cmd(windows.SERVICE_CONTROL_SESSIONCHANGE)
)
// Accepted is used to describe commands accepted by the service.
// Note that Interrogate is always accepted.
type Accepted uint32
const (
AcceptStop = Accepted(windows.SERVICE_ACCEPT_STOP)
AcceptShutdown = Accepted(windows.SERVICE_ACCEPT_SHUTDOWN)
AcceptPauseAndContinue = Accepted(windows.SERVICE_ACCEPT_PAUSE_CONTINUE)
AcceptParamChange = Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)
AcceptNetBindChange = Accepted(windows.SERVICE_ACCEPT_NETBINDCHANGE)
AcceptHardwareProfileChange = Accepted(windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE)
AcceptPowerEvent = Accepted(windows.SERVICE_ACCEPT_POWEREVENT)
AcceptSessionChange = Accepted(windows.SERVICE_ACCEPT_SESSIONCHANGE)
)
// Status combines State and Accepted commands to fully describe running service.
type Status struct {
State State
Accepts Accepted
CheckPoint uint32 // used to report progress during a lengthy operation
WaitHint uint32 // estimated time required for a pending operation, in milliseconds
ProcessId uint32 // if the service is running, the process identifier of it, and otherwise zero
}
// ChangeRequest is sent to the service Handler to request service status change.
type ChangeRequest struct {
Cmd Cmd
EventType uint32
EventData uintptr
CurrentStatus Status
Context uintptr
}
// Handler is the interface that must be implemented to build a Windows service.
type Handler interface {
// Execute will be called by the package code at the start of
// the service, and the service will exit once Execute completes.
// Inside Execute you must read service change requests from r and
// act accordingly. You must keep service control manager up to date
// about state of your service by writing into s as required.
// args contains service name followed by argument strings passed
// to the service.
// You can provide service exit code in exitCode return parameter,
// with 0 being "no error". You can also indicate if exit code,
// if any, is service specific or not by using svcSpecificEC
// parameter.
Execute(args []string, r <-chan ChangeRequest, s chan<- Status) (svcSpecificEC bool, exitCode uint32)
}
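// A minimal sketch of a Handler implementation (illustrative only; not used
// by this package): report StartPending, run until a Stop or Shutdown command
// arrives, and answer Interrogate with the current status.
type echoHandler struct{}

func (h *echoHandler) Execute(args []string, r <-chan ChangeRequest, s chan<- Status) (bool, uint32) {
	s <- Status{State: StartPending}
	s <- Status{State: Running, Accepts: AcceptStop | AcceptShutdown}
	for c := range r {
		switch c.Cmd {
		case Interrogate:
			s <- c.CurrentStatus
		case Stop, Shutdown:
			s <- Status{State: StopPending}
			return false, 0
		}
	}
	return false, 0
}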
var (
// These are used by asm code.
goWaitsH uintptr
cWaitsH uintptr
ssHandle uintptr
sName *uint16
sArgc uintptr
sArgv **uint16
ctlHandlerExProc uintptr
cSetEvent uintptr
cWaitForSingleObject uintptr
cRegisterServiceCtrlHandlerExW uintptr
)
func init() {
k := windows.NewLazySystemDLL("kernel32.dll")
cSetEvent = k.NewProc("SetEvent").Addr()
cWaitForSingleObject = k.NewProc("WaitForSingleObject").Addr()
a := windows.NewLazySystemDLL("advapi32.dll")
cRegisterServiceCtrlHandlerExW = a.NewProc("RegisterServiceCtrlHandlerExW").Addr()
}
type ctlEvent struct {
cmd Cmd
eventType uint32
eventData uintptr
context uintptr
errno uint32
}
// service provides access to windows service api.
type service struct {
name string
h windows.Handle
cWaits *event
goWaits *event
c chan ctlEvent
handler Handler
}
func newService(name string, handler Handler) (*service, error) {
var s service
var err error
s.name = name
s.c = make(chan ctlEvent)
s.handler = handler
s.cWaits, err = newEvent()
if err != nil {
return nil, err
}
s.goWaits, err = newEvent()
if err != nil {
s.cWaits.Close()
return nil, err
}
return &s, nil
}
func (s *service) close() error {
s.cWaits.Close()
s.goWaits.Close()
return nil
}
type exitCode struct {
isSvcSpecific bool
errno uint32
}
func (s *service) updateStatus(status *Status, ec *exitCode) error {
if s.h == 0 {
return errors.New("updateStatus with no service status handle")
}
var t windows.SERVICE_STATUS
t.ServiceType = windows.SERVICE_WIN32_OWN_PROCESS
t.CurrentState = uint32(status.State)
if status.Accepts&AcceptStop != 0 {
t.ControlsAccepted |= windows.SERVICE_ACCEPT_STOP
}
if status.Accepts&AcceptShutdown != 0 {
t.ControlsAccepted |= windows.SERVICE_ACCEPT_SHUTDOWN
}
if status.Accepts&AcceptPauseAndContinue != 0 {
t.ControlsAccepted |= windows.SERVICE_ACCEPT_PAUSE_CONTINUE
}
if status.Accepts&AcceptParamChange != 0 {
t.ControlsAccepted |= windows.SERVICE_ACCEPT_PARAMCHANGE
}
if status.Accepts&AcceptNetBindChange != 0 {
t.ControlsAccepted |= windows.SERVICE_ACCEPT_NETBINDCHANGE
}
if status.Accepts&AcceptHardwareProfileChange != 0 {
t.ControlsAccepted |= windows.SERVICE_ACCEPT_HARDWAREPROFILECHANGE
}
if status.Accepts&AcceptPowerEvent != 0 {
t.ControlsAccepted |= windows.SERVICE_ACCEPT_POWEREVENT
}
if status.Accepts&AcceptSessionChange != 0 {
t.ControlsAccepted |= windows.SERVICE_ACCEPT_SESSIONCHANGE
}
if ec.errno == 0 {
t.Win32ExitCode = windows.NO_ERROR
t.ServiceSpecificExitCode = windows.NO_ERROR
} else if ec.isSvcSpecific {
t.Win32ExitCode = uint32(windows.ERROR_SERVICE_SPECIFIC_ERROR)
t.ServiceSpecificExitCode = ec.errno
} else {
t.Win32ExitCode = ec.errno
t.ServiceSpecificExitCode = windows.NO_ERROR
}
t.CheckPoint = status.CheckPoint
t.WaitHint = status.WaitHint
return windows.SetServiceStatus(s.h, &t)
}
const (
sysErrSetServiceStatusFailed = uint32(syscall.APPLICATION_ERROR) + iota
sysErrNewThreadInCallback
)
func (s *service) run() {
s.goWaits.Wait()
s.h = windows.Handle(ssHandle)
var argv []*uint16
hdr := (*unsafeheader.Slice)(unsafe.Pointer(&argv))
hdr.Data = unsafe.Pointer(sArgv)
hdr.Len = int(sArgc)
hdr.Cap = int(sArgc)
args := make([]string, len(argv))
for i, a := range argv {
args[i] = windows.UTF16PtrToString(a)
}
cmdsToHandler := make(chan ChangeRequest)
changesFromHandler := make(chan Status)
exitFromHandler := make(chan exitCode)
go func() {
ss, errno := s.handler.Execute(args, cmdsToHandler, changesFromHandler)
exitFromHandler <- exitCode{ss, errno}
}()
ec := exitCode{isSvcSpecific: true, errno: 0}
outcr := ChangeRequest{
CurrentStatus: Status{State: Stopped},
}
var outch chan ChangeRequest
inch := s.c
loop:
for {
select {
case r := <-inch:
if r.errno != 0 {
ec.errno = r.errno
break loop
}
inch = nil
outch = cmdsToHandler
outcr.Cmd = r.cmd
outcr.EventType = r.eventType
outcr.EventData = r.eventData
outcr.Context = r.context
case outch <- outcr:
|
inch = s.c
outch = nil
case c := <-changesFromHandler:
err := s.updateStatus(&c, &ec)
if err != nil {
// best suitable error number
ec.errno = sysErrSetServiceStatusFailed
if err2, ok := err.(syscall.Errno); ok {
ec.errno = uint32(err2)
}
break loop
}
outcr.CurrentStatus = c
case ec = <-exitFromHandler:
break loop
}
}
s.updateStatus(&Status{State: Stopped}, &ec)
s.cWaits.Set()
}
func newCallback(fn interface{}) (cb uintptr, err error) {
defer func() {
r := recover()
if r == nil {
return
}
cb = 0
switch v := r.(type) {
case string:
err = errors.New(v)
case error:
err = v
default:
err = errors.New("unexpected panic in syscall.NewCallback")
}
}()
return syscall.NewCallback(fn), nil
}
// BUG(brainman): There is no mechanism to run multiple services
// inside one single executable. Perhaps, it can be overcome by
// using RegisterServiceCtrlHandlerEx Windows api.
// Run executes service name by calling appropriate handler function.
func Run(name string, handler Handler) error {
runtime.LockOSThread()
tid := windows.GetCurrentThreadId()
s, err := newService(name, handler)
if err != nil {
return err
}
ctlHandler := func(ctl, evtype, evdata, context uintptr) uintptr {
e := ctlEvent{cmd: Cmd(ctl), eventType: uint32(evtype), eventData: evdata, context: context}
		// We assume that this callback function runs on the same
		// thread as Run. I could find no statement in the MS
		// documentation that guarantees this, so we verify it here;
		// if the assumption were silently violated, things would go
		// bad very quickly.
i := windows.GetCurrentThreadId()
if i != tid {
e.errno = sysErrNewThreadInCallback
}
s.c <- e
// Always return NO_ERROR (0) for now.
return windows.NO_ERROR
}
var svcmain uintptr
getServiceMain(&svcmain)
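	// StartServiceCtrlDispatcher requires the table to be terminated by an
	// entry whose ServiceName is nil.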
t := []windows.SERVICE_TABLE_ENTRY{
{ServiceName: syscall.StringToUTF16Ptr(s.name), ServiceProc: svcmain},
{ServiceName: nil, ServiceProc: 0},
}
goWaitsH = uintptr(s.goWaits.h)
cWaitsH = uintptr(s.cWaits.h)
sName = t[0].ServiceName
ctlHandlerExProc, err = newCallback(ctlHandler)
if err != nil {
return err
}
go s.run()
err = windows.StartServiceCtrlDispatcher(&t[0])
if err != nil {
return err
}
return nil
}
// StatusHandle returns service status handle. It is safe to call this function
// from inside the Handler.Execute because then it is guaranteed to be set.
// This code will have to change once multiple services are possible per process.
func StatusHandle() windows.Handle {
return windows.Handle(ssHandle)
}
| |
control.py
|
#!/home/zhuqingjie/env/py3_tf_low/bin/python
'''
@Time : 07.26 0026 01:19 PM
@Author : zhuqingjie
@User : zhu
@FileName: control.py
@Software: PyCharm
'''
'''
Overall control logic:
1. control exposes a single port to the outside; external callers send requests to
   control, which dispatches them to the other server modules based on "mode".
2. This also works around the fact that the ai nodes cannot be reached directly from
   outside: the main services run on the ai nodes while the control service runs on
   the login node, so external callers can reach it.
'''
import json, os, requests, sys, time
from flask import Flask, request
# param
ai01_ip = '10.11.1.81'
ai02_ip = '10.11.1.82'
ai03_ip = '10.11.1.83'
ai04_ip = '10.11.1.84'
ai05_ip = '10.11.1.85'
IP = ai05_ip  # IP address of the main service
app = Flask(__name__)
print_ = lambda x: print(f"--> [{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}]: {x}")
printc = lambda s: print(f"\033[1;35m{s}\033[0m")
mode_list = ['1', '2', '21', '22', '3', '4', '5', '51', '6']
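# Dispatch convention: mode 'X' is forwarded to the backend listening on
# port 9000 + int(X), e.g. mode '1' -> 9001, mode '21' -> 9021.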
def do_request(port, body):
url = f'http://{IP}:{port}'
printc(url)
printc(body)
response = requests.post(url, data=body)
printc('do_request ok')
return response.text
@app.route('/', met
|
c_url = request.form
print_(f'\n\tparams: {dic_url}')
error_param = 'error_param'
mode = dic_url.get('mode', error_param)
if mode == error_param:
return json.dumps({
'status': -1,
'info': 'param error: not find "mode"!',
'dst_path': 'null',
})
elif mode not in mode_list:
return json.dumps({
'status': -1,
'info': 'param error: "mode" must in 1-6!',
'dst_path': 'null',
})
elif mode == '1':
return do_request(9001, dic_url)
elif mode == '2':
return do_request(9002, dic_url)
elif mode == '21':
return do_request(9021, dic_url)
elif mode == '22':
return do_request(9022, dic_url)
elif mode == '3':
return do_request(9003, dic_url)
elif mode == '4':
return do_request(9004, dic_url)
elif mode == '5':
return do_request(9005, dic_url)
elif mode == '51':
return do_request(9051, dic_url)
elif mode == '6':
return do_request(9006, dic_url)
# elif mode in ['10', '11']:
# return do_request(9010, dic_url)
else:
return json.dumps({
'status': 2,
'info': 'error: An impossible error.',
'dst_path': 'null',
})
if __name__ == '__main__':
# app.run(host='0.0.0.0', port='7006')
body = {
'mode': '1',
'donotsave': '0',
'userID': 'zhuqingj',
'src_path': '/home/zhangli_lab/zhuqingjie/prj/tunet/res_test/0x.bmp',
}
res = do_request(9001, body)
print(res)
|
hods=['POST'])
def handle():
print('\n')
print('-' * 50)
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    # read the request parameters
di
|
main.go
|
package main
import (
"fmt"
"log"
"os"
"os/signal"
hashpkg "github.com/reecerussell/adaptive-password-hasher"
core "github.com/reecerussell/open-social"
"github.com/reecerussell/open-social/cmd/users/handler"
"github.com/reecerussell/open-social/cmd/users/password"
"github.com/reecerussell/open-social/cmd/users/provider"
"github.com/reecerussell/open-social/cmd/users/repository"
"github.com/reecerussell/open-social/database"
"github.com/reecerussell/open-social/util"
)
const (
connectionStringVar = "CONNECTION_STRING"
configFileVar = "CONFIG_FILE"
)
func main() {
cnf := buildConfig()
ctn := buildServices(cnf)
db := ctn.GetService("Database").(database.Database)
createUser := ctn.GetService("CreateUserHandler").(*handler.CreateUserHandler)
getClaims := ctn.GetService("GetClaimsHandler").(*handler.GetClaimsHandler)
getIDByReference := ctn.GetService("GetIDByReferenceHandler").(*handler.GetIDByReferenceHandler)
getProfile := ctn.GetService("GetProfileHandler").(*handler.GetProfileHandler)
getInfo := ctn.GetService("GetInfoHandler").(*handler.GetInfoHandler)
followUser := ctn.GetService("FollowUserHandler").(*handler.FollowUserHandler)
unfollowUser := ctn.GetService("UnfollowUserHandler").(*handler.UnfollowUserHandler)
app := core.NewApp()
app.AddHealthCheck(database.NewHealthCheck(db))
app.AddMiddleware(core.NewLoggingMiddleware())
app.Post("/users", createUser)
app.Get("/users/id/{referenceId}", getIDByReference)
app.Post("/claims", getClaims)
app.Get("/profile/{username}/{userReferenceID}", getProfile)
app.Get("/info/{userReferenceID}", getInfo)
app.Post("/follow/{userReferenceId}/{followerReferenceId}", followUser)
app.Post("/unfollow/{userReferenceId}/{followerReferenceId}", unfollowUser)
go app.Serve()
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, os.Kill)
<-stop
log.Println("App stopped.")
}
// Config is a configuration model for the service.
type Config struct {
PasswordValidatorOptions *password.Options `json:"passwordValidator"`
PasswordHasherOptions *password.HashOptions `json:"passwordHasher"`
}
func
|
() *Config {
filename := util.ReadEnv(configFileVar, "config.json")
var cnf Config
err := core.ReadConfig(filename, &cnf)
if err != nil {
panic(fmt.Errorf("failed to read config: %v", err))
}
return &cnf
}
func buildServices(cnf *Config) *core.Container {
ctn := core.NewContainer()
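	// Register shared infrastructure as singletons and request-scoped pieces
	// as services. (Assumption: AddSingleton caches one instance for the
	// container's lifetime while AddService constructs on each resolution,
	// inferred from the naming rather than from the library's docs.)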
ctn.AddSingleton("Config", func(ctn *core.Container) interface{} {
return cnf
})
ctn.AddSingleton("Database", func(ctn *core.Container) interface{} {
url := os.Getenv(connectionStringVar)
db, err := database.New(url)
if err != nil {
panic(err)
}
return db
})
ctn.AddService("PasswordValidator", func(ctn *core.Container) interface{} {
cnf := ctn.GetService("Config").(*Config)
val := password.New(cnf.PasswordValidatorOptions)
return val
})
ctn.AddService("PasswordHasher", func(ctn *core.Container) interface{} {
cnf := ctn.GetService("Config").(*Config)
hasher, err := hashpkg.New(
cnf.PasswordHasherOptions.IterationCount,
cnf.PasswordHasherOptions.SaltSize,
cnf.PasswordHasherOptions.KeySize,
cnf.PasswordHasherOptions.HashKey)
if err != nil {
panic(fmt.Errorf("failed to build PasswordHasher: %v", err))
}
return hasher
})
ctn.AddService("UserRepository", func(ctn *core.Container) interface{} {
url := os.Getenv(connectionStringVar)
return repository.NewUserRepository(url)
})
ctn.AddService("FollowerRepository", func(ctn *core.Container) interface{} {
db := ctn.GetService("Database").(database.Database)
return repository.NewFollowerRepository(db)
})
ctn.AddService("UserProvider", func(ctn *core.Container) interface{} {
db := ctn.GetService("Database").(database.Database)
return provider.NewUserProvider(db)
})
ctn.AddService("CreateUserHandler", func(ctn *core.Container) interface{} {
val := ctn.GetService("PasswordValidator").(password.Validator)
hasher := ctn.GetService("PasswordHasher").(hashpkg.Hasher)
repo := ctn.GetService("UserRepository").(repository.UserRepository)
return handler.NewCreateUserHandler(val, hasher, repo)
})
ctn.AddService("GetClaimsHandler", func(ctn *core.Container) interface{} {
hasher := ctn.GetService("PasswordHasher").(hashpkg.Hasher)
repo := ctn.GetService("UserRepository").(repository.UserRepository)
return handler.NewGetClaimsHandler(hasher, repo)
})
ctn.AddService("GetIDByReferenceHandler", func(ctn *core.Container) interface{} {
repo := ctn.GetService("UserRepository").(repository.UserRepository)
return handler.NewGetIDByReferenceHandler(repo)
})
ctn.AddService("GetProfileHandler", func(ctn *core.Container) interface{} {
provider := ctn.GetService("UserProvider").(provider.UserProvider)
return handler.NewGetProfileHandler(provider)
})
ctn.AddService("GetInfoHandler", func(ctn *core.Container) interface{} {
provider := ctn.GetService("UserProvider").(provider.UserProvider)
return handler.NewGetInfoHandler(provider)
})
ctn.AddService("FollowUserHandler", func(ctn *core.Container) interface{} {
repo := ctn.GetService("UserRepository").(repository.UserRepository)
followers := ctn.GetService("FollowerRepository").(repository.FollowerRepository)
return handler.NewFollowUserHandler(repo, followers)
})
ctn.AddService("UnfollowUserHandler", func(ctn *core.Container) interface{} {
repo := ctn.GetService("UserRepository").(repository.UserRepository)
followers := ctn.GetService("FollowerRepository").(repository.FollowerRepository)
return handler.NewUnfollowUserHandler(repo, followers)
})
return ctn
}
|
buildConfig
|
test_ffi.py
|
import sys, py
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class Test__ffi(BaseTestPyPyC):
def test__ffi_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
from _rawffi.alt import CDLL, types
except ImportError:
sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
return 0
libm = CDLL(libm_name)
pow = libm.getfunc('pow', [types.double, types.double],
types.double)
i = 0
res = 0
while i < 300:
tmp = pow(2, 3) # ID: fficall
res += tmp
i += 1
return pow.getaddr(), res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
pow_addr, res = log.result
assert res == 8.0 * 300
py.test.skip("XXX re-optimize _ffi for the JIT?")
loop, = log.loops_by_filename(self.filepath)
if 'ConstClass(pow)' in repr(loop): # e.g. OS/X
pow_addr = 'ConstClass(pow)'
assert loop.match_by_id('fficall', """
guard_not_invalidated(descr=...)
i17 = force_token()
setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>)
f21 = call_release_gil(%s, 2.000000, 3.000000, descr=<Callf 8 ff EF=7>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""" % pow_addr)
def test__ffi_call_frame_does_not_escape(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
from _rawffi.alt import CDLL, types
except ImportError:
sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
return 0
libm = CDLL(libm_name)
pow = libm.getfunc('pow', [types.double, types.double],
types.double)
def mypow(a, b):
return pow(a, b)
i = 0
res = 0
while i < 300:
tmp = mypow(2, 3)
res += tmp
i += 1
return pow.getaddr(), res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
pow_addr, res = log.result
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
# we only force the virtualref, not its content
assert opnames.count('new_with_vtable') == 1
def
|
(self):
from rpython.rlib.clibffi import get_libc_name
def main(libc_name, n):
import time
import os
from threading import Thread
#
if os.name == 'nt':
from _rawffi.alt import WinDLL, types
libc = WinDLL('Kernel32.dll')
sleep = libc.getfunc('Sleep', [types.uint], types.uint)
delays = [0]*n + [1000]
else:
from _rawffi.alt import CDLL, types
libc = CDLL(libc_name)
sleep = libc.getfunc('sleep', [types.uint], types.uint)
delays = [0]*n + [1]
#
def loop_of_sleeps(i, delays):
for delay in delays:
sleep(delay) # ID: sleep
#
threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)]
start = time.time()
for i, thread in enumerate(threads):
thread.start()
for thread in threads:
thread.join()
end = time.time()
return end - start
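        # If sleep() held the GIL, the five threads' 1-second sleeps would
        # serialize (~5s total); releasing the GIL lets them overlap, so the
        # elapsed time should stay near 1 second.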
log = self.run(main, [get_libc_name(), 200], threshold=150,
import_site=True)
assert 1 <= log.result <= 1.5 # at most 0.5 seconds of overhead
loops = log.loops_by_id('sleep')
assert len(loops) == 1 # make sure that we actually JITted the loop
def test_ctypes_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
import ctypes
libm = ctypes.CDLL(libm_name)
fabs = libm.fabs
fabs.argtypes = [ctypes.c_double]
fabs.restype = ctypes.c_double
x = -4
i = 0
while i < 300:
x = fabs(x)
x = x - 100
i += 1
return fabs._ptr.getaddr(), x
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name], import_site=True)
fabs_addr, res = log.result
assert res == -4.0
loop, = log.loops_by_filename(self.filepath)
ops = loop.allops()
opnames = log.opnames(ops)
assert opnames.count('new_with_vtable') == 1 # only the virtualref
py.test.skip("XXX re-optimize _ffi for the JIT?")
assert opnames.count('call_release_gil') == 1
idx = opnames.index('call_release_gil')
call = ops[idx]
assert (call.args[0] == 'ConstClass(fabs)' or # e.g. OS/X
int(call.args[0]) == fabs_addr)
def test__ffi_struct(self):
def main():
from _rawffi.alt import _StructDescr, Field, types
fields = [
Field('x', types.slong),
]
descr = _StructDescr('foo', fields)
struct = descr.allocate()
i = 0
while i < 300:
x = struct.getfield('x') # ID: getfield
x = x+1
struct.setfield('x', x) # ID: setfield
i += 1
return struct.getfield('x')
#
log = self.run(main, [])
py.test.skip("XXX re-optimize _ffi for the JIT?")
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('getfield', """
guard_not_invalidated(descr=...)
i57 = getfield_raw(i46, descr=<FieldS dynamic 0>)
""")
assert loop.match_by_id('setfield', """
setfield_raw(i44, i57, descr=<FieldS dynamic 0>)
""")
def test__cffi_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libm = _cffi_backend.load_library(libm_name)
BDouble = _cffi_backend.new_primitive_type("double")
BInt = _cffi_backend.new_primitive_type("int")
BPow = _cffi_backend.new_function_type([BDouble, BInt], BDouble)
ldexp = libm.load_function(BPow, 'ldexp')
i = 0
res = 0
while i < 300:
tmp = ldexp(1, 3) # ID: cfficall
res += tmp
i += 1
BLong = _cffi_backend.new_primitive_type("long")
ldexp_addr = int(_cffi_backend.cast(BLong, ldexp))
return ldexp_addr, res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
ldexp_addr, res = log.result
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
f97 = call_release_gil(91, i59, 1.0, 3, descr=<Callf 8 fi EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
def test__cffi_call_c_int(self):
if sys.platform == 'win32':
py.test.skip("not tested on Windows (this test must pass on "
"other platforms, and it should work the same way)")
def main():
import os
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libc = _cffi_backend.load_library(None)
BInt = _cffi_backend.new_primitive_type("int")
BClose = _cffi_backend.new_function_type([BInt], BInt)
_dup = libc.load_function(BClose, 'dup')
i = 0
fd0, fd1 = os.pipe()
while i < 300:
tmp = _dup(fd0) # ID: cfficall
os.close(tmp)
i += 1
os.close(fd0)
os.close(fd1)
BLong = _cffi_backend.new_primitive_type("long")
return 42
#
log = self.run(main, [])
assert log.result == 42
loop, = log.loops_by_filename(self.filepath)
if sys.maxint > 2**32:
extra = "i98 = int_signext(i97, 4)"
else:
extra = ""
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
i97 = call_release_gil(91, i59, i50, descr=<Calli 4 i EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
%s
""" % extra, ignore_ops=['guard_not_invalidated'])
def test__cffi_call_size_t(self):
if sys.platform == 'win32':
py.test.skip("not tested on Windows (this test must pass on "
"other platforms, and it should work the same way)")
def main():
import os
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libc = _cffi_backend.load_library(None)
BInt = _cffi_backend.new_primitive_type("int")
BSizeT = _cffi_backend.new_primitive_type("size_t")
BChar = _cffi_backend.new_primitive_type("char")
BCharP = _cffi_backend.new_pointer_type(BChar)
BWrite = _cffi_backend.new_function_type([BInt, BCharP, BSizeT],
BSizeT) # not signed here!
_write = libc.load_function(BWrite, 'write')
i = 0
fd0, fd1 = os.pipe()
buffer = _cffi_backend.newp(BCharP, 'A')
while i < 300:
tmp = _write(fd1, buffer, 1) # ID: cfficall
assert tmp == 1
assert os.read(fd0, 2) == 'A'
i += 1
os.close(fd0)
os.close(fd1)
return 42
#
log = self.run(main, [])
assert log.result == 42
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
i97 = call_release_gil(91, i59, i10, i12, 1, descr=<Calli . iii EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
p98 = call(ConstClass(fromrarith_int__r_uint), i97, descr=<Callr . i EF=4>)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
def test_cffi_call_guard_not_forced_fails(self):
# this is the test_pypy_c equivalent of
# rpython/jit/metainterp/test/test_fficall::test_guard_not_forced_fails
#
# it requires cffi to be installed for pypy in order to run
def main():
import sys
try:
import cffi
except ImportError:
sys.stderr.write('SKIP: cannot import cffi\n')
return 0
ffi = cffi.FFI()
ffi.cdef("""
typedef void (*functype)(int);
int foo(int n, functype func);
""")
lib = ffi.verify("""
#include <signal.h>
typedef void (*functype)(int);
int foo(int n, functype func) {
if (n >= 2000) {
func(n);
}
return n*2;
}
""")
@ffi.callback("functype")
def mycallback(n):
if n < 5000:
return
# make sure that guard_not_forced fails
d = {}
f = sys._getframe()
while f:
d.update(f.f_locals)
f = f.f_back
n = 0
while n < 10000:
res = lib.foo(n, mycallback) # ID: cfficall
# this is the real point of the test: before the
# refactor-call_release_gil branch, the assert failed when
# res == 5000
assert res == n*2
n += 1
return n
log = self.run(main, [], import_site=True,
discard_stdout_before_last_line=True) # <- for Win32
assert log.result == 10000
loop, = log.loops_by_id('cfficall')
assert loop.match_by_id('cfficall', """
...
f1 = call_release_gil(..., descr=<Calli 4 ii EF=7 OS=62>)
...
""")
def test__cffi_bug1(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libm = _cffi_backend.load_library(libm_name)
BDouble = _cffi_backend.new_primitive_type("double")
BSin = _cffi_backend.new_function_type([BDouble], BDouble)
sin = libm.load_function(BSin, 'sin')
def f(*args):
for i in range(300):
sin(*args)
f(1.0)
f(1)
#
libm_name = get_libm_name(sys.platform)
self.run(main, [libm_name])
# assert did not crash
def test_cffi_init_struct_with_list(self):
def main(n):
import sys
try:
import cffi
except ImportError:
sys.stderr.write('SKIP: cannot import cffi\n')
return 0
ffi = cffi.FFI()
ffi.cdef("""
struct s {
short x;
short y;
short z;
};
""")
for i in xrange(n):
ffi.new("struct s *", [i, i, i])
log = self.run(main, [300])
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i161 = int_lt(i160, i43)
guard_true(i161, descr=...)
i162 = int_add(i160, 1)
setfield_gc(p22, i162, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_current .>)
guard_not_invalidated(descr=...)
p163 = force_token()
p164 = force_token()
p165 = getarrayitem_gc(p67, 0, descr=<ArrayP .>)
guard_value(p165, ConstPtr(ptr70), descr=...)
p166 = getfield_gc(p165, descr=<FieldP pypy.objspace.std.dictmultiobject.W_DictMultiObject.inst_strategy .+>)
guard_value(p166, ConstPtr(ptr72), descr=...)
p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=<Callr . EF=5>)
guard_no_exception(descr=...)
i112 = int_signext(i160, 2)
setfield_gc(p167, ConstPtr(ptr85), descr=<FieldP pypy.module._cffi_backend.cdataobj.W_CData.inst_ctype .+>)
setfield_gc(p167, -1, descr=<FieldS pypy.module._cffi_backend.cdataobj.W_CDataNewOwning.inst_length .+>)
i114 = int_ne(i160, i112)
guard_false(i114, descr=...)
--TICK--
i119 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=<Calli . i EF=5 OS=110>)
raw_store(i119, 0, i160, descr=<ArrayS 2>)
raw_store(i119, 2, i160, descr=<ArrayS 2>)
raw_store(i119, 4, i160, descr=<ArrayS 2>)
setfield_gc(p167, i119, descr=<FieldU pypy.module._cffi_backend.cdataobj.W_CData.inst__ptr .+>)
i123 = arraylen_gc(p67, descr=<ArrayP .>)
jump(..., descr=...)
""")
|
test__ffi_call_releases_gil
|
mutability_errors.rs
|
use rustc_hir as hir;
use rustc_hir::Node;
use rustc_index::vec::Idx;
use rustc_middle::mir::{self, ClearCrossCrate, Local, LocalInfo, Location};
use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::source_map::DesugaringKind;
use rustc_span::symbol::kw;
use rustc_span::Span;
use crate::borrow_check::diagnostics::BorrowedContentSource;
use crate::borrow_check::MirBorrowckCtxt;
use crate::util::collect_writes::FindAssignments;
use rustc_errors::{Applicability, DiagnosticBuilder};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum AccessKind {
MutableBorrow,
|
impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
pub(crate) fn report_mutability_error(
&mut self,
access_place: Place<'tcx>,
span: Span,
the_place_err: PlaceRef<'tcx>,
error_access: AccessKind,
location: Location,
) {
debug!(
"report_mutability_error(\
access_place={:?}, span={:?}, the_place_err={:?}, error_access={:?}, location={:?},\
)",
access_place, span, the_place_err, error_access, location,
);
let mut err;
let item_msg;
let reason;
let mut opt_source = None;
let access_place_desc = self.describe_place(access_place.as_ref());
debug!("report_mutability_error: access_place_desc={:?}", access_place_desc);
match the_place_err {
PlaceRef { local, projection: [] } => {
item_msg = format!("`{}`", access_place_desc.unwrap());
if access_place.as_local().is_some() {
reason = ", as it is not declared as mutable".to_string();
} else {
let name = self.local_names[local].expect("immutable unnamed local");
reason = format!(", as `{}` is not declared as mutable", name);
}
}
PlaceRef {
local,
projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
} => {
debug_assert!(is_closure_or_generator(
Place::ty_from(local, proj_base, *self.body, self.infcx.tcx).ty
));
item_msg = format!("`{}`", access_place_desc.unwrap());
if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
reason = ", as it is not declared as mutable".to_string();
} else {
let name = self.upvars[upvar_index.index()].name;
reason = format!(", as `{}` is not declared as mutable", name);
}
}
PlaceRef { local, projection: [ProjectionElem::Deref] }
if self.body.local_decls[local].is_ref_for_guard() =>
{
item_msg = format!("`{}`", access_place_desc.unwrap());
reason = ", as it is immutable for the pattern guard".to_string();
}
PlaceRef { local, projection: [ProjectionElem::Deref] }
if self.body.local_decls[local].is_ref_to_static() =>
{
if access_place.projection.len() == 1 {
item_msg = format!("immutable static item `{}`", access_place_desc.unwrap());
reason = String::new();
} else {
item_msg = format!("`{}`", access_place_desc.unwrap());
let local_info = &self.body.local_decls[local].local_info;
if let LocalInfo::StaticRef { def_id, .. } = *local_info {
let static_name = &self.infcx.tcx.item_name(def_id);
reason = format!(", as `{}` is an immutable static item", static_name);
} else {
bug!("is_ref_to_static return true, but not ref to static?");
}
}
}
PlaceRef { local: _, projection: [proj_base @ .., ProjectionElem::Deref] } => {
if the_place_err.local == Local::new(1)
&& proj_base.is_empty()
&& !self.upvars.is_empty()
{
item_msg = format!("`{}`", access_place_desc.unwrap());
debug_assert!(self.body.local_decls[Local::new(1)].ty.is_region_ptr());
debug_assert!(is_closure_or_generator(
Place::ty_from(
the_place_err.local,
the_place_err.projection,
*self.body,
self.infcx.tcx
)
.ty
));
reason = if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
", as it is a captured variable in a `Fn` closure".to_string()
} else {
", as `Fn` closures cannot mutate their captured variables".to_string()
}
} else {
let source = self.borrowed_content_source(PlaceRef {
local: the_place_err.local,
projection: proj_base,
});
let pointer_type = source.describe_for_immutable_place(self.infcx.tcx);
opt_source = Some(source);
if let Some(desc) = access_place_desc {
item_msg = format!("`{}`", desc);
reason = match error_access {
AccessKind::Mutate => format!(" which is behind {}", pointer_type),
AccessKind::MutableBorrow => {
format!(", as it is behind {}", pointer_type)
}
}
} else {
item_msg = format!("data in {}", pointer_type);
reason = String::new();
}
}
}
PlaceRef {
local: _,
projection:
[.., ProjectionElem::Index(_)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Downcast(..)],
} => bug!("Unexpected immutable place."),
}
debug!("report_mutability_error: item_msg={:?}, reason={:?}", item_msg, reason);
// `act` and `acted_on` are strings that let us abstract over
// the verbs used in some diagnostic messages.
let act;
let acted_on;
let span = match error_access {
AccessKind::Mutate => {
err = self.cannot_assign(span, &(item_msg + &reason));
act = "assign";
acted_on = "written";
span
}
AccessKind::MutableBorrow => {
act = "borrow as mutable";
acted_on = "borrowed as mutable";
let borrow_spans = self.borrow_spans(span, location);
let borrow_span = borrow_spans.args_or_use();
err = self.cannot_borrow_path_as_mutable_because(borrow_span, &item_msg, &reason);
borrow_spans.var_span_label(
&mut err,
format!(
"mutable borrow occurs due to use of {} in closure",
self.describe_any_place(access_place.as_ref()),
),
);
borrow_span
}
};
debug!("report_mutability_error: act={:?}, acted_on={:?}", act, acted_on);
match the_place_err {
// Suggest making an existing shared borrow in a struct definition a mutable borrow.
//
// This is applicable when we have a deref of a field access to a deref of a local -
            // something like `*((*_1).0)`. The local that we get will be a reference to the
// struct we've got a field access of (it must be a reference since there's a deref
// after the field access).
PlaceRef {
local,
projection:
[proj_base @ .., ProjectionElem::Deref, ProjectionElem::Field(field, _), ProjectionElem::Deref],
} => {
err.span_label(span, format!("cannot {ACT}", ACT = act));
if let Some((span, message)) = annotate_struct_field(
self.infcx.tcx,
Place::ty_from(local, proj_base, *self.body, self.infcx.tcx).ty,
field,
) {
err.span_suggestion(
span,
"consider changing this to be mutable",
message,
Applicability::MaybeIncorrect,
);
}
}
// Suggest removing a `&mut` from the use of a mutable reference.
PlaceRef { local, projection: [] }
if {
self.body
.local_decls
.get(local)
.map(|local_decl| {
if let LocalInfo::User(ClearCrossCrate::Set(
mir::BindingForm::ImplicitSelf(kind),
)) = local_decl.local_info
{
// Check if the user variable is a `&mut self` and we can therefore
// suggest removing the `&mut`.
//
// Deliberately fall into this case for all implicit self types,
// so that we don't fall in to the next case with them.
kind == mir::ImplicitSelfKind::MutRef
} else if Some(kw::SelfLower) == self.local_names[local] {
                            // Otherwise, check if the name is the self keyword - in which
                            // case we have an explicit self. Do the same thing in this case
                            // and check for a `self: &mut Self` to suggest removing the `&mut`.
if let ty::Ref(_, _, hir::Mutability::Mut) = local_decl.ty.kind {
true
} else {
false
}
} else {
false
}
})
.unwrap_or(false)
} =>
{
err.span_label(span, format!("cannot {ACT}", ACT = act));
err.span_label(span, "try removing `&mut` here");
}
// We want to suggest users use `let mut` for local (user
// variable) mutations...
PlaceRef { local, projection: [] }
if self.body.local_decls[local].can_be_made_mutable() =>
{
// ... but it doesn't make sense to suggest it on
// variables that are `ref x`, `ref mut x`, `&self`,
// or `&mut self` (such variables are simply not
// mutable).
let local_decl = &self.body.local_decls[local];
assert_eq!(local_decl.mutability, Mutability::Not);
err.span_label(span, format!("cannot {ACT}", ACT = act));
err.span_suggestion(
local_decl.source_info.span,
"consider changing this to be mutable",
format!("mut {}", self.local_names[local].unwrap()),
Applicability::MachineApplicable,
);
}
// Also suggest adding mut for upvars
PlaceRef {
local,
projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
} => {
debug_assert!(is_closure_or_generator(
Place::ty_from(local, proj_base, *self.body, self.infcx.tcx).ty
));
err.span_label(span, format!("cannot {ACT}", ACT = act));
let upvar_hir_id = self.upvars[upvar_index.index()].var_hir_id;
if let Some(Node::Binding(pat)) = self.infcx.tcx.hir().find(upvar_hir_id) {
if let hir::PatKind::Binding(
hir::BindingAnnotation::Unannotated,
_,
upvar_ident,
_,
) = pat.kind
{
err.span_suggestion(
upvar_ident.span,
"consider changing this to be mutable",
format!("mut {}", upvar_ident.name),
Applicability::MachineApplicable,
);
}
}
}
// complete hack to approximate old AST-borrowck
// diagnostic: if the span starts with a mutable borrow of
// a local variable, then just suggest the user remove it.
PlaceRef { local: _, projection: [] }
if {
if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
snippet.starts_with("&mut ")
} else {
false
}
} =>
{
err.span_label(span, format!("cannot {ACT}", ACT = act));
err.span_label(span, "try removing `&mut` here");
}
PlaceRef { local, projection: [ProjectionElem::Deref] }
if self.body.local_decls[local].is_ref_for_guard() =>
{
err.span_label(span, format!("cannot {ACT}", ACT = act));
err.note(
"variables bound in patterns are immutable until the end of the pattern guard",
);
}
// We want to point out when a `&` can be readily replaced
// with an `&mut`.
//
// FIXME: can this case be generalized to work for an
// arbitrary base for the projection?
PlaceRef { local, projection: [ProjectionElem::Deref] }
if self.body.local_decls[local].is_user_variable() =>
{
let local_decl = &self.body.local_decls[local];
let (pointer_sigil, pointer_desc) = if local_decl.ty.is_region_ptr() {
("&", "reference")
} else {
("*const", "pointer")
};
match self.local_names[local] {
Some(name) if !local_decl.from_compiler_desugaring() => {
let label = match local_decl.local_info {
LocalInfo::User(ClearCrossCrate::Set(
mir::BindingForm::ImplicitSelf(_),
)) => {
let (span, suggestion) =
suggest_ampmut_self(self.infcx.tcx, local_decl);
Some((true, span, suggestion))
}
LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::Var(
mir::VarBindingForm {
binding_mode: ty::BindingMode::BindByValue(_),
opt_ty_info,
..
},
))) => {
// check if the RHS is from desugaring
let locations = self.body.find_assignments(local);
let opt_assignment_rhs_span = locations
.first()
.map(|&location| self.body.source_info(location).span);
let opt_desugaring_kind =
opt_assignment_rhs_span.and_then(|span| span.desugaring_kind());
match opt_desugaring_kind {
// on for loops, RHS points to the iterator part
Some(DesugaringKind::ForLoop) => Some((
false,
opt_assignment_rhs_span.unwrap(),
format!(
"this iterator yields `{SIGIL}` {DESC}s",
SIGIL = pointer_sigil,
DESC = pointer_desc
),
)),
// don't create labels for compiler-generated spans
Some(_) => None,
None => {
let (span, suggestion) = suggest_ampmut(
self.infcx.tcx,
local_decl,
opt_assignment_rhs_span,
opt_ty_info,
);
Some((true, span, suggestion))
}
}
}
LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::Var(
mir::VarBindingForm {
binding_mode: ty::BindingMode::BindByReference(_),
..
},
))) => {
let pattern_span = local_decl.source_info.span;
suggest_ref_mut(self.infcx.tcx, pattern_span)
.map(|replacement| (true, pattern_span, replacement))
}
LocalInfo::User(ClearCrossCrate::Clear) => {
bug!("saw cleared local state")
}
_ => unreachable!(),
};
match label {
Some((true, err_help_span, suggested_code)) => {
err.span_suggestion(
err_help_span,
&format!(
"consider changing this to be a mutable {}",
pointer_desc
),
suggested_code,
Applicability::MachineApplicable,
);
}
Some((false, err_label_span, message)) => {
err.span_label(err_label_span, &message);
}
None => {}
}
err.span_label(
span,
format!(
"`{NAME}` is a `{SIGIL}` {DESC}, \
so the data it refers to cannot be {ACTED_ON}",
NAME = name,
SIGIL = pointer_sigil,
DESC = pointer_desc,
ACTED_ON = acted_on
),
);
}
_ => {
err.span_label(
span,
format!(
"cannot {ACT} through `{SIGIL}` {DESC}",
ACT = act,
SIGIL = pointer_sigil,
DESC = pointer_desc
),
);
}
}
}
PlaceRef {
local,
projection: [ProjectionElem::Deref],
                // FIXME: document what this magic number 1 is about
} if local == Local::new(1) && !self.upvars.is_empty() => {
self.expected_fn_found_fn_mut_call(&mut err, span, act);
}
PlaceRef { local: _, projection: [.., ProjectionElem::Deref] } => {
err.span_label(span, format!("cannot {ACT}", ACT = act));
match opt_source {
Some(BorrowedContentSource::OverloadedDeref(ty)) => {
err.help(&format!(
"trait `DerefMut` is required to modify through a dereference, \
but it is not implemented for `{}`",
ty,
));
}
Some(BorrowedContentSource::OverloadedIndex(ty)) => {
err.help(&format!(
"trait `IndexMut` is required to modify indexed content, \
but it is not implemented for `{}`",
ty,
));
}
_ => (),
}
}
_ => {
err.span_label(span, format!("cannot {ACT}", ACT = act));
}
}
err.buffer(&mut self.errors_buffer);
}
/// Targeted error when encountering an `FnMut` closure where an `Fn` closure was expected.
fn expected_fn_found_fn_mut_call(&self, err: &mut DiagnosticBuilder<'_>, sp: Span, act: &str) {
err.span_label(sp, format!("cannot {}", act));
let hir = self.infcx.tcx.hir();
let closure_id = hir.as_local_hir_id(self.mir_def_id).unwrap();
let fn_call_id = hir.get_parent_node(closure_id);
let node = hir.get(fn_call_id);
let item_id = hir.get_parent_item(fn_call_id);
let mut look_at_return = true;
// If we can detect the expression to be an `fn` call where the closure was an argument,
// we point at the `fn` definition argument...
if let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Call(func, args), .. }) = node {
let arg_pos = args
.iter()
.enumerate()
.filter(|(_, arg)| arg.span == self.body.span)
.map(|(pos, _)| pos)
.next();
let def_id = hir.local_def_id(item_id);
let tables = self.infcx.tcx.typeck_tables_of(def_id);
if let Some(ty::FnDef(def_id, _)) =
tables.node_type_opt(func.hir_id).as_ref().map(|ty| &ty.kind)
{
let arg = match hir.get_if_local(*def_id) {
Some(
hir::Node::Item(hir::Item {
ident, kind: hir::ItemKind::Fn(sig, ..), ..
})
| hir::Node::TraitItem(hir::TraitItem {
ident,
kind: hir::TraitItemKind::Fn(sig, _),
..
})
| hir::Node::ImplItem(hir::ImplItem {
ident,
kind: hir::ImplItemKind::Fn(sig, _),
..
}),
) => Some(
arg_pos
.and_then(|pos| {
sig.decl.inputs.get(
pos + if sig.decl.implicit_self.has_implicit_self() {
1
} else {
0
},
)
})
.map(|arg| arg.span)
.unwrap_or(ident.span),
),
_ => None,
};
if let Some(span) = arg {
err.span_label(span, "change this to accept `FnMut` instead of `Fn`");
err.span_label(func.span, "expects `Fn` instead of `FnMut`");
if self.infcx.tcx.sess.source_map().is_multiline(self.body.span) {
err.span_label(self.body.span, "in this closure");
}
look_at_return = false;
}
}
}
if look_at_return && hir.get_return_block(closure_id).is_some() {
// ...otherwise we are probably in the tail expression of the function, point at the
// return type.
match hir.get(hir.get_parent_item(fn_call_id)) {
hir::Node::Item(hir::Item { ident, kind: hir::ItemKind::Fn(sig, ..), .. })
| hir::Node::TraitItem(hir::TraitItem {
ident,
kind: hir::TraitItemKind::Fn(sig, _),
..
})
| hir::Node::ImplItem(hir::ImplItem {
ident,
kind: hir::ImplItemKind::Fn(sig, _),
..
}) => {
err.span_label(ident.span, "");
err.span_label(
sig.decl.output.span(),
"change this to return `FnMut` instead of `Fn`",
);
err.span_label(self.body.span, "in this closure");
}
_ => {}
}
}
}
}
fn suggest_ampmut_self<'tcx>(
tcx: TyCtxt<'tcx>,
local_decl: &mir::LocalDecl<'tcx>,
) -> (Span, String) {
let sp = local_decl.source_info.span;
(
sp,
match tcx.sess.source_map().span_to_snippet(sp) {
Ok(snippet) => {
let lt_pos = snippet.find('\'');
if let Some(lt_pos) = lt_pos {
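                // snippet looks like "&'a self"; keep everything from the
                // lifetime up to the trailing "self" (4 chars) and rebuild it
                // with `mut` inserted.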
format!("&{}mut self", &snippet[lt_pos..snippet.len() - 4])
} else {
"&mut self".to_string()
}
}
_ => "&mut self".to_string(),
},
)
}
// When we want to suggest a user change a local variable to be a `&mut`, there
// are three potential "obvious" things to highlight:
//
// let ident [: Type] [= RightHandSideExpression];
// ^^^^^ ^^^^ ^^^^^^^^^^^^^^^^^^^^^^^
// (1.) (2.) (3.)
//
// We can always fall back on highlighting the first. But chances are good that
// the user experience will be better if we highlight one of the others if possible;
// for example, if the RHS is present and the Type is not, then the type is going to
// be inferred *from* the RHS, which means we should highlight that (and suggest
// that they borrow the RHS mutably).
//
// This implementation attempts to emulate AST-borrowck prioritization
// by trying (3.), then (2.) and finally falling back on (1.).
fn suggest_ampmut<'tcx>(
tcx: TyCtxt<'tcx>,
local_decl: &mir::LocalDecl<'tcx>,
opt_assignment_rhs_span: Option<Span>,
opt_ty_info: Option<Span>,
) -> (Span, String) {
if let Some(assignment_rhs_span) = opt_assignment_rhs_span {
if let Ok(src) = tcx.sess.source_map().span_to_snippet(assignment_rhs_span) {
if let (true, Some(ws_pos)) =
(src.starts_with("&'"), src.find(|c: char| -> bool { c.is_whitespace() }))
{
let lt_name = &src[1..ws_pos];
let ty = &src[ws_pos..];
return (assignment_rhs_span, format!("&{} mut {}", lt_name, ty));
} else if src.starts_with('&') {
let borrowed_expr = &src[1..];
return (assignment_rhs_span, format!("&mut {}", borrowed_expr));
}
}
}
let highlight_span = match opt_ty_info {
// if this is a variable binding with an explicit type,
// try to highlight that for the suggestion.
Some(ty_span) => ty_span,
// otherwise, just highlight the span associated with
// the (MIR) LocalDecl.
None => local_decl.source_info.span,
};
if let Ok(src) = tcx.sess.source_map().span_to_snippet(highlight_span) {
if let (true, Some(ws_pos)) =
(src.starts_with("&'"), src.find(|c: char| -> bool { c.is_whitespace() }))
{
let lt_name = &src[1..ws_pos];
let ty = &src[ws_pos..];
return (highlight_span, format!("&{} mut{}", lt_name, ty));
}
}
let ty_mut = local_decl.ty.builtin_deref(true).unwrap();
assert_eq!(ty_mut.mutbl, hir::Mutability::Not);
(
highlight_span,
if local_decl.ty.is_region_ptr() {
format!("&mut {}", ty_mut.ty)
} else {
format!("*mut {}", ty_mut.ty)
},
)
}
fn is_closure_or_generator(ty: Ty<'_>) -> bool {
ty.is_closure() || ty.is_generator()
}
/// Adds a suggestion to a struct definition given a field access to a local.
/// This function expects the local to be a reference to a struct in order to produce a suggestion.
///
/// ```text
/// LL | s: &'a String
/// | ---------- use `&'a mut String` here to make mutable
/// ```
fn annotate_struct_field(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
field: &mir::Field,
) -> Option<(Span, String)> {
// Expect our local to be a reference to a struct of some kind.
if let ty::Ref(_, ty, _) = ty.kind {
if let ty::Adt(def, _) = ty.kind {
let field = def.all_fields().nth(field.index())?;
// Use the HIR types to construct the diagnostic message.
let hir_id = tcx.hir().as_local_hir_id(field.did)?;
let node = tcx.hir().find(hir_id)?;
// Now we're dealing with the actual struct that we're going to suggest a change to,
// we can expect a field that is an immutable reference to a type.
if let hir::Node::Field(field) = node {
if let hir::TyKind::Rptr(
lifetime,
hir::MutTy { mutbl: hir::Mutability::Not, ref ty },
) = field.ty.kind
{
// Get the snippets in two parts - the named lifetime (if there is one) and
// type being referenced, that way we can reconstruct the snippet without loss
// of detail.
let type_snippet = tcx.sess.source_map().span_to_snippet(ty.span).ok()?;
let lifetime_snippet = if !lifetime.is_elided() {
format!("{} ", tcx.sess.source_map().span_to_snippet(lifetime.span).ok()?)
} else {
String::new()
};
return Some((
field.ty.span,
format!("&{}mut {}", lifetime_snippet, &*type_snippet,),
));
}
}
}
}
None
}
/// If possible, suggest replacing `ref` with `ref mut`.
fn suggest_ref_mut(tcx: TyCtxt<'_>, binding_span: Span) -> Option<String> {
let hi_src = tcx.sess.source_map().span_to_snippet(binding_span).ok()?;
if hi_src.starts_with("ref") && hi_src["ref".len()..].starts_with(rustc_lexer::is_whitespace) {
let replacement = format!("ref mut{}", &hi_src["ref".len()..]);
Some(replacement)
} else {
None
}
}
|
Mutate,
}
|
test_sso.py
|
from __future__ import absolute_import
import six
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import AuthProviderTestCase
from sentry.utils.auth import SSO_SESSION_KEY
class OrganizationAuthLoginTest(AuthProviderTestCase):
def test_sso_auth_required(self):
user = self.create_user('[email protected]', is_superuser=False)
organization = self.create_organization(name='foo')
member = self.create_member(user=user, organization=organization)
setattr(member.flags, 'sso:linked', True) # noqa: B010
member.save()
auth_provider = AuthProvider.objects.create(
organization=organization,
provider='dummy',
flags=0,
)
AuthIdentity.objects.create(
auth_provider=auth_provider,
user=user,
)
self.login_as(user)
path = u'/{}/'.format(organization.slug)
redirect_uri = u'/auth/login/{}/'.format(organization.slug)
|
# we should be redirecting the user to the authentication form as they
# haven't verified this specific organization
resp = self.client.get(path)
assert resp.status_code == 302
assert resp['Location'] == redirect_uri
# superuser should still require SSO as they're a member of the org
user.update(is_superuser=True)
resp = self.client.get(path)
assert resp.status_code == 302
assert resp['Location'] == redirect_uri
# XXX(dcramer): using internal API as exposing a request object is hard
self.session[SSO_SESSION_KEY] = six.text_type(organization.id)
self.save_session()
        # now that SSO is marked as complete, we should be able to access the dashboard
resp = self.client.get(path)
assert resp.status_code == 200
| |
generate-tests.py
|
#!/usr/bin/python
import sys
import os
import shutil, errno
from pathlib import Path
import hashlib
import json
import urllib.request
import re
from xml.dom.minidom import parseString
from python_lib.gweis.isoduration import parse_duration
TYPE_AUDIO = "audio"
TYPE_VIDEO = "video"
if len(sys.argv) < 3:
print("Please provide a CSV and destination directory!")
sys.exit(1)
TESTS_DIR = Path(sys.argv[0]).absolute().parent
LIB_DIR = Path(TESTS_DIR, "lib")
SUB_TEST_DIR = Path(TESTS_DIR, "subtests")
PLACEHOLDER_FILE = Path(TESTS_DIR, "placeholder.js")
CSV_FILE = sys.argv[1]
DEST_DIR = sys.argv[2]
MPD_ROOT_DIR = "."
if len(sys.argv) >= 4:
MPD_ROOT_DIR = sys.argv[3]
LIB_DEST_DIR = Path(DEST_DIR, "lib")
MPD_PARAMETERS = {
"cmaf_track_duration": r'<MPD .*mediaPresentationDuration="([^"]+)"',
"fragment_duration": r'<MPD .*maxSegmentDuration="([^"]+)"',
}
mpd_files = {}
def main():
csv_file = load_csv(CSV_FILE)
mpd_video_parameters = {}
mpd_audio_parameters = {}
tests = []
current_test_id = None
for test in csv_file:
video_mpd_url = test[1]
audio_mpd_url = test[2]
grouping_dir = test[3]
test_template_path = get_test_path(test[0])
template_file = str(test_template_path).split("/")[-1]
video_file_name = str(video_mpd_url).split("/")[-1]
video_file_name = ".".join(video_file_name.split(".")[0:-1])
audio_file_name = str(audio_mpd_url).split("/")[-1]
audio_file_name = ".".join(audio_file_name.split(".")[0:-1])
test_id = generate_test_id(template_file + video_mpd_url + audio_mpd_url)
video_parameters = None
if video_mpd_url:
if video_mpd_url in mpd_video_parameters:
video_parameters = mpd_video_parameters[video_mpd_url]
else:
mpd_content = load_mpd_content(video_mpd_url)
video_parameters = parse_mpd_parameters(mpd_content, [TYPE_VIDEO])
mpd_video_parameters[video_mpd_url] = video_parameters
audio_parameters = None
if audio_mpd_url:
if audio_mpd_url in mpd_audio_parameters:
audio_parameters = mpd_audio_parameters[audio_mpd_url]
else:
mpd_content = load_mpd_content(audio_mpd_url)
audio_parameters = parse_mpd_parameters(mpd_content, [TYPE_AUDIO])
mpd_audio_parameters[audio_mpd_url] = audio_parameters
if test[0] == "":
for test in tests:
if test["id"] != current_test_id: continue
if video_mpd_url != "":
test["video"].append(video_mpd_url)
if audio_mpd_url != "":
test["audio"].append(audio_mpd_url)
if video_parameters:
test["switching_sets"]["video"].append(video_parameters)
if audio_parameters:
test["switching_sets"]["audio"].append(audio_parameters)
else:
video_urls = []
if video_mpd_url != "":
video_urls.append(video_mpd_url)
audio_urls = []
if audio_mpd_url != "":
audio_urls.append(audio_mpd_url)
video_switching_sets = []
if video_parameters:
video_switching_sets.append(video_parameters)
audio_switching_sets = []
if audio_parameters:
audio_switching_sets.append(audio_parameters)
tests.append({
"id": test_id,
"template": test_template_path,
"video": video_urls,
"audio": audio_urls,
"switching_sets": { "video": video_switching_sets, "audio": audio_switching_sets },
"template_file": template_file,
"group": grouping_dir
})
current_test_id = test_id
for test in tests:
test_template_path = test["template"]
video_mpd_urls = test["video"]
audio_mpd_urls = test["audio"]
template_file = test["template_file"]
grouping_dir = test["group"]
template_file_name = ".".join(template_file.split(".")[0:-1])
test_path_relative = generate_test_path(grouping_dir, template_file_name, video_mpd_urls, audio_mpd_urls)
test["id"] = generate_test_id(test_path_relative)
test_path = "{}/{}".format(DEST_DIR, test_path_relative)
test["path"] = test_path
content = load_file(test_template_path)
content = generate_test(content, video_mpd_urls, audio_mpd_urls, test_path_relative, template_file)
write_file(test_path, content)
test_json_content = generate_test_json(tests)
test_json_content = json.dumps(test_json_content, indent=4)
write_file(Path(DEST_DIR, "tests.json"), test_json_content)
copy(LIB_DIR, LIB_DEST_DIR)
def parse_mpd_parameters(content, types):
parameters = {}
if (content == ""): return parameters
if type(content) != str:
content = content.decode("utf-8")
for parameter in MPD_PARAMETERS:
match = re.search(MPD_PARAMETERS[parameter], content)
if match is None: continue
parameters[parameter] = match.group(1)
representation_parameters = {}
dom_tree = parseString(content)
periods = dom_tree.getElementsByTagName("Period")
periodNumber = 0
for period in periods:
periodNumber += 1
periodDuration = period.getAttribute("duration")
if periodDuration != "":
periodDuration = parse_duration(periodDuration).seconds
representations = period.getElementsByTagName("Representation")
for representation in representations:
representationId = representation.getAttribute("id")
rep_parameters = {}
rep_parameters["period"] = periodNumber
if periodDuration != "":
rep_parameters["duration"] = periodDuration
mime_type = representation.getAttribute("mimeType")
            content_type = re.search(r"^(.+)/", mime_type).group(1)
if content_type not in types: continue
rep_parameters["type"] = content_type
if representation.hasAttribute("frameRate"):
frame_rate = representation.getAttribute("frameRate")
rep_parameters["frame_rate"] = frame_rate
segment_templates = representation.getElementsByTagName("SegmentTemplate")
if len(segment_templates) == 0 or len(segment_templates[0].getElementsByTagName("S")) == 0:
adaptation_set = get_parent_by_name(representation, "AdaptationSet")
segment_templates = adaptation_set.getElementsByTagName("SegmentTemplate")
seg_template_params = parse_segment_template(segment_templates[0])
rep_parameters = merge_parameters(rep_parameters, seg_template_params)
else:
seg_template_params = parse_segment_template(segment_templates[0])
rep_parameters = merge_parameters(rep_parameters, seg_template_params)
representation_parameters[representationId] = rep_parameters
parameters["representations"] = representation_parameters
return parameters
def parse_segment_template(node):
parameters = {}
segments = node.getElementsByTagName("S")
timescale = node.getAttribute("timescale")
timescale = int(timescale)
fragment_duration = None
sum = 0
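    # DASH SegmentTimeline: each <S> element describes a run of segments where
    # d is the duration in timescale ticks and r the number of extra repeats,
    # so the run contributes (r + 1) * d ticks to the total duration.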
for segment in segments:
r = segment.getAttribute("r")
if r == "":
r = 0
r = int(r)
d = segment.getAttribute("d")
d = int(d)
sum = sum + (r + 1) * d
if fragment_duration is None:
fragment_duration = d / timescale
duration = sum / timescale
parameters["duration"] = duration
parameters["fragment_duration"] = fragment_duration
return parameters
def merge_parameters(setA, setB):
parameter_names = [
"duration",
"fragment_duration",
]
for parameter_name in parameter_names:
if parameter_name in setA: continue
if parameter_name not in setB: continue
setA[parameter_name] = setB[parameter_name]
return setA
def get_parent_by_name(node, name):
parent = None
while parent is None:
node = node.parentNode
if node.tagName == name:
parent = node
return parent
def load_mpd_content(mpd_path):
if mpd_path in mpd_files:
return mpd_files[mpd_path]
content = ""
if mpd_path.startswith("http"):
print("Fetching MPD {}".format(mpd_path))
try:
content = urllib.request.urlopen(mpd_path).read()
except urllib.error.HTTPError:
print("Could not load http url:", mpd_path)
else:
# print("Fetching MPD {}".format(mpd_path))
# mpd_path = mpd_path.replace("/content/", "https://dash.akamaized.net/WAVE/vectors/")
# try:
# content = urllib.request.urlopen(mpd_path).read()
# except urllib.error.HTTPError:
# print("Could not load http url:", mpd_path)
file_path = os.path.join(MPD_ROOT_DIR, mpd_path[1:])
file_path = Path(file_path).absolute()
print("Reading MPD {}".format(file_path))
if not os.path.isfile(file_path):
print("Could not find file:", file_path)
return content
        with open(file_path, "r") as file:
            content = file.read()
    # Cache the content so repeated lookups for the same MPD are served from memory.
    mpd_files[mpd_path] = content
    return content
def generate_test_json(tests):
json = {"tests": {}}
for test in tests:
test_id = test["id"]
video = test["video"]
audio = test["audio"]
path = str(test["path"]).replace(DEST_DIR + "/", "")
template = str(test["template"]).split("/")[-1]
switching_sets = test["switching_sets"]
json["tests"][test_id] = {}
json["tests"][test_id]["path"] = path
json["tests"][test_id]["video"] = video
json["tests"][test_id]["audio"] = audio
json["tests"][test_id]["code"] = template
json["tests"][test_id]["switchingSets"] = switching_sets
return json
def generate_test_id(test_path_relative):
hashobj = hashlib.md5(test_path_relative.encode("utf-8"))
hash = hashobj.hexdigest()
return hash
def load_file(path):
with open(path, "r") as file:
return file.read()
def write_file(path, content):
parent = Path(path).parent
if not parent.exists():
os.makedirs(parent)
with open(path, "w+") as file:
file.write(content)
def copy(src, dest):
if Path(dest).exists():
return
try:
shutil.copytree(src, dest)
except OSError as error:
if error.errno == errno.ENOTDIR:
shutil.copy(src, dest)
else: raise
def load_csv(path):
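    # Naive CSV parsing: assumes every field is double-quoted and that no
    # field contains an embedded '","' sequence or a newline.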
content = load_file(path)
csv = []
for line in content.split("\n"):
if line == "":
continue
line = line[1:-1]
row = []
for column in line.split("\",\""):
row.append(column)
csv.append(row)
return csv
def get_test_path(test_id):
return Path(TESTS_DIR, test_id + ".html")
def generate_test(template, video_mpd_url, audio_mpd_url, test_path, template_name):
|
def generate_test_path(grouping_dir, template_file_name, video_file_paths, audio_file_paths):
identifiers = []
for video_file_path in video_file_paths:
if video_file_path.startswith("http"):
video_file_path = urllib.parse.urlparse(video_file_path).path
dir_path, file_name = os.path.split(video_file_path)
dir_split = list(filter(lambda element: element != "" and element != ".", dir_path.split("/")))
video_identifier = ""
if len(dir_split) >= 1:
video_identifier = "_".join(dir_split[-1:])
else:
video_identifier = ".".join(file_name.split(".")[:-1])
if video_identifier not in identifiers:
identifiers.append(video_identifier)
for audio_file_path in audio_file_paths:
if audio_file_path.startswith("http"):
audio_file_path = urllib.parse.urlparse(audio_file_path).path
dir_path, file_name = os.path.split(audio_file_path)
dir_split = list(filter(lambda element: element != "" and element != ".", dir_path.split("/")))
audio_identifier = ""
if len(dir_split) >= 1:
audio_identifier = "_".join(dir_split[-1:])
else:
audio_identifier = ".".join(file_name.split(".")[:-1])
if audio_identifier not in identifiers:
identifiers.append(audio_identifier)
test_path = "{}/{}__{}".format(grouping_dir, template_file_name, "_".join(identifiers), "_")
count = 1
suffix = ""
while os.path.exists(test_path + suffix + ".html"):
suffix = str(count)
count += 1
return test_path + suffix + ".html"
main()
|
template = template.replace("\"{{VIDEO_MPD_URL}}\"", json.dumps(video_mpd_url))
template = template.replace("\"{{AUDIO_MPD_URL}}\"", json.dumps(audio_mpd_url))
template = template.replace("{{TEST_PATH}}", test_path)
template = template.replace("{{TEMPLATE_NAME}}", template_name)
return template
|
SearchBar.tsx
|
import { SearchIcon } from '@chakra-ui/icons';
import { Input, InputGroup, InputLeftElement } from '@chakra-ui/react';
import React from 'react';
export interface ISearchBarProps {
isDisabled?: boolean;
  onSearch?: (value: string) => void;
}
function
|
({ isDisabled, onSearch }: ISearchBarProps) {
return (
<form onSubmit={(e: any) => {
e.preventDefault();
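      // e.target is the <form>; its first control (index 0) is the Input,
      // so we hand its current value to the onSearch callback.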
onSearch && onSearch(e.target[0].value);
}}>
<InputGroup>
<InputLeftElement
pointerEvents="none"
children={<SearchIcon color="gray.300" />}
/>
        <Input isDisabled={isDisabled} type="text" placeholder="Search OMDB..." />
</InputGroup>
</form>
);
}
export default SearchBar;
|
SearchBar
|
main.ts
|
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore: DataWorker import
import DataWorker from './worker/vaccine.worker.ts';
import { IstanbulCoverageStore, makeProxy } from './Interceptor';
import * as unload from 'unload';
import { getWindow, universe, hasWindow, universeAttribute } from './utils';
import { ProtocolMessageTypes } from './protocol';
// Prepare our global JavaScript object. This will hold
// a reference to the WebWorker thread.
const globalAgentObject: Record<string, unknown> = universeAttribute('__TS_AGENT', {});
/**
* Get the WebWorker instance, if already initialized.
*/
function getWorker(): DataWorker {
return globalAgentObject._$BcWorker;
}
/**
* Set the WebWorker instance in the global object.
*/
function setWorker(worker: DataWorker): DataWorker {
globalAgentObject._$BcWorker = worker;
return worker;
}
|
*/
universe().makeCoverageInterceptor = function (coverage: IstanbulCoverageStore) {
	// The `fileId` is used to match coverage records with their source maps. Note that
	// a browser window (tab) can run multiple JavaScript files, each with its own source map.
const fileId = coverage.hash;
if (!getWorker()) {
// Create the worker with the worker code
// (we use the tool 'rollup' to produce this object---see rollup.config.js)
const worker = setWorker(new DataWorker());
(function handleUnloading() {
const protectWindowEvent = function (name: 'onunload' | 'onbeforeunload') {
// Save the existing handler, wrap it in our handler
let wrappedHandler = (getWindow() as Window)[name];
getWindow()[name] = function (...args) {
// Ask the worker to send all remaining coverage infos
					worker.postMessage('unload'); // The string "unload" happens to coincide with the window event name
if (wrappedHandler) {
return wrappedHandler.apply(this, args);
}
};
// Define a proxy that prevents overwriting
if (hasWindow()) {
Object.defineProperty(getWindow(), name, {
get: function () {
return wrappedHandler;
},
set: function (newHandler: never) {
wrappedHandler = newHandler;
}
});
}
};
protectWindowEvent('onunload');
protectWindowEvent('onbeforeunload');
unload.add(() => worker.postMessage('unload'));
})();
}
(function sendSourceMaps() {
// Send the source maps
const sentMaps = universeAttribute('sentMaps', new Set());
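		// De-duplicate by file path so each source map is posted to the
		// worker only once per page load.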
if (coverage.inputSourceMap) {
if (!sentMaps.has(coverage.path)) {
getWorker().postMessage(
`${ProtocolMessageTypes.MESSAGE_TYPE_SOURCEMAP} ${fileId}:${JSON.stringify(
coverage.inputSourceMap
)}`
);
sentMaps.add(coverage.path);
}
}
})();
(function registerCoverageReporter() {
const reported = new Set<string>();
universe()._$Bc = (
fileId: string,
startLine: number,
startColumn: number,
endLine: number,
endColumn: number
) => {
// Do not send lines that have already been sent to reduce the network load
const coverageMessage = `${fileId}:${startLine}:${startColumn}:${endLine}:${endColumn}`;
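// e.g. "3fa9c1:12:4:12:37" (hypothetical values): the fileId followed by the
// covered range's start line/column and end line/column.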
if (!reported.has(coverageMessage)) {
getWorker().postMessage(coverageMessage);
reported.add(coverageMessage);
}
};
})();
return makeProxy(coverage, coverage, []);
};
|
/**
* The function that intercepts changes to the Istanbul code coverage.
* Also, the Web worker to forward the coverage information is started.
|
f1578ff17ae1_new_fields_in_user_model.py
|
"""new fields in user moodel
Revision ID: f1578ff17ae1
Revises: bda639e5aafd
Create Date: 2021-01-11 10:01:54.417977
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f1578ff17ae1'
down_revision = 'bda639e5aafd'
branch_labels = None
depends_on = None
def
|
():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_seen')
op.drop_column('user', 'about_me')
# ### end Alembic commands ###
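# Typical usage with the standard Alembic CLI (not part of the generated file):
#   alembic upgrade head    # applies upgrade()
#   alembic downgrade -1    # applies downgrade()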
|
upgrade
|
exec.rs
|
use std::io::{self, Write};
use crate::error::error_exit;
use crate::lexer::TokenType;
use crate::parser::{Operation, Parser};
use crate::var_map::VariableMap;
mod ffi {
extern "C" {
pub fn clock() -> ::libc::clock_t;
}
}
// executor
impl Parser {
pub fn exec(&self, var_map: &mut VariableMap) {
let t0 = unsafe { ffi::clock() };
let mut pc = 0;
while pc < self.internal_code.len() {
match self.internal_code[pc] {
Operation::Copy(ref dist, ref var) => {
let val = var_map.get(var);
var_map.set(dist, val);
}
Operation::Add(ref dist, ref lhs, ref rhs) => {
let lhs_val = var_map.get(lhs);
let rhs_val = var_map.get(rhs);
var_map.set(dist, lhs_val + rhs_val);
}
Operation::Sub(ref dist, ref lhs, ref rhs) => {
let lhs_val = var_map.get(lhs);
let rhs_val = var_map.get(rhs);
var_map.set(dist, lhs_val - rhs_val);
}
Operation::Mul(ref dist, ref lhs, ref rhs) => {
let lhs_val = var_map.get(lhs);
let rhs_val = var_map.get(rhs);
var_map.set(dist, lhs_val * rhs_val);
}
Operation::Div(ref dist, ref lhs, ref rhs) => {
let lhs_val = var_map.get(lhs);
let rhs_val = var_map.get(rhs);
if rhs_val == 0 {
error_exit(String::from("Zero division error"));
}
var_map.set(dist, lhs_val / rhs_val);
}
Operation::Eq(ref dist, ref lhs, ref rhs) => {
let lhs_val = var_map.get(lhs);
let rhs_val = var_map.get(rhs);
var_map.set(dist, if lhs_val == rhs_val { 1 } else { 0 });
}
Operation::Ne(ref dist, ref lhs, ref rhs) => {
let lhs_val = var_map.get(lhs);
let rhs_val = var_map.get(rhs);
var_map.set(dist, if lhs_val != rhs_val { 1 } else { 0 });
}
Operation::Lt(ref dist, ref lhs, ref rhs) => {
let lhs_val = var_map.get(lhs);
let rhs_val = var_map.get(rhs);
var_map.set(dist, if lhs_val < rhs_val { 1 } else { 0 });
}
Operation::Le(ref dist, ref lhs, ref rhs) => {
let lhs_val = var_map.get(lhs);
let rhs_val = var_map.get(rhs);
var_map.set(dist, if lhs_val <= rhs_val { 1 } else { 0 });
}
Operation::Print(ref val_tok) => {
match &val_tok.ty {
TokenType::Ident | TokenType::NumLiteral(_) => {
let val = var_map.get(val_tok);
print!("{}", val);
}
TokenType::StrLiteral => {
print!("{}", val_tok.string);
}
_ => error_exit(format!("Cannot print {}", val_tok.string)),
}
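// print! does not flush stdout on its own, so flush explicitly to make the
// output visible immediately.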
io::stdout().flush().unwrap();
}
Operation::Println(ref val_tok) => match &val_tok.ty {
TokenType::Ident | TokenType::NumLiteral(_) => {
let val = var_map.get(val_tok);
println!("{}", val);
}
TokenType::StrLiteral =>
|
_ => error_exit(format!("Cannot print {}", val_tok.string)),
},
Operation::Goto(ref label) => {
pc = var_map.label_get(label) as usize;
continue;
}
Operation::IfGoto(ref cond, ref label) => {
let cond_val = var_map.get(cond);
if cond_val != 0 {
pc = var_map.label_get(label) as usize;
continue;
}
}
Operation::Time => unsafe {
println!("time: {}", ffi::clock() - t0);
},
Operation::ArrayNew(ref ident, ref size_tok) => {
let size = var_map.get(size_tok) as usize;
var_map.array_init(ident, size);
}
Operation::ArrayGet(ref dist, ref ident, ref index_tok) => {
let index = var_map.get(index_tok) as usize;
let val = var_map.array_get(ident, index);
var_map.set(dist, val);
}
Operation::ArraySet(ref ident, ref index_tok, ref val_tok) => {
let index = var_map.get(index_tok) as usize;
let val = var_map.get(val_tok);
var_map.array_set(ident, index, val);
}
Operation::Nop => (),
}
pc += 1;
}
}
}
|
{
println!("{}", val_tok.string);
}
|
FadeSnackbar.js
|
/*======= Fade Snackbar ======*/
import React from 'react';
import Button from '@material-ui/core/Button';
import Snackbar from '@material-ui/core/Snackbar';
import Fade from '@material-ui/core/Fade';
export default class FadeSnackbar extends React.Component {
state = {
|
this.setState({ open: true });
};
handleClose = () => {
this.setState({ open: false });
};
render() {
return (
<div className="d-inline-block">
<Button variant="contained" color="primary" className="text-white mb-10" onClick={this.handleClick}>Open with Fade Transition</Button>
<Snackbar
open={this.state.open}
onClose={this.handleClose}
TransitionComponent={Fade}
message={<span id="message-id">I love snacks</span>}
/>
</div>
);
}
}
|
open: false,
};
handleClick = () => {
|
app_stop_test.go
|
package carbon
import (
"os"
"runtime"
"runtime/pprof"
"testing"
"time"
"github.com/lomik/go-carbon/qa"
"github.com/stretchr/testify/assert"
)
func TestStartStop(t *testing.T) {
assert := assert.New(t)
startGoroutineNum := runtime.NumGoroutine()
for i := 0; i < 10; i++ {
qa.Root(t, func(root string) {
configFile := TestConfig(root)
app := New(configFile)
assert.NoError(app.ParseConfig())
assert.NoError(app.Start())
app.Stop()
})
}
endGoroutineNum := runtime.NumGoroutine()
// GC worker etc
if !assert.InDelta(startGoroutineNum, endGoroutineNum, 4) {
p := pprof.Lookup("goroutine")
p.WriteTo(os.Stdout, 1)
}
}
func
|
(t *testing.T) {
//go func() {
// http.ListenAndServe("localhost:6060", nil)
//}()
qa.Root(t, func(root string) {
configFile := TestConfig(root)
app := New(configFile)
assert.NoError(t, app.ParseConfig())
app.Config.Common.MetricInterval = &Duration{time.Microsecond}
assert.NoError(t, app.Start())
reloadChan := make(chan struct{}, 1)
N := 1024
// start reload loop
go func() {
for i := N; i > 0; i-- {
app.ReloadConfig()
reloadChan <- struct{}{}
}
}()
ticker := time.NewTimer(0)
// goroutine doing reloadConfig should send N notifications if there were no deadlock
for rN := 0; rN < N; {
if !ticker.Stop() {
<-ticker.C
}
ticker.Reset(1 * time.Second)
select {
case <-reloadChan:
rN++
case <-ticker.C:
t.Fatalf("Collector and SIGHUP handlers deadlocked")
}
}
})
}
|
TestReloadAndCollectorDeadlock
|
help.go
|
package controller
import (
"fmt"
"github.com/keiko233/V2Board-Bot/lib/tgbot"
"github.com/keiko233/V2Board-Bot/model"
tb "gopkg.in/tucnak/telebot.v2"
)
func Help(ctx *tgbot.Context) error {
menu := &tb.ReplyMarkup{ResizeReplyKeyboard: true}
CheckinBtn := menu.Text(model.MenuCheckinBtn)
AccountBtn := menu.Text(model.MenuAccountBtn)
BindBtn := menu.Text(model.MenuBindBtn)
|
UnbindBtn := menu.Text(model.MenuUnbindBtn)
historyBtn := menu.Text(model.MenuhistoryBtn)
reportBtn := menu.Text(model.MenuReportBtn)
menuList := make([]tb.Row, 0)
// Initiated from a group chat (Telegram group chat IDs are negative): hide the bind and unbind buttons
if ctx.Message.Chat.ID < 0 {
menuList = append(menuList, menu.Row(CheckinBtn, AccountBtn), menu.Row(historyBtn, reportBtn))
} else {
menuList = append(menuList, menu.Row(CheckinBtn, AccountBtn), menu.Row(BindBtn, UnbindBtn), menu.Row(historyBtn, reportBtn))
}
menu.Reply(menuList...)
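// The reply below (Chinese) greets the user and lists the available commands:
// /checkin, /account, /bind, /unbind (or "/unbind <subscription link>" to force-unbind),
// /history and /report, noting that binding and unbinding must be done in a private chat.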
msg := fmt.Sprintf("%s\n为你提供以下服务:\n\n每日签到 /checkin\n账户信息 /account\n绑定账户 /bind\n解绑账户 /unbind\n强制解绑账号 /unbind <订阅链接>\n签到历史 /history\n签到统计 /report\n请注意, 绑定账号和解绑账号需要私聊我哦~", model.Config.Bot.Name)
return ctx.Reply(msg, menu)
}
| |
jira_status.py
|
from .configuration import with_credentials
from .configuration import jira_url
from jira import JIRA
from jira.exceptions import JIRAError
import logging
@with_credentials(service='Jira')
def
|
(issue_id, _usr, _pwd):
if _usr is None or _pwd is None:
logging.error('Jira username or password unset.')
return None
jira = None
try:
jira = JIRA(
jira_url,
auth=(_usr, _pwd)
)
issue = jira.issue(issue_id)
return str(issue.fields.status) == 'Review'
except JIRAError:
if jira is None:
print('Could not connect to Jira.')
else:
print('Could not check Jira ticket status on %s.' % issue_id)
return None
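# Usage sketch: credentials are injected by the @with_credentials decorator, so
# callers pass only the issue id (hypothetical), e.g. in_review('PROJ-123');
# the call returns True/False for the 'Review' status, or None on error.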
|
in_review
|
packageFunction.py
|
#coding=utf-8
def MultiplePackage(N,C,weight,value,num,physic):
'''
Multiple (bounded) knapsack: every item can be taken a limited number of times.
:param N: number of predicted VM types, e.g. N=pre_num
:param C: if the input file is CPU, the total knapsack capacity is MEM, e.g. C=
:param weight: per-item weights, e.g. weight=[0,5,4,7,2,6]
:param value: per-item values, e.g. value=[0,12,3,10,3,6]
:param num: per-item count limits, e.g. num=[0,2,4,1,5,3]
:return: the full DP value matrix
'''
#Initialize f[N+1][C+1] with zeros; f[i][j] is the maximum value achievable by packing the first i items into a knapsack of capacity j
f=[[0 for col in range(C+1)] for row in range(N+1)]
for i in range(1,N+1):
for j in range(1,C+1):
#Item i can be taken at most min(j//weight[i], num[i]) times (integer division, so the bound is an int under Python 3 as well)
max_num_i=min(j//weight[i],num[i])
#Start with k=0 (take none of item i) as the best, then let the loop below assign f[i][j] the best value over all feasible k
f[i][j]=f[i-1][j]
for k in range(max_num_i+1):
if f[i][j]<f[i-1][j-k*weight[i]]+k*value[i]<=physic:
#State transition
f[i][j]=f[i-1][j-k*weight[i]]+k*value[i]
return f
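# Recurrence implemented above (with the additional <= physic cap):
#   f[i][j] = max over k in 0..min(j // weight[i], num[i]) of f[i-1][j - k*weight[i]] + k*value[i]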
def FindWhat(f,value,weight,i,j,item,num):
if i>=0:
if f[i][j]==f[i-1][j]:
item[i]=0
FindWhat(f,value,weight,i-1,j,item,num)
elif j-weight[i]>=0:
for k in range(num[i]+1):
if f[i][j]==f[i-1][j-k*weight[i]]+k*value[i]:
|
item[i]=k
break
FindWhat(f,value,weight,i-1,j-item[i]*weight[i],item,num)
|
|
blocks_finder_bucket_scan.go
|
package querier
import (
"context"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/thanos-io/thanos/pkg/block"
"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/thanos-io/thanos/pkg/objstore"
"github.com/cortexproject/cortex/pkg/storage/bucket"
cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
"github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex"
"github.com/cortexproject/cortex/pkg/storegateway"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/services"
)
var (
errBucketScanBlocksFinderNotRunning = errors.New("bucket scan blocks finder is not running")
errInvalidBlocksRange = errors.New("invalid blocks time range")
)
type BucketScanBlocksFinderConfig struct {
ScanInterval time.Duration
TenantsConcurrency int
MetasConcurrency int
CacheDir string
ConsistencyDelay time.Duration
IgnoreDeletionMarksDelay time.Duration
}
// BucketScanBlocksFinder is a BlocksFinder implementation periodically scanning the bucket to discover blocks.
type BucketScanBlocksFinder struct {
services.Service
cfg BucketScanBlocksFinderConfig
logger log.Logger
bucketClient objstore.Bucket
fetchersMetrics *storegateway.MetadataFetcherMetrics
usersScanner *cortex_tsdb.UsersScanner
// We reuse the metadata fetcher instance for a given tenant both for performance
// reasons (the fetcher keeps an in-memory cache) and to be able to collect and group metrics.
fetchersMx sync.Mutex
fetchers map[string]userFetcher
// Keep the per-tenant/user metas found during the last run.
userMx sync.RWMutex
userMetas map[string]bucketindex.Blocks
userMetasLookup map[string]map[ulid.ULID]*bucketindex.Block
userDeletionMarks map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark
scanDuration prometheus.Histogram
scanLastSuccess prometheus.Gauge
}
func NewBucketScanBlocksFinder(cfg BucketScanBlocksFinderConfig, bucketClient objstore.Bucket, logger log.Logger, reg prometheus.Registerer) *BucketScanBlocksFinder {
d := &BucketScanBlocksFinder{
cfg: cfg,
logger: logger,
bucketClient: bucketClient,
fetchers: make(map[string]userFetcher),
usersScanner: cortex_tsdb.NewUsersScanner(bucketClient, cortex_tsdb.AllUsers, logger),
userMetas: make(map[string]bucketindex.Blocks),
userMetasLookup: make(map[string]map[ulid.ULID]*bucketindex.Block),
userDeletionMarks: map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark{},
fetchersMetrics: storegateway.NewMetadataFetcherMetrics(),
scanDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "cortex_querier_blocks_scan_duration_seconds",
Help: "The total time it takes to run a full blocks scan across the storage.",
Buckets: []float64{1, 10, 20, 30, 60, 120, 180, 240, 300, 600},
}),
scanLastSuccess: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "cortex_querier_blocks_last_successful_scan_timestamp_seconds",
Help: "Unix timestamp of the last successful blocks scan.",
}),
}
if reg != nil {
prometheus.WrapRegistererWith(prometheus.Labels{"component": "querier"}, reg).MustRegister(d.fetchersMetrics)
}
// Apply a jitter to the sync frequency in order to increase the probability
// of hitting the shared cache (if any).
scanInterval := util.DurationWithJitter(cfg.ScanInterval, 0.2)
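// E.g. (assuming DurationWithJitter applies a +/-20% variance) a 5m ScanInterval
// results in an effective timer period of roughly 4m to 6m.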
d.Service = services.NewTimerService(scanInterval, d.starting, d.scan, nil)
return d
}
// GetBlocks returns known blocks for userID containing samples within the range minT
// and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending.
func (d *BucketScanBlocksFinder) GetBlocks(_ context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) {
// We need to ensure the initial full bucket scan succeeded.
if d.State() != services.Running {
return nil, nil, errBucketScanBlocksFinderNotRunning
}
if maxT < minT {
return nil, nil, errInvalidBlocksRange
}
d.userMx.RLock()
defer d.userMx.RUnlock()
userMetas, ok := d.userMetas[userID]
if !ok {
return nil, nil, nil
}
// Given we do expect the large majority of queries to have a time range close
// to "now", we're going to find matching blocks iterating the list in reverse order.
var matchingMetas bucketindex.Blocks
for i := len(userMetas) - 1; i >= 0; i-- {
if userMetas[i].Within(minT, maxT) {
matchingMetas = append(matchingMetas, userMetas[i])
}
// We can safely break the loop because metas are sorted by MaxTime.
if userMetas[i].MaxTime <= minT {
break
}
}
// Filter deletion marks by matching blocks only.
matchingDeletionMarks := map[ulid.ULID]*bucketindex.BlockDeletionMark{}
if userDeletionMarks, ok := d.userDeletionMarks[userID]; ok {
for _, m := range matchingMetas {
if d := userDeletionMarks[m.ID]; d != nil {
matchingDeletionMarks[m.ID] = d
}
}
}
return matchingMetas, matchingDeletionMarks, nil
}
func (d *BucketScanBlocksFinder) starting(ctx context.Context) error {
// Before the service enters the running state it must have successfully
// completed the initial scan.
if err := d.scanBucket(ctx); err != nil {
level.Error(d.logger).Log("msg", "unable to run the initial blocks scan", "err", err)
return err
}
return nil
}
func (d *BucketScanBlocksFinder) scan(ctx context.Context) error {
if err := d.scanBucket(ctx); err != nil {
level.Error(d.logger).Log("msg", "failed to scan bucket storage to find blocks", "err", err)
}
// Never return error, otherwise the service terminates.
return nil
}
func (d *BucketScanBlocksFinder) scanBucket(ctx context.Context) (returnErr error) {
defer func(start time.Time) {
d.scanDuration.Observe(time.Since(start).Seconds())
if returnErr == nil {
d.scanLastSuccess.SetToCurrentTime()
}
}(time.Now())
// Discover all users first. This helps cacheability of the object store call.
userIDs, _, err := d.usersScanner.ScanUsers(ctx)
if err != nil {
return err
}
jobsChan := make(chan string)
resMx := sync.Mutex{}
resMetas := map[string]bucketindex.Blocks{}
resMetasLookup := map[string]map[ulid.ULID]*bucketindex.Block{}
resDeletionMarks := map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark{}
resErrs := tsdb_errors.NewMulti()
// Create a pool of workers which will synchronize metas. The pool size
// is limited in order to avoid to concurrently sync a lot of tenants in
// a large cluster.
wg := &sync.WaitGroup{}
wg.Add(d.cfg.TenantsConcurrency)
for i := 0; i < d.cfg.TenantsConcurrency; i++ {
go func() {
defer wg.Done()
for userID := range jobsChan {
metas, deletionMarks, err := d.scanUserBlocksWithRetries(ctx, userID)
// Build the lookup map.
lookup := map[ulid.ULID]*bucketindex.Block{}
for _, m := range metas {
lookup[m.ID] = m
}
resMx.Lock()
if err != nil {
resErrs.Add(err)
} else {
resMetas[userID] = metas
resMetasLookup[userID] = lookup
resDeletionMarks[userID] = deletionMarks
}
resMx.Unlock()
}
}()
}
// Push a job for each user whose blocks need to be discovered.
pushJobsLoop:
for _, userID := range userIDs {
select {
case jobsChan <- userID:
// Nothing to do.
case <-ctx.Done():
resMx.Lock()
resErrs.Add(ctx.Err())
resMx.Unlock()
break pushJobsLoop
}
}
// Wait until all workers completed.
close(jobsChan)
wg.Wait()
d.userMx.Lock()
if len(resErrs) == 0 {
// Replace the map, so that we discard tenants fully deleted from storage.
d.userMetas = resMetas
d.userMetasLookup = resMetasLookup
d.userDeletionMarks = resDeletionMarks
} else {
// If an error occurred, we prefer to partially update the metas map instead of
// not updating it at all. At least we'll update blocks for the successful tenants.
for userID, metas := range resMetas {
d.userMetas[userID] = metas
}
for userID, metas := range resMetasLookup {
d.userMetasLookup[userID] = metas
}
for userID, deletionMarks := range resDeletionMarks {
d.userDeletionMarks[userID] = deletionMarks
}
}
d.userMx.Unlock()
return resErrs.Err()
}
// scanUserBlocksWithRetries runs scanUserBlocks() retrying multiple times
// in case of error.
func (d *BucketScanBlocksFinder) scanUserBlocksWithRetries(ctx context.Context, userID string) (metas bucketindex.Blocks, deletionMarks map[ulid.ULID]*bucketindex.BlockDeletionMark, err error) {
retries := util.NewBackoff(ctx, util.BackoffConfig{
MinBackoff: time.Second,
MaxBackoff: 30 * time.Second,
MaxRetries: 3,
})
for retries.Ongoing() {
metas, deletionMarks, err = d.scanUserBlocks(ctx, userID)
if err == nil {
return
}
retries.Wait()
}
return
}
func (d *BucketScanBlocksFinder) scanUserBlocks(ctx context.Context, userID string) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) {
fetcher, userBucket, deletionMarkFilter, err := d.getOrCreateMetaFetcher(userID)
if err != nil {
return nil, nil, errors.Wrapf(err, "create meta fetcher for user %s", userID)
}
metas, partials, err := fetcher.Fetch(ctx)
if err != nil {
return nil, nil, errors.Wrapf(err, "scan blocks for user %s", userID)
}
// If we find any partial block we log it but continue, because we don't want
// to break the scanner just because there's a spurious block.
if len(partials) > 0 {
logPartialBlocks(userID, partials, d.logger)
}
res := make(bucketindex.Blocks, 0, len(metas))
for _, m := range metas {
blockMeta := bucketindex.BlockFromThanosMeta(*m)
// If the block is already known, we can get the remaining attributes from there
// because a block is immutable.
prevMeta := d.getBlockMeta(userID, m.ULID)
if prevMeta != nil {
blockMeta.UploadedAt = prevMeta.UploadedAt
} else {
attrs, err := userBucket.Attributes(ctx, path.Join(m.ULID.String(), metadata.MetaFilename))
if err != nil {
return nil, nil, errors.Wrapf(err, "read %s attributes of block %s for user %s", metadata.MetaFilename, m.ULID.String(), userID)
}
// Since meta.json is the last file uploaded for a block and it is immutable,
// we can safely assume that its last-modified timestamp is the time at which
// the block finished uploading.
blockMeta.UploadedAt = attrs.LastModified.Unix()
}
res = append(res, blockMeta)
}
// The blocks scanner expects all blocks to be sorted by max time.
sortBlocksByMaxTime(res)
// Convert deletion marks to our own data type.
marks := map[ulid.ULID]*bucketindex.BlockDeletionMark{}
for id, m := range deletionMarkFilter.DeletionMarkBlocks() {
marks[id] = bucketindex.BlockDeletionMarkFromThanosMarker(m)
}
return res, marks, nil
}
func (d *BucketScanBlocksFinder) getOrCreateMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) {
d.fetchersMx.Lock()
defer d.fetchersMx.Unlock()
if f, ok := d.fetchers[userID]; ok {
return f.metadataFetcher, f.userBucket, f.deletionMarkFilter, nil
}
fetcher, userBucket, deletionMarkFilter, err := d.createMetaFetcher(userID)
if err != nil {
return nil, nil, nil, err
}
d.fetchers[userID] = userFetcher{
metadataFetcher: fetcher,
deletionMarkFilter: deletionMarkFilter,
userBucket: userBucket,
}
return fetcher, userBucket, deletionMarkFilter, nil
}
func (d *BucketScanBlocksFinder) createMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) {
userLogger := util.WithUserID(userID, d.logger)
userBucket := bucket.NewUserBucketClient(userID, d.bucketClient)
userReg := prometheus.NewRegistry()
// The following filters have been intentionally omitted:
// - Consistency delay filter: omitted because we should discover all uploaded blocks.
// The consistency delay is taken in account when running the consistency check at query time.
// - Deduplicate filter: omitted because it could cause troubles with the consistency check if
// we "hide" source blocks because recently compacted by the compactor before the store-gateway instances
// discover and load the compacted ones.
deletionMarkFilter := block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, d.cfg.IgnoreDeletionMarksDelay, d.cfg.MetasConcurrency)
filters := []block.MetadataFilter{deletionMarkFilter}
f, err := block.NewMetaFetcher(
userLogger,
d.cfg.MetasConcurrency,
userBucket,
// The fetcher stores cached metas in the "meta-syncer/" sub directory.
filepath.Join(d.cfg.CacheDir, userID),
userReg,
filters,
nil,
)
if err != nil {
return nil, nil, nil, err
}
d.fetchersMetrics.AddUserRegistry(userID, userReg)
return f, userBucket, deletionMarkFilter, nil
}
func (d *BucketScanBlocksFinder) getBlockMeta(userID string, blockID ulid.ULID) *bucketindex.Block {
d.userMx.RLock()
defer d.userMx.RUnlock()
metas, ok := d.userMetasLookup[userID]
if !ok {
return nil
}
return metas[blockID]
}
func sortBlocksByMaxTime(blocks bucketindex.Blocks) {
sort.Slice(blocks, func(i, j int) bool {
return blocks[i].MaxTime < blocks[j].MaxTime
})
}
func logPartialBlocks(userID string, partials map[ulid.ULID]error, logger log.Logger)
|
type userFetcher struct {
metadataFetcher block.MetadataFetcher
deletionMarkFilter *block.IgnoreDeletionMarkFilter
userBucket objstore.Bucket
}
|
{
ids := make([]string, 0, len(partials))
errs := make([]string, 0, len(partials))
for id, err := range partials {
ids = append(ids, id.String())
errs = append(errs, err.Error())
}
level.Warn(logger).Log("msg", "found partial blocks", "user", userID, "blocks", strings.Join(ids, ","), "err", strings.Join(errs, ","))
}
|
writer_test.go
|
package report_test
import (
"bytes"
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
dbTypes "github.com/aquasecurity/trivy-db/pkg/types"
"github.com/aquasecurity/trivy/pkg/report"
"github.com/aquasecurity/trivy/pkg/types"
)
func
|
(t *testing.T) {
testCases := []struct {
name string
detectedVulns []types.DetectedVulnerability
expectedOutput string
light bool
}{
{
name: "happy path full",
detectedVulns: []types.DetectedVulnerability{
{
VulnerabilityID: "123",
PkgName: "foo",
InstalledVersion: "1.2.3",
FixedVersion: "3.4.5",
Vulnerability: dbTypes.Vulnerability{
Title: "foobar",
Description: "baz",
Severity: "HIGH",
},
},
},
expectedOutput: `+---------+------------------+----------+-------------------+---------------+--------+
| LIBRARY | VULNERABILITY ID | SEVERITY | INSTALLED VERSION | FIXED VERSION | TITLE |
+---------+------------------+----------+-------------------+---------------+--------+
| foo | 123 | HIGH | 1.2.3 | 3.4.5 | foobar |
+---------+------------------+----------+-------------------+---------------+--------+
`,
},
{
name: "happy path light",
light: true,
detectedVulns: []types.DetectedVulnerability{
{
VulnerabilityID: "123",
PkgName: "foo",
InstalledVersion: "1.2.3",
FixedVersion: "3.4.5",
Vulnerability: dbTypes.Vulnerability{
Title: "foobar",
Description: "baz",
Severity: "HIGH",
},
},
},
expectedOutput: `+---------+------------------+----------+-------------------+---------------+
| LIBRARY | VULNERABILITY ID | SEVERITY | INSTALLED VERSION | FIXED VERSION |
+---------+------------------+----------+-------------------+---------------+
| foo | 123 | HIGH | 1.2.3 | 3.4.5 |
+---------+------------------+----------+-------------------+---------------+
`,
},
{
name: "no title for vuln",
detectedVulns: []types.DetectedVulnerability{
{
VulnerabilityID: "123",
PkgName: "foo",
InstalledVersion: "1.2.3",
FixedVersion: "3.4.5",
Vulnerability: dbTypes.Vulnerability{
Description: "foobar",
Severity: "HIGH",
},
},
},
expectedOutput: `+---------+------------------+----------+-------------------+---------------+--------+
| LIBRARY | VULNERABILITY ID | SEVERITY | INSTALLED VERSION | FIXED VERSION | TITLE |
+---------+------------------+----------+-------------------+---------------+--------+
| foo | 123 | HIGH | 1.2.3 | 3.4.5 | foobar |
+---------+------------------+----------+-------------------+---------------+--------+
`,
},
{
name: "long title for vuln",
detectedVulns: []types.DetectedVulnerability{
{
VulnerabilityID: "123",
PkgName: "foo",
InstalledVersion: "1.2.3",
FixedVersion: "3.4.5",
Vulnerability: dbTypes.Vulnerability{
Title: "a b c d e f g h i j k l m n o p q r s t u v",
Severity: "HIGH",
},
},
},
expectedOutput: `+---------+------------------+----------+-------------------+---------------+----------------------------+
| LIBRARY | VULNERABILITY ID | SEVERITY | INSTALLED VERSION | FIXED VERSION | TITLE |
+---------+------------------+----------+-------------------+---------------+----------------------------+
| foo | 123 | HIGH | 1.2.3 | 3.4.5 | a b c d e f g h i j k l... |
+---------+------------------+----------+-------------------+---------------+----------------------------+
`,
},
{
name: "no vulns",
detectedVulns: []types.DetectedVulnerability{},
expectedOutput: ``,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
inputResults := report.Results{
{
Target: "foo",
Vulnerabilities: tc.detectedVulns,
},
}
tableWritten := bytes.Buffer{}
assert.NoError(t, report.WriteResults("table", &tableWritten, nil, inputResults, "", tc.light), tc.name)
assert.Equal(t, tc.expectedOutput, tableWritten.String(), tc.name)
})
}
}
func TestReportWriter_JSON(t *testing.T) {
testCases := []struct {
name string
detectedVulns []types.DetectedVulnerability
expectedJSON report.Results
}{
{
name: "happy path",
detectedVulns: []types.DetectedVulnerability{
{
VulnerabilityID: "123",
PkgName: "foo",
InstalledVersion: "1.2.3",
FixedVersion: "3.4.5",
Vulnerability: dbTypes.Vulnerability{
Title: "foobar",
Description: "baz",
Severity: "HIGH",
},
},
},
expectedJSON: report.Results{
report.Result{
Target: "foojson",
Vulnerabilities: []types.DetectedVulnerability{
{
VulnerabilityID: "123",
PkgName: "foo",
InstalledVersion: "1.2.3",
FixedVersion: "3.4.5",
Vulnerability: dbTypes.Vulnerability{
Title: "foobar",
Description: "baz",
Severity: "HIGH",
},
},
},
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
jw := report.JsonWriter{}
jsonWritten := bytes.Buffer{}
jw.Output = &jsonWritten
inputResults := report.Results{
{
Target: "foojson",
Vulnerabilities: tc.detectedVulns,
},
}
assert.NoError(t, report.WriteResults("json", &jsonWritten, nil, inputResults, "", false), tc.name)
writtenResults := report.Results{}
errJson := json.Unmarshal([]byte(jsonWritten.String()), &writtenResults)
assert.NoError(t, errJson, "invalid json written", tc.name)
assert.Equal(t, tc.expectedJSON, writtenResults, tc.name)
})
}
}
func TestReportWriter_Template(t *testing.T) {
testCases := []struct {
name string
detectedVulns []types.DetectedVulnerability
template string
expected string
}{
{
name: "happy path",
detectedVulns: []types.DetectedVulnerability{
{
VulnerabilityID: "CVE-2019-0000",
PkgName: "foo",
Vulnerability: dbTypes.Vulnerability{
Severity: dbTypes.SeverityHigh.String(),
},
},
{
VulnerabilityID: "CVE-2019-0000",
PkgName: "bar",
Vulnerability: dbTypes.Vulnerability{
Severity: dbTypes.SeverityHigh.String()},
},
{
VulnerabilityID: "CVE-2019-0001",
PkgName: "baz",
Vulnerability: dbTypes.Vulnerability{
Severity: dbTypes.SeverityCritical.String(),
},
},
},
template: "{{ range . }}{{ range .Vulnerabilities}}{{ println .VulnerabilityID .Severity }}{{ end }}{{ end }}",
expected: "CVE-2019-0000 HIGH\nCVE-2019-0000 HIGH\nCVE-2019-0001 CRITICAL\n",
},
{
name: "happy path",
detectedVulns: []types.DetectedVulnerability{
{
VulnerabilityID: "123",
PkgName: "foo",
InstalledVersion: "1.2.3",
FixedVersion: "3.4.5",
Vulnerability: dbTypes.Vulnerability{
Title: `gcc: POWER9 "DARN" RNG intrinsic produces repeated output`,
Description: `curl version curl 7.20.0 to and including curl 7.59.0 contains a CWE-126: Buffer Over-read vulnerability in denial of service that can result in curl can be tricked into reading data beyond the end of a heap based buffer used to store downloaded RTSP content.. This vulnerability appears to have been fixed in curl < 7.20.0 and curl >= 7.60.0.`,
Severity: "HIGH",
},
},
},
template: `<testsuites>
{{- range . -}}
{{- $failures := len .Vulnerabilities }}
<testsuite tests="1" failures="{{ $failures }}" time="" name="{{ .Target }}">
{{- if not (eq .Type "") }}
<properties>
<property name="type" value="{{ .Type }}"></property>
</properties>
{{- end -}}
{{ range .Vulnerabilities }}
<testcase classname="{{ .PkgName }}-{{ .InstalledVersion }}" name="[{{ .Vulnerability.Severity }}] {{ .VulnerabilityID }}" time="">
<failure message={{escapeXML .Title | printf "%q" }} type="description">{{escapeXML .Description | printf "%q" }}</failure>
</testcase>
{{- end }}
</testsuite>
{{- end }}
</testsuites>`,
expected: `<testsuites>
<testsuite tests="1" failures="1" time="" name="foojunit">
<properties>
<property name="type" value="test"></property>
</properties>
<testcase classname="foo-1.2.3" name="[HIGH] 123" time="">
<failure message="gcc: POWER9 "DARN" RNG intrinsic produces repeated output" type="description">"curl version curl 7.20.0 to and including curl 7.59.0 contains a CWE-126: Buffer Over-read vulnerability in denial of service that can result in curl can be tricked into reading data beyond the end of a heap based buffer used to store downloaded RTSP content.. This vulnerability appears to have been fixed in curl < 7.20.0 and curl >= 7.60.0."</failure>
</testcase>
</testsuite>
</testsuites>`,
},
{
name: "happy path with/without period description should return with period",
detectedVulns: []types.DetectedVulnerability{
{
VulnerabilityID: "CVE-2019-0000",
PkgName: "foo",
Vulnerability: dbTypes.Vulnerability{
Description: "without period",
},
},
{
VulnerabilityID: "CVE-2019-0000",
PkgName: "bar",
Vulnerability: dbTypes.Vulnerability{
Description: "with period.",
},
},
{
VulnerabilityID: "CVE-2019-0000",
PkgName: "bar",
Vulnerability: dbTypes.Vulnerability{
Description: `with period and unescaped string curl: Use-after-free when closing 'easy' handle in Curl_close().`,
},
},
},
template: `{{ range . }}{{ range .Vulnerabilities}}{{.VulnerabilityID}} {{ endWithPeriod (escapeString .Description) | printf "%q" }}{{ end }}{{ end }}`,
expected: `CVE-2019-0000 "without period."CVE-2019-0000 "with period."CVE-2019-0000 "with period and unescaped string curl: Use-after-free when closing 'easy' handle in Curl_close()."`,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tmplWritten := bytes.Buffer{}
inputResults := report.Results{
{
Target: "foojunit",
Type: "test",
Vulnerabilities: tc.detectedVulns,
},
}
assert.NoError(t, report.WriteResults("template", &tmplWritten, nil, inputResults, tc.template, false))
assert.Equal(t, tc.expected, tmplWritten.String())
})
}
}
|
TestReportWriter_Table
|
ErrorBoundary.js
|
import React from 'react'
export default class ErrorBoundary extends React.Component {
constructor(props) {
super(props);
this.state = {
hasError: false
}
}
static getDerivedStateFromError(error) {
// console.log("getDerivedStateFromError",error);
return { hasError: true }
}
componentDidCatch(error, info) {
console.log('error, info',error, info)
}
render() {
if (this.state.hasError) {
return <div>error</div>
}
return this.props.children;
|
}
}
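// Usage sketch: wrap any subtree that may throw while rendering, e.g.
//   <ErrorBoundary><App /></ErrorBoundary>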
| |
middleware.go
|
package middleware
type Middleware interface {
}
|
||
visitorRegistration.js
|
Alloy.Globals.module = "visitorRegistration";
var referenceModel = Alloy.createCollection('resicon_references');
var args = arguments[0] || {};
var id = args.id || "";
COMMON.construct($);
if(OS_ANDROID){
MENU.construct($,$.visitorRegistrationView.contentView);
MENU.initMenu();
}
$.visitorRegistrationView.headerView.titleLabel.text= Ti.App.Properties.getString('Title') + " " +Ti.App.Properties.getString('FirstName') + " "+Ti.App.Properties.getString('LastName')+", who will be visiting you? ";
$.visitorRegistrationView.init({id:id});
$.visitorRegistrationView.saveButton.addEventListener('click', function(){
COMMON.showLoading();
setTimeout(function(){
COMMON.hideLoading();
}, 2500);
});
$.visitorRegistrationView.visitor.addEventListener('click', function(){
Alloy.Globals.module = "visitor";
COMMON.closeWindow($.myWin);
});
$.visitorRegistrationView.deleteButton.addEventListener('click', function(){
var dialog = Ti.UI.createAlertDialog({
cancel: 0,
buttonNames: ['Cancel','Confirm'],
message: 'Would you like to delete this visitor information?',
title: 'Delete confirmation'
});
dialog.addEventListener('click', function(e){
if (e.index === e.source.cancel){
//Do nothing
}
if (e.index === 1){
|
"Header" : {
"AccountSignature" :{
"AccountId" : 0,
"Signature" : Ti.App.Properties.getString('Signature')
},
"UUID" : Ti.App.Properties.getString('deviceToken')
},
"Visitor" :{
"VisitorId" : id,
"Status" : 3
}
};
COMMON.showLoading();
API.callByPost({url:"updateVisitorUrl", params: param}, function(responseText){
COMMON.hideLoading();
var res = JSON.parse(responseText);
if(res.Header.Error == null){
Ti.App.fireEvent("refreshVisitorList");
COMMON.closeWindow($.myWin);
COMMON.hideLoading();
COMMON.createAlert("Success","Successfully deleted visitor information!");
return false;
}else{
COMMON.createAlert("Error",res.Header.Error.ErrorMessage);
return false;
}
});
}
});
dialog.show();
});
$.myWin.addEventListener('swipe', function(e){
if (e.direction == 'up') {
MENU.loadMenu();
}
});
|
var param = {
|
multistep_optimizer.py
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-step optimizers simulating large batches.
Optimizer variants which make it possible to use very large batch sizes with
limited GPU memory. Optimizers in this module accumulate the gradients for n
batches, and call the optimizer's update rule every n batches with the
accumulated gradients.
See [Saunders et al., 2018](https://arxiv.org/abs/1805.00456) for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
class MultistepAdamOptimizer(tf.compat.v1.train.AdamOptimizer):
"""Adam with SGD updates every n steps with accumulated gradients."""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name="Adam", n=1):
super(MultistepAdamOptimizer, self).__init__(
learning_rate=learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon,
use_locking=use_locking, name=name)
self._n = n # Call Adam optimizer every n batches with accumulated grads
self._n_t = None # n as tensor
def _create_slots(self, var_list):
"""Create slot variables for Adam with accumulated gradients."""
super(MultistepAdamOptimizer, self)._create_slots(var_list)
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1,
name="iter",
colocate_with=first_var)
for v in var_list:
self._zeros_slot(v, "grad_acc", self._name)
def _get_iter_variable(self):
|
def _prepare(self):
super(MultistepAdamOptimizer, self)._prepare()
self._n_t = tf.convert_to_tensor(self._n, name="n")
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
"""Apply conditionally if counter is zero."""
grad_acc = self.get_slot(var, "grad_acc")
def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
adam_op = apply_fn(total_grad, var, *args, **kwargs)
with tf.control_dependencies([adam_op]):
grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
use_locking=self._use_locking)
return tf.group(adam_op, grad_acc_to_zero_op)
def accumulate_gradient(grad_acc, grad):
assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
return tf.group(assign_op) # Strip return value
return tf.cond(
tf.equal(self._get_iter_variable(), 0),
lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
lambda: accumulate_gradient(grad_acc, grad))
def _apply_dense(self, grad, var):
return self._apply_cond(
super(MultistepAdamOptimizer, self)._apply_dense, grad, var)
def _resource_apply_dense(self, grad, var):
return self._apply_cond(
super(MultistepAdamOptimizer, self)._resource_apply_dense, grad, var)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
return self._apply_cond(
super(MultistepAdamOptimizer, self)._apply_sparse_shared, grad, var,
indices, scatter_add)
def _apply_sparse(self, grad, var):
# TODO(fstahlberg): Implement a sparse version
tf.logging.warning("MultistepAdamOptimizer does not support sparse updates")
dense_grad = tf.convert_to_tensor(grad)
return self._apply_cond(
super(MultistepAdamOptimizer, self)._apply_dense, dense_grad, var)
def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):
tf.logging.warning("MultistepAdamOptimizer does not support sparse updates")
# Note that conversion to a dense Tensor handles duplicate `indices`
# correctly (summing them). A real sparse implementation will probably want
# to override _resource_apply_sparse instead so it gets them de-duplicated
# automatically.
dense_grad = tf.convert_to_tensor(
tf.IndexedSlices(values=grad, indices=indices,
dense_shape=tf.shape(var)))
return self._apply_cond(
super(MultistepAdamOptimizer, self)._resource_apply_dense,
dense_grad, var)
def _finish(self, update_ops, name_scope):
"""Updates beta_power variables every n batches and incrs counter."""
iter_ = self._get_iter_variable()
beta1_power, beta2_power = self._get_beta_accumulators()
with tf.control_dependencies(update_ops):
with tf.colocate_with(iter_):
def update_beta_op():
update_beta1 = beta1_power.assign(
beta1_power * self._beta1_t,
use_locking=self._use_locking)
update_beta2 = beta2_power.assign(
beta2_power * self._beta2_t,
use_locking=self._use_locking)
return tf.group(update_beta1, update_beta2)
maybe_update_beta = tf.cond(
tf.equal(iter_, 0), update_beta_op, tf.no_op)
with tf.control_dependencies([maybe_update_beta]):
update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
use_locking=self._use_locking)
return tf.group(
*update_ops + [update_iter, maybe_update_beta], name=name_scope)
|
graph = (
None if tf.executing_eagerly() else tf.get_default_graph())
return self._get_non_slot_variable("iter", graph=graph)
|
aae.py
|
# Copyright 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Adversarial autoencoder.
"""
|
from __future__ import print_function
import math
from absl import app
from absl import flags
import tensorflow as tf
from lib import data, layers, train, utils, classifiers, eval
FLAGS = flags.FLAGS
class AAE(train.AE):
def model(self, latent, depth, scales, adversary_lr, disc_layer_sizes):
x = tf.placeholder(tf.float32,
[None, self.height, self.width, self.colors], 'x')
l = tf.placeholder(tf.float32, [None, self.nclass], 'label')
h = tf.placeholder(
tf.float32,
[None, self.height >> scales, self.width >> scales, latent], 'h')
def encoder(x):
return layers.encoder(x, scales, depth, latent, 'ae_enc')
def decoder(h):
return layers.decoder(h, scales, depth, self.colors, 'ae_dec')
def discriminator(h):
with tf.variable_scope('disc', reuse=tf.AUTO_REUSE):
h = tf.layers.flatten(h)
for size in [int(s) for s in disc_layer_sizes.split(',')]:
h = tf.layers.dense(h, size, tf.nn.leaky_relu)
return tf.layers.dense(h, 1)
encode = encoder(x)
decode = decoder(h)
ae = decoder(encode)
loss_ae = tf.losses.mean_squared_error(x, ae)
prior_samples = tf.random_normal(tf.shape(encode), dtype=encode.dtype)
adversary_logit_latent = discriminator(encode)
adversary_logit_prior = discriminator(prior_samples)
adversary_loss_latents = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_latent,
labels=tf.zeros_like(adversary_logit_latent)))
adversary_loss_prior = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_prior,
labels=tf.ones_like(adversary_logit_prior)))
autoencoder_loss_latents = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_latent,
labels=tf.ones_like(adversary_logit_latent)))
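# Standard GAN-style pairing: the discriminator is trained to score prior
# samples as 1 and encoder outputs as 0, while autoencoder_loss_latents pushes
# the encoder to make its outputs score 1, i.e. indistinguishable from the prior.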
def _accuracy(logits, label):
labels = tf.logical_and(label, tf.ones_like(logits, dtype=bool))
correct = tf.equal(tf.greater(logits, 0), labels)
return tf.reduce_mean(tf.to_float(correct))
latent_accuracy = _accuracy(adversary_logit_latent, False)
prior_accuracy = _accuracy(adversary_logit_prior, True)
adversary_accuracy = (latent_accuracy + prior_accuracy)/2
utils.HookReport.log_tensor(loss_ae, 'loss_ae')
utils.HookReport.log_tensor(adversary_loss_latents, 'loss_adv_latent')
utils.HookReport.log_tensor(adversary_loss_prior, 'loss_adv_prior')
utils.HookReport.log_tensor(autoencoder_loss_latents, 'loss_ae_latent')
utils.HookReport.log_tensor(adversary_accuracy, 'adversary_accuracy')
xops = classifiers.single_layer_classifier(
tf.stop_gradient(encode), l, self.nclass)
xloss = tf.reduce_mean(xops.loss)
utils.HookReport.log_tensor(xloss, 'classify_latent')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
ae_vars = tf.global_variables('ae_')
disc_vars = tf.global_variables('disc')
xl_vars = tf.global_variables('single_layer_classifier')
with tf.control_dependencies(update_ops):
train_ae = tf.train.AdamOptimizer(FLAGS.lr).minimize(
loss_ae + autoencoder_loss_latents, var_list=ae_vars)
train_disc = tf.train.AdamOptimizer(adversary_lr).minimize(
adversary_loss_prior + adversary_loss_latents,
var_list=disc_vars)
train_xl = tf.train.AdamOptimizer(FLAGS.lr).minimize(
xloss, tf.train.get_global_step(), var_list=xl_vars)
ops = train.AEOps(x, h, l, encode, decode, ae,
tf.group(train_ae, train_disc, train_xl),
classify_latent=xops.output)
n_interpolations = 16
n_images_per_interpolation = 16
def gen_images():
return self.make_sample_grid_and_save(
ops, interpolation=n_interpolations,
height=n_images_per_interpolation)
recon, inter, slerp, samples = tf.py_func(
gen_images, [], [tf.float32]*4)
tf.summary.image('reconstruction', tf.expand_dims(recon, 0))
tf.summary.image('interpolation', tf.expand_dims(inter, 0))
tf.summary.image('slerp', tf.expand_dims(slerp, 0))
tf.summary.image('samples', tf.expand_dims(samples, 0))
if FLAGS.dataset == 'lines32':
batched = (n_interpolations, 32, n_images_per_interpolation, 32, 1)
batched_interp = tf.transpose(
tf.reshape(inter, batched), [0, 2, 1, 3, 4])
mean_distance, mean_smoothness = tf.py_func(
eval.line_eval, [batched_interp], [tf.float32, tf.float32])
tf.summary.scalar('mean_distance', mean_distance)
tf.summary.scalar('mean_smoothness', mean_smoothness)
return ops
def main(argv):
del argv # Unused.
batch = FLAGS.batch
dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=batch))
scales = int(round(math.log(dataset.width // FLAGS.latent_width, 2)))
model = AAE(
dataset,
FLAGS.train_dir,
latent=FLAGS.latent,
depth=FLAGS.depth,
scales=scales,
adversary_lr=FLAGS.adversary_lr,
disc_layer_sizes=FLAGS.disc_layer_sizes)
model.train()
if __name__ == '__main__':
flags.DEFINE_integer('depth', 64, 'Depth of the first convolution layer.')
flags.DEFINE_integer(
'latent', 16,
'Latent space depth, the total latent size is the depth multiplied by '
'latent_width ** 2.')
flags.DEFINE_integer('latent_width', 4, 'Width of the latent space.')
flags.DEFINE_float('adversary_lr', 1e-4,
'Learning rate for discriminator.')
flags.DEFINE_string('disc_layer_sizes', '100,100',
'Comma-separated list of discriminator layer sizes.')
app.run(main)
|
from __future__ import absolute_import
from __future__ import division
|
constants.rs
|
//! Constants provided by D3D11
#![allow(non_upper_case_globals)]
use winapi::{GUID, UINT};
pub const D3D11_SDK_VERSION: UINT = 7;
// d3d11.h
define_guid!(IID_ID3D11DeviceChild, 0x1841e5c8, 0x16b0, 0x489b, 0xbc, 0xc8, 0x44, 0xcf, 0xb0, 0xd5, 0xde, 0xae);
define_guid!(IID_ID3D11DepthStencilState, 0x03823efb, 0x8d8f, 0x4e1c, 0x9a, 0xa2, 0xf6, 0x4b, 0xb2, 0xcb, 0xfd, 0xf1);
define_guid!(IID_ID3D11BlendState, 0x75b68faa, 0x347d, 0x4159, 0x8f, 0x45, 0xa0, 0x64, 0x0f, 0x01, 0xcd, 0x9a);
define_guid!(IID_ID3D11RasterizerState, 0x9bb4ab81, 0xab1a, 0x4d8f, 0xb5, 0x06, 0xfc, 0x04, 0x20, 0x0b, 0x6e, 0xe7);
define_guid!(IID_ID3D11Resource, 0xdc8e63f3, 0xd12b, 0x4952, 0xb4, 0x7b, 0x5e, 0x45, 0x02, 0x6a, 0x86, 0x2d);
define_guid!(IID_ID3D11Buffer, 0x48570b85, 0xd1ee, 0x4fcd, 0xa2, 0x50, 0xeb, 0x35, 0x07, 0x22, 0xb0, 0x37);
define_guid!(IID_ID3D11Texture1D, 0xf8fb5c27, 0xc6b3, 0x4f75, 0xa4, 0xc8, 0x43, 0x9a, 0xf2, 0xef, 0x56, 0x4c);
define_guid!(IID_ID3D11Texture2D, 0x6f15aaf2, 0xd208, 0x4e89, 0x9a, 0xb4, 0x48, 0x95, 0x35, 0xd3, 0x4f, 0x9c);
define_guid!(IID_ID3D11Texture3D, 0x037e866e, 0xf56d, 0x4357, 0xa8, 0xaf, 0x9d, 0xab, 0xbe, 0x6e, 0x25, 0x0e);
define_guid!(IID_ID3D11View, 0x839d1216, 0xbb2e, 0x412b, 0xb7, 0xf4, 0xa9, 0xdb, 0xeb, 0xe0, 0x8e, 0xd1);
define_guid!(IID_ID3D11ShaderResourceView, 0xb0e06fe0, 0x8192, 0x4e1a, 0xb1, 0xca, 0x36, 0xd7, 0x41, 0x47, 0x10, 0xb2);
define_guid!(IID_ID3D11RenderTargetView, 0xdfdba067, 0x0b8d, 0x4865, 0x87, 0x5b, 0xd7, 0xb4, 0x51, 0x6c, 0xc1, 0x64);
define_guid!(IID_ID3D11DepthStencilView, 0x9fdac92a, 0x1876, 0x48c3, 0xaf, 0xad, 0x25, 0xb9, 0x4f, 0x84, 0xa9, 0xb6);
define_guid!(IID_ID3D11UnorderedAccessView, 0x28acf509, 0x7f5c, 0x48f6, 0x86, 0x11, 0xf3, 0x16, 0x01, 0x0a, 0x63, 0x80);
define_guid!(IID_ID3D11VertexShader, 0x3b301d64, 0xd678, 0x4289, 0x88, 0x97, 0x22, 0xf8, 0x92, 0x8b, 0x72, 0xf3);
define_guid!(IID_ID3D11HullShader, 0x8e5c6061, 0x628a, 0x4c8e, 0x82, 0x64, 0xbb, 0xe4, 0x5c, 0xb3, 0xd5, 0xdd);
define_guid!(IID_ID3D11DomainShader, 0xf582c508, 0x0f36, 0x490c, 0x99, 0x77, 0x31, 0xee, 0xce, 0x26, 0x8c, 0xfa);
define_guid!(IID_ID3D11GeometryShader, 0x38325b96, 0xeffb, 0x4022, 0xba, 0x02, 0x2e, 0x79, 0x5b, 0x70, 0x27, 0x5c);
define_guid!(IID_ID3D11PixelShader, 0xea82e40d, 0x51dc, 0x4f33, 0x93, 0xd4, 0xdb, 0x7c, 0x91, 0x25, 0xae, 0x8c);
define_guid!(IID_ID3D11ComputeShader, 0x4f5b196e, 0xc2bd, 0x495e, 0xbd, 0x01, 0x1f, 0xde, 0xd3, 0x8e, 0x49, 0x69);
define_guid!(IID_ID3D11InputLayout, 0xe4819ddc, 0x4cf0, 0x4025, 0xbd, 0x26, 0x5d, 0xe8, 0x2a, 0x3e, 0x07, 0xb7);
define_guid!(IID_ID3D11SamplerState, 0xda6fea51, 0x564c, 0x4487, 0x98, 0x10, 0xf0, 0xd0, 0xf9, 0xb4, 0xe3, 0xa5);
define_guid!(IID_ID3D11Asynchronous, 0x4b35d0cd, 0x1e15, 0x4258, 0x9c, 0x98, 0x1b, 0x13, 0x33, 0xf6, 0xdd, 0x3b);
define_guid!(IID_ID3D11Query, 0xd6c00747, 0x87b7, 0x425e, 0xb8, 0x4d, 0x44, 0xd1, 0x08, 0x56, 0x0a, 0xfd);
define_guid!(IID_ID3D11Predicate, 0x9eb576dd, 0x9f77, 0x4d86, 0x81, 0xaa, 0x8b, 0xab, 0x5f, 0xe4, 0x90, 0xe2);
define_guid!(IID_ID3D11Counter, 0x6e8c49fb, 0xa371, 0x4770, 0xb4, 0x40, 0x29, 0x08, 0x60, 0x22, 0xb7, 0x41);
define_guid!(IID_ID3D11ClassInstance, 0xa6cd7faa, 0xb0b7, 0x4a2f, 0x94, 0x36, 0x86, 0x62, 0xa6, 0x57, 0x97, 0xcb);
define_guid!(IID_ID3D11ClassLinkage, 0xddf57cba, 0x9543, 0x46e4, 0xa1, 0x2b, 0xf2, 0x07, 0xa0, 0xfe, 0x7f, 0xed);
define_guid!(IID_ID3D11CommandList, 0xa24bc4d1, 0x769e, 0x43f7, 0x80, 0x13, 0x98, 0xff, 0x56, 0x6c, 0x18, 0xe2);
define_guid!(IID_ID3D11DeviceContext, 0xc0bfa96c, 0xe089, 0x44fb, 0x8e, 0xaf, 0x26, 0xf8, 0x79, 0x61, 0x90, 0xda);
define_guid!(IID_ID3D11VideoDecoder, 0x3C9C5B51, 0x995D, 0x48d1, 0x9B, 0x8D, 0xFA, 0x5C, 0xAE, 0xDE, 0xD6, 0x5C);
|
define_guid!(IID_ID3D11CryptoSession, 0x9B32F9AD, 0xBDCC, 0x40a6, 0xA3, 0x9D, 0xD5, 0xC8, 0x65, 0x84, 0x57, 0x20);
define_guid!(IID_ID3D11VideoDecoderOutputView, 0xC2931AEA, 0x2A85, 0x4f20, 0x86, 0x0F, 0xFB, 0xA1, 0xFD, 0x25, 0x6E, 0x18);
define_guid!(IID_ID3D11VideoProcessorInputView, 0x11EC5A5F, 0x51DC, 0x4945, 0xAB, 0x34, 0x6E, 0x8C, 0x21, 0x30, 0x0E, 0xA5);
define_guid!(IID_ID3D11VideoProcessorOutputView, 0xA048285E, 0x25A9, 0x4527, 0xBD, 0x93, 0xD6, 0x8B, 0x68, 0xC4, 0x42, 0x54);
define_guid!(IID_ID3D11VideoContext, 0x61F21C45, 0x3C0E, 0x4a74, 0x9C, 0xEA, 0x67, 0x10, 0x0D, 0x9A, 0xD5, 0xE4);
define_guid!(IID_ID3D11VideoDevice, 0x10EC4D5B, 0x975A, 0x4689, 0xB9, 0xE4, 0xD0, 0xAA, 0xC3, 0x0F, 0xE3, 0x33);
define_guid!(IID_ID3D11Device, 0xdb6f6ddb, 0xac77, 0x4e88, 0x82, 0x53, 0x81, 0x9d, 0xf9, 0xbb, 0xf1, 0x40);
// d3d11_1.h
define_guid!(IID_ID3D11BlendState1, 0xcc86fabe, 0xda55, 0x401d, 0x85, 0xe7, 0xe3, 0xc9, 0xde, 0x28, 0x77, 0xe9);
define_guid!(IID_ID3D11RasterizerState1, 0x1217d7a6, 0x5039, 0x418c, 0xb0, 0x42, 0x9c, 0xbe, 0x25, 0x6a, 0xfd, 0x6e);
define_guid!(IID_ID3DDeviceContextState, 0x5c1e0d8a, 0x7c23, 0x48f9, 0x8c, 0x59, 0xa9, 0x29, 0x58, 0xce, 0xff, 0x11);
define_guid!(IID_ID3D11DeviceContext1, 0xbb2c6faa, 0xb5fb, 0x4082, 0x8e, 0x6b, 0x38, 0x8b, 0x8c, 0xfa, 0x90, 0xe1);
define_guid!(IID_ID3D11Device1, 0xa04bfb29, 0x08ef, 0x43d6, 0xa4, 0x9c, 0xa9, 0xbd, 0xbd, 0xcb, 0xe6, 0x86);
define_guid!(IID_ID3DUserDefinedAnnotation, 0xb2daad8b, 0x03d4, 0x4dbf, 0x95, 0xeb, 0x32, 0xab, 0x4b, 0x63, 0xd0, 0xab);
// d3d11_2.h
define_guid!(IID_ID3D11DeviceContext2,0x420d5b32,0xb90c,0x4da4,0xbe,0xf0,0x35,0x9f,0x6a,0x24,0xa8,0x3a);
define_guid!(IID_ID3D11Device2,0x9d06dffa,0xd1e5,0x4d07,0x83,0xa8,0x1b,0xb1,0x23,0xf2,0xf8,0x41);
// d3dcommon.h
define_guid!(IID_ID3D10Blob, 0x8ba5fb08, 0x5195, 0x40e2, 0xac, 0x58, 0xd, 0x98, 0x9c, 0x3a, 0x1, 0x2);
// d3d11shader.h
define_guid!(IID_ID3D11ShaderReflectionType, 0x6e6ffa6a, 0x9bae, 0x4613, 0xa5, 0x1e, 0x91, 0x65, 0x2d, 0x50, 0x8c, 0x21);
define_guid!(IID_ID3D11ShaderReflectionVariable, 0x51f23923, 0xf3e5, 0x4bd1, 0x91, 0xcb, 0x60, 0x61, 0x77, 0xd8, 0xdb, 0x4c);
define_guid!(IID_ID3D11ShaderReflectionConstantBuffer, 0xeb62d63d, 0x93dd, 0x4318, 0x8a, 0xe8, 0xc6, 0xf8, 0x3a, 0xd3, 0x71, 0xb8);
define_guid!(IID_ID3D11ShaderReflection,0x8d536ca1, 0x0cca, 0x4956, 0xa8, 0x37, 0x78, 0x69, 0x63, 0x75, 0x55, 0x84);
define_guid!(IID_ID3D11LibraryReflection, 0x54384f1b, 0x5b3e, 0x4bb7, 0xae, 0x1, 0x60, 0xba, 0x30, 0x97, 0xcb, 0xb6);
define_guid!(IID_ID3D11FunctionReflection, 0x207bcecb, 0xd683, 0x4a06, 0xa8, 0xa3, 0x9b, 0x14, 0x9b, 0x9f, 0x73, 0xa4);
define_guid!(IID_ID3D11FunctionParameterReflection, 0x42757488, 0x334f, 0x47fe, 0x98, 0x2e, 0x1a, 0x65, 0xd0, 0x8c, 0xc4, 0x62);
define_guid!(IID_ID3D11Module, 0xcac701ee, 0x80fc, 0x4122, 0x82, 0x42, 0x10, 0xb3, 0x9c, 0x8c, 0xec, 0x34);
define_guid!(IID_ID3D11ModuleInstance, 0x469e07f7, 0x45a, 0x48d5, 0xaa, 0x12, 0x68, 0xa4, 0x78, 0xcd, 0xf7, 0x5d);
define_guid!(IID_ID3D11Linker, 0x59a6cd0e, 0xe10d, 0x4c1f, 0x88, 0xc0, 0x63, 0xab, 0xa1, 0xda, 0xf3, 0xe);
define_guid!(IID_ID3D11LinkingNode, 0xd80dd70c, 0x8d2f, 0x4751, 0x94, 0xa1, 0x3, 0xc7, 0x9b, 0x35, 0x56, 0xdb);
define_guid!(IID_ID3D11FunctionLinkingGraph, 0x54133220, 0x1ce8, 0x43d3, 0x82, 0x36, 0x98, 0x55, 0xc5, 0xce, 0xec, 0xff);
// d3d11sdklayers.h
define_guid!(IID_ID3D11Debug, 0x79cf2233, 0x7536, 0x4948, 0x9d, 0x36, 0x1e, 0x46, 0x92, 0xdc, 0x57, 0x60);
define_guid!(IID_ID3D11SwitchToRef, 0x1ef337e3, 0x58e7, 0x4f83, 0xa6, 0x92, 0xdb, 0x22, 0x1f, 0x5e, 0xd4, 0x7e);
define_guid!(IID_ID3D11TracingDevice, 0x1911c771, 0x1587, 0x413e, 0xa7, 0xe0, 0xfb, 0x26, 0xc3, 0xde, 0x02, 0x68);
define_guid!(IID_ID3D11RefTrackingOptions, 0x193dacdf, 0x0db2, 0x4c05, 0xa5, 0x5c, 0xef, 0x06, 0xca, 0xc5, 0x6f, 0xd9);
define_guid!(IID_ID3D11RefDefaultTrackingOptions, 0x03916615, 0xc644, 0x418c, 0x9b, 0xf4, 0x75, 0xdb, 0x5b, 0xe6, 0x3c, 0xa0);
define_guid!(IID_ID3D11InfoQueue, 0x6543dbb6, 0x1b48, 0x42f5, 0xab, 0x82, 0xe9, 0x7e, 0xc7, 0x43, 0x26, 0xf6);
|
define_guid!(IID_ID3D11VideoProcessorEnumerator, 0x31627037, 0x53AB, 0x4200, 0x90, 0x61, 0x05, 0xFA, 0xA9, 0xAB, 0x45, 0xF9);
define_guid!(IID_ID3D11VideoProcessor, 0x1D7B0652, 0x185F, 0x41c6, 0x85, 0xCE, 0x0C, 0x5B, 0xE3, 0xD4, 0xAE, 0x6C);
define_guid!(IID_ID3D11AuthenticatedChannel, 0x3015A308, 0xDCBD, 0x47aa, 0xA7, 0x47, 0x19, 0x24, 0x86, 0xD1, 0x4D, 0x4A);
|