file_name
stringlengths
3
137
prefix
stringlengths
0
918k
suffix
stringlengths
0
962k
middle
stringlengths
0
812k
completionListInObjectBindingPattern15.ts
/// <reference path="fourslash.ts"/> ////class Foo { //// private xxx1 = 1; //// protected xxx2 = 2; //// public xxx3 = 3; //// private static xxx4 = 4; //// protected static xxx5 = 5; //// public static xxx6 = 6; //// foo() { //// const { /*1*/ } = this; //// const { /*2*/ } = Foo; //// } ////} //// ////const { /*3*/ } = new Foo(); ////const { /*4*/ } = Foo; verify.completions({ marker: "1", exact: ["xxx1", "xxx2", "xxx3", "foo"] }); verify.completions({ marker: "2", exact: ["prototype", "xxx4", "xxx5", "xxx6"] }); verify.completions({ marker: "3", exact: ["xxx3", "foo"] });
verify.completions({ marker: "4", exact: ["prototype", "xxx6"] });
run.go
/* Copyright © 2019 Samori Gorse <[email protected]> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package commands import ( "fmt" "os" "os/signal" "syscall" "github.com/gin-gonic/gin" "github.com/shinuza/horloge" "github.com/spf13/cobra" "github.com/spf13/viper" ) var bind string var port int var sync string var runCmd = &cobra.Command{ Use: "run", Short: "Runs an horloge runner with a web interface", Long: `Starts a web API with the following routes: GET /jobs returns a list of jobs POST /jobs creates a new job GET /jobs/{id} returns job with id {id} DELETE /jobs/{id} delets job with id {id}`, Run: func(cmd *cobra.Command, args []string) { runner = horloge.NewRunner() if sync != "" { fmt.Println("Sync activated") fmt.Println("Connecting to redis database at %s on db %d", redisAddr, redisDB) runner.Sync(horloge.NewSyncRedis(runner, redisAddr, redisPasswd, redisDB)) } r := gin.Default() // Routes r.GET("/ping", horloge.HTTPHandlerPing()) r.GET("/health_check", horloge.HTTPHandlerHealthCheck()) r.GET("/version", horloge.HTTPHandlerVersion()) r.POST("/jobs", horloge.HTTPHandlerRegisterJob(runner)) r.GET("/jobs", horloge.HTTPHandlerListJobs(runner)) r.GET("/jobs/:id", horloge.HTTPHandlerJobDetail(runner)) r.DELETE("/jobs/:id", horloge.HTTPHandlerDeleteJob(runner)) go func() { addr := fmt.Sprintf("%s:%d", bind, port) fmt.Printf("🕒 Horloge v%s\n", horloge.Version) fmt.Printf("Http server powered by Gin %s\n", gin.Version) r.Run(addr) }() signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) <-signalChan fmt.Println("Shutdown signal received, exiting...") }, } func init
rootCmd.AddCommand(runCmd) runCmd.Flags().StringVarP(&bind, "bind", "b", "127.0.0.1", "Addr to listen to") runCmd.Flags().IntVarP(&port, "port", "p", 6432, "Port to listen on") runCmd.Flags().StringVarP(&sync, "sync", "s", "", "Sync method to use") viper.BindPFlag("run.bind", mqttBridgeCmd.Flags().Lookup("bind")) viper.BindPFlag("run.port", mqttBridgeCmd.Flags().Lookup("port")) viper.BindPFlag("run.sync", mqttBridgeCmd.Flags().Lookup("sync")) }
() {
client.go
package botrest import ( "fmt" "net/url" "strconv" "time" "github.com/jonas747/discordgo" "github.com/jonas747/retryableredis" "github.com/jonas747/yagpdb/common" "github.com/jonas747/yagpdb/common/internalapi" ) var clientLogger = common.GetFixedPrefixLogger("botrest_client") func GetGuild(guildID int64) (g *discordgo.Guild, err error) { err = internalapi.GetWithGuild(guildID, discordgo.StrID(guildID)+"/guild", &g) return } func GetBotMember(guildID int64) (m *discordgo.Member, err error) { err = internalapi.GetWithGuild(guildID, discordgo.StrID(guildID)+"/botmember", &m) return } func GetOnlineCount(guildID int64) (c int64, err error) { err = internalapi.GetWithGuild(guildID, discordgo.StrID(guildID)+"/onlinecount", &c) return } func GetMembers(guildID int64, members ...int64) (m []*discordgo.Member, err error) { stringed := make([]string, 0, len(members)) for _, v := range members { stringed = append(stringed, strconv.FormatInt(v, 10)) } query := url.Values{"users": stringed} encoded := query.Encode() err = internalapi.GetWithGuild(guildID, discordgo.StrID(guildID)+"/members?"+encoded, &m) return } func GetMemberColors(guildID int64, members ...int64) (m map[string]int, err error) { m = make(map[string]int) stringed := make([]string, 0, len(members)) for _, v := range members { stringed = append(stringed, strconv.FormatInt(v, 10)) } query := url.Values{"users": stringed} encoded := query.Encode() err = internalapi.GetWithGuild(guildID, discordgo.StrID(guildID)+"/membercolors?"+encoded, &m) return } func GetMemberMultiGuild(userID int64, guilds ...int64) (members []*discordgo.Member, err error) { members = make([]*discordgo.Member, 0, len(guilds)) for _, v := range guilds { m, err := GetMembers(v, userID) if err == nil && len(m) > 0 { members = append(members, m[0]) } } return } func GetChannelPermissions(guildID, channelID int64) (perms int64, err error) { err = internalapi.GetWithGuild(guildID, discordgo.StrID(guildID)+"/channelperms/"+discordgo.StrID(channelID), &perms) return } func
(addr string) (st []*shardSessionInfo, err error) { err = internalapi.GetWithAddress(addr, "/shard_sessions", &st) return } type NodeStatus struct { ID string `json:"id"` Shards []*ShardStatus `json:"shards"` Host string `json:"host"` Uptime time.Duration `json:"uptime"` } type NodeStatusesResponse struct { Nodes []*NodeStatus `json:"nodes"` MissingShards []int `json:"missing_shards"` TotalShards int `json:"total_shards"` } func GetNodeStatuses() (st *NodeStatusesResponse, err error) { // retrieve a list of nodes // Special handling if were in clustered mode var clustered bool err = common.RedisPool.Do(retryableredis.Cmd(&clustered, "EXISTS", "dshardorchestrator_nodes_z")) if err != nil { return nil, err } if clustered { return getNodeStatusesClustered() } var status *NodeStatus err = internalapi.GetWithShard(0, "node_status", &status) if err != nil { return nil, err } status.ID = "N/A" return &NodeStatusesResponse{ Nodes: []*NodeStatus{status}, TotalShards: 1, }, nil } func getNodeStatusesClustered() (st *NodeStatusesResponse, err error) { nodeIDs, err := common.GetActiveNodes() if err != nil { return nil, err } totalShards, _ := common.ServicePoller.GetShardCount() st = &NodeStatusesResponse{ TotalShards: int(totalShards), } // send requests resultCh := make(chan interface{}, len(nodeIDs)) for _, n := range nodeIDs { go getNodeStatus(n, resultCh) } timeout := time.After(time.Second * 3) // fetch responses for index := 0; index < len(nodeIDs); index++ { select { case <-timeout: clientLogger.Errorf("Timed out waiting for %d nodes", len(nodeIDs)-index) break case result := <-resultCh: switch t := result.(type) { case error: continue case *NodeStatus: st.Nodes = append(st.Nodes, t) } } } // check for missing nodes/shards OUTER: for i := 0; i < int(totalShards); i++ { for _, node := range st.Nodes { for _, shard := range node.Shards { if shard.ShardID == i { continue OUTER // shard found } } } // shard not found st.MissingShards = append(st.MissingShards, i) } return } func getNodeStatus(nodeID string, retCh chan interface{}) { // retrieve the REST address for this node addr, err := common.ServicePoller.GetNodeAddress(nodeID) if err != nil { clientLogger.WithError(err).Error("failed retrieving rest address for bot for node id: ", nodeID) retCh <- err return } var status *NodeStatus err = internalapi.GetWithAddress(addr, "node_status", &status) if err != nil { clientLogger.WithError(err).Error("failed retrieving shard status for node ", nodeID) retCh <- err return } retCh <- status } func SendReconnectShard(shardID int, reidentify bool) (err error) { queryParams := "" if reidentify { queryParams = "?reidentify=1" } err = internalapi.PostWithShard(shardID, fmt.Sprintf("shard/%d/reconnect"+queryParams, shardID), nil, nil) return } func SendReconnectAll(reidentify bool) (err error) { queryParams := "" if reidentify { queryParams = "?reidentify=1" } err = internalapi.PostWithShard(0, "shard/*/reconnect"+queryParams, nil, nil) return }
GetSessionInfo
next_id.rs
use chain_crypto::Blake2b256; use hex; use jcli_app::utils::{DebugFlag, HostAddr, RestApiSender}; use structopt::StructOpt; #[derive(StructOpt)] #[structopt(rename_all = "kebab-case")] pub enum
{ /// Get block descendant ID Get { #[structopt(flatten)] addr: HostAddr, #[structopt(flatten)] debug: DebugFlag, /// Maximum number of IDs, must be between 1 and 100, default 1 #[structopt(short, long)] count: Option<usize>, }, } impl NextId { pub fn exec(self, block_id: String) { match self { NextId::Get { addr, debug, count } => exec_get(block_id, addr, debug, count), } } } fn exec_get(block_id: String, addr: HostAddr, debug: DebugFlag, count: Option<usize>) { let url = addr .with_segments(&["v0", "block", &block_id, "next_id"]) .unwrap() .into_url(); let builder = reqwest::Client::new().get(url).query(&[("count", count)]); let response = RestApiSender::new(builder, &debug).send().unwrap(); response.response().error_for_status_ref().unwrap(); let body = response.body().binary(); for block_id in body.chunks(Blake2b256::HASH_SIZE) { println!("{}", hex::encode(block_id)); } }
NextId
translator.go
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package translator import ( "crypto/sha256" "strconv" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/hash" "github.com/pkg/errors" "github.com/hyperledger-labs/fabric-token-sdk/token/services/vault/keys" token2 "github.com/hyperledger-labs/fabric-token-sdk/token/token" ) var logger = flogging.MustGetLogger("token-sdk.vault.translator") // Translator validates token requests and generates the corresponding RWSets type Translator struct { IssuingValidator IssuingValidator RWSet RWSet TxID string counter uint64 sigCounter uint64 namespace string } func New(issuingValidator IssuingValidator, txID string, rwSet RWSet, namespace string) *Translator { w := &Translator{ IssuingValidator: issuingValidator, RWSet: rwSet, TxID: txID, counter: 0, sigCounter: 0, namespace: namespace, } return w } // Write checks that transactions are correct wrt. the most recent rwset state. // Write checks are ones that shall be done sequentially, since transactions within a block may introduce dependencies. func (w *Translator) Write(action interface{}) error { logger.Debugf("checking transaction with txID '%s'", w.TxID) err := w.checkProcess(action) if err != nil { return err } logger.Debugf("committing transaction with txID '%s'", w.TxID) err = w.commitProcess(action) if err != nil { logger.Errorf("error committing transaction with txID '%s': %s", w.TxID, err) return err } logger.Debugf("successfully processed transaction with txID '%s'", w.TxID) return nil } func (w *Translator) CommitTokenRequest(raw []byte, storeHash bool) error { key, err := keys.CreateTokenRequestKey(w.TxID) if err != nil { return errors.Errorf("can't create for token request '%s'", w.TxID) } tr, err := w.RWSet.GetState(w.namespace, key) if err != nil { return errors.Wrapf(err, "failed to read token request'%s'", w.TxID) } if len(tr) != 0 { return errors.Wrapf(errors.New("token request with same ID already exists"), "failed to write token request'%s'", w.TxID) } if storeHash { hash := sha256.New() n, err := hash.Write(raw) if n != len(raw) { return errors.Errorf("failed to write token request, hash failure '%s'", w.TxID) } if err != nil { return errors.Wrapf(err, "failed to write token request, hash failure '%s'", w.TxID) } raw = hash.Sum(nil) } err = w.RWSet.SetState(w.namespace, key, raw) if err != nil { return errors.Wrapf(err, "failed to write token request'%s'", w.TxID) } return nil } func (w *Translator) ReadTokenRequest() ([]byte, error) { key, err := keys.CreateTokenRequestKey(w.TxID) if err != nil { return nil, errors.Errorf("can't create for token request '%s'", w.TxID) } tr, err := w.RWSet.GetState(w.namespace, key) if err != nil { return nil, errors.Wrapf(err, "failed to read token request'%s'", w.TxID) } return tr, nil } func (w *Translator) ReadSetupParameters() ([]byte, error) { setupKey, err := keys.CreateSetupKey() if err != nil { return nil, errors.Wrapf(err, "failed to create setup key") } raw, err := w.RWSet.GetState(w.namespace, setupKey) if err != nil { return nil, errors.Wrapf(err, "failed to get setup parameters") } return raw, nil } func (w *Translator) QueryTokens(ids []*token2.ID) ([][]byte, error) { var res [][]byte var errs []error for _, id := range ids { outputID, err := keys.CreateTokenKey(id.TxId, id.Index) if err != nil { errs = append(errs, errors.Errorf("error creating output ID: %s", err)) continue // return nil, errors.Errorf("error creating output ID: %s", err) } logger.Debugf("query state [%s:%s]", id, outputID) bytes, err := w.RWSet.GetState(w.namespace, outputID) if err != nil { errs = append(errs, errors.Wrapf(err, "failed getting output for [%s]", outputID)) // return nil, errors.Wrapf(err, "failed getting output for [%s]", outputID) continue } if len(bytes) == 0 { errs = append(errs, errors.Errorf("output for key [%s] does not exist", outputID)) // return nil, errors.Errorf("output for key [%s] does not exist", outputID) continue } res = append(res, bytes) } if len(errs) != 0 { return nil, errors.Errorf("failed quering tokens [%v] with errs [%d][%v]", ids, len(errs), errs) } return res, nil } func (w *Translator) IsSigMetadataKey(k string) (bool, error) { prefix, _, err := keys.SplitCompositeKey(k) if err != nil { return false, errors.Wrapf(err, "failed to split composite key [%s]", k) } return prefix == keys.SignaturePrefix, nil } func (w *Translator) checkProcess(action interface{}) error { if err := w.checkAction(action); err != nil { return err } return nil } func (w *Translator) checkAction(tokenAction interface{}) error { switch action := tokenAction.(type) { case IssueAction: return w.checkIssue(action) case TransferAction: return w.checkTransfer(action) case SetupAction: return nil case Signature: return nil default: return errors.Errorf("unknown token action: %T", action) } } func (w *Translator) checkIssue(issue IssueAction) error { // check if issuer is allowed to issue type err := w.checkIssuePolicy(issue) if err != nil { return errors.Wrapf(err, "invalid issue: verification of issue policy failed") } // check if the keys of issued tokens aren't already used. // check is assigned owners are valid for i := 0; i < issue.NumOutputs(); i++ { err = w.checkTokenDoesNotExist(w.counter+uint64(i), w.TxID) if err != nil { return err } } return nil } func (w *Translator) checkTransfer(t TransferAction) error { keys, err := t.GetInputs() if err != nil { return errors.Wrapf(err, "invalid transfer: failed getting input IDs") } if !t.IsGraphHiding() { for _, key := range keys { bytes, err := w.RWSet.GetState(w.namespace, key) if err != nil { return errors.Wrapf(err, "invalid transfer: failed getting state [%s]", key) } if len(bytes) == 0 { return errors.Errorf("invalid transfer: input is already spent [%s]", key) } } } else { for _, key := range keys { bytes, err := w.RWSet.GetState(w.namespace, key) if err != nil { return errors.Wrapf(err, "invalid transfer: failed getting state [%s]", key) } if len(bytes) != 0 { return errors.Errorf("invalid transfer: input is already spent [%s:%v]", key, bytes) } } } // check if the keys of the new tokens aren't already used. for i := 0; i < t.NumOutputs(); i++ { if !t.IsRedeemAt(i) { // this is not a redeemed output err := w.checkTokenDoesNotExist(w.counter+uint64(i), w.TxID) if err != nil { return err } } } return nil } func (w *Translator) checkTokenDoesNotExist(index uint64, txID string) error { tokenKey, err := keys.CreateTokenKey(txID, index) if err != nil { return errors.Wrapf(err, "error creating output ID") } outputBytes, err := w.RWSet.GetState(w.namespace, tokenKey) if err != nil { return err } if len(outputBytes) != 0 { return errors.Errorf("token already exists: %s", tokenKey) } return nil } func (w *Translator) checkIssuePolicy(issue IssueAction) error { // TODO: retrieve type from action return w.IssuingValidator.Validate(issue.GetIssuer(), "") } func (w *Translator) commitProcess(action interface{}) error { logger.Debugf("committing action with txID '%s'", w.TxID) err := w.commitAction(action) if err != nil { logger.Errorf("error committing action with txID '%s': %s", w.TxID, err) return err } logger.Debugf("action with txID '%s' committed successfully", w.TxID) return nil } func (w *Translator) commitAction(tokenAction interface{}) (err error) { switch action := tokenAction.(type) { case IssueAction: err = w.commitIssueAction(action) case TransferAction: err = w.commitTransferAction(action) case SetupAction: err = w.commitSetupAction(action) case Signature: err = w.commitSignature(action) } return } func (w *Translator) commitSetupAction(setup SetupAction) error { raw, err := setup.GetSetupParameters() if err != nil { return err } setupKey, err := keys.CreateSetupKey() if err != nil { return err } err = w.RWSet.SetState(w.namespace, setupKey, raw) if err != nil { return err } return nil } func (w *Translator) commitIssueAction(issueAction IssueAction) error { base := w.counter outputs, err := issueAction.GetSerializedOutputs() if err != nil { return err } for i, output := range outputs { outputID, err := keys.CreateTokenKey(w.TxID, base+uint64(i)) if err != nil { return errors.Errorf("error creating output ID: %s", err) } if err := w.RWSet.SetState(w.namespace, outputID, output); err != nil { return err } } metadata := issueAction.GetMetadata() if len(metadata) != 0 { key, err := keys.CreateIssueActionMetadataKey(hash.Hashable(metadata).String()) if err != nil { return errors.Wrapf(err, "failed constructing metadata key") } raw, err := w.RWSet.GetState(w.namespace, key) if err != nil { return err } if len(raw) != 0 { return errors.Errorf("entry with issue metadata key [%s] is already occupied by [%s]", key, string(raw)) } if err := w.RWSet.SetState(w.namespace, key, metadata); err != nil { return err } } w.counter = w.counter + uint64(len(outputs)) return nil } // commitTransferAction is called for both transfer and redeem transactions // Check the owner of each output to determine how to generate the key func (w *Translator) commitTransferAction(transferAction TransferAction) error { base := w.counter for i := 0; i < transferAction.NumOutputs(); i++ { if !transferAction.IsRedeemAt(i) { outputID, err := keys.CreateTokenKey(w.TxID, base+uint64(i)) if err != nil { return errors.Errorf("error creating output ID: %s", err) } bytes, err := transferAction.SerializeOutputAt(i) if err != nil { return err } err = w.RWSet.SetState(w.namespace, outputID, bytes) if err != nil { return err } } } ids, err := transferAction.GetInputs() if err != nil { return err } err = w.spendTokens(ids, transferAction.IsGraphHiding()) if err != nil { return err } metadata := transferAction.GetMetadata() if len(metadata) != 0
w.counter = w.counter + uint64(transferAction.NumOutputs()) return nil } func (w *Translator) commitSignature(sig Signature) error { for k, value := range sig.Metadata() { key, err := keys.CreateSigMetadataKey(w.TxID, w.sigCounter, k) if err != nil { return errors.Errorf("error creating output ID: %s", err) } err = w.RWSet.SetState(w.namespace, key, value) if err != nil { return errors.Wrapf(err, "error setting state for key [%s]", key) } } w.sigCounter++ return nil } func (w *Translator) spendTokens(ids []string, graphHiding bool) error { if !graphHiding { for _, id := range ids { logger.Debugf("Delete state %s\n", id) err := w.RWSet.DeleteState(w.namespace, id) if err != nil { return err } } } else { for _, id := range ids { logger.Debugf("add serial number %s\n", id) err := w.RWSet.SetState(w.namespace, id, []byte(strconv.FormatBool(true))) if err != nil { return errors.Wrapf(err, "failed to add serial number %s", id) } } } return nil }
{ key, err := keys.CreateTransferActionMetadataKey(hash.Hashable(metadata).String()) if err != nil { return errors.Wrapf(err, "failed constructing metadata key") } raw, err := w.RWSet.GetState(w.namespace, key) if err != nil { return err } if len(raw) != 0 { return errors.Errorf("entry with transfer metadata key [%s] is already occupied by [%s]", key, string(raw)) } if err := w.RWSet.SetState(w.namespace, key, metadata); err != nil { return err } }
ec.rs
#[doc = "Register `EC` reader"] pub struct R(crate::R<EC_SPEC>); impl core::ops::Deref for R { type Target = crate::R<EC_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<EC_SPEC>> for R { fn from(reader: crate::R<EC_SPEC>) -> Self { R(reader) } } #[doc = "Field `XCOL` reader - Excessive Collisions"] pub struct XCOL_R(crate::FieldReader<u16, u16>); impl XCOL_R { pub(crate) fn
(bits: u16) -> Self { XCOL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for XCOL_R { type Target = crate::FieldReader<u16, u16>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl R { #[doc = "Bits 0:9 - Excessive Collisions"] #[inline(always)] pub fn xcol(&self) -> XCOL_R { XCOL_R::new((self.bits & 0x03ff) as u16) } } #[doc = "Excessive Collisions Register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ec](index.html) module"] pub struct EC_SPEC; impl crate::RegisterSpec for EC_SPEC { type Ux = u32; } #[doc = "`read()` method returns [ec::R](R) reader structure"] impl crate::Readable for EC_SPEC { type Reader = R; } #[doc = "`reset()` method sets EC to value 0"] impl crate::Resettable for EC_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
new
kubelet.py
import json import logging import requests import urllib3 from enum import Enum from kube_hunter.core.types import Discovery, Kubelet from kube_hunter.core.events import handler from kube_hunter.core.events.types import OpenPortEvent, Vulnerability, Event, Service urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) """ Services """ class ReadOnlyKubeletEvent(Service, Event): """The read-only port on the kubelet serves health probing endpoints, and is relied upon by many kubernetes components""" def __init__(self): Service.__init__(self, name="Kubelet API (readonly)") class SecureKubeletEvent(Service, Event): """The Kubelet is the main component in every Node, all pod operations goes through the kubelet""" def __init__(self, cert=False, token=False, anonymous_auth=True, **kwargs): self.cert = cert self.token = token self.anonymous_auth = anonymous_auth Service.__init__(self, name="Kubelet API", **kwargs) class KubeletPorts(Enum): SECURED = 10250 READ_ONLY = 10255 @handler.subscribe(OpenPortEvent, predicate= lambda x: x.port == 10255 or x.port == 10250) class KubeletDiscovery(Discovery): """Kubelet Discovery Checks for the existence of a Kubelet service, and its open ports """ def __init__(self, event): self.event = event def get_read_only_access(self): logging.debug("Passive hunter is attempting to get kubelet read access at {}:{}".format(self.event.host, self.event.port)) r = requests.get("http://{host}:{port}/pods".format(host=self.event.host, port=self.event.port)) if r.status_code == 200: self.publish_event(ReadOnlyKubeletEvent()) def get_secure_access(self): logging.debug("Attempting to get kubelet secure access") ping_status = self.ping_kubelet() if ping_status == 200: self.publish_event(SecureKubeletEvent(secure=False)) elif ping_status == 403: self.publish_event(SecureKubeletEvent(secure=True)) elif ping_status == 401: self.publish_event(SecureKubeletEvent(secure=True, anonymous_auth=False)) def ping_kubelet(self): logging.debug("Attempting to get pod info from kubelet") try: return requests.get("https://{host}:{port}/pods".format(host=self.event.host, port=self.event.port), verify=False).status_code except Exception as ex: logging.debug("Failed pinging https port 10250 on {} : {}".format(self.event.host, ex)) def execute(self):
if self.event.port == KubeletPorts.SECURED.value: self.get_secure_access() elif self.event.port == KubeletPorts.READ_ONLY.value: self.get_read_only_access()
opt.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. '''This module includes a set of optimizers for updating model parameters. It replaces the old optimizers from optimizer.py''' from singa import tensor class Optimizer(object): r"""Base optimizer. Args: config (Dict): specify the default values of configurable variables. """ def __init__(self, config): self.default_config = config self.iter = 0 self.param2config = {} self.param2state = {} def update(self, param, grad): r"""Update the param values with given gradients. Args: param(Tensor): param values to be updated in-place grad(Tensor): param gradients; the values may be updated in this function; do not use it anymore """ pass def step(self): r"""To increment the step counter""" self.iter += 1 def register(self, param_group, config): for param in param_group: assert param not in self.param2config, 'param is already registered' self.param2config[param] = config def load(self): pass def save(self): pass class SGD(Optimizer): r"""Implements stochastic gradient descent (optionally with momentum). Nesterov momentum is based on the formula from `On the importance of initialization and momentum in deep learning`__. Args: lr(float): learning rate momentum(float, optional): momentum factor(default: 0) weight_decay(float, optional): weight decay(L2 penalty)(default: 0) dampening(float, optional): dampening for momentum(default: 0) nesterov(bool, optional): enables Nesterov momentum(default: False) Example: >> > from singa import opt >> > optimizer = opt.SGD(lr=0.1, momentum=0.9) >> > optimizer.update() __ http: // www.cs.toronto.edu / %7Ehinton / absps / momentum.pdf .. note:: The implementation of SGD with Momentum / Nesterov subtly differs from Sutskever et. al. and implementations in some other frameworks. Considering the specific case of Momentum, the update can be written as .. math:: v = \rho * v + g \\ p = p - lr * v where p, g, v and: math: `\rho` denote the parameters, gradient, velocity, and momentum respectively. This is in contrast to Sutskever et. al. and other frameworks which employ an update of the form .. math:: v = \rho * v + lr * g \\ p = p - v The Nesterov version is analogously modified. """ def __init__(self, lr=0.1, momentum=0, dampening=0, weight_decay=0, nesterov=False): if momentum < 0.0: raise ValueError("Invalid momentum value: {}".format(momentum)) if weight_decay < 0.0: raise ValueError( "Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError( "Nesterov momentum requires a momentum and zero dampening") super(SGD, self).__init__(defaults) def update(self, param, grad): """Performs a single optimization step.
in this function; cannot use it anymore """ group = self.default_config if param in self.param2config: group = self.param2config[param] weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] if weight_decay != 0: grad += param * weight_decay if momentum != 0: if param not in self.param2state: self.param2state[param] = {} param_state = self.param2state[param] if 'momentum_buffer' not in param_state: buf = param_state[ 'momentum_buffer'] = tensor.zeros_like(param) buf *= momentum buf += grad else: buf = param_state['momentum_buffer'] buf *= momentum buf += (1 - dampening) * grad if nesterov: grad += momentum * buf else: grad = buf param -= grad * group['lr']
Arguments: param(Tensor): param values to be update in-place grad(Tensor): param gradients; the values may be updated
PortfolioItem.js
/* Framework */ import * as React from "react"; import Link from "next/link"; /* Bootstrap Components */ import { Card, CardImg, CardBody, CardTitle, CardSubtitle, Button, } from "reactstrap"; export const PortfolioItem = (data) => ( <> <Link href={data.href}> <Card className="cards"> <CardImg top width="100%" src={data.image} alt={data.title} /> <CardBody> <CardTitle> <h4>{data.title}</h4> </CardTitle> <CardSubtitle className="my-3 sub-one"> <span>{data.subtitle}</span> </CardSubtitle> <Button className="clicking"> <span>See More</span> </Button> </CardBody> </Card> </Link>
</> );
identity.py
import hashlib from ecdsa.curves import Ed25519, SECP256k1 from .principal import Principal import ecdsa class Identity: def __init__(self, privkey = "", type = "ed25519", anonymous = False): privkey = bytes(bytearray.fromhex(privkey)) self.anonymous = anonymous if anonymous: return self.key_type = type if type == 'secp256k1': if len(privkey) > 0: self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256) else: self.sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256) self._privkey = self.sk.to_string().hex() self.vk = self.sk.get_verifying_key() self._pubkey = self.vk.to_string().hex() self._der_pubkey = self.vk.to_der() elif type == 'ed25519': if len(privkey) > 0: self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.Ed25519) else: self.sk = ecdsa.SigningKey.generate(curve=ecdsa.Ed25519) self._privkey = self.sk.to_string().hex() self.vk = self.sk.get_verifying_key() self._pubkey = self.vk.to_string().hex() self._der_pubkey = self.vk.to_der() else: raise 'unsupported identity type' @staticmethod def from_pem(pem: str): key = ecdsa.SigningKey.from_pem(pem) privkey = key.to_string().hex() type = "unknown" if key.curve == Ed25519: type = 'ed25519' elif key.curve == SECP256k1: type = 'secp256k1' return Identity(privkey=privkey, type=type) def to_pem(self): pem = self.sk.to_pem(format="pkcs8") return pem def sender(self): if self.anonymous: return Principal.anonymous() return Principal.self_authenticating(self._der_pubkey) def sign(self, msg: bytes): if self.anonymous: return (None, None) if self.key_type == 'ed25519': sig = self.sk.sign(msg) return (self._der_pubkey, sig) elif self.key_type == 'secp256k1': sig = self.sk.sign(msg) return (self._der_pubkey, sig) @property def privkey(self): return self._privkey
def pubkey(self): return self._pubkey @property def der_pubkey(self): return self._der_pubkey def __repr__(self): return "Identity(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")" def __str__(self): return "(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
@property
path.config.js
'use strict'; const path = require('path'); const fs = require('fs'); const moduleFileExtensions = [ 'mjs', 'js', 'ts', 'tsx', 'jsx', ]; // Resolve file paths in the same order as webpack const resolveModule = (resolveFn, filePath) => { const extension = moduleFileExtensions.find(extension => fs.existsSync(resolveFn(`${filePath}.${extension}`)) ); if (extension) { return resolveFn(`${filePath}.${extension}`); } return resolveFn(`${filePath}.js`); }; // Make sure any symlinks in the project folder are resolved: // https://github.com/facebookincubator/create-react-app/issues/637 const appDirectory = fs.realpathSync(process.cwd()); function resolveApp(relativePath) { return path.resolve(appDirectory, relativePath); } const nodePaths = (process.env.NODE_PATH || '') .split(process.platform === 'win32' ? ';' : ':') .filter(Boolean) .map(resolveApp); const paths = { appDirectory: appDirectory, appBuild: resolveApp(process.env.OUTPUT_PATH || 'build'), appDist: resolveApp('dist'), appPublic: resolveApp('public'), appHtml: resolveApp('public/index.html'), appIndexJs: resolveModule(resolveApp, 'src/index'), appPackageJson: resolveApp('package.json'), appSrc: resolveApp('src'), appDocument: resolveApp('src/document/index.js'),
appNodeModules: resolveApp('node_modules'), componentDemoJs: resolveModule(resolveApp, 'demo/index'), nodePaths: nodePaths, }; module.exports = paths;
appShell: resolveApp('src/shell/index.js'), appManifest: resolveApp('manifest.json'), universalAppEntry: resolveApp('app.js'), appConfig: resolveApp('app.json'),
lib.rs
pub fn is_armstrong_number(num: u32) -> bool
{ let num_str = num.to_string(); let len = num_str.len(); num == num_str .chars() .map(|c| c.to_digit(10).unwrap().pow(len as u32)) .sum() }
admin.py
from django.contrib import admin from authentication.models import CustomUser # Register your models here. class CustomUserAdmin(admin.ModelAdmin):
admin.site.register(CustomUser, CustomUserAdmin)
list_display = ['username','first_name','last_name','email']
radio.rs
extern crate cursive; use cursive::Cursive; use cursive::views::{Dialog, DummyView, LinearLayout, RadioGroup}; // This example uses radio buttons. fn
() { let mut siv = Cursive::default(); // We need to pre-create the groups for our RadioButtons. let mut color_group: RadioGroup<String> = RadioGroup::new(); let mut size_group: RadioGroup<u32> = RadioGroup::new(); siv.add_layer( Dialog::new() .title("Make your selection") // We'll have two columns side-by-side .content(LinearLayout::horizontal() .child(LinearLayout::vertical() // The color group uses the label itself as stored value // By default, the first item is selected. .child(color_group.button_str("Red")) .child(color_group.button_str("Green")) .child(color_group.button_str("Blue"))) // A DummyView is used as a spacer .child(DummyView) .child(LinearLayout::vertical() // For the size, we store a number separately .child(size_group.button(5, "Small")) // The initial selection can also be overriden .child(size_group.button(15, "Medium").selected()) // The large size is out of stock, sorry! .child(size_group.button(25, "Large").disabled()))) .button("Ok", move |s| { // We retrieve the stored value for both group. let color = color_group.selection(); let size = size_group.selection(); s.pop_layer(); // And we simply print the result. let text = format!("Color: {}\nSize: {}cm", color, size); s.add_layer(Dialog::text(text) .button("Ok", |s| s.quit())); }), ); siv.run(); }
main
utils.go
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package // // Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at http://mozilla.org/MPL/2.0/. package mysql import ( "crypto/sha1" //"crypto/tls" "database/sql/driver" "encoding/binary" "fmt" "io" "strings" "time" csptls "github.com/hyperledger/fabric/bccsp/tls" ) var ( tlsConfigRegister map[string]*csptls.Config // Register for custom tls.Configs ) // RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. // Use the key as a value in the DSN where tls=value. // // rootCertPool := x509.NewCertPool() // pem, err := ioutil.ReadFile("/path/ca-cert.pem") // if err != nil { // log.Fatal(err) // } // if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { // log.Fatal("Failed to append PEM.") // } // clientCert := make([]tls.Certificate, 0, 1) // certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") // if err != nil { // log.Fatal(err) // } // clientCert = append(clientCert, certs) // mysql.RegisterTLSConfig("custom", &tls.Config{ // RootCAs: rootCertPool, // Certificates: clientCert, // }) // db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") // func RegisterTLSConfig(key string, config *csptls.Config) error { if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { return fmt.Errorf("key '%s' is reserved", key) } if tlsConfigRegister == nil { tlsConfigRegister = make(map[string]*csptls.Config) } tlsConfigRegister[key] = config return nil } // DeregisterTLSConfig removes the tls.Config associated with key. func DeregisterTLSConfig(key string) { if tlsConfigRegister != nil { delete(tlsConfigRegister, key) } } // Returns the bool value of the input. // The 2nd return value indicates if the input was a valid bool value func readBool(input string) (value bool, valid bool) { switch input { case "1", "true", "TRUE", "True": return true, true case "0", "false", "FALSE", "False": return false, true } // Not a valid bool value return } /****************************************************************************** * Authentication * ******************************************************************************/ // Encrypt password using 4.1+ method func scramblePassword(scramble, password []byte) []byte { if len(password) == 0 { return nil } // stage1Hash = SHA1(password) crypt := sha1.New() crypt.Write(password) stage1 := crypt.Sum(nil) // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) // inner Hash crypt.Reset() crypt.Write(stage1) hash := crypt.Sum(nil) // outer Hash crypt.Reset() crypt.Write(scramble) crypt.Write(hash) scramble = crypt.Sum(nil) // token = scrambleHash XOR stage1Hash for i := range scramble { scramble[i] ^= stage1[i] } return scramble } // Encrypt password using pre 4.1 (old password) method // https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c type myRnd struct { seed1, seed2 uint32 } const myRndMaxVal = 0x3FFFFFFF // Pseudo random number generator func newMyRnd(seed1, seed2 uint32) *myRnd { return &myRnd{ seed1: seed1 % myRndMaxVal, seed2: seed2 % myRndMaxVal, } } // Tested to be equivalent to MariaDB's floating point variant // http://play.golang.org/p/QHvhd4qved // http://play.golang.org/p/RG0q4ElWDx func (r *myRnd) NextByte() byte { r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal return byte(uint64(r.seed1) * 31 / myRndMaxVal) } // Generate binary hash from byte string using insecure pre 4.1 method func pwHash(password []byte) (result [2]uint32) { var add uint32 = 7 var tmp uint32 result[0] = 1345345333 result[1] = 0x12345671 for _, c := range password { // skip spaces and tabs in password if c == ' ' || c == '\t' { continue } tmp = uint32(c) result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) result[1] += (result[1] << 8) ^ result[0] add += tmp } // Remove sign bit (1<<31)-1) result[0] &= 0x7FFFFFFF result[1] &= 0x7FFFFFFF return } // Encrypt password using insecure pre 4.1 method func scrambleOldPassword(scramble, password []byte) []byte { if len(password) == 0 { return nil } scramble = scramble[:8] hashPw := pwHash(password) hashSc := pwHash(scramble) r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) var out [8]byte for i := range out { out[i] = r.NextByte() + 64 } mask := r.NextByte() for i := range out { out[i] ^= mask } return out[:] } /****************************************************************************** * Time related utils * ******************************************************************************/ // NullTime represents a time.Time that may be NULL. // NullTime implements the Scanner interface so // it can be used as a scan destination: // // var nt NullTime // err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) // ... // if nt.Valid { // // use nt.Time // } else { // // NULL value // } // // This NullTime implementation is not driver-specific type NullTime struct { Time time.Time Valid bool // Valid is true if Time is not NULL } // Scan implements the Scanner interface. // The value type must be time.Time or string / []byte (formatted time-string), // otherwise Scan fails. func (nt *NullTime) Scan(value interface{}) (err error) { if value == nil { nt.Time, nt.Valid = time.Time{}, false return } switch v := value.(type) { case time.Time: nt.Time, nt.Valid = v, true return case []byte: nt.Time, err = parseDateTime(string(v), time.UTC) nt.Valid = (err == nil) return case string: nt.Time, err = parseDateTime(v, time.UTC) nt.Valid = (err == nil) return } nt.Valid = false return fmt.Errorf("Can't convert %T to time.Time", value) } // Value implements the driver Valuer interface. func (nt NullTime) Value() (driver.Value, error) { if !nt.Valid { return nil, nil } return nt.Time, nil } func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { base := "0000-00-00 00:00:00.0000000" switch len(str) { case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" if str == base[:len(str)] { return } t, err = time.Parse(timeFormat[:len(str)], str) default: err = fmt.Errorf("invalid time string: %s", str) return } // Adjust location if err == nil && loc != time.UTC { y, mo, d := t.Date() h, mi, s := t.Clock() t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil } return } func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { switch num { case 0: return time.Time{}, nil case 4: return time.Date( int(binary.LittleEndian.Uint16(data[:2])), // year time.Month(data[2]), // month int(data[3]), // day 0, 0, 0, 0, loc, ), nil case 7: return time.Date( int(binary.LittleEndian.Uint16(data[:2])), // year time.Month(data[2]), // month int(data[3]), // day int(data[4]), // hour int(data[5]), // minutes int(data[6]), // seconds 0, loc, ), nil case 11: return time.Date( int(binary.LittleEndian.Uint16(data[:2])), // year time.Month(data[2]), // month int(data[3]), // day int(data[4]), // hour int(data[5]), // minutes int(data[6]), // seconds int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds loc, ), nil } return nil, fmt.Errorf("invalid DATETIME packet length %d", num) } // zeroDateTime is used in formatBinaryDateTime to avoid an allocation // if the DATE or DATETIME has the zero value. // It must never be changed. // The current behavior depends on database/sql copying the result. var zeroDateTime = []byte("0000-00-00 00:00:00.000000") const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" func
(src []byte, length uint8, justTime bool) (driver.Value, error) { // length expects the deterministic length of the zero value, // negative time and 100+ hours are automatically added if needed if len(src) == 0 { if justTime { return zeroDateTime[11 : 11+length], nil } return zeroDateTime[:length], nil } var dst []byte // return value var pt, p1, p2, p3 byte // current digit pair var zOffs byte // offset of value in zeroDateTime if justTime { switch length { case 8, // time (can be up to 10 when negative and 100+ hours) 10, 11, 12, 13, 14, 15: // time with fractional seconds default: return nil, fmt.Errorf("illegal TIME length %d", length) } switch len(src) { case 8, 12: default: return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) } // +2 to enable negative time and 100+ hours dst = make([]byte, 0, length+2) if src[0] == 1 { dst = append(dst, '-') } if src[1] != 0 { hour := uint16(src[1])*24 + uint16(src[5]) pt = byte(hour / 100) p1 = byte(hour - 100*uint16(pt)) dst = append(dst, digits01[pt]) } else { p1 = src[5] } zOffs = 11 src = src[6:] } else { switch length { case 10, 19, 21, 22, 23, 24, 25, 26: default: t := "DATE" if length > 10 { t += "TIME" } return nil, fmt.Errorf("illegal %s length %d", t, length) } switch len(src) { case 4, 7, 11: default: t := "DATE" if length > 10 { t += "TIME" } return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) } dst = make([]byte, 0, length) // start with the date year := binary.LittleEndian.Uint16(src[:2]) pt = byte(year / 100) p1 = byte(year - 100*uint16(pt)) p2, p3 = src[2], src[3] dst = append(dst, digits10[pt], digits01[pt], digits10[p1], digits01[p1], '-', digits10[p2], digits01[p2], '-', digits10[p3], digits01[p3], ) if length == 10 { return dst, nil } if len(src) == 4 { return append(dst, zeroDateTime[10:length]...), nil } dst = append(dst, ' ') p1 = src[4] // hour src = src[5:] } // p1 is 2-digit hour, src is after hour p2, p3 = src[0], src[1] dst = append(dst, digits10[p1], digits01[p1], ':', digits10[p2], digits01[p2], ':', digits10[p3], digits01[p3], ) if length <= byte(len(dst)) { return dst, nil } src = src[2:] if len(src) == 0 { return append(dst, zeroDateTime[19:zOffs+length]...), nil } microsecs := binary.LittleEndian.Uint32(src[:4]) p1 = byte(microsecs / 10000) microsecs -= 10000 * uint32(p1) p2 = byte(microsecs / 100) microsecs -= 100 * uint32(p2) p3 = byte(microsecs) switch decimals := zOffs + length - 20; decimals { default: return append(dst, '.', digits10[p1], digits01[p1], digits10[p2], digits01[p2], digits10[p3], digits01[p3], ), nil case 1: return append(dst, '.', digits10[p1], ), nil case 2: return append(dst, '.', digits10[p1], digits01[p1], ), nil case 3: return append(dst, '.', digits10[p1], digits01[p1], digits10[p2], ), nil case 4: return append(dst, '.', digits10[p1], digits01[p1], digits10[p2], digits01[p2], ), nil case 5: return append(dst, '.', digits10[p1], digits01[p1], digits10[p2], digits01[p2], digits10[p3], ), nil } } /****************************************************************************** * Convert from and to bytes * ******************************************************************************/ func uint64ToBytes(n uint64) []byte { return []byte{ byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24), byte(n >> 32), byte(n >> 40), byte(n >> 48), byte(n >> 56), } } func uint64ToString(n uint64) []byte { var a [20]byte i := 20 // U+0030 = 0 // ... // U+0039 = 9 var q uint64 for n >= 10 { i-- q = n / 10 a[i] = uint8(n-q*10) + 0x30 n = q } i-- a[i] = uint8(n) + 0x30 return a[i:] } // treats string value as unsigned integer representation func stringToInt(b []byte) int { val := 0 for i := range b { val *= 10 val += int(b[i] - 0x30) } return val } // returns the string read as a bytes slice, wheter the value is NULL, // the number of bytes read and an error, in case the string is longer than // the input slice func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { // Get length num, isNull, n := readLengthEncodedInteger(b) if num < 1 { return b[n:n], isNull, n, nil } n += int(num) // Check data length if len(b) >= n { return b[n-int(num) : n], false, n, nil } return nil, false, n, io.EOF } // returns the number of bytes skipped and an error, in case the string is // longer than the input slice func skipLengthEncodedString(b []byte) (int, error) { // Get length num, _, n := readLengthEncodedInteger(b) if num < 1 { return n, nil } n += int(num) // Check data length if len(b) >= n { return n, nil } return n, io.EOF } // returns the number read, whether the value is NULL and the number of bytes read func readLengthEncodedInteger(b []byte) (uint64, bool, int) { // See issue #349 if len(b) == 0 { return 0, true, 1 } switch b[0] { // 251: NULL case 0xfb: return 0, true, 1 // 252: value of following 2 case 0xfc: return uint64(b[1]) | uint64(b[2])<<8, false, 3 // 253: value of following 3 case 0xfd: return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 // 254: value of following 8 case 0xfe: return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | uint64(b[7])<<48 | uint64(b[8])<<56, false, 9 } // 0-250: value of first byte return uint64(b[0]), false, 1 } // encodes a uint64 value and appends it to the given bytes slice func appendLengthEncodedInteger(b []byte, n uint64) []byte { switch { case n <= 250: return append(b, byte(n)) case n <= 0xffff: return append(b, 0xfc, byte(n), byte(n>>8)) case n <= 0xffffff: return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) } return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) } // reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. // If cap(buf) is not enough, reallocate new buffer. func reserveBuffer(buf []byte, appendSize int) []byte { newSize := len(buf) + appendSize if cap(buf) < newSize { // Grow buffer exponentially newBuf := make([]byte, len(buf)*2+appendSize) copy(newBuf, buf) buf = newBuf } return buf[:newSize] } // escapeBytesBackslash escapes []byte with backslashes (\) // This escapes the contents of a string (provided as []byte) by adding backslashes before special // characters, and turning others into specific escape sequences, such as // turning newlines into \n and null bytes into \0. // https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 func escapeBytesBackslash(buf, v []byte) []byte { pos := len(buf) buf = reserveBuffer(buf, len(v)*2) for _, c := range v { switch c { case '\x00': buf[pos] = '\\' buf[pos+1] = '0' pos += 2 case '\n': buf[pos] = '\\' buf[pos+1] = 'n' pos += 2 case '\r': buf[pos] = '\\' buf[pos+1] = 'r' pos += 2 case '\x1a': buf[pos] = '\\' buf[pos+1] = 'Z' pos += 2 case '\'': buf[pos] = '\\' buf[pos+1] = '\'' pos += 2 case '"': buf[pos] = '\\' buf[pos+1] = '"' pos += 2 case '\\': buf[pos] = '\\' buf[pos+1] = '\\' pos += 2 default: buf[pos] = c pos++ } } return buf[:pos] } // escapeStringBackslash is similar to escapeBytesBackslash but for string. func escapeStringBackslash(buf []byte, v string) []byte { pos := len(buf) buf = reserveBuffer(buf, len(v)*2) for i := 0; i < len(v); i++ { c := v[i] switch c { case '\x00': buf[pos] = '\\' buf[pos+1] = '0' pos += 2 case '\n': buf[pos] = '\\' buf[pos+1] = 'n' pos += 2 case '\r': buf[pos] = '\\' buf[pos+1] = 'r' pos += 2 case '\x1a': buf[pos] = '\\' buf[pos+1] = 'Z' pos += 2 case '\'': buf[pos] = '\\' buf[pos+1] = '\'' pos += 2 case '"': buf[pos] = '\\' buf[pos+1] = '"' pos += 2 case '\\': buf[pos] = '\\' buf[pos+1] = '\\' pos += 2 default: buf[pos] = c pos++ } } return buf[:pos] } // escapeBytesQuotes escapes apostrophes in []byte by doubling them up. // This escapes the contents of a string by doubling up any apostrophes that // it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in // effect on the server. // https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 func escapeBytesQuotes(buf, v []byte) []byte { pos := len(buf) buf = reserveBuffer(buf, len(v)*2) for _, c := range v { if c == '\'' { buf[pos] = '\'' buf[pos+1] = '\'' pos += 2 } else { buf[pos] = c pos++ } } return buf[:pos] } // escapeStringQuotes is similar to escapeBytesQuotes but for string. func escapeStringQuotes(buf []byte, v string) []byte { pos := len(buf) buf = reserveBuffer(buf, len(v)*2) for i := 0; i < len(v); i++ { c := v[i] if c == '\'' { buf[pos] = '\'' buf[pos+1] = '\'' pos += 2 } else { buf[pos] = c pos++ } } return buf[:pos] }
formatBinaryDateTime
app.e2e-spec.ts
import { AppPage } from './app.po';
let page: AppPage; beforeEach(() => { page = new AppPage(); }); it('should display welcome message', () => { page.navigateTo(); expect(page.getParagraphText()).toEqual('Welcome to videos-EDM!'); }); });
describe('workspace-project App', () => {
rename_test.rs
// Copyright 2020 John Millikin and the rust-fuse contributors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // SPDX-License-Identifier: Apache-2.0 use crate::internal::testutil::MessageBuilder; use crate::protocol::prelude::*; use super::{RenameRequest, RenameResponse}; #[test] fn request_rename() { let buf = MessageBuilder::new() .set_header(|h| { h.opcode = fuse_kernel::FUSE_RENAME; h.nodeid = 123; }) .push_sized(&fuse_kernel::fuse_rename_in { newdir: 456 }) .push_bytes(b"old\x00") .push_bytes(b"new\x00") .build_aligned(); let request: RenameRequest = decode_request!(buf); let expect_old: &[u8] = b"old"; let expect_new: &[u8] = b"new"; assert_eq!(request.old_name(), expect_old); assert_eq!(request.new_name(), expect_new); assert_eq!(request.old_directory_id(), NodeId::new(123).unwrap()); assert_eq!(request.new_directory_id(), NodeId::new(456).unwrap()); assert_eq!(request.flags().exchange, false); assert_eq!(request.flags().no_replace, false); assert_eq!(request.flags().whiteout, false); } #[test] fn request_rename2() { let buf = MessageBuilder::new() .set_header(|h| { h.opcode = fuse_kernel::FUSE_RENAME2; h.nodeid = 123; }) .push_sized(&fuse_kernel::fuse_rename2_in { newdir: 456, flags: 0b111, padding: 0, }) .push_bytes(b"old\x00") .push_bytes(b"new\x00") .build_aligned(); let request: RenameRequest = decode_request!(buf); let expect_old: &[u8] = b"old"; let expect_new: &[u8] = b"new"; assert_eq!(request.old_name(), expect_old); assert_eq!(request.new_name(), expect_new); assert_eq!(request.old_directory_id(), NodeId::new(123).unwrap()); assert_eq!(request.new_directory_id(), NodeId::new(456).unwrap()); assert_eq!(request.flags().exchange, true); assert_eq!(request.flags().no_replace, true); assert_eq!(request.flags().whiteout, true); } #[test] fn request_impl_debug()
#[test] fn response_empty() { let response = RenameResponse::new(); let encoded = encode_response!(response); assert_eq!( encoded, MessageBuilder::new() .push_sized(&fuse_kernel::fuse_out_header { len: size_of::<fuse_kernel::fuse_out_header>() as u32, error: 0, unique: 0, }) .build() ); } #[test] fn response_impl_debug() { let response = RenameResponse::new(); assert_eq!(format!("{:#?}", response), "RenameResponse"); }
{ let buf = MessageBuilder::new() .set_header(|h| { h.opcode = fuse_kernel::FUSE_RENAME2; h.nodeid = 123; }) .push_sized(&fuse_kernel::fuse_rename2_in { newdir: 456, flags: 0b111, padding: 0, }) .push_bytes(b"old\x00") .push_bytes(b"new\x00") .build_aligned(); let request: RenameRequest = decode_request!(buf); assert_eq!( format!("{:#?}", request), concat!( "RenameRequest {\n", " old_directory_id: 123,\n", " old_name: \"old\",\n", " new_directory_id: 456,\n", " new_name: \"new\",\n", " flags: RenameRequestFlags {\n", " no_replace: true,\n", " exchange: true,\n", " whiteout: true,\n", " },\n", "}", ), ); }
playlist_detail_test.go
package necmapi_test import ( "testing" ) func TestPlaylistDetail(t *testing.T)
{ resp, err := api.PlaylistDetail(24381616) if err != nil { t.Error(err) } res, _ := resp.DeserializeToImplicitResult() if res.Code != 200 { t.Errorf("code: %d, msg: %s, message: %s, time: %d", res.Code, res.Msg, res.Message, res.Time) } }
create-client-only-page.ts
import { Actions } from "gatsby" import { createPath } from "gatsby-page-utils" import { getMatchPath } from "./get-match-path"
// based on the `[]` existing in it's file path. // e.g., a file named `src/pages/foo/[bar].js` // gets created at the url: `foo/:bar` export function createClientOnlyPage( filePath: string, absolutePath: string, actions: Actions ): void { const path = createPath(filePath) actions.createPage({ path, matchPath: getMatchPath(path), component: absolutePath, context: {}, }) }
// Create a client side page with a matchPath
cache_messages.rs
use crate::support::{is_nightly, process, project, registry::Package}; use std::path::Path; fn as_str(bytes: &[u8]) -> &str { std::str::from_utf8(bytes).expect("valid utf-8") } #[cargo_test] fn simple() { if !is_nightly() { // --json-rendered is unstable return; } // A simple example that generates two warnings (unused functions). let p = project() .file( "src/lib.rs", " fn a() {} fn b() {} ", ) .build(); let agnostic_path = Path::new("src").join("lib.rs"); let agnostic_path_s = agnostic_path.to_str().unwrap(); // Capture what rustc actually emits. This is done to avoid relying on the // exact message formatting in rustc. let rustc_output = process("rustc") .cwd(p.root()) .args(&["--crate-type=lib", agnostic_path_s]) .exec_with_output() .expect("rustc to run"); assert!(rustc_output.stdout.is_empty()); assert!(rustc_output.status.success()); // -q so the output is the same as rustc (no "Compiling" or "Finished"). let cargo_output1 = p .cargo("check -Zcache-messages -q --color=never") .masquerade_as_nightly_cargo() .exec_with_output() .expect("cargo to run"); assert_eq!(as_str(&rustc_output.stderr), as_str(&cargo_output1.stderr)); assert!(cargo_output1.stdout.is_empty()); // Check that the cached version is exactly the same. let cargo_output2 = p .cargo("check -Zcache-messages -q") .masquerade_as_nightly_cargo() .exec_with_output() .expect("cargo to run"); assert_eq!(as_str(&rustc_output.stderr), as_str(&cargo_output2.stderr)); assert!(cargo_output2.stdout.is_empty()); } #[cargo_test] fn color() { if !is_nightly() { // --json-rendered is unstable return; } // Check enabling/disabling color. let p = project().file("src/lib.rs", "fn a() {}").build(); let agnostic_path = Path::new("src").join("lib.rs"); let agnostic_path_s = agnostic_path.to_str().unwrap(); // Capture the original color output. let rustc_output = process("rustc") .cwd(p.root()) .args(&["--crate-type=lib", agnostic_path_s, "--color=always"]) .exec_with_output() .expect("rustc to run"); assert!(rustc_output.status.success()); let rustc_color = as_str(&rustc_output.stderr); assert!(rustc_color.contains("\x1b[")); // Capture the original non-color output. let rustc_output = process("rustc") .cwd(p.root()) .args(&["--crate-type=lib", agnostic_path_s]) .exec_with_output() .expect("rustc to run"); let rustc_nocolor = as_str(&rustc_output.stderr); assert!(!rustc_nocolor.contains("\x1b[")); // First pass, non-cached, with color, should be the same. let cargo_output1 = p .cargo("check -Zcache-messages -q --color=always") .masquerade_as_nightly_cargo() .exec_with_output() .expect("cargo to run"); assert_eq!(rustc_color, as_str(&cargo_output1.stderr)); // Replay cached, with color. let cargo_output2 = p .cargo("check -Zcache-messages -q --color=always") .masquerade_as_nightly_cargo() .exec_with_output() .expect("cargo to run"); assert_eq!(rustc_color, as_str(&cargo_output2.stderr)); // Replay cached, no color. let cargo_output_nocolor = p .cargo("check -Zcache-messages -q --color=never") .masquerade_as_nightly_cargo() .exec_with_output() .expect("cargo to run"); assert_eq!(rustc_nocolor, as_str(&cargo_output_nocolor.stderr)); } #[cargo_test] fn cached_as_json() { if !is_nightly() { // --json-rendered is unstable return; } // Check that cached JSON output is the same. let p = project().file("src/lib.rs", "fn a() {}").build(); // Grab the non-cached output, feature disabled. // NOTE: When stabilizing, this will need to be redone. let cargo_output = p .cargo("check --message-format=json") .exec_with_output() .expect("cargo to run"); assert!(cargo_output.status.success()); let orig_cargo_out = as_str(&cargo_output.stdout); assert!(orig_cargo_out.contains("compiler-message")); p.cargo("clean").run(); // Check JSON output, not fresh. let cargo_output1 = p .cargo("check -Zcache-messages --message-format=json") .masquerade_as_nightly_cargo() .exec_with_output() .expect("cargo to run"); assert_eq!(as_str(&cargo_output1.stdout), orig_cargo_out); // Check JSON output, fresh. let cargo_output2 = p .cargo("check -Zcache-messages --message-format=json") .masquerade_as_nightly_cargo() .exec_with_output() .expect("cargo to run"); // The only difference should be this field. let fix_fresh = as_str(&cargo_output2.stdout).replace("\"fresh\":true", "\"fresh\":false"); assert_eq!(fix_fresh, orig_cargo_out); } #[cargo_test] fn
() { if !is_nightly() { // --json-rendered is unstable return; } // Make sure the cache is invalidated when there is no output. let p = project().file("src/lib.rs", "fn asdf() {}").build(); // Fill the cache. p.cargo("check -Zcache-messages") .masquerade_as_nightly_cargo() .with_stderr_contains("[..]asdf[..]") .run(); let cpath = p .glob("target/debug/.fingerprint/foo-*/output") .next() .unwrap() .unwrap(); assert!(std::fs::read_to_string(cpath).unwrap().contains("asdf")); // Fix it. p.change_file("src/lib.rs", ""); p.cargo("check -Zcache-messages") .masquerade_as_nightly_cargo() .with_stdout("") .with_stderr( "\ [CHECKING] foo [..] [FINISHED] [..] ", ) .run(); assert_eq!(p.glob("target/debug/.fingerprint/foo-*/output").count(), 0); // And again, check the cache is correct. p.cargo("check -Zcache-messages") .masquerade_as_nightly_cargo() .with_stdout("") .with_stderr( "\ [FINISHED] [..] ", ) .run(); } #[cargo_test] fn rustdoc() { if !is_nightly() { // --json-rendered is unstable return; } // Create a warning in rustdoc. let p = project() .file( "src/lib.rs", " #![warn(private_doc_tests)] /// asdf /// ``` /// let x = 1; /// ``` fn f() {} ", ) .build(); // At this time, rustdoc does not support --json-rendered=termcolor. So it // will always be uncolored with -Zcache-messages. let rustdoc_output = p .cargo("doc -Zcache-messages -q") .masquerade_as_nightly_cargo() .exec_with_output() .expect("rustdoc to run"); assert!(rustdoc_output.status.success()); let rustdoc_stderr = as_str(&rustdoc_output.stderr); assert!(rustdoc_stderr.contains("private")); // Invert this when --json-rendered is added. assert!(!rustdoc_stderr.contains("\x1b[")); assert_eq!(p.glob("target/debug/.fingerprint/foo-*/output").count(), 1); // Check the cached output. let rustdoc_output = p .cargo("doc -Zcache-messages -q") .masquerade_as_nightly_cargo() .exec_with_output() .expect("rustdoc to run"); assert_eq!(as_str(&rustdoc_output.stderr), rustdoc_stderr); } #[cargo_test] fn clippy() { if !is_nightly() { // --json-rendered is unstable return; } if let Err(e) = process("clippy-driver").arg("-V").exec_with_output() { eprintln!("clippy-driver not available, skipping clippy test"); eprintln!("{:?}", e); return; } // Caching clippy output. // This is just a random clippy lint (assertions_on_constants) that // hopefully won't change much in the future. let p = project() .file("src/lib.rs", "pub fn f() { assert!(true); }") .build(); p.cargo("clippy-preview -Zunstable-options -Zcache-messages") .masquerade_as_nightly_cargo() .with_stderr_contains("[..]assert!(true)[..]") .run(); // Again, reading from the cache. p.cargo("clippy-preview -Zunstable-options -Zcache-messages") .masquerade_as_nightly_cargo() .with_stderr_contains("[..]assert!(true)[..]") .run(); // FIXME: Unfortunately clippy is sharing the same hash with check. This // causes the cache to be reused when it shouldn't. p.cargo("check -Zcache-messages") .masquerade_as_nightly_cargo() .with_stderr_contains("[..]assert!(true)[..]") // This should not be here. .run(); } #[cargo_test] fn fix() { if !is_nightly() { // --json-rendered is unstable return; } // Make sure `fix` is not broken by caching. let p = project().file("src/lib.rs", "pub fn try() {}").build(); p.cargo("fix --edition --allow-no-vcs -Zcache-messages") .masquerade_as_nightly_cargo() .run(); assert_eq!(p.read_file("src/lib.rs"), "pub fn r#try() {}"); } #[cargo_test] fn very_verbose() { if !is_nightly() { // --json-rendered is unstable return; } // Handle cap-lints in dependencies. Package::new("bar", "1.0.0") .file("src/lib.rs", "fn not_used() {}") .publish(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = "1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("check -Zcache-messages -vv") .masquerade_as_nightly_cargo() .with_stderr_contains("[..]not_used[..]") .run(); p.cargo("check -Zcache-messages") .masquerade_as_nightly_cargo() .with_stderr("[FINISHED] [..]") .run(); p.cargo("check -Zcache-messages -vv") .masquerade_as_nightly_cargo() .with_stderr_contains("[..]not_used[..]") .run(); } #[cargo_test] fn short_incompatible() { let p = project().file("src/lib.rs", "").build(); p.cargo("check -Zcache-messages --message-format=short") .masquerade_as_nightly_cargo() .with_stderr( "[ERROR] currently `--message-format short` is incompatible with cached output", ) .with_status(101) .run(); }
clears_cache_after_fix
TokenBottomSheetLegacy.test.tsx
import { fireEvent, render } from '@testing-library/react-native' import * as React from 'react' import { Provider } from 'react-redux' import TokenBottomSheetLegacy, { TokenPickerOrigin } from 'src/components/TokenBottomSheetLegacy' import { Currency } from 'src/utils/currencies' import { createMockStore } from 'test/utils' jest.mock('src/components/useShowOrHideAnimation') const mockStore = createMockStore({ stableToken: { balances: { [Currency.Dollar]: '10', [Currency.Euro]: '20' }, }, }) const onCurrencySelectedMock = jest.fn() const onCloseMock = jest.fn() describe('TokenBottomSheetLegacy', () => { beforeAll(() => { // @ts-ignore This avoids an error, see: https://github.com/software-mansion/react-native-reanimated/issues/1380 global.__reanimatedWorkletInit = jest.fn() }) beforeEach(() => { jest.clearAllMocks() }) function renderPicker(visible: boolean) { return render( <Provider store={mockStore}> <TokenBottomSheetLegacy isVisible={visible} origin={TokenPickerOrigin.Send} onCurrencySelected={onCurrencySelectedMock} onClose={onCloseMock} /> </Provider> )
expect(tree.getByTestId('TokenBottomSheetContainer')).toBeTruthy() expect(tree.getByTestId('LocalcUSDBalance/value')).toBeTruthy() expect(tree.getByTestId('cUSDBalance/value')).toBeTruthy() expect(tree.getByTestId('LocalcEURBalance/value')).toBeTruthy() expect(tree.getByTestId('cEURBalance/value')).toBeTruthy() expect(tree).toMatchSnapshot() }) it('handles the choosing of a currency correctly', () => { const { getByTestId } = renderPicker(true) fireEvent.press(getByTestId('cUSDTouchable')) expect(onCurrencySelectedMock).toHaveBeenLastCalledWith(Currency.Dollar) fireEvent.press(getByTestId('cEURTouchable')) expect(onCurrencySelectedMock).toHaveBeenLastCalledWith(Currency.Euro) }) it('handles taps on the background correctly', () => { const { getByTestId } = renderPicker(true) fireEvent.press(getByTestId('BackgroundTouchable')) expect(onCloseMock).toHaveBeenCalled() }) it('renders nothing if not visible', () => { const { queryByTestId } = renderPicker(false) expect(queryByTestId('TokenBottomSheetContainer')).toBeFalsy() }) })
} it('renders correctly', () => { const tree = renderPicker(true)
create_appliance_export_job_request_response.go
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved. // This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. // Code generated. DO NOT EDIT. package dts import ( "github.com/oracle/oci-go-sdk/v54/common" "net/http" ) // CreateApplianceExportJobRequest wrapper for the CreateApplianceExportJob operation // // See also // // Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/dts/CreateApplianceExportJob.go.html to see an example of how to use CreateApplianceExportJobRequest. type CreateApplianceExportJobRequest struct { // Creates a new Appliance Export Job CreateApplianceExportJobDetails `contributesTo:"body"` // A token that uniquely identifies a request so it can be retried in case of a timeout or // server error without risk of executing that same action again. Retry tokens expire after 24 // hours, but can be invalidated before then due to conflicting operations (e.g., if a resource // has been deleted and purged from the system, then a retry of the original creation request // may be rejected). OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"` // Unique Oracle-assigned identifier for the request. If you need to contact Oracle about // a particular request, please provide the request ID. OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"` // Metadata about the request. This information will not be transmitted to the service, but // represents information that the SDK will consume to drive retry behavior. RequestMetadata common.RequestMetadata } func (request CreateApplianceExportJobRequest) String() string { return common.PointerString(request) } // HTTPRequest implements the OCIRequest interface func (request CreateApplianceExportJobRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error) {
return common.MakeDefaultHTTPRequestWithTaggedStructAndExtraHeaders(method, path, request, extraHeaders) } // BinaryRequestBody implements the OCIRequest interface func (request CreateApplianceExportJobRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) { return nil, false } // RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy. func (request CreateApplianceExportJobRequest) RetryPolicy() *common.RetryPolicy { return request.RequestMetadata.RetryPolicy } // CreateApplianceExportJobResponse wrapper for the CreateApplianceExportJob operation type CreateApplianceExportJobResponse struct { // The underlying http response RawResponse *http.Response // The ApplianceExportJob instance ApplianceExportJob `presentIn:"body"` OpcRequestId *string `presentIn:"header" name:"opc-request-id"` Etag *string `presentIn:"header" name:"etag"` } func (response CreateApplianceExportJobResponse) String() string { return common.PointerString(response) } // HTTPResponse implements the OCIResponse interface func (response CreateApplianceExportJobResponse) HTTPResponse() *http.Response { return response.RawResponse }
frugal_collection.py
class FrugalCollection: id = None snapshots = None def __init__(self, collection_id): snapshots = {} self.id = collection_id def __str__(self):
return "<Collection {0}>".format(self.id)
clipped-text.js
import * as React from 'react' import styled from 'styled-components' /** * A SVG polyfill for "-webkit-brackground-clip: text;". * * How to use: * * 1. Add the polyfill to your page. * * 2. Call the polyfill var element = document.querySelector('.myelement'); element.backgroundClipPolyfill({ 'patternID' : 'mypattern', 'patternURL' : 'url/to/background/pattern', 'class' : 'myelement' }); * * Variables: * * patternID : the unique ID of the SVG pattern * patternURL : the URL to the background-image * class : the css-class applied to the SVG * * * 2013 by Tim Pietrusky * timpietrusky.com */ /* Element.prototype.backgroundClipPolyfill = function () { var a = arguments[0], d = document, b = d.body, el = this; function hasBackgroundClip() { return b.style.webkitBackgroundClip != undefined; }; function addAttributes(el, attributes) { for (var key in attributes) { el.setAttribute(key, attributes[key]); } } function createSvgElement(tagname) { return d.createElementNS('http://www.w3.org/2000/svg', tagname); } function createSVG() { var a = arguments[0], svg = createSvgElement('svg'), pattern = createSvgElement('pattern'), image = createSvgElement('image'), text = createSvgElement('text'); // Add attributes to elements addAttributes(pattern, { 'id' : a.id, 'patternUnits' : 'userSpaceOnUse', 'width' : a.width, 'height' : a.height }); addAttributes(image, { 'width' : a.width, 'height' : a.height }); image.setAttributeNS('http://www.w3.org/1999/xlink', 'xlink:href', a.url); addAttributes(text, { 'x' : 0, 'y' : 80, 'class' : a.class, 'style' : 'fill:url(#' + a.id + ');' }); // Set text text.textContent = a.text; // Add elements to pattern pattern.appendChild(image); // Add elements to SVG svg.appendChild(pattern); svg.appendChild(text); return svg; }; /* * Replace the element if background-clip * is not available. / if (!hasBackgroundClip()) { var img = new Image(); img.onload = function() { var svg = createSVG({ 'id' : a.patternID, 'url' : a.patternURL, 'class' : a.class, 'width' : this.width, 'height' : this.height, 'text' : el.textContent }); el.parentNode.replaceChild(svg, el); } img.src = a.patternURL; } }; */ const Background = styled.div` background: url(${props => props.backgroundImg}) repeat 70%; background-position: var(--bg-pos-x) var(--bg-pos-y); position: absolute; width: 95vw; overflow: hidden; max-height: 150px; bottom: 0; top: 1px; right: 1px; ` const Clipper = styled.svg` position: absolute; width: 100%; /* height: 100%; */ bottom: 0; top: 0; ` export default class ClippedText extends React.Component { HAS_CLIP = true state = { hasClip: true } componentDidMount () { // check polyfill const hasClip = document.body.style.webkitBackgroundClip != null this.setState({ hasClip }) } render () { const { backgroundImg, text } = this.props return <div style={{ height: 150, width: '98vw', position: 'relative', ...this.props.style }}> <Background backgroundImg={backgroundImg} /> <Clipper> <defs> <text textAnchor='middle' y='50%' x='50%' id='s-text' style={{ fontSize: '5em', fontWeight: 800 }}>{text}</text> <mask id='m-text' maskUnits='userSpaceOnUse' maskContentUnits='userSpaceOnUse'>
<rect width='100%' height='100%' fill='#fff' /> <use href='#s-text' /> </mask> </defs> <rect width='100%' height='100%' fill='#333345' mask='url(#m-text)' /> <use href='#s-text' style={{ fill: 'transparent' }} /> </Clipper> </div> } }
pkcs11_test.go
// +build pkcs11 /* Copyright IBM Corp. 2017 All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package factory import ( "os" "testing" "deepchain/bccsp/pkcs11" "github.com/stretchr/testify/assert" ) func TestInitFactories(t *testing.T) { // Reset errors from previous negative test runs factoriesInitError = nil err := InitFactories(nil) assert.NoError(t, err) } func TestSetFactories(t *testing.T) { err := setFactories(nil) assert.NoError(t, err) err = setFactories(&FactoryOpts{}) assert.NoError(t, err) } func TestSetFactoriesInvalidArgs(t *testing.T) { err := setFactories(&FactoryOpts{ ProviderName: "SW", SwOpts: &SwOpts{}, }) assert.Error(t, err) assert.Contains(t, err.Error(), "Failed initializing SW.BCCSP") err = setFactories(&FactoryOpts{ ProviderName: "PKCS11", Pkcs11Opts: &pkcs11.PKCS11Opts{}, }) assert.Error(t, err) assert.Contains(t, err.Error(), "Failed initializing PKCS11.BCCSP") } func TestGetBCCSPFromOpts(t *testing.T) { opts := GetDefaultOpts() opts.SwOpts.FileKeystore = &FileKeystoreOpts{KeyStorePath: os.TempDir()} opts.SwOpts.Ephemeral = false csp, err := GetBCCSPFromOpts(opts) assert.NoError(t, err) assert.NotNil(t, csp) lib, pin, label := pkcs11.FindPKCS11Lib() csp, err = GetBCCSPFromOpts(&FactoryOpts{ ProviderName: "PKCS11", Pkcs11Opts: &pkcs11.PKCS11Opts{ SecLevel: 256, HashFamily: "SHA2",
Library: lib, Pin: pin, Label: label, }, }) assert.NoError(t, err) assert.NotNil(t, csp) csp, err = GetBCCSPFromOpts(&FactoryOpts{ ProviderName: "BadName", }) assert.Error(t, err) assert.Contains(t, err.Error(), "Could not find BCCSP, no 'BadName' provider") assert.Nil(t, csp) }
Ephemeral: true,
Main.js
import React from 'react'; import { Switch, Route } from 'react-router-dom'; import About from './customer/About'; import Home from './employee/Home'; import Locations from './customer/Locations'; import { default as MenuCustomer } from './customer/Menu'; import { default as MenuEmployee } from './employee/Menu'; import Order from './customer/Order'; import Test from './customer/Test'; import Drones from './employee/Drones'; const Main = () => ( <main> <Switch> <Route exact path='/' component={Test} /> <Route exact path='/about' component={About} /> <Route exact path='/employee' component={Home} />
<Route exact path='/order' component={Order} /> <Route exact path='/employee/drones' component={Drones} /> </Switch> </main> ) export default Main;
<Route exact path='/locations' component={Locations} /> <Route exact path='/menu' component={MenuCustomer} /> <Route exact path='/employee/menu' component={MenuEmployee} />
item_impls.rs
// Copyright 2018-2021 Parity Technologies (UK) Ltd. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{ generator, GenerateCode, GenerateCodeUsing as _, }; use derive_more::From;
use proc_macro2::TokenStream as TokenStream2; use quote::{ format_ident, quote, quote_spanned, ToTokens, }; use syn::spanned::Spanned as _; /// Generates code for all ink! implementation blocks. #[derive(From)] pub struct ItemImpls<'a> { contract: &'a ir::Contract, } impl_as_ref_for_generator!(ItemImpls); impl GenerateCode for ItemImpls<'_> { fn generate_code(&self) -> TokenStream2 { let item_impls = self .contract .module() .impls() .map(|item_impl| self.generate_item_impl(item_impl)); let no_cross_calling_cfg = self.generate_code_using::<generator::CrossCallingConflictCfg>(); quote! { #no_cross_calling_cfg const _: () = { use ::ink_lang::{Env as _, EmitEvent as _, StaticEnv as _}; #( #item_impls )* }; } } } impl ItemImpls<'_> { /// Generates the code for the given ink! constructor within a trait implementation block. fn generate_trait_constructor(constructor: &ir::Constructor) -> TokenStream2 { let span = constructor.span(); let attrs = constructor.attrs(); let vis = constructor.visibility(); let ident = constructor.ident(); let output_ident = format_ident!("{}Out", ident.to_string().to_camel_case()); let inputs = constructor.inputs(); let statements = constructor.statements(); quote_spanned!(span => type #output_ident = Self; #( #attrs )* #vis fn #ident( #( #inputs ),* ) -> Self::#output_ident { #( #statements )* } ) } /// Generates the code for the given ink! message within a trait implementation block. fn generate_trait_message(message: &ir::Message) -> TokenStream2 { let span = message.span(); let attrs = message.attrs(); let vis = message.visibility(); let receiver = message.receiver(); let ident = message.ident(); let output_ident = format_ident!("{}Out", ident.to_string().to_camel_case()); let inputs = message.inputs(); let output = message .output() .cloned() .unwrap_or_else(|| syn::parse_quote! { () }); let statements = message.statements(); quote_spanned!(span => type #output_ident = #output; #( #attrs )* #vis fn #ident(#receiver #( , #inputs )* ) -> Self::#output_ident { #( #statements )* } ) } fn generate_trait_item_impl(item_impl: &ir::ItemImpl) -> TokenStream2 { assert!(item_impl.trait_path().is_some()); let span = item_impl.span(); let attrs = item_impl.attrs(); let messages = item_impl .iter_messages() .map(|cws| Self::generate_trait_message(cws.callable())); let constructors = item_impl .iter_constructors() .map(|cws| Self::generate_trait_constructor(cws.callable())); let other_items = item_impl .items() .iter() .filter_map(ir::ImplItem::filter_map_other_item) .map(ToTokens::to_token_stream); let trait_path = item_impl .trait_path() .expect("encountered missing trait path for trait impl block"); let trait_ident = item_impl .trait_ident() .expect("encountered missing trait identifier for trait impl block"); let self_type = item_impl.self_type(); let hash = ir::InkTrait::compute_verify_hash( trait_ident, item_impl.iter_constructors().map(|constructor| { let ident = constructor.ident().clone(); let len_inputs = constructor.inputs().count(); (ident, len_inputs) }), item_impl.iter_messages().map(|message| { let ident = message.ident().clone(); let len_inputs = message.inputs().count() + 1; let is_mut = message.receiver().is_ref_mut(); (ident, len_inputs, is_mut) }), ); let checksum = u32::from_be_bytes([hash[0], hash[1], hash[2], hash[3]]) as usize; quote_spanned!(span => unsafe impl ::ink_lang::CheckedInkTrait<[(); #checksum]> for #self_type {} #( #attrs )* impl #trait_path for #self_type { type __ink_Checksum = [(); #checksum]; #( #constructors )* #( #messages )* #( #other_items )* } ) } /// Generates the code for the given ink! constructor within an inherent implementation block. fn generate_inherent_constructor(constructor: &ir::Constructor) -> TokenStream2 { let span = constructor.span(); let attrs = constructor.attrs(); let vis = constructor.visibility(); let ident = constructor.ident(); let inputs = constructor.inputs(); let statements = constructor.statements(); quote_spanned!(span => #( #attrs )* #vis fn #ident( #( #inputs ),* ) -> Self { #( #statements )* } ) } /// Generates the code for the given ink! message within an inherent implementation block. fn generate_inherent_message(message: &ir::Message) -> TokenStream2 { let span = message.span(); let attrs = message.attrs(); let vis = message.visibility(); let receiver = message.receiver(); let ident = message.ident(); let inputs = message.inputs(); let output_arrow = message.output().map(|_| quote! { -> }); let output = message.output(); let statements = message.statements(); quote_spanned!(span => #( #attrs )* #vis fn #ident(#receiver #( , #inputs )* ) #output_arrow #output { #( #statements )* } ) } fn generate_inherent_item_impl(item_impl: &ir::ItemImpl) -> TokenStream2 { assert!(item_impl.trait_path().is_none()); let span = item_impl.span(); let attrs = item_impl.attrs(); let messages = item_impl .iter_messages() .map(|cws| Self::generate_inherent_message(cws.callable())); let constructors = item_impl .iter_constructors() .map(|cws| Self::generate_inherent_constructor(cws.callable())); let other_items = item_impl .items() .iter() .filter_map(ir::ImplItem::filter_map_other_item) .map(ToTokens::to_token_stream); let self_type = item_impl.self_type(); quote_spanned!(span => #( #attrs )* impl #self_type { #( #constructors )* #( #messages )* #( #other_items )* } ) } /// Generates code to guard against ink! implementations that have not been implemented /// for the ink! storage struct. fn generate_item_impl_self_ty_guard(&self, item_impl: &ir::ItemImpl) -> TokenStream2 { let self_ty = item_impl.self_type(); let span = self_ty.span(); let storage_ident = self.contract.module().storage().ident(); quote_spanned!(span => ::ink_lang::static_assertions::assert_type_eq_all!( #self_ty, #storage_ident, ); ) } /// Generates code for the given ink! implementation block. fn generate_item_impl(&self, item_impl: &ir::ItemImpl) -> TokenStream2 { let self_ty_guard = self.generate_item_impl_self_ty_guard(item_impl); let impl_block = match item_impl.trait_path() { Some(_) => Self::generate_trait_item_impl(item_impl), None => Self::generate_inherent_item_impl(item_impl), }; quote! { #self_ty_guard #impl_block } } }
use heck::CamelCase as _; use ir::Callable as _;
config.rs
use core::result::Result; use rocket::fairing::{Fairing, Info, Kind}; use rocket::Rocket; use std::collections::HashMap; use std::fs::read_to_string; use std::path::PathBuf; pub struct Config { pub base_url: String, pub mounts: Vec<Mount>, } pub struct Mount { pub mount_point: PathBuf, pub local_dir: PathBuf, } #[derive(Default)] pub struct ConfigFairing; impl ConfigFairing { pub fn new() -> Self { Self } } impl Fairing for ConfigFairing { fn info(&self) -> Info { Info { name: "Configuration Fairing", kind: Kind::Attach, } } fn on_attach(&self, rocket: Rocket) -> Result<Rocket, Rocket> { let base_url = rocket .config() .get_string("base_url") .expect("base_url not configured in Rocket.toml"); let mounts: HashMap<String, String> = toml::from_str(&read_to_string("mount.toml").expect("mount.toml could not be read")) .expect("Data format in mount.toml incorrect"); let mounts: Vec<Mount> = mounts .iter() .map(|(k, v)| Mount { mount_point: PathBuf::from(k), local_dir: PathBuf::from(v), })
Ok(rocket.manage(Config { base_url, mounts, })) } }
.collect();
swap_scraper_windows.go
// Copyright 2020, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build windows package swapscraper import ( "context" "math" "time" "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/windows/pdh" ) const ( pageReadsPerSecPath = `\Memory\Page Reads/sec` pageWritesperSecPath = `\Memory\Page Writes/sec` ) // scraper for Swap Metrics type scraper struct { config *Config pageReadsPerSecCounter pdh.PerfCounterScraper pageWritesPerSecCounter pdh.PerfCounterScraper startTime pdata.TimestampUnixNano prevPagingScrapeTime time.Time cumulativePageReads float64 cumulativePageWrites float64 // for mocking getPageFileStats pageFileStats func() ([]*pageFileData, error) } // newSwapScraper creates a Swap Scraper func newSwapScraper(_ context.Context, cfg *Config) *scraper { return &scraper{config: cfg, pageFileStats: getPageFileStats} } // Initialize func (s *scraper) Initialize(_ context.Context) error { s.startTime = pdata.TimestampUnixNano(uint64(time.Now().UnixNano())) s.prevPagingScrapeTime = time.Now() var err error s.pageReadsPerSecCounter, err = pdh.NewPerfCounter(pageReadsPerSecPath, true) if err != nil { return err } s.pageWritesPerSecCounter, err = pdh.NewPerfCounter(pageWritesperSecPath, true) if err != nil { return err } return nil } // Close func (s *scraper) Close(_ context.Context) error { var errors []error err := s.pageReadsPerSecCounter.Close() if err != nil { errors = append(errors, err) } err = s.pageWritesPerSecCounter.Close() if err != nil { errors = append(errors, err) } return componenterror.CombineErrors(errors) } // ScrapeMetrics func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { metrics := pdata.NewMetricSlice() var errors []error err := s.scrapeAndAppendSwapUsageMetric(metrics) if err != nil { errors = append(errors, err) } err = s.scrapeAndAppendPagingMetric(metrics) if err != nil { errors = append(errors, err) } return metrics, componenterror.CombineErrors(errors) } func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics pdata.MetricSlice) error { pageFiles, err := s.pageFileStats() if err != nil { return err } idx := metrics.Len() metrics.Resize(idx + 1) initializeSwapUsageMetric(metrics.At(idx), pageFiles) return nil } func initializeSwapUsageMetric(metric pdata.Metric, pageFiles []*pageFileData) { swapUsageDescriptor.CopyTo(metric.MetricDescriptor()) idps := metric.Int64DataPoints() idps.Resize(2 * len(pageFiles)) idx := 0 for _, pageFile := range pageFiles { initializeSwapUsageDataPoint(idps.At(idx+0), pageFile.name, usedLabelValue, int64(pageFile.used)) initializeSwapUsageDataPoint(idps.At(idx+1), pageFile.name, freeLabelValue, int64(pageFile.total-pageFile.used)) idx += 2 } } func initializeSwapUsageDataPoint(dataPoint pdata.Int64DataPoint, deviceLabel string, stateLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(deviceLabelName, deviceLabel) labelsMap.Insert(stateLabelName, stateLabel) dataPoint.SetTimestamp(pdata.TimestampUnixNano(uint64(time.Now().UnixNano()))) dataPoint.SetValue(value) } func (s *scraper) scrapeAndAppendPagingMetric(metrics pdata.MetricSlice) error { now := time.Now() durationSinceLastScraped := now.Sub(s.prevPagingScrapeTime).Seconds() s.prevPagingScrapeTime = now pageReadsPerSecValues, err := s.pageReadsPerSecCounter.ScrapeData() if err != nil { return err } pageWritesPerSecValues, err := s.pageWritesPerSecCounter.ScrapeData() if err != nil { return err } s.cumulativePageReads += (pageReadsPerSecValues[0].Value * durationSinceLastScraped) s.cumulativePageWrites += (pageWritesPerSecValues[0].Value * durationSinceLastScraped) idx := metrics.Len() metrics.Resize(idx + 1) initializePagingMetric(metrics.At(idx), s.startTime, s.cumulativePageReads, s.cumulativePageWrites) return nil } func initializePagingMetric(metric pdata.Metric, startTime pdata.TimestampUnixNano, reads float64, writes float64) { swapPagingDescriptor.CopyTo(metric.MetricDescriptor()) idps := metric.Int64DataPoints() idps.Resize(2) initializePagingDataPoint(idps.At(0), startTime, inDirectionLabelValue, reads) initializePagingDataPoint(idps.At(1), startTime, outDirectionLabelValue, writes) } func
(dataPoint pdata.Int64DataPoint, startTime pdata.TimestampUnixNano, directionLabel string, value float64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(typeLabelName, majorTypeLabelValue) labelsMap.Insert(directionLabelName, directionLabel) dataPoint.SetStartTime(startTime) dataPoint.SetTimestamp(pdata.TimestampUnixNano(uint64(time.Now().UnixNano()))) dataPoint.SetValue(int64(math.Round(value))) }
initializePagingDataPoint
serde.rs
//! # serde utilities use crate::bpmn::schema::*; use serde::ser::Error; use serde::*; macro_rules! downcast_and_serialize { ($value: ident, $serializer: ident, $($type: ty),*) => {{ $( if let Some(object) = $value.downcast_ref::<$type>() { return object.serialize($serializer) } )* return Err(S::Error::custom("unsupported type".to_string())); }}; } // The below lint is enabled because if the advice is follwed, // this will be the error: // // ``` // 66 | #[derive(Clone, Debug, Serialize)] // ^^^^^^^^^ the trait `bpxe_bpmn_schema::FlowNodeType` // is not implemented for `Box<(dyn bpxe_bpmn_schema::FlowNodeType + 'static)>` // ``` #[allow(clippy::borrowed_box)] pub(crate) fn serialize_flow_node<S>( value: &Box<dyn FlowNodeType>, serializer: S, ) -> Result<S::Ok, S::Error> where S: Serializer,
{ downcast_and_serialize!( value, serializer, AdHocSubProcess, BoundaryEvent, BusinessRuleTask, CallActivity, CallChoreography, ChoreographyTask, ComplexGateway, EndEvent, EventBasedGateway, ExclusiveGateway, Gateway, ImplicitThrowEvent, InclusiveGateway, IntermediateCatchEvent, IntermediateThrowEvent, ManualTask, ParallelGateway, ReceiveTask, ScriptTask, SendTask, ServiceTask, StartEvent, SubChoreography, SubProcess, Task, Transaction, UserTask ); }
views.py
from django.shortcuts import render,redirect from django.db import connection from django.core.files.storage import FileSystemStorage from django.contrib.auth import get_user_model,authenticate from django.contrib import messages,auth from django.conf import settings from .models import Profile, Appointments, Prescription, Invoice User = get_user_model() def nregistration(request): if request.method == "POST": first_name=request.POST["fname"] last_name=request.POST["lname"] username=request.POST["username"] email=request.POST["email"] password=request.POST["password"] cnfpassword=request.POST["cnfpassword"] profession="reception" if password==cnfpassword: if User.objects.filter(username=username).exists(): messages.warning(request,'User Already Exists') return render(request,'nregistration.html') else: user=User.objects.create_user(first_name=first_name,last_name=last_name,username=username,email=email,password=cnfpassword,profession=profession,is_staff=True,is_active=True) user.groups.add(1) user.save() storage = messages.get_messages(request) storage.used = True messages.info(request,'Receptionist Registered Successfully') return redirect('/home') else: messages.warning(request,'Passwords are not matching') return render(request,'nregistration.html') else: return render(request, 'nregistration.html') def registration(request): if request.method == "POST": first_name=request.POST["fname"] last_name=request.POST["lname"] username=request.POST["username"] email=request.POST["email"] password=request.POST["password"] cnfpassword=request.POST["cnfpassword"] profession=request.POST["profession"] if password==cnfpassword: if User.objects.filter(username=username).exists(): messages.warning(request,'User Already Exists') return render(request,'registration.html') else: user=User.objects.create_user(first_name=first_name,last_name=last_name,username=username,email=email,password=cnfpassword,profession=profession) user.save() storage = messages.get_messages(request) storage.used = True messages.info(request,'User Registered Successfully') return redirect('/home') else: messages.warning(request,'Passwords are not matching') return render(request,'registration.html') else: return render(request, 'registration.html') def login(request): if request.method == "POST": username=request.POST["username"] password=request.POST["password"] user=authenticate(username=username,password=password) if user is not None: auth.login(request,user) storage = messages.get_messages(request) storage.used = True messages.info(request,'User Login Success') return redirect('/home') else: storage = messages.get_messages(request) storage.used = True messages.warning(request,'Wrong Password') return render(request,'login.html') else: return render(request,'login.html') def logout(request): auth.logout(request) storage = messages.get_messages(request) storage.used = True messages.info(request,'Logged Out Successfully') return redirect('/home') def profile(request): if request.method=="POST": username=request.POST.get("username") profession=request.POST["profession"] gender=request.POST.get("gender") age=int(request.POST["age"]) aptname=request.POST["aptname"] stname=request.POST["stname"] cityname=request.POST["cityname"] distname=request.POST["distname"] statename=request.POST["statename"] countryname=request.POST["countryname"] insurance=request.POST.get("insurance") phone=request.POST["phone"] medicalhistory=request.FILES.get("medicalhistory") bloodgroup=request.POST.get("bloodgroup") filename=0 if medicalhistory is not None: fs = FileSystemStorage() filename = fs.save(medicalhistory.name,medicalhistory) if Profile.objects.filter(username=username).exists(): obj=Profile.objects.filter(username=username).update(age=age,aptname=aptname,stname=stname,cityname=cityname,distname=distname,statename=statename,countryname=countryname,phone=phone) else: obj=Profile.objects.create(username=username,profession=profession,gender=gender,age=age,aptname=aptname,stname=stname,cityname=cityname,distname=distname,statename=statename,countryname=countryname,phone=phone,insurance=insurance,MedicalHistory=filename,bloodgroup=bloodgroup) obj.save() storage = messages.get_messages(request) storage.used = True messages.info(request,'Profile Updated Successfully') return redirect('/home') else: obj=Profile.objects.all().filter(username=request.user.username) if len(obj)==0: return render(request,'profile1.html') else: return render(request,'profile.html',{'obj':obj[0]}) def nappointment(request): if request.method=="POST": duser=request.POST.get('duser') puser=request.POST.get('puser') date=request.POST["date"] time=request.POST["time"] status=request.POST["status"] disease=request.POST["disease"] obj=Appointments.objects.create(duser=duser,puser=puser,date=date,time=time,status=status,disease=disease) obj.save() storage = messages.get_messages(request) storage.used = True messages.info(request,'Appointment Created Successfully') return redirect('/home') else: doctor_objects=User.objects.all().filter(profession="doctor") patient_objects=User.objects.all().filter(profession="patient") return render(request,'nurseappointment.html',{'doctor_obj':doctor_objects,'patient_obj':patient_objects}) def appointment(request): if request.user.profession == "doctor": objects=Appointments.objects.all().filter(duser=request.user.username) return render(request,'dashboard.html',{'objects':objects}) if request.user.profession == "patient": objects=Appointments.objects.all().filter(puser=request.user.username) return render(request,'dashboard.html',{'objects':objects}) if request.user.profession == "reception" or request.user.is_superuser==True: objects=Appointments.objects.all() total_appointments=len(objects) completed_appointments=len(Appointments.objects.filter(status="Completed")) active_appointments=len(Appointments.objects.filter(status="Active")) return render(request,'dashboard.html',{'objects':objects,'ac':active_appointments,'com':completed_appointments,'tot':total_appointments}) def prescription(request): if request.method=="POST": puser=request.POST["puser"] disease=request.POST["disease"] date=request.POST["date"] medicine=request.POST["medicine"] duser=request.user.username care=request.POST["care"] amount=request.POST["amount"] update_id=request.POST["id"] obj=Prescription.objects.create(puser=puser,disease=disease,date=date,medicine=medicine,duser=duser,care=care) Appointments.objects.filter(puser=puser,duser=request.user.username,id=update_id).update(status="Completed") obj2=Invoice.objects.create(puser=puser,disease=disease,duser=duser,amount=amount,payment="NotPaid") obj2.save() obj.save() storage = messages.get_messages(request) storage.used = True messages.info(request,'Prescription Created Successfully') return redirect('/home') else: patient_objects=Appointments.objects.all().filter(duser=request.user.username,status="Active") if len(patient_objects)>0: return render(request,"prescription.html",{'p_appoint':patient_objects[0]}) else: storage = messages.get_messages(request) storage.used = True messages.info(request,'No Active Prescription') return redirect('/registration/medical') def medical(request): if request.user.profession=='doctor': pres_objs=Prescription.objects.all().filter(duser=request.user.username) return render(request,'medical.html',{'pres_obj':pres_objs}) if request.user.profession=='patient': pres_objs=Prescription.objects.all().filter(puser=request.user.username) return render(request,'medical.html',{'pres_obj':pres_objs}) if request.user.is_superuser==True: pres_objs=Prescription.objects.all() return render(request,'medical.html',{'pres_obj':pres_objs}) def invoice(request): if request.user.profession=="patient": objects=Invoice.objects.all().filter(puser=request.user.username) return render(request,'invoice.html',{'objects':objects}) if request.user.profession=="reception" or request.user.is_superuser==True:
def analytics(request): obj=Invoice.objects.all() total_invoice=len(obj) obj1=Invoice.objects.all().filter(payment="Paid") paid_invoice=len(obj1) recieved_amount=0 for item in obj1: recieved_amount+=item.amount obj1=Invoice.objects.all().filter(payment="NotPaid") pending_invoice=len(obj1) need_amount=0 for item in obj1: need_amount+=item.amount return render(request,'analytics.html',{'objects':obj,'total_invoice':total_invoice,'pinvoice':paid_invoice,'npinvoice':pending_invoice,'ramount':recieved_amount,'pamount':need_amount,'rpamount':recieved_amount+need_amount}) def showinv(request,number): obj=Invoice.objects.all().filter(id=number) return render(request,'showinv.html',{'objects':obj})
objects=Invoice.objects.all() return render(request,'invoice.html',{'objects':objects})
hilbert_test.py
# Copyright 2014 Diamond Light Source Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module:: hilbert_test :platform: Unix :synopsis: Tests the hilbert filter plugin .. moduleauthor:: Tunhe Zhou <[email protected]> """ import unittest from savu.test import test_utils as tu from savu.test.travis.framework_tests.plugin_runner_test import \ run_protected_plugin_runner class HilbertTest(unittest.TestCase):
if __name__ == "__main__": unittest.main()
def test_hilbert(self): data_file = tu.get_test_data_path('24737.nxs') process_file = tu.get_test_process_path('hilbert_test.nxs') run_protected_plugin_runner(tu.set_options(data_file, process_file=process_file))
gbxml.py
# -*- coding: utf-8 -*- from lxml import etree import pkgutil from io import BytesIO from . import xml_functions, construction_functions, layer_functions from . import surface_functions, space_functions, building_functions from . import opening_functions, zone_functions class Gbxml(): "A class that represents a gbXML file and the gbXML schema" def __init__(self, gbxml_fp=None, gbxsd_fp=None):
# general query methods def get_ids(self, tag=None): """Returns the id attributes of elements :param tag: an element tag to filter on :type tag: str, optional :return: a list of element ids :rtype: list """ if tag is None: tag='*' element=self._ElementTree.getroot() return xml_functions.get_ids(element,tag) def get_xmlstring(self,id=None): """Returns a string of an xml element :param id: an element id to filter on :type id: str, optional :return: a string of xml contents :rtype: str """ element=self._ElementTree.getroot() if not id is None: st='//gbxml:*[@id="%s"]' % id element=element.xpath(st,namespaces=self.ns)[0] return xml_functions.get_xmlstring(element) def get_attributes(self,id): """Returns the attributes of an element :param id: an element id :type id: str :return: the attributes of the element :rtype: dict """ st='//gbxml:*[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] return xml_functions.get_attributes(element) def get_child_tags(self,id): """Returns the child tags of an element :param id: an element id :type id: str :return: a list of the tags of the child elements :rtype: list """ st='//gbxml:*[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] return xml_functions.get_child_tags(element) def get_child_tag_text(self,id,child_tag): """Returns the text of child elements :param id: an element id :type id: str :param child_tag: a tag of a child element :type child_tag: str :return: a list of the text of child elements with the child_tag tag :rtype: list """ st='//gbxml:*[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] return xml_functions.get_child_tag_text(element,child_tag) def get_child_tag_attributes(self,id,child_tag): """Returns the attributes of child elements :param id: an element id :type id: str :param child_tag: a tag of a child element :type child_tag: str :return: a list of the attributes of each child element with the child_tag tag :rtype: list """ st='//gbxml:*[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] return xml_functions.get_child_tag_attributes(element,child_tag) def get_children_list(self,id): """Returns a list of dicts representing each child element :param id: an element id :type id: str :return: a list of dicts {'tag':(str),'text':(str),'attributes':(dict)} :rtype: list """ st='//gbxml:*[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] return xml_functions.get_children_list(element) # campus query methods def get_campus_location_tags(self,id): """Returns the child tags of the Location element of a campus :param id: a Campus element id :type id: str :return: a list of the tags of the Location element :rtype: list """ st='./gbxml:Campus[@id="%s"]/gbxml:Location' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] return xml_functions.get_child_tags(element) def get_campus_location_tag_text(self,id,child_tag): """Returns the text of Location child elements of a campus :param id: a Campus element id :type id: str :param child_tag: a tag of a child element of the Location element :type child_tag: str :return: a list of the text of child elements of the Location element with the child_tag tag :rtype: list """ st='./gbxml:Campus[@id="%s"]/gbxml:Location' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] return xml_functions.get_child_tag_text(element,child_tag) # building query methods def get_building_space_ids(self,id): """Returns the ids of all spaces in a building :param id: a Building element id :type id: str :return: a list of Space ids :rtype: list """ # get element from id st='./gbxml:Campus/gbxml:Building[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get space ids return building_functions.get_space_ids(element) def get_building_surface_ids(self,id): """Returns the ids of all surfaces in a building :param id: a Building element id :type id: str :return: a list of Surface ids :rtype: list """ # get element from id st='./gbxml:Campus/gbxml:Building[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get surface ids return building_functions.get_surface_ids(element) # space query methods def get_space_surface_ids(self,id): """Returns the ids of all surfaces adjacent to a space :param id: a Space element id :type id: str :return: a list of surface ids :rtype: list """ # get element from id st='./gbxml:Campus/gbxml:Building/gbxml:Space[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get surface ids return space_functions.get_surface_ids(element) # construction query methods def get_construction_layer_ids(self,id): """Returns the layer ids of a construction :param id: a Construction element id :type id: str :return: a list of layer ids :rtype: list """ # get element from id st='./gbxml:Construction[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get layer ids return construction_functions.get_layer_ids(element) def get_construction_material_ids(self,id): """Returns the material ids of a construction :param id: a Construction element id :type id: str :return: a list of material ids :rtype: list """ # get element from id st='./gbxml:Construction[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get material ids return construction_functions.get_material_ids(element) # layer query methods def get_layer_material_ids(self,id): """Returns the material ids of a construction :param id: a Layer element id :type id: str :return: a list of material ids :rtype: list """ # get element from id st='./gbxml:Layer[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get layer ids return layer_functions.get_material_ids(element) # surface query methods def get_surface_inner_space_id(self,id): """Returns the inner space id of a surface :param id: a Surface element id :type id: str :return: the inner Space id :rtype: str or None """ # get element from id st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get inner space id return surface_functions.get_inner_space_id(element) def get_surface_outer_space_id(self,id): """Returns the outer space id of a surface :param id: a Surface element id :type id: str :return: the outer Space id :rtype: str or None """ # get element from id st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get outer space id return surface_functions.get_outer_space_id(element) def get_surface_azimuth(self,id): """Returns the azimuth of a surface :param id: a Surface element id :type id: str :return: the azimuth value :rtype: float or None """ # get element from id st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get azimuth return surface_functions.get_azimuth(element) def get_surface_tilt(self,id): """Returns the tilt of a surface :param id: a Surface element id :type id: str :return: the tilt value :rtype: float or None """ # get element from id st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get tilt return surface_functions.get_tilt(element) def get_surface_coordinates(self,id): """Returns the coordinates of a surface :param id: a Surface element id :type id: str :return: a list of coordinate tuples (x,y,z) :rtype: list (of tuples) """ # get element from id st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get coordinates return surface_functions.get_coordinates(element) def get_surface_area(self,id): """Returns the area of a surface This is calculated using the surface coordiantes and includes the area of any openings. :param id: a Surface element id :type id: str :return: the area value :rtype: float or None """ # get element from id st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get area return surface_functions.get_area(element) def get_surface_opening_ids(self,id): """Returns the opening ids of a surface :param id: a Surface element id :type id: str :return: a list of Opening ids :rtype: list """ # get element from id st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get opening ids return surface_functions.get_opening_ids(element) # opening query methods def get_opening_surface_id(self,id): """Returns the parent surface id of an opening :param id: a Opening element id :type id: str :return: a Surface id :rtype: str """ # get element from id st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get surface id return opening_functions.get_surface_id(element) def get_opening_coordinates(self,id): """Returns the coordinates of an opening :param id: a Opening element id :type id: str :return: a list of coordinate tuples (x,y,z) :rtype: list (of tuples) """ # get element from id st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get coordinates return opening_functions.get_coordinates(element) def get_opening_area(self,id): """Returns the area of an opening :param id: a Opening element id :type id: str :return: the area value :rtype: float or None """ # get element from id st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get area return opening_functions.get_area(element) # zone query methods def get_zone_space_ids(self,id): """Returns the ids of all spaces in a zone :param id: a Zone element id :type id: str :return: a list of Space ids :rtype: list """ # get element from id st='./gbxml:Zone[@id="%s"]' % id element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0] # get space ids return zone_functions.get_space_ids(element) ## OUTPUT # # # def __xmlstring(self,element=None): # """Returns a string of an xml element # # Arguments: # - element (lxml.etree._Element): default is root node # # """ # if element is None: element=self.root() # return etree.tostring(element,pretty_print=True).decode() # # # def xpath(self,element,st_xpath): # """Returns the result of an xpath operation on the gbXML file # # Arguments # - st_xpath (str): the xpath string # - element (lxml.etree._Element): the element for the xpath operation. The # default is the root element # # """ # return element.xpath(st_xpath,namespaces=self.ns) # # # def write(self,fp): # """Writes the gbXML file to disc # # Arguments: # fp (str): the filepath # """ # st=etree.tostring(self.root(),xml_declaration=True) # with open(fp,'wb') as f: # f.write(st) # ## VALIDATION # # def validate(self): # """Validates the gbXMl file using the schema # # Returns True if the gbXML file is valid, otherwise False # # """ # xmlschema = etree.XMLSchema(self.gbxsd._ElementTree) # result=xmlschema.validate(self._ElementTree) # return result # ## EDITING # # def add_element(self,parent_element,label,text=None,**kwargs): # """Adds an element to the gbXML # # Returns the newly created element # # Arguments: # - parent_element (lxml._Element or str): the parent element that the # new element is added to. This can be either a lxml._Element object # or a string with the element id. # - label (str): the label or tag of the new element # - text (str): the text of the new element # - **kwargs (keywords): the attributes of the new element # # """ # if isinstance(parent_element,str): # parent_element=self.element(parent_element) # e=etree.SubElement(parent_element,'{%s}%s' % (self.ns['gbxml'],label)) # if text: e.text=text # if kwargs: # for k,v in kwargs.items(): # e.set(k,v) # return e # # def set_attribute(self,element,key,value): # """Sets the attribute of an element # # Returns the modified element # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # - key (str): the name of the attribute # - value (str): the value of the attribute # # """ # if isinstance(element,str): # element=self.element(element) # element.set(key,value) # return element # # # def set_element_id(self,element,new_id): # """Sets a new id attribute for an element and updates all links # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # - new_id (str): # # Return value: # - new_id (str) # # """ # #check if new_id already exists # l=self.elements() # ids=[x.get('id') for x in l if x.get('id')] # if new_id in ids: # raise ValueError('new_id %s already exists' % new_id) # # #get element # if isinstance(element,str): # element=self.element(element) # # #get old id # old_id=element.get('id') # # #set new id # element.set('id',new_id) # # #find all elements with attribute labelRefId=old_id # label=self.label(element) # prefix=label[0].lower()+label[1:] # st='.//gbxml:*[@%sIdRef="%s"]' % (prefix,old_id) # l=self.xpath(self.root(),st) # # #update with id # for e in l: # e.set('%sIdRef' % prefix,new_id) # #return new id # return new_id # # # def set_text(self,element,text): # """Sets the text of an element # # Returns the modified element # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # - text (str): the text # # """ # if isinstance(element,str): # element=self.element(element) # element.text=text # return element # # # def remove_element(self,element): # """Removes an element # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # # """ # if isinstance(element,str): # element=self.element(element) # # #remove links to element # id=element.get('id') # label=self.label(element) # prefix=label[0].lower()+label[1:] # st='.//gbxml:*[@%sIdRef="%s"]' % (prefix,id) # l=self.xpath(self.root(),st) # for x in l: # self.remove_attribute(x,'%sIdRef' % prefix) # # #remove element # parent=element.getparent() # parent.remove(element) # # # def remove_attribute(self,element,key): # """Removes an element # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # - key (str): The name of the attribute to delete # # """ # if isinstance(element,str): # element=self.element(element) # element.attrib.pop(key) # # # def remove_text(self,element): # pass # # # ## QUERYING # # def elements(self,label='*'): # """Returns the elements of the gbXML file # # Arguments: # - label (str): the label of the elements # # """ # st='//gbxml:%s' % label # return self.xpath(self.root(),st) # # # def root(self): # "Returns the root element" # return self._ElementTree.getroot() # # # def element(self,id,label='*'): # """Returns an element from the gbXML file # # Arguments: # - id (str): the id of the element # - label (str): the label of the element # # """ # st='//gbxml:%s[@id="%s"]' % (label,id) # try: # return self.xpath(self.root(),st)[0] # except IndexError: # raise KeyError('there is no element with an id of %s' % id) # # # def label(self,element): # """Returns the label of an element # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # """ # if isinstance(element,str): # element=self.element(element) # return element.tag.split('}')[1] # # # def attributes(self,element): # """Returns the attributes of an element # # Return value is a dictionary # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # """ # if isinstance(element,str): # element=self.element(element) # return dict(element.attrib) # # # def text(self,element): # """Returns the text of an element, or None # # Return value is a string # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # """ # if isinstance(element,str): # element=self.element(element) # return element.text # # # def text_value(self,element): # """Returns the text value of an element, i.e the text converted # according to its schema data type # # Return value is an object with data type dependent on the schema # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # """ # # #JUST RETURNS STRINGS AT PRESENT - TO DO # # if isinstance(element,str): # element=self.element(element) # text=element.text # return text # # # def child_elements(self,element,label='*'): # """Returns the child elements of an element # # Return value is a list of elements # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # - label (str): the label of the element # """ # if isinstance(element,str): # element=self.element(element) # st='./gbxml:%s' % label # return self.xpath(element,st) # # # def descendent_elements(self,element,label='*'): # """Returns the descendent elements of an element # # Return value is a list of elements # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # - label (str): the label of the element # """ # if isinstance(element,str): # element=self.element(element) # st='.//gbxml:%s' % label # return self.xpath(element,st) # # ## CONSTRUCTION FUNCTIONS # # def construction_layers(self,construction_element): # "Returns the layer elements of a construction" # if isinstance(construction_element,str): # construction_element=self.element(construction_element,label='Construction') # layerId_elements=self.child_elements(construction_element,'LayerId') # layer_elements=[self.element(layerId_element.get('layerIdRef'),'Layer') # for layerId_element in layerId_elements] # return layer_elements # # def construction_materials(self,construction_element): # "Returns the layer elements of a construction" # if isinstance(construction_element,str): # construction_element=self.element(construction_element,label='Construction') # layer_elements=self.construction_layers(construction_element) # material_elements=[] # for layer_element in layer_elements: # material_elements+=self.layer_materials(layer_element) # return material_elements # # ## LAYER FUNCTIONS # # def layer_materials(self,layer_element): # "Returns the layer elements of a construction" # if isinstance(layer_element,str): # layer_element=self.element(layer_element,label='Layer') # materialId_elements=self.child_elements(layer_element,'MaterialId') # material_elements=[self.element(materialId_element.get('materialIdRef'),'Material') # for materialId_element in materialId_elements] # return material_elements # # # ## OPENING FUNCTIONS # # def opening_coordinates(self,opening_element): # """Returns a list of coordinate tuples # # Arguments: # - opening_element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # # Return value: # - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates. # i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...] # or None # # """ # if isinstance(opening_element,str): # opening_element=self.element(opening_element,label='Opening') # l=[] # st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint' # cartesian_points=self.xpath(opening_element,st) # for cartesian_point in cartesian_points: # st='./gbxml:Coordinate' # coordinates=self.xpath(cartesian_point,st) # t=(float(self.text_value(coordinates[0])), # float(self.text_value(coordinates[1])), # float(self.text_value(coordinates[2]))) # l.append(t) # return l # ## SURFACE FUNCTIONS # # def surface_azimuth(self,surface_element): # """Returns the azimuth of a surface # # Arguments: # - surface_element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # # Return value: # - azimuth (float) or None # # """ # if isinstance(surface_element,str): # surface_element=self.element(surface_element,label='Surface') # l=self.xpath(surface_element,'./gbxml:RectangularGeometry/gbxml:Azimuth') # if len(l)>0: # azimuth=l[0] # return float(self.text_value(azimuth)) # # # def surface_coordinates(self,surface_element): # """Returns a list of coordinate tuples # # Arguments: # - surface_element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # # Return value: # - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates. # i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...] # or None # # """ # if isinstance(surface_element,str): # surface_element=self.element(surface_element,label='Surface') # l=[] # st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint' # cartesian_points=self.xpath(surface_element,st) # for cartesian_point in cartesian_points: # st='./gbxml:Coordinate' # coordinates=self.xpath(cartesian_point,st) # t=(float(self.text_value(coordinates[0])), # float(self.text_value(coordinates[1])), # float(self.text_value(coordinates[2]))) # l.append(t) # return l # # # def surface_inner_space(self,surface_element): # """Returns the inner Space element of a Surface, or None # # Arguments: # - surface_element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # # Return value: # - space (lxml._Element) or None # # """ # if isinstance(surface_element,str): # surface_element=self.element(surface_element,label='Surface') # adjacentSpaceIds=self.child_elements(surface_element,label='AdjacentSpaceId') # if len(adjacentSpaceIds)>0: # adjacentSpaceId=adjacentSpaceIds[0] # spaceIdRef=adjacentSpaceId.get('spaceIdRef') # return self.element(spaceIdRef) # # # def surface_outer_space(self,surface_element): # """Returns the outer Space element of a Surface, or None # # Arguments: # - surface_element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # # Return value: # - space (lxml._Element) or None # # """ # if isinstance(surface_element,str): # surface_element=self.element(surface_element,label='Surface') # adjacentSpaceIds=self.child_elements(surface_element,label='AdjacentSpaceId') # if len(adjacentSpaceIds)>1: # adjacentSpaceId=adjacentSpaceIds[1] # spaceIdRef=adjacentSpaceId.get('spaceIdRef') # return self.element(spaceIdRef) # # # def surface_tilt(self,surface_element): # """Returns the tilt of a surface # # Arguments: # - surface_element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # # Return value: # - tilt (float) or None # # """ # if isinstance(surface_element,str): # surface_element=self.element(surface_element,label='Surface') # l=self.xpath(surface_element,'./gbxml:RectangularGeometry/gbxml:Tilt') # if len(l)>0: # tilt=l[0] # return float(self.text_value(tilt)) # # def surface_construction(self,surface_element): # "Returns the construction element of a surface" # if isinstance(surface_element,str): # surface_element=self.element(surface_element,label='Surface') # construction_id=surface_element.get('constructionIdRef') # construction_element=self.element(construction_id,'Construction') # return construction_element # # def surface_layers(self,surface_element): # "Returns the layer elements of a surface" # if isinstance(surface_element,str): # surface_element=self.element(surface_element,label='Surface') # construction_element=self.surface_construction(surface_element) # layer_elements=self.construction_layers(construction_element) # return layer_elements # # def surface_materials(self,surface_element): # "Returns the layer elements of a surface" # if isinstance(surface_element,str): # surface_element=self.element(surface_element,label='Surface') # construction_element=self.surface_construction(surface_element) # material_elements=self.construction_materials(construction_element) # return material_elements # # # # # # # # # # ### SPACE FUNCTIONS ## ## def set_space_id(self,space_element,id): ## """Sets a new id attribute for a Space element and updates all links ## ## ## """ ## if isinstance(space_element,str): ## space_element=self.element(space_element) ## #get old id ## old_id=space_element.get('id') ## #set new id ## space_element.set('id',id) ## #find all elements with attribute spaceRefId=old_id ## st='.//gbxml:*[@spaceIdRef="%s"]' % old_id ## l=self.xpath(self.root(),st) ## #update with id ## for e in l: ## e.set('spaceIdRef',id) ## #return new id ## return id # # ## WINDOWTYPE FUNCTIONS # # def windowType_materials(self,windowType_element): # """Returns the Glaze and Gap elements of a windowType in order # # Arguments: # - windowType_element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # # Return value: # - glaze_and_gap_elements (list) # # """ # l=[] # if isinstance(windowType_element,str): # windowType_element=self.element(windowType_element,label='WindowType') # l=self.child_elements(windowType_element) # return [x for x in l if self.label(x) in ['Glaze','Gap']] # # ## ZONE FUNCTIONS # # def add_zone(self,zone_id,space_ids): # """Adds a zone element and the IdRef links to it. # # Arguments: # - zone_id (str): the id of the new zone # - space_ids (str or list): the ids of the spaces that link to the zone # """ # #adds element # parent=self.root() # e=self.add_element(parent,'Zone') # self.set_attribute(e,'id',zone_id) # #adds links # if isinstance(space_ids,str): # space_ids=[space_ids] # for space_id in space_ids: # space=self.element(space_id,'Space') # self.set_attribute(space,'zoneIdRef',zone_id) # #returns the new zone element # return e # # # def remove_zone(self,zone_element): # """Removes a Zone element and all IdRef links to the zone. # # Arguments: # - zone_element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # """ # #find id # if isinstance(zone_element,str): # id=zone_element # else: # id=zone_element.get('id') # #find all elements with attribute zoneRefId=id # st='.//gbxml:*[@zoneIdRef="%s"]' % id # l=self.xpath(self.root(),st) # #removes all attributes zoneRefId=id # for x in l: # self.remove_attribute(x,'zoneIdRef') # #remove node # self.remove_element(zone_element) # # # # # # LAYERS # # # ## OUTPUT # #def xpath(element,st_xpath): # """Returns the result of an xpath operation on the gbXML file # # Arguments # - st_xpath (str): the xpath string # - element (lxml.etree._Element): the element for the xpath operation. The # default is the root element # # """ # return element.xpath(st_xpath,namespaces=ns) # ## QUERYING # #def get_child(element,id=None,label='*'): # """Returns the child of an element # # Arguments: # - id (str): the id of the element # - label (str): the label of the element # # """ # if id is None: # return get_children(element,label)[0] # else: # st='./gbxml:%s[@id="%s"]' % (label,id) # return xpath(element,st)[0] # # #def get_child_text(element,label='*',dtype=None): # "Returns the first child text value, or None" # children=get_children(element,label) # if children: # if dtype is None: # return children[0].text # else: # return dtype(children[0].text) # else: # return None # #def get_children(element,label='*'): # """Returns the child elements of an element # # Return value is a list of elements # # Arguments: # - element (lxml._Element or str): This a lxml._Element object # or a string with the element id. # - label (str): the label of the element # """ # st='./gbxml:%s' % label # return xpath(element,st) # #def get_descendents(element,label='*'): # """Returns the descendent elements of an element # # Return value is a list of elements # # Arguments: # - element (lxml._Element): This a lxml._Element object # - label (str): the label of the element # """ # st='.//gbxml:%s' % label # return xpath(element,st) # #def get_element(element,id,label='*'): # """Returns an element from the gbXML file # """ # st='//gbxml:%s[@id="%s"]' % (label,id) # return xpath(element.getroottree(),st)[0] # # ## CONSTRUCTION FUNCTIONS # #def construction_layers(construction_element): # "Returns the layer elements of a construction" # layerId_elements=get_children(construction_element,'LayerId') # layer_elements=[get_layer(layerId_element, # layerId_element.get('layerIdRef')) # for layerId_element in layerId_elements] # return layer_elements # #def construction_materials(construction_element): # "Returns the layer elements of a construction" # layer_elements=construction_layers(construction_element) # material_elements=[] # for layer_element in layer_elements: # material_elements+=layer_materials(layer_element) # return material_elements # # ## LAYER FUNCTIONS # #def get_layer(element,id): # root=element.getroottree() # result=xpath(root,'./gbxml:Layer[@id="%s"]' % id) # return result[0] # #def layer_materials(layer_element): # "Returns the layer elements of a construction" # materialId_elements=get_children(layer_element,'MaterialId') # material_elements=[get_element(materialId_element, # materialId_element.get('materialIdRef'), # 'Material') # for materialId_element in materialId_elements] # return material_elements # ## MATERIAL FUNCTIONS # #def get_material(element,id): # root=element.getroottree() # result=xpath(root,'./gbxml:Material[@id="%s"]' % id) # return result[0] # # ## SURFACE FUNCTION # #def get_surface_coordinates(surface_element): # """Returns a list of coordinate tuples # # Arguments: # - surface_element (lxml._Element or str): This a lxml._Element object # # Return value: # - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates. # i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...] # or None # # """ # l=[] # st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint' # cartesian_points=xpath(surface_element,st) # for cartesian_point in cartesian_points: # st='./gbxml:Coordinate' # coordinates=xpath(cartesian_point,st) # t=(float(coordinates[0].text), # float(coordinates[1].text), # float(coordinates[2].text)) # l.append(t) # return l # #def get_surface_inner_space(surface_element): # """Returns the inner Space element of a Surface, or None # """ # adjacentSpaceIds=get_children(surface_element,label='AdjacentSpaceId') # if len(adjacentSpaceIds)>0: # adjacentSpaceId=adjacentSpaceIds[0] # spaceIdRef=adjacentSpaceId.get('spaceIdRef') # return get_element(surface_element,spaceIdRef) # #def get_surface_outer_space(surface_element): # """Returns the outer Space element of a Surface, or None # """ # adjacentSpaceIds=get_children(surface_element,label='AdjacentSpaceId') # if len(adjacentSpaceIds)>1: # adjacentSpaceId=adjacentSpaceIds[1] # spaceIdRef=adjacentSpaceId.get('spaceIdRef') # return get_element(surface_element,spaceIdRef) # # # # # # # # ## def child_node_text(self,id,label='*'): ## """Returns a dictionary listing any child nodes which have text ## ## Return values is {tag:text} ## ## """ ## e=self._element(id,label) ## d={} ## for e1 in e: ## if e1.text: ## label=e1.tag.split('}')[1] ## d[label]=e1.text ## return d ## ## ## def child_node_values(self,id,label='*'): ## """Returns a dictionary listing any child nodes which have text ## ## Node text values are converted from strings into their datatype ## i.e. the text from an 'Area' node is converted into a float ## ## Return values is {label:value} ## ## """ ## d=self.xml.child_node_text(id=id,label=label) ## d1={} ## for k,v in d.items(): ## xml_type=self.xsd.element_type(k) ## #print(xml_type) ## if xml_type=='xsd:string': ## value=v ## elif xml_type=='xsd:decimal': ## value=float(v) ## else: ## raise Exception(xml_type) ## d1[k]=value ## return d1 ## ## ## ## def node_attributes(self,id,label='*'): ## "Returns the attribute dict of node with id 'id'" ## e=self._element(id,label) ## return dict(e.attrib) ## ## ## def node_ids(self,label='*'): ## """Returns the ids of all nodes ## ## Arguments: ## label (str): the node tag to filter on ## ## """ ## #filter by label ## st='//a:%s' % (label) ## l=self._ElementTree.getroot().xpath(st,namespaces=self.ns) ## return [x.get('id') for x in l] ## ## ## def parent_object(self,id,label='*'): ## """Returns the parent of an element ## ## Return value is a dictionary {'id':value,'label':value} ## ## """ ## e=self._element(id,label) ## parent=e.getparent() ## return {'id':self._id(parent), ## 'label':self._label(parent)} ## ## ## ## ## ## def surface_adjacent_objects(self,id): ## """Returns the objects adjacent to the surface ## ## Return value is a 2 item list of dictionaries [{'id':value,'label':value}] ## ## """ ## label='Surface' ## e=self._element(id,label) ## st='./a:AdjacentSpaceId/@spaceIdRef' ## l=e.xpath(st,namespaces=self.ns) ## l=l+[None]*(2-len(l)) ## surfaceType=e.get('surfaceType') ## d=\ ## {'InteriorWall':None, ## 'ExteriorWall':{'id':'Climate1','label':'Climate'}, ## 'Roof':{'id':'Climate1','label':'Climate'}, ## 'InteriorFloor':None, ## 'ExposedFloor':{'id':'Climate1','label':'Climate'}, ## 'Shade':{'id':'Climate1','label':'Climate'}, ## 'UndergroundWall':{'id':'Ground1','label':'Ground'}, ## 'UndergroundSlab':{'id':'Ground1','label':'Ground'}, ## 'Ceiling':None, ## 'Air':None, ## 'UndergroundCeiling':{'id':'Ground1','label':'Ground'}, ## 'RaisedFloor':{'id':'Climate1','label':'Climate'}, ## 'SlabOnGrade':{'id':'Ground1','label':'Ground'}, ## 'FreestandingColumn':None, ## 'EmbeddedColumn':None ## } ## l1=[] ## for x in l: ## if not x is None: ## l1.append({'id':x,'label':'Space'}) ## else: ## l1.append(d[surfaceType]) ## return l1 ## ## ## def surface_building_ids(self,id): ## """Returns a list of building ids that the surface belongs to ## """ ## l=self.surface_adjacent_objects(id) ## l=[self.parent_object(x['id'])['id'] for x in l if x['label']=='Space'] ## return l ## ## ## # ## def elements(xml, tag='*'): ## """Returns a list of lxml elements, filtered by tag ## ## Arguments: ## xml (lxml.etree._ElementTree): the gbXML instance ## tag (str): the tag name, not including the namespace ## ## """ ## st='//a:%s' % (tag) ## #print(st) ## return xml.getroot().xpath(st,namespaces=ns) # #
"""Initialises a new Gbxml instance Arguments: gbxml_fp (str): filepath to a gbXML file. This is read in as an lxml._ElementTree object. If not supplied then a new lxml._ElementTree object with only a root element is created. gbxsd_fp (str): filepath to a gbXML schema file. If not supplied then a default gbXMl schema file is used. """ if gbxml_fp: self._ElementTree=etree.parse(gbxml_fp) else: st = pkgutil.get_data(__package__, 'blank.xml') self._ElementTree=etree.parse(BytesIO(st)) if gbxsd_fp: self._ElementTree_gbxsd=etree.parse(gbxml_fp) else: st = pkgutil.get_data(__package__, 'GreenBuildingXML_Ver6.01.xsd') self._ElementTree_gbxsd=etree.parse(BytesIO(st)) self.ns={'gbxml':'http://www.gbxml.org/schema'}
instance_iam_roles_list_type_json.go
/* Copyright (c) 2020 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // IMPORTANT: This file has been generated automatically, refrain from modifying it manually as all // your changes will be lost when the file is generated again. package v1 // github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1 import ( "io" jsoniter "github.com/json-iterator/go" "github.com/openshift-online/ocm-sdk-go/helpers" ) // MarshalInstanceIAMRolesList writes a list of values of the 'instance_IAM_roles' type to // the given writer. func MarshalInstanceIAMRolesList(list []*InstanceIAMRoles, writer io.Writer) error { stream := helpers.NewStream(writer) writeInstanceIAMRolesList(list, stream) stream.Flush() return stream.Error } // writeInstanceIAMRolesList writes a list of value of the 'instance_IAM_roles' type to // the given stream. func writeInstanceIAMRolesList(list []*InstanceIAMRoles, stream *jsoniter.Stream) { stream.WriteArrayStart() for i, value := range list { if i > 0 { stream.WriteMore() } writeInstanceIAMRoles(value, stream) } stream.WriteArrayEnd() } // UnmarshalInstanceIAMRolesList reads a list of values of the 'instance_IAM_roles' type // from the given source, which can be a slice of bytes, a string or a reader. func UnmarshalInstanceIAMRolesList(source interface{}) (items []*InstanceIAMRoles, err error) { iterator, err := helpers.NewIterator(source) if err != nil { return } items = readInstanceIAMRolesList(iterator) err = iterator.Error return } // readInstanceIAMRolesList reads list of values of the ''instance_IAM_roles' type from // the given iterator. func readInstanceIAMRolesList(iterator *jsoniter.Iterator) []*InstanceIAMRoles
{ list := []*InstanceIAMRoles{} for iterator.ReadArray() { item := readInstanceIAMRoles(iterator) list = append(list, item) } return list }
types.ts
export interface User { /** * 用户名 */ username: string; /** * 密码 */ password?: string; /** * 所属部门 */ department: string; /** * 所属权限 */ roles: string[]; /** * 称呼 */ name?: string; /** * 电子邮件 */ email?: string; /** * 联系电话 */ phone?: string; /** * 地区 */ region?: string; /** * 城市 */ city?: string; /** * 地址 */ address?: string; /** * 头像
avatar?: string; /** * 简介 */ introduction?: string; /** * 标签 */ labels: string[]; /** * 状态 */ status: boolean; }
*/
core.py
import cv_datetime_utils import cv2 as cv import numpy as np import matplotlib.pyplot as plt import scipy.optimize import json import os def compose_transformations( rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2): rotation_vector_1 = np.asarray(rotation_vector_1).reshape(3) translation_vector_1 = np.asarray(translation_vector_1).reshape(3) rotation_vector_2 = np.asarray(rotation_vector_2).reshape(3) translation_vector_2 = np.asarray(translation_vector_2).reshape(3) rotation_vector_composed, translation_vector_composed = cv.composeRT( rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2)[:2] rotation_vector_composed = np.squeeze(rotation_vector_composed) translation_vector_composed = np.squeeze(translation_vector_composed) return rotation_vector_composed, translation_vector_composed def invert_transformation( rotation_vector, translation_vector): rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) new_rotation_vector, new_translation_vector = compose_transformations( np.array([0.0, 0.0, 0.0]), -translation_vector, -rotation_vector, np.array([0.0, 0.0, 0.0])) new_rotation_vector = np.squeeze(new_rotation_vector) new_translation_vector = np.squeeze(new_translation_vector) return new_rotation_vector, new_translation_vector def quaternion_vector_to_rotation_vector(quaternion_vector): quaternion_vector = np.asarray(quaternion_vector).reshape(4) spatial_vector = quaternion_vector[1:] qw = quaternion_vector[0] spatial_vector_length = np.linalg.norm(spatial_vector) unit_vector = spatial_vector/spatial_vector_length theta = 2*np.arctan2(spatial_vector_length, qw) rotation_vector = theta*unit_vector return rotation_vector def quaternion_vector_to_rotation_matrix(quaternion_vector): quaternion_tuple = tuple(np.asarray(quaternion_vector).reshape(4)) qw, qx, qy, qz = quaternion_tuple R = np.array([ [qw**2 + qx**2 - qy**2 - qz**2, 2*(qx*qy - qw*qz), 2*(qw*qy + qx*qz)], [2*(qx*qy + qw*qz), qw**2 - qx**2 + qy**2 - qz**2, 2*(qy*qz - qw*qx)], [2*(qx*qz - qw*qy), 2*(qw*qx + qy*qz), qw**2 - qx**2 - qy**2 + qz**2] ]) return R def rotation_vector_to_rotation_matrix(rotation_vector): rotation_vector = np.asarray(rotation_vector).reshape(3) rotation_matrix = cv.Rodrigues(rotation_vector)[0] return rotation_matrix def transform_object_points( object_points, rotation_vector=np.array([0.0, 0.0, 0.0]), translation_vector=np.array([0.0, 0.0, 0.0])): object_points = np.asarray(object_points) rotation_vector = np.asarray(rotation_vector) translation_vector = np.asarray(translation_vector) if object_points.size == 0: return object_points object_points = object_points.reshape((-1, 3)) rotation_vector = rotation_vector.reshape(3) translation_vector = translation_vector.reshape(3) transformed_points = np.add( np.matmul( cv.Rodrigues(rotation_vector)[0], object_points.T).T, translation_vector.reshape((1, 3))) transformed_points = np.squeeze(transformed_points) return transformed_points def generate_camera_pose( camera_position=np.array([0.0, 0.0, 0.0]), yaw=0.0, pitch=0.0, roll=0.0): # yaw: 0.0 points north (along the positive y-axis), positive angles rotate counter-clockwise # pitch: 0.0 is level with the ground, positive angles rotate upward # roll: 0.0 is level with the ground, positive angles rotate clockwise # All angles in radians camera_position = np.asarray(camera_position).reshape(3) # First: Move the camera to the specified position rotation_vector_1 = np.array([0.0, 0.0, 0.0]) translation_vector_1 = -camera_position # Second: Rotate the camera so when we lower to the specified inclination, it will point in the specified compass direction rotation_vector_2 = np.array([0.0, 0.0, -(yaw - np.pi / 2)]) translation_vector_2 = np.array([0.0, 0.0, 0.0]) # Third: Lower to the specified inclination rotation_vector_2_3 = np.array([(np.pi / 2 - pitch), 0.0, 0.0]) translation_vector_2_3 = np.array([0.0, 0.0, 0.0]) # Fourth: Roll the camera by the specified angle rotation_vector_2_3_4 = np.array([0.0, 0.0, -roll]) translation_vector_2_3_4 = np.array([0.0, 0.0, 0.0]) # Combine these four moves rotation_vector_1_2, translation_vector_1_2 = compose_transformations( rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2) rotation_vector_1_2_3, translation_vector_1_2_3 = compose_transformations( rotation_vector_1_2, translation_vector_1_2, rotation_vector_2_3, translation_vector_2_3) rotation_vector, translation_vector = compose_transformations( rotation_vector_1_2_3, translation_vector_1_2_3, rotation_vector_2_3_4, translation_vector_2_3_4) rotation_vector = np.squeeze(rotation_vector) translation_vector = np.squeeze(translation_vector) return rotation_vector, translation_vector def extract_camera_position( rotation_vector, translation_vector): rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) new_rotation_vector, new_translation_vector = compose_transformations( rotation_vector, translation_vector, -rotation_vector, np.array([0.0, 0.0, 0.0])) camera_position = -np.squeeze(new_translation_vector) return camera_position def extract_camera_position_rotation_matrix(rotation_matrix, translation_vector): rotation_matrix = np.asarray(rotation_matrix).reshape((3,3)) translation_vector = np.asarray(translation_vector).reshape(3) position = np.matmul(rotation_matrix.T, -translation_vector.T) return position def extract_camera_direction( rotation_vector, translation_vector): rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) camera_direction = np.matmul( cv.Rodrigues(-rotation_vector)[0], np.array([[0.0], [0.0], [1.0]])) camera_direction = np.squeeze(camera_direction) return camera_direction def reconstruct_z_rotation(x, y): if x >= 0.0 and y >= 0.0: return np.arctan(y / x) if x >= 0.0 and y < 0.0: return np.arctan(y / x) + 2 * np.pi return np.arctan(y / x) + np.pi # Currently unused; needs to be fixed up for cases in which x and/or y are close # to zero def extract_yaw_from_camera_direction( camera_direction): camera_direction = np.asarray(camera_direction).reshape(3) yaw = reconstruct_z_rotation( camera_direction[0], camera_direction[1]) return yaw def generate_camera_matrix( focal_length, principal_point): focal_length = np.asarray(focal_length).reshape(2) principal_point = np.asarray(principal_point).reshape(2) camera_matrix = np.array([ [focal_length[0], 0, principal_point[0]], [0, focal_length[1], principal_point[1]], [0, 0, 1.0]]) return camera_matrix def generate_projection_matrix( camera_matrix, rotation_vector, translation_vector): camera_matrix = np.asarray(camera_matrix).reshape((3, 3)) rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) projection_matrix = np.matmul( camera_matrix, np.concatenate(( cv.Rodrigues(rotation_vector)[0], translation_vector.reshape((3, 1))), axis=1)) return(projection_matrix) def ground_grid_camera_view( image_width, image_height, rotation_vector, translation_vector, camera_matrix, distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]), fill_image=False, step=0.1 ): grid_corners = ground_rectangle_camera_view( image_width=image_width, image_height=image_height, rotation_vector=rotation_vector, translation_vector=translation_vector, camera_matrix=camera_matrix, distortion_coefficients=distortion_coefficients, fill_image=fill_image ) grid_points = generate_ground_grid( grid_corners=grid_corners, step=step ) return grid_points def ground_rectangle_camera_view( image_width, image_height, rotation_vector, translation_vector, camera_matrix, distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]), fill_image=False ): image_points = np.array([ [0.0, 0.0], [image_width, 0.0], [image_width, image_height], [0.0, image_height] ]) ground_points=np.empty((4, 3)) for i in range(4): ground_points[i] = ground_point( image_point=image_points[i], rotation_vector=rotation_vector, translation_vector=translation_vector, camera_matrix=camera_matrix, distortion_coefficients=distortion_coefficients ) x_values_sorted = np.sort(ground_points[:, 0]) y_values_sorted = np.sort(ground_points[:, 1]) if fill_image: x_min = x_values_sorted[0] x_max = x_values_sorted[3] y_min = y_values_sorted[0] y_max = y_values_sorted[3] else: x_min = x_values_sorted[1] x_max = x_values_sorted[2] y_min = y_values_sorted[1] y_max = y_values_sorted[2] return np.array([ [x_min, y_min], [x_max, y_max] ]) def ground_point( image_point, rotation_vector, translation_vector, camera_matrix, distortion_coefficients=np.array([0.0, 0.0, 0.0, 0.0]) ): image_point = np.asarray(image_point) rotation_vector = np.asarray(rotation_vector) translation_vector = np.asarray(translation_vector) camera_matrix = np.asarray(camera_matrix) distortion_coefficients = np.asarray(distortion_coefficients) image_point = image_point.reshape((2)) rotation_vector = rotation_vector.reshape(3) translation_vector = translation_vector.reshape(3) camera_matrix = camera_matrix.reshape((3, 3)) image_point_undistorted = cv.undistortPoints( image_point, camera_matrix, distortion_coefficients, P=camera_matrix ) image_point_undistorted = np.squeeze(image_point_undistorted) camera_position = np.matmul( cv.Rodrigues(-rotation_vector)[0], -translation_vector.T ).T camera_point_homogeneous = np.matmul( np.linalg.inv(camera_matrix), np.array([image_point_undistorted[0], image_point_undistorted[1], 1.0]).T ).T camera_direction = np.matmul( cv.Rodrigues(-rotation_vector)[0], camera_point_homogeneous.T ).T theta = -camera_position[2]/camera_direction[2] ground_point = camera_position + theta*camera_direction return ground_point def generate_ground_grid( grid_corners, step=0.1 ): x_grid, y_grid = np.meshgrid( np.arange(grid_corners[0, 0], grid_corners[1, 0], step=step), np.arange(grid_corners[0, 1], grid_corners[1, 1], step=step) ) grid = np.stack((x_grid, y_grid, np.full_like(x_grid, 0.0)), axis=-1) points = grid.reshape((-1, 3)) return points def project_points( object_points, rotation_vector, translation_vector, camera_matrix, distortion_coefficients, remove_behind_camera=False, remove_outside_frame=False, image_corners=None ): object_points = np.asarray(object_points).reshape((-1, 3)) rotation_vector = np.asarray(rotation_vector).reshape(3) translation_vector = np.asarray(translation_vector).reshape(3) camera_matrix = np.asarray(camera_matrix).reshape((3, 3)) distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients)) if object_points.size == 0: return np.zeros((0, 2)) image_points = cv.projectPoints( object_points, rotation_vector, translation_vector, camera_matrix, distortion_coefficients )[0] if remove_behind_camera: behind_camera_boolean = behind_camera( object_points, rotation_vector, translation_vector ) image_points[behind_camera_boolean] = np.array([np.nan, np.nan]) if remove_outside_frame: outside_frame_boolean = outside_frame( object_points, rotation_vector, translation_vector, camera_matrix, distortion_coefficients, image_corners ) image_points[outside_frame_boolean] = np.array([np.nan, np.nan]) image_points = np.squeeze(image_points) return image_points def behind_camera( object_points, rotation_vector, translation_vector): object_points = np.asarray(object_points) rotation_vector = np.asarray(rotation_vector) translation_vector = np.asarray(translation_vector) if object_points.size == 0: return np.zeros((0, 2)) object_points = object_points.reshape((-1, 3)) rotation_vector = rotation_vector.reshape(3) translation_vector = translation_vector.reshape(3) object_points_transformed = transform_object_points( object_points, rotation_vector, translation_vector ) behind_camera_boolean = (object_points_transformed <= 0)[..., 2] return behind_camera_boolean def outside_frame( object_points, rotation_vector, translation_vector, camera_matrix, distortion_coefficients, image_corners ): object_points = np.asarray(object_points).reshape((-1, 3)) rotation_vector = np.asarray(rotation_vector) translation_vector = np.asarray(translation_vector).reshape(3) camera_matrix = np.asarray(camera_matrix).reshape((3,3)) distortion_coefficients = np.squeeze(np.asarray(distortion_coefficients)) image_corners = np.asarray(image_corners).reshape((2,2)) if object_points.size == 0: return np.zeros((0, 2)) image_points = cv.projectPoints( object_points, rotation_vector, translation_vector, camera_matrix, np.array([0.0, 0.0, 0.0, 0.0]) )[0] image_points = image_points.reshape((-1, 2)) outside_frame_boolean = ( (image_points[:, 0] < image_corners[0, 0]) | (image_points[:, 0] > image_corners[1, 0]) | (image_points[:, 1] < image_corners[0, 1]) | (image_points[:, 1] > image_corners[1, 1]) ) return outside_frame_boolean def undistort_points( image_points, camera_matrix, distortion_coefficients): image_points = np.asarray(image_points) camera_matrix = np.asarray(camera_matrix) distortion_coefficients = np.asarray(distortion_coefficients) if image_points.size == 0: return image_points image_points = image_points.reshape((-1, 1, 2)) camera_matrix = camera_matrix.reshape((3, 3)) undistorted_points = cv.undistortPoints( image_points, camera_matrix, distortion_coefficients, P=camera_matrix) undistorted_points = np.squeeze(undistorted_points) return undistorted_points def estimate_camera_pose_from_image_points( image_points_1, image_points_2, camera_matrix, rotation_vector_1=np.array([0.0, 0.0, 0.0]), translation_vector_1=np.array([0.0, 0.0, 0.0]), distance_between_cameras=1.0):
def reconstruct_object_points_from_camera_poses( image_points_1, image_points_2, camera_matrix, rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) rotation_vector_1 = np.asarray(rotation_vector_1) translation_vector_1 = np.asarray(translation_vector_1) rotation_vector_2 = np.asarray(rotation_vector_2) translation_vector_2 = np.asarray(translation_vector_2) if image_points_1.size == 0 or image_points_2.size == 0: return np.zeros((0, 3)) image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) rotation_vector_1 = rotation_vector_1.reshape(3) translation_vector_1 = translation_vector_1.reshape(3) rotation_vector_2 = rotation_vector_2.reshape(3) translation_vector_2 = translation_vector_2.reshape(3) projection_matrix_1 = generate_projection_matrix( camera_matrix, rotation_vector_1, translation_vector_1) projection_matrix_2 = generate_projection_matrix( camera_matrix, rotation_vector_2, translation_vector_2) object_points_homogeneous = cv.triangulatePoints( projection_matrix_1, projection_matrix_2, image_points_1.T, image_points_2.T) object_points = cv.convertPointsFromHomogeneous( object_points_homogeneous.T) object_points = np.squeeze(object_points) return object_points def reconstruct_object_points_from_relative_camera_pose( image_points_1, image_points_2, camera_matrix, relative_rotation_vector, relative_translation_vector, rotation_vector_1=np.array([[0.0], [0.0], [0.0]]), translation_vector_1=np.array([[0.0], [0.0], [0.0]]), distance_between_cameras=1.0): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) relative_rotation_vector = np.asarray(relative_rotation_vector) relative_translation_vector = np.asarray(relative_translation_vector) rotation_vector_1 = np.asarray(rotation_vector_1) translation_vector_1 = np.asarray(translation_vector_1) if image_points_1.size == 0 or image_points_2.size == 0: return np.zeros((0, 3)) image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) relative_rotation_vector = relative_rotation_vector.reshape(3) relative_translation_vector = relative_translation_vector.reshape(3) rotation_vector_1 = rotation_vector_1.reshape(3) translation_vector_1 = translation_vector_1.reshape(3) rotation_vector_2, translation_vector_2 = cv.composeRT( rotation_vector_1, translation_vector_1, relative_rotation_vector, relative_translation_vector * distance_between_cameras)[:2] object_points = reconstruct_object_points_from_camera_poses( image_points_1, image_points_2, camera_matrix, rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2) return object_points def reconstruct_object_points_from_image_points( image_points_1, image_points_2, camera_matrix, rotation_vector_1=np.array([[0.0], [0.0], [0.0]]), translation_vector_1=np.array([[0.0], [0.0], [0.0]]), distance_between_cameras=1.0): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) rotation_vector_1 = np.asarray(rotation_vector_1) translation_vector_1 = np.asarray(translation_vector_1) if image_points_1.size == 0 or image_points_2.size == 0: return np.zeros((0, 3)) image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) rotation_vector_1 = rotation_vector_1.reshape(3) translation_vector_1 = translation_vector_1.reshape(3) rotation_vector_2, translation_vector_2 = estimate_camera_pose_from_image_points( image_points_1, image_points_2, camera_matrix, rotation_vector_1, translation_vector_1, distance_between_cameras) object_points = reconstruct_object_points_from_camera_poses( image_points_1, image_points_2, camera_matrix, rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2) return object_points def estimate_camera_pose_from_plane_object_points( input_object_points, height, origin_index, x_axis_index, y_reference_point, y_reference_point_sign, distance_calibration_indices, calibration_distance): input_object_points = np.asarray(input_object_points) if input_object_points.size == 0: raise ValueError('Obect point array appears to be empty') input_object_points = input_object_points.reshape((-1, 3)) scale_factor = np.divide( calibration_distance, np.linalg.norm( np.subtract( input_object_points[distance_calibration_indices[0]], input_object_points[distance_calibration_indices[1]]))) object_points_1 = np.multiply( input_object_points, scale_factor) def objective_function(parameters): rotation_x = parameters[0] rotation_y = parameters[1] translation_z = parameters[2] object_points_transformed = transform_object_points( object_points_1, np.array([rotation_x, rotation_y, 0.0]), np.array([0.0, 0.0, translation_z])) return np.sum(np.square(object_points_transformed[:, 2] - height)) optimization_solution = scipy.optimize.minimize( objective_function, np.array([0.0, 0.0, 0.0])) rotation_x_a = optimization_solution['x'][0] rotation_y_a = optimization_solution['x'][1] translation_z_a = optimization_solution['x'][2] rotation_x_rotation_y_a_norm = np.linalg.norm([rotation_x_a, rotation_y_a]) rotation_x_b = rotation_x_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm) rotation_y_b = rotation_y_a * ((rotation_x_rotation_y_a_norm + np.pi) / rotation_x_rotation_y_a_norm) translation_z_b = - translation_z_a rotation_vector_2_a = np.array([rotation_x_a, rotation_y_a, 0.0]) translation_vector_2_a = np.array([0.0, 0.0, translation_z_a]) object_points_2_a = transform_object_points( object_points_1, rotation_vector_2_a, translation_vector_2_a) rotation_vector_2_b = np.array([rotation_x_b, rotation_y_b, 0.0]) translation_vector_2_b = np.array([0.0, 0.0, translation_z_b]) object_points_2_b = transform_object_points( object_points_1, rotation_vector_2_b, translation_vector_2_b) sign_a = np.sign( np.cross( np.subtract( object_points_2_a[x_axis_index], object_points_2_a[origin_index]), np.subtract( object_points_2_a[y_reference_point], object_points_2_a[origin_index]))[2]) sign_b = np.sign( np.cross( np.subtract( object_points_2_b[x_axis_index], object_points_2_b[origin_index]), np.subtract( object_points_2_b[y_reference_point], object_points_2_b[origin_index]))[2]) if sign_a == y_reference_point_sign: rotation_vector_2 = rotation_vector_2_a translation_vector_2 = translation_vector_2_a object_points_2 = object_points_2_a else: rotation_vector_2 = rotation_vector_2_b translation_vector_2 = translation_vector_2_b object_points_2 = object_points_2_b xy_shift = - object_points_2[origin_index, :2] rotation_vector_3 = np.array([0.0, 0.0, 0.0]) translation_vector_3 = np.array([xy_shift[0], xy_shift[1], 0.0]) object_points_3 = transform_object_points( object_points_2, rotation_vector_3, translation_vector_3) final_z_rotation = - reconstruct_z_rotation( object_points_3[x_axis_index, 0], object_points_3[x_axis_index, 1]) rotation_vector_4 = np.array([0.0, 0.0, final_z_rotation]) translation_vector_4 = np.array([0.0, 0.0, 0.0]) object_points_4 = transform_object_points( object_points_3, rotation_vector_4, translation_vector_4) rotation_vector_2_3, translation_vector_2_3 = compose_transformations( rotation_vector_2, translation_vector_2, rotation_vector_3, translation_vector_3) rotation_vector_2_3_4, translation_vector_2_3_4 = compose_transformations( rotation_vector_2_3, translation_vector_2_3, rotation_vector_4, translation_vector_4) camera_rotation_vector, camera_translation_vector = invert_transformation( rotation_vector_2_3_4, translation_vector_2_3_4) return camera_rotation_vector, camera_translation_vector, scale_factor, object_points_4 def estimate_camera_poses_from_plane_image_points( image_points_1, image_points_2, camera_matrix, height, origin_index, x_axis_index, y_reference_point, y_reference_point_sign, distance_calibration_indices, calibration_distance): image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) if image_points_1.size == 0 or image_points_2.size == 0: raise ValueError('One or both sets of image points appear to be empty') image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) relative_rotation_vector, relative_translation_vector = estimate_camera_pose_from_image_points( image_points_1, image_points_2, camera_matrix) input_object_points = reconstruct_object_points_from_image_points( image_points_1, image_points_2, camera_matrix) rotation_vector_1, translation_vector_1, scale_factor = estimate_camera_pose_from_plane_object_points( input_object_points, height, origin_index, x_axis_index, y_reference_point, y_reference_point_sign, distance_calibration_indices, calibration_distance)[:3] rotation_vector_2, translation_vector_2 = compose_transformations( rotation_vector_1, translation_vector_1, relative_rotation_vector, relative_translation_vector * scale_factor) return rotation_vector_1, translation_vector_1, rotation_vector_2, translation_vector_2
image_points_1 = np.asarray(image_points_1) image_points_2 = np.asarray(image_points_2) camera_matrix = np.asarray(camera_matrix) rotation_vector_1 = np.asarray(rotation_vector_1) translation_vector_1 = np.asarray(translation_vector_1) if image_points_1.size == 0 or image_points_2.size == 0: raise ValueError('One or both sets of image points appear to be empty') image_points_1 = image_points_1.reshape((-1, 2)) image_points_2 = image_points_2.reshape((-1, 2)) if image_points_1.shape != image_points_2.shape: raise ValueError('Sets of image points do not appear to be the same shape') camera_matrix = camera_matrix.reshape((3, 3)) rotation_vector_1 = rotation_vector_1.reshape(3) translation_vector_1 = translation_vector_1.reshape(3) essential_matrix, mask = cv.findEssentialMat( image_points_1, image_points_2, camera_matrix) relative_rotation_matrix, relative_translation_vector = cv.recoverPose( essential_matrix, image_points_1, image_points_2, camera_matrix, mask=mask)[1:3] relative_rotation_vector = cv.Rodrigues(relative_rotation_matrix)[0] relative_translation_vector = relative_translation_vector * distance_between_cameras rotation_vector_2, translation_vector_2 = compose_transformations( rotation_vector_1, translation_vector_1, relative_rotation_vector, relative_translation_vector) rotation_vector_2 = np.squeeze(rotation_vector_2) translation_vector_2 = np.squeeze(translation_vector_2) return rotation_vector_2, translation_vector_2
model.go
package polaris type Report struct { PolarisOutputVersion string `json:"PolarisOutputVersion"` SourceType string `json:"SourceType"` ClusterInfo *ClusterInfo `json:"ClusterInfo"` Results []Result `json:"Results"` } type ClusterInfo struct { Version string `json:"Version"` Nodes int `json:"Nodes"` Pods int `json:"Pods"` Namespaces int `json:"Namespaces"` Controllers int `json:"Controllers"` }
type Result struct { Name string `json:"Name"` Namespace string `json:"Namespace"` Kind string `json:"Kind"` PodResult PodResult `json:"PodResult"` } type PodResult struct { Name string `json:"Name"` Results map[string]Check `json:"Results"` ContainerResults []ContainerResult `json:"ContainerResults"` } type ContainerResult struct { Name string `json:"Name"` Results map[string]Check `json:"Results"` } type Check struct { ID string `json:"ID"` Message string `json:"Message"` Success bool `json:"Success"` Severity string `json:"Severity"` Category string `json:"Category"` }
thread_state.rs
use crate::{ZxError, ZxResult}; use kernel_hal::UserContext; use numeric_enum_macro::numeric_enum; numeric_enum! { #[repr(u32)] #[derive(Debug, Copy, Clone)] pub enum ThreadStateKind { General = 0, FloatPoint = 1, Vector = 2, Debug = 4, SingleStep = 5, FS = 6, GS = 7, } } pub trait ContextExt { fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize>; fn write_state(&mut self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult; } impl ContextExt for UserContext { fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> { match kind { ThreadStateKind::General => buf.write_struct(&self.general), #[cfg(target_arch = "x86_64")] ThreadStateKind::FS => buf.write_struct(&self.general.fsbase), #[cfg(target_arch = "x86_64")] ThreadStateKind::GS => buf.write_struct(&self.general.gsbase), _ => unimplemented!(), } } fn write_state(&mut self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult { match kind { ThreadStateKind::General => self.general = buf.read_struct()?, #[cfg(target_arch = "x86_64")] ThreadStateKind::FS => self.general.fsbase = buf.read_struct()?, #[cfg(target_arch = "x86_64")] ThreadStateKind::GS => self.general.gsbase = buf.read_struct()?, _ => unimplemented!(), } Ok(()) } } trait BufExt { fn read_struct<T>(&self) -> ZxResult<T>; fn write_struct<T: Copy>(&mut self, value: &T) -> ZxResult<usize>; } #[allow(unsafe_code)] impl BufExt for [u8] { fn read_struct<T>(&self) -> ZxResult<T> { if self.len() < core::mem::size_of::<T>() { return Err(ZxError::BUFFER_TOO_SMALL); } Ok(unsafe { (self.as_ptr() as *const T).read() }) } fn write_struct<T: Copy>(&mut self, value: &T) -> ZxResult<usize> { if self.len() < core::mem::size_of::<T>()
unsafe { *(self.as_mut_ptr() as *mut T) = *value; } Ok(core::mem::size_of::<T>()) } }
{ return Err(ZxError::BUFFER_TOO_SMALL); }
test_auth.py
import json from unittest import TestCase from app import create_app, db as test_db from app.config import TestConfig from app.models import User, Verification test_app = create_app(TestConfig) class TestAuth(TestCase): def
(self): context = test_app.app_context() context.push() self.db = test_db self.db.create_all() self.app = test_app.test_client() def tearDown(self): self.db.drop_all() def test_register(self): user_count = User.query.count() verification_count = Verification.query.count() response = self.app.post("/api/auth/register", data=json.dumps(dict( email='[email protected]', username='test', password='test123' )), content_type='application/json') self.assertEqual(response.status_code, 200) body = json.loads(str(response.data, "utf8")) self.assertDictEqual(body, {"message": "Successfully registered. Please verify your account."}) self.assertEqual(User.query.count(), user_count + 1) self.assertEqual(Verification.query.count(), verification_count + 1) def test_verify(self): # first register a user, we can just use the registration test above self.test_register() verification_count = Verification.query.count() user = User.query.filter_by(username='test').first() verification = Verification.query.filter_by(user_id=user.id).first() response = self.app.post("/api/auth/verify?key=badkey") self.assertEqual(response.status_code, 400) body = json.loads(str(response.data, "utf8")) self.assertDictEqual(body, {"message": "Invalid verification."}) response = self.app.post(f"/api/auth/verify?key={verification.key}") self.assertEqual(response.status_code, 200) body = json.loads(str(response.data, "utf8")) self.assertDictEqual(body, {"message": "You've successfully verified your account."}) # a successful verification deletes its row from the verification table self.assertEqual(Verification.query.count(), verification_count - 1) def test_login(self): # first register a user and verify them, we can just use the verification test above self.test_verify() response = self.app.post("/api/auth/login", data=json.dumps(dict( username='test', password='incorrectpassword' )), content_type='application/json') self.assertEqual(response.status_code, 400) body = json.loads(str(response.data, "utf8")) self.assertEqual(body, {"message": "Incorrect username and/or password."}) response = self.app.post("/api/auth/login", data=json.dumps(dict( username='test', password='test123' )), content_type='application/json') self.assertEqual(response.status_code, 200) body = json.loads(str(response.data, "utf8")) self.assertTrue('token' in body)
setUp
dimse.go
package dimse //go:generate ./generate_dimse_messages.py //go:generate stringer -type StatusCode // Implements message types defined in P3.7. // // http://dicom.nema.org/medical/dicom/current/output/pdf/part07.pdf import ( "encoding/binary" "fmt" "sort" dicom "github.com/grailbio/go-dicom" "github.com/grailbio/go-dicom/dicomio" "github.com/grailbio/go-dicom/dicomlog" "github.com/grailbio/go-dicom/dicomtag" "github.com/NikolaiKovalenko/go-netdicom/pdu" ) // Message defines the common interface for all DIMSE message types. type Message interface { fmt.Stringer // Print human-readable description for debugging. Encode(*dicomio.Encoder) // GetMessageID extracts the message ID field. GetMessageID() MessageID // CommandField returns the command field value of this message. CommandField() int // GetStatus returns the the response status value. It is nil for request message // types, and non-nil for response message types. GetStatus() *Status // HasData is true if we expect P_DATA_TF packets after the command packets. HasData() bool } // Status represents a result of a DIMSE call. P3.7 C defines list of status // codes and error payloads. type Status struct { // Status==StatusSuccess on success. A non-zero value on error. Status StatusCode // Optional error payloads. ErrorComment string // Encoded as (0000,0902) } // Helper class for extracting values from a list of DicomElement. type messageDecoder struct { elems []*dicom.Element parsed []bool // true if this element was parsed into a message field. err error } type isOptionalElement int const ( requiredElement isOptionalElement = iota optionalElement ) func (d *messageDecoder) setError(err error) { if d.err == nil { d.err = err } } // Find an element with the given tag. If optional==OptionalElement, returns nil // if not found. If optional==RequiredElement, sets d.err and return nil if not found. func (d *messageDecoder) findElement(tag dicomtag.Tag, optional isOptionalElement) *dicom.Element { for i, elem := range d.elems { if elem.Tag == tag { dicomlog.Vprintf(3, "dimse.findElement: Return %v for %s", elem, tag.String()) d.parsed[i] = true return elem } } if optional == requiredElement { d.setError(fmt.Errorf("dimse.findElement: Element %s not found during DIMSE decoding", dicomtag.DebugString(tag))) } return nil } // Return the list of elements that did not match any of the prior getXXX calls. func (d *messageDecoder) unparsedElements() (unparsed []*dicom.Element) { for i, parsed := range d.parsed { if !parsed { unparsed = append(unparsed, d.elems[i]) } } return unparsed } func (d *messageDecoder) getStatus() (s Status) { s.Status = StatusCode(d.getUInt16(dicomtag.Status, requiredElement)) s.ErrorComment = d.getString(dicomtag.ErrorComment, optionalElement) return s } // Find an element with "tag", and extract a string value from it. Errors are reported in d.err. func (d *messageDecoder) getString(tag dicomtag.Tag, optional isOptionalElement) string { e := d.findElement(tag, optional) if e == nil { return "" } v, err := e.GetString() if err != nil { d.setError(err) } return v } // Find an element with "tag", and extract a uint16 from it. Errors are reported in d.err. func (d *messageDecoder) getUInt16(tag dicomtag.Tag, optional isOptionalElement) uint16 { e := d.findElement(tag, optional) if e == nil
v, err := e.GetUInt16() if err != nil { d.setError(err) } return v } // Encode the given elements. The elements are sorted in ascending tag order. func encodeElements(e *dicomio.Encoder, elems []*dicom.Element) { sort.Slice(elems, func(i, j int) bool { return elems[i].Tag.Compare(elems[j].Tag) < 0 }) for _, elem := range elems { dicom.WriteElement(e, elem) } } // Create a list of elements that represent the dimse status. The list contains // multiple elements for non-ok status. func newStatusElements(s Status) []*dicom.Element { elems := []*dicom.Element{newElement(dicomtag.Status, uint16(s.Status))} if s.ErrorComment != "" { elems = append(elems, newElement(dicomtag.ErrorComment, s.ErrorComment)) } return elems } // Create a new element. The value type must match the tag's. func newElement(tag dicomtag.Tag, v interface{}) *dicom.Element { return &dicom.Element{ Tag: tag, VR: "", // autodetect UndefinedLength: false, Value: []interface{}{v}, } } // CommandDataSetTypeNull indicates that the DIMSE message has no data payload, // when set in dicom.TagCommandDataSetType. Any other value indicates the // existence of a payload. const CommandDataSetTypeNull uint16 = 0x101 // CommandDataSetTypeNonNull indicates that the DIMSE message has a data // payload, when set in dicom.TagCommandDataSetType. const CommandDataSetTypeNonNull uint16 = 1 // Success is an OK status for a call. var Success = Status{Status: StatusSuccess} // StatusCode represents a DIMSE service response code, as defined in P3.7 type StatusCode uint16 const ( StatusSuccess StatusCode = 0 StatusCancel StatusCode = 0xFE00 StatusSOPClassNotSupported StatusCode = 0x0112 StatusInvalidArgumentValue StatusCode = 0x0115 StatusInvalidAttributeValue StatusCode = 0x0106 StatusInvalidObjectInstance StatusCode = 0x0117 StatusUnrecognizedOperation StatusCode = 0x0211 StatusNotAuthorized StatusCode = 0x0124 StatusPending StatusCode = 0xff00 // C-STORE-specific status codes. P3.4 GG4-1 CStoreOutOfResources StatusCode = 0xa700 CStoreCannotUnderstand StatusCode = 0xc000 CStoreDataSetDoesNotMatchSOPClass StatusCode = 0xa900 // C-FIND-specific status codes. CFindUnableToProcess StatusCode = 0xc000 // C-MOVE/C-GET-specific status codes. CMoveOutOfResourcesUnableToCalculateNumberOfMatches StatusCode = 0xa701 CMoveOutOfResourcesUnableToPerformSubOperations StatusCode = 0xa702 CMoveMoveDestinationUnknown StatusCode = 0xa801 CMoveDataSetDoesNotMatchSOPClass StatusCode = 0xa900 // Warning codes. StatusAttributeValueOutOfRange StatusCode = 0x0116 StatusAttributeListError StatusCode = 0x0107 ) // ReadMessage constructs a typed dimse.Message object, given a set of // dicom.Elements, func ReadMessage(d *dicomio.Decoder) Message { // A DIMSE message is a sequence of Elements, encoded in implicit // LE. // // TODO(saito) make sure that's the case. Where the ref? var elems []*dicom.Element d.PushTransferSyntax(binary.LittleEndian, dicomio.ImplicitVR) defer d.PopTransferSyntax() for !d.EOF() { elem := dicom.ReadElement(d, dicom.ReadOptions{}) if d.Error() != nil { break } elems = append(elems, elem) } // Convert elems[] into a golang struct. dd := messageDecoder{ elems: elems, parsed: make([]bool, len(elems)), err: nil, } commandField := dd.getUInt16(dicomtag.CommandField, requiredElement) if dd.err != nil { d.SetError(dd.err) return nil } v := decodeMessageForType(&dd, commandField) if dd.err != nil { d.SetError(dd.err) return nil } return v } // EncodeMessage serializes the given message. Errors are reported through e.Error() func EncodeMessage(e *dicomio.Encoder, v Message) { // DIMSE messages are always encoded Implicit+LE. See P3.7 6.3.1. subEncoder := dicomio.NewBytesEncoder(binary.LittleEndian, dicomio.ImplicitVR) v.Encode(subEncoder) if err := subEncoder.Error(); err != nil { e.SetError(err) return } bytes := subEncoder.Bytes() e.PushTransferSyntax(binary.LittleEndian, dicomio.ImplicitVR) defer e.PopTransferSyntax() dicom.WriteElement(e, newElement(dicomtag.CommandGroupLength, uint32(len(bytes)))) e.WriteBytes(bytes) } // CommandAssembler is a helper that assembles a DIMSE command message and data // payload from a sequence of P_DATA_TF PDUs. type CommandAssembler struct { contextID byte commandBytes []byte command Message dataBytes []byte readAllCommand bool readAllData bool } // AddDataPDU is to be called for each P_DATA_TF PDU received from the // network. If the fragment is marked as the last one, AddDataPDU returns // <SOPUID, TransferSyntaxUID, payload, nil>. If it needs more fragments, it // returns <"", "", nil, nil>. On error, it returns a non-nil error. func (a *CommandAssembler) AddDataPDU(pdu *pdu.PDataTf) (byte, Message, []byte, error) { for _, item := range pdu.Items { if a.contextID == 0 { a.contextID = item.ContextID } else if a.contextID != item.ContextID { return 0, nil, nil, fmt.Errorf("Mixed context: %d %d", a.contextID, item.ContextID) } if item.Command { a.commandBytes = append(a.commandBytes, item.Value...) if item.Last { if a.readAllCommand { return 0, nil, nil, fmt.Errorf("P_DATA_TF: found >1 command chunks with the Last bit set") } a.readAllCommand = true } } else { a.dataBytes = append(a.dataBytes, item.Value...) if item.Last { if a.readAllData { return 0, nil, nil, fmt.Errorf("P_DATA_TF: found >1 data chunks with the Last bit set") } a.readAllData = true } } } if !a.readAllCommand { return 0, nil, nil, nil } if a.command == nil { d := dicomio.NewBytesDecoder(a.commandBytes, nil, dicomio.UnknownVR) a.command = ReadMessage(d) if err := d.Finish(); err != nil { return 0, nil, nil, err } } if a.command.HasData() && !a.readAllData { return 0, nil, nil, nil } contextID := a.contextID command := a.command dataBytes := a.dataBytes *a = CommandAssembler{} return contextID, command, dataBytes, nil // TODO(saito) Verify that there's no unread items after the last command&data. } type MessageID = uint16
{ return 0 }
cmd.go
package cmd import ( "os" "github.com/xxjwxc/public/mylog" "github.com/xxjwxc/gormt/data/view/gtools" "github.com/xxjwxc/gormt/data/config" "github.com/spf13/cobra" "gopkg.in/go-playground/validator.v9" ) var mysqlInfo config.MysqlDbInfo var outDir string var singularTable bool var foreignKey bool var funcKey bool var ui bool var urlTag string var rootCmd = &cobra.Command{ Use: "main", Short: "gorm mysql reflect tools", Long: `base on gorm tools for mysql database to golang struct`, Run: func(cmd *cobra.Command, args []string) { gtools.Execute() // Start doing things.开始做事情 }, } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { if err := rootCmd.Execute(); err != nil { os.Exit(1) } } func init() { cobra.OnInitialize(initConfig) rootCmd.PersistentFlags().StringVarP(&mysqlInfo.Host, "host", "H", "", "数据库地址.(注意-H为大写)") rootCmd.MarkFlagRequired("host") rootCmd.PersistentFlags().StringVarP(&mysqlInfo.Username, "user", "u", "", "用户名.") rootCmd.MarkFlagRequired("user") rootCmd.PersistentFlags().StringVarP(&mysqlInfo.Password, "password", "p", "", "密码.") rootCmd.MarkFlagRequired("password") rootCmd.PersistentFlags().StringVarP(&mysqlInfo.Database, "database", "d", "", "数据库名") rootCmd.MarkFlagRequired("database") rootCmd.PersistentFlags().StringVarP(&outDir, "outdir", "o", "", "输出目录") rootCmd.MarkFlagRequired("outdir") rootCmd.PersistentFlags().BoolVarP(&singularTable, "singular", "s", true, "是否禁用表名复数") rootCmd.MarkFlagRequired("singular") rootCmd.PersistentFlags().BoolVarP(&foreignKey, "foreign", "f", false, "是否导出外键关联") rootCmd.MarkFlagRequired("foreign key") rootCmd.PersistentFlags().BoolVarP(&funcKey, "fun", "F", false, "是否导出函数") rootCmd.MarkFlagRequired("func export") rootCmd.PersistentFlags().BoolVarP(&ui, "gui", "g", false, "是否ui显示模式") rootCmd.MarkFlagRequired("show on gui") rootCmd.PersistentFlags().StringVarP(&urlTag, "url", "l", "", "url标签(json,url)") rootCmd.MarkFlagRequired("url tag") rootCmd.Flags().IntVar(&mysqlInfo.Port, "port", 3306, "端口号") } // initConfig reads in config file and ENV variables if set. func initConfig() { MergeMysqlDbInfo() validate := validator.New() err := validate.Struct(config.GetMysqlDbInfo()) if err != nil { mylog.Info("Can't read cmd: using (-h, --help) to get more info") mylog.Error(err) os.Exit(1) } else { mylog.Info("using database info:") mylog.JSON(config.GetMysqlDbInfo()) } } // MergeMysqlDbInfo merge parm func MergeMysqlDbInfo() { var tmp = config.GetMysqlDbInfo() if len(mysqlInfo.Database) > 0 { tmp.Database = mysqlInfo.Database } if len(mysqlI
nfo.Host) > 0 { tmp.Host = mysqlInfo.Host } if len(mysqlInfo.Password) > 0 { tmp.Password = mysqlInfo.Password } if mysqlInfo.Port != 3306 { tmp.Port = mysqlInfo.Port } if len(mysqlInfo.Username) > 0 { tmp.Username = mysqlInfo.Username } if len(urlTag) > 0 { config.SetURLTag(urlTag) } config.SetMysqlDbInfo(&tmp) if len(outDir) > 0 { config.SetOutDir(outDir) } if singularTable { config.SetSingularTable(singularTable) } if foreignKey { config.SetForeignKey(foreignKey) } if funcKey { config.SetIsOutFunc(funcKey) } if ui { config.SetIsGUI(ui) } }
one-to-one-room-service.js
'use strict'; var env = require('gitter-web-env'); var stats = env.stats; var userService = require('gitter-web-users'); var persistence = require('gitter-web-persistence'); var userDefaultFlagsService = require('./user-default-flags-service'); var Troupe = persistence.Troupe; var assert = require('assert'); var mongoUtils = require('gitter-web-persistence-utils/lib/mongo-utils'); var Promise = require('bluebird'); var ObjectID = require('mongodb').ObjectID; var mongooseUtils = require('gitter-web-persistence-utils/lib/mongoose-utils'); var StatusError = require('statuserror'); var roomMembershipService = require('./room-membership-service'); var policyFactory = require('gitter-web-permissions/lib/policy-factory'); var debug = require('debug')('gitter:app:one-to-one-room-service'); function
(userId1, userId2) { // Need to use $elemMatch due to a regression in Mongo 2.6, see https://jira.mongodb.org/browse/SERVER-13843 return { $and: [ { oneToOne: true }, { oneToOneUsers: { $elemMatch: { userId: userId1 } } }, { oneToOneUsers: { $elemMatch: { userId: userId2 } } } ] }; } function findOneToOneRoom(fromUserId, toUserId) { assert(fromUserId, 'Need to provide fromUserId'); assert(toUserId, 'Need to provide toUserId'); fromUserId = mongoUtils.asObjectID(fromUserId); toUserId = mongoUtils.asObjectID(toUserId); if (mongoUtils.objectIDsEqual(fromUserId, toUserId)) throw new StatusError(417); // You cannot be in a troupe with yourself. var query = getOneToOneRoomQuery(fromUserId, toUserId); /* Find the existing one-to-one.... */ return persistence.Troupe.findOne(query).exec(); } function findOneToOneRoomsForUserId(userId) { assert(userId, 'userId required'); return persistence.Troupe.find({ oneToOne: true, oneToOneUsers: { $elemMatch: { userId: mongoUtils.asObjectID(userId) } } }) .lean() .exec(); } /** * Internal method. * Returns [troupe, existing] */ function upsertNewOneToOneRoom(userId1, userId2) { var query = getOneToOneRoomQuery(userId1, userId2); // Second attempt is an upsert var insertFields = { oneToOne: true, status: 'ACTIVE', githubType: 'ONETOONE', groupId: null, // One-to-ones are never in a group oneToOneUsers: [ { _id: new ObjectID(), userId: userId1 }, { _id: new ObjectID(), userId: userId2 } ], userCount: 0, sd: { type: 'ONE_TO_ONE', public: false // One-to-ones are always private } }; debug('Attempting upsert for new one-to-one room'); // Upsert returns [model, existing] already return mongooseUtils.upsert(Troupe, query, { $setOnInsert: insertFields }); } function addOneToOneMemberToRoom(troupeId, userId) { // Deal with https://github.com/troupe/gitter-webapp/issues/1227 return userDefaultFlagsService.getDefaultFlagsOneToOneForUserId(userId).then(function(flags) { return roomMembershipService.addRoomMember(troupeId, userId, flags, null); }); } /** * Ensure that the current user is in the one-to-one room */ function ensureUsersInRoom(troupeId, fromUserId, toUserId) { return roomMembershipService .findMembershipForUsersInRoom(troupeId, [fromUserId, toUserId]) .then(function(userIds) { // Both members are in the room if (userIds.length === 2) return; var fromUserInRoom = userIds.some(function(userId) { return mongoUtils.objectIDsEqual(userId, fromUserId); }); var toUserInRoom = userIds.some(function(userId) { return mongoUtils.objectIDsEqual(userId, toUserId); }); debug('Re-adding users to room: fromUser=%s, toUser=%s', fromUserInRoom, toUserInRoom); return Promise.all([ !fromUserInRoom && addOneToOneMemberToRoom(troupeId, fromUserId), !toUserInRoom && addOneToOneMemberToRoom(troupeId, toUserId) ]); }); } /** * Ensure that both users are in the one-to-one room */ function addOneToOneUsersToNewRoom(troupeId, fromUserId, toUserId) { return userDefaultFlagsService .getDefaultOneToOneFlagsForUserIds([fromUserId, toUserId]) .then(function(userFlags) { var fromUserFlags = userFlags[fromUserId]; var toUserFlags = userFlags[toUserId]; if (!fromUserFlags) throw new StatusError(404); if (!toUserFlags) throw new StatusError(404); return Promise.join( roomMembershipService.addRoomMember(troupeId, fromUserId, fromUserFlags, null), roomMembershipService.addRoomMember(troupeId, toUserId, toUserFlags, null) ); }); } /** * Find a one-to-one troupe, otherwise create it * * @return {[ troupe, other-user ]} */ function findOrCreateOneToOneRoom(fromUser, toUserId) { assert(fromUser, 'Need to provide fromUser'); assert(fromUser._id, 'fromUser invalid'); assert(toUserId, 'Need to provide toUserId'); var fromUserId = fromUser._id; toUserId = mongoUtils.asObjectID(toUserId); return userService .findById(toUserId) .bind({ toUser: undefined, troupe: undefined }) .then(function(toUser) { if (!toUser) throw new StatusError(404, 'User does not exist'); this.toUser = toUser; return findOneToOneRoom(fromUserId, toUserId); }) .then(function(existingRoom) { if (existingRoom) { return [existingRoom, true]; } var toUser = this.toUser; // Do not allow new rooms to be created for REMOVED users if (toUser.state === 'REMOVED') { var err = new StatusError(404); err.githubType = 'ONETOONE'; err.uri = toUser.username; throw err; } // TODO: in future we need to add request one-to-one here... return policyFactory .createPolicyForOneToOne(fromUser, toUser) .then(function(policy) { return policy.canJoin(); }) .then(function(canJoin) { if (!canJoin) { var err = new StatusError(404); err.githubType = 'ONETOONE'; err.uri = toUser.username; throw err; } return upsertNewOneToOneRoom(fromUserId, toUserId); }); }) .spread(function(troupe, isAlreadyExisting) { debug('findOrCreate isAlreadyExisting=%s', isAlreadyExisting); var troupeId = troupe._id; this.troupe = troupe; if (isAlreadyExisting) { return ensureUsersInRoom(troupeId, fromUserId, toUserId); } else { stats.event('new_troupe', { troupeId: troupeId, oneToOne: true, userId: fromUserId }); return addOneToOneUsersToNewRoom(troupeId, fromUserId, toUserId); } }) .then(function() { return [this.troupe, this.toUser]; }); } /* Exports */ module.exports = { findOrCreateOneToOneRoom: Promise.method(findOrCreateOneToOneRoom), findOneToOneRoom: Promise.method(findOneToOneRoom), findOneToOneRoomsForUserId: findOneToOneRoomsForUserId };
getOneToOneRoomQuery
plugin_env.rs
/* * Docker Engine API * * The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { \"message\": \"page not found\" } ``` # Versioning The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. For Docker Engine 17.10, the API version is 1.33. To lock to this version, you prefix the URL with `/v1.33`. For example, calling `/info` is the same as calling `/v1.33/info`. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated will be removed in a future version of Docker. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. This documentation is for version 1.34 of the API. Use this table to find documentation for previous versions of the API: Docker version | API version | Changes ----------------|-------------|--------- 17.10.x | [1.33](https://docs.docker.com/engine/api/v1.33/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-33-api-changes) 17.09.x | [1.32](https://docs.docker.com/engine/api/v1.32/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-32-api-changes) 17.07.x | [1.31](https://docs.docker.com/engine/api/v1.31/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-31-api-changes) 17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes) 17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes) 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes) 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes) 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes) 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: ``` { \"username\": \"string\", \"password\": \"string\", \"email\": \"string\", \"serveraddress\": \"string\" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { \"identitytoken\": \"9cbaf023786cd7...\" } ``` * * OpenAPI spec version: 1.34 * * Generated by: https://github.com/swagger-api/swagger-codegen.git */ use serde_derive::{Deserialize, Serialize}; #[allow(unused_imports)] use serde_json::Value; #[derive(Debug, Serialize, Deserialize)] pub struct PluginEnv { #[serde(rename = "Name")] name: String, #[serde(rename = "Description")] description: String, #[serde(rename = "Settable")] settable: Vec<String>, #[serde(rename = "Value")] value: String, } impl PluginEnv { pub fn new(name: String, description: String, settable: Vec<String>, value: String) -> Self { PluginEnv { name: name, description: description, settable: settable, value: value, } } pub fn set_name(&mut self, name: String) { self.name = name; } pub fn with_name(mut self, name: String) -> Self { self.name = name; self } pub fn name(&self) -> &String { &self.name } pub fn set_description(&mut self, description: String) { self.description = description; } pub fn with_description(mut self, description: String) -> Self { self.description = description; self }
&self.description } pub fn set_settable(&mut self, settable: Vec<String>) { self.settable = settable; } pub fn with_settable(mut self, settable: Vec<String>) -> Self { self.settable = settable; self } pub fn settable(&self) -> &[String] { &self.settable } pub fn set_value(&mut self, value: String) { self.value = value; } pub fn with_value(mut self, value: String) -> Self { self.value = value; self } pub fn value(&self) -> &String { &self.value } }
pub fn description(&self) -> &String {
legacy_stapel_stage_builder.go
package stage_builder import ( "context" "github.com/werf/werf/pkg/container_backend" ) type LegacyStapelStageBuilderInterface interface { Container() container_backend.LegacyContainer BuilderContainer() container_backend.LegacyBuilderContainer Build(ctx context.Context, opts container_backend.BuildOptions) error } type LegacyStapelStageBuilder struct { ContainerBackend container_backend.ContainerBackend Image container_backend.LegacyImageInterface } func
(containerBackend container_backend.ContainerBackend, image container_backend.LegacyImageInterface) *LegacyStapelStageBuilder { return &LegacyStapelStageBuilder{ ContainerBackend: containerBackend, Image: image, } } func (builder *LegacyStapelStageBuilder) Container() container_backend.LegacyContainer { return builder.Image.Container() } func (builder *LegacyStapelStageBuilder) BuilderContainer() container_backend.LegacyBuilderContainer { return builder.Image.BuilderContainer() } func (builder *LegacyStapelStageBuilder) Build(ctx context.Context, opts container_backend.BuildOptions) error { return builder.Image.Build(ctx, opts) }
NewLegacyStapelStageBuilder
test_dt_utils_get_row.py
# (c) Copyright IBM Corp. 2019. All Rights Reserved. # -*- coding: utf-8 -*- """Tests using pytest_resilient_circuits""" from __future__ import print_function import pytest from resilient_circuits.util import get_config_data, get_function_definition from resilient_circuits import SubmitTestFunction, FunctionResult from tests.test_helper import * PACKAGE_NAME = "fn_datatable_utils" FUNCTION_NAME = "dt_utils_get_row" # Read the default configuration-data section from the package config_data = get_config_data(PACKAGE_NAME) # Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance) resilient_mock = DTResilientMock def call_dt_utils_get_row_function(circuits, function_params, timeout=10): # Fire a message to the function evt = SubmitTestFunction("dt_utils_get_row", function_params) circuits.manager.fire(evt) event = circuits.watcher.wait("dt_utils_get_row_result", parent=evt, timeout=timeout) assert event assert isinstance(event.kwargs["result"], FunctionResult) pytest.wait_for(event, "complete", True) return event.kwargs["result"].value class
: """ Tests for the dt_utils_get_row function""" def test_function_definition(self): """ Test that the package provides customization_data that defines the function """ func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME) assert func is not None inputs = { "incident_id": 1001, "dt_utils_datatable_api_name": "mock_data_table", "dt_utils_row_id": None, "dt_utils_search_column": "dt_col_email", "dt_utils_search_value": "[email protected]" } output = { "success": True, "inputs": inputs, "row": { u'id': 1, u'cells': { u'dt_col_email': { u'id': u'dt_col_email', u'row_id': 1, u'value': u'[email protected]' }, u'dt_col_id': { u'id': u'dt_col_id', u'row_id': 1, u'value': 3001 }, u'dt_col_name': {u'id': u'dt_col_name', u'row_id': 1, u'value': u'Joe Blogs' }, u'dt_col_status': {u'id': u'dt_col_status', u'row_id': 1, u'value': u'In Progress'} } } } @pytest.mark.parametrize("inputs, expected_results", [(inputs, output)]) def test_success(self, circuits_app, inputs, expected_results): """ Test calling with sample values for the parameters """ results = call_dt_utils_get_row_function(circuits_app, inputs) assert(expected_results == results)
TestDtUtilsGetRow
lib.rs
//! Compiler Wrapper from `LibAFL` use std::{convert::Into, path::Path, process::Command, string::String, vec::Vec}; pub mod clang; pub use clang::{ClangWrapper, LLVMPasses}; /// `LibAFL` CC Error Type #[derive(Debug)] pub enum Error { /// CC Wrapper called with invalid arguments InvalidArguments(String), /// Io error occurred Io(std::io::Error), /// Something else happened Unknown(String), } // TODO macOS /// extension for static libraries #[cfg(windows)] pub const LIB_EXT: &str = "lib"; /// extension for static libraries #[cfg(not(windows))] pub const LIB_EXT: &str = "a"; /// prefix for static libraries #[cfg(windows)] pub const LIB_PREFIX: &str = ""; /// prefix for static libraries #[cfg(not(windows))] pub const LIB_PREFIX: &str = "lib"; /// Wrap a compiler hijacking its arguments pub trait CompilerWrapper { /// Set the wrapper arguments parsing a command line set of arguments fn from_args<S>(&mut self, args: &[S]) -> Result<&'_ mut Self, Error> where S: AsRef<str>; /// Add a compiler argument fn add_arg<S>(&mut self, arg: S) -> &'_ mut Self where S: AsRef<str>; /// Add a compiler argument only when compiling fn add_cc_arg<S>(&mut self, arg: S) -> &'_ mut Self where S: AsRef<str>; /// Add a compiler argument only when linking fn add_link_arg<S>(&mut self, arg: S) -> &'_ mut Self where S: AsRef<str>; /// Add compiler arguments fn add_args<S>(&mut self, args: &[S]) -> &'_ mut Self where S: AsRef<str>, { for arg in args { self.add_arg(arg); } self } /// Add compiler arguments only when compiling fn add_cc_args<S>(&mut self, args: &[S]) -> &'_ mut Self where S: AsRef<str>, { for arg in args { self.add_cc_arg(arg); } self } /// Add compiler arguments only when linking fn add_link_args<S>(&mut self, args: &[S]) -> &'_ mut Self where S: AsRef<str>, { for arg in args { self.add_link_arg(arg);
} self } /// Link static C lib fn link_staticlib<S>(&mut self, dir: &Path, name: S) -> &'_ mut Self where S: AsRef<str>; /// Command to run the compiler fn command(&mut self) -> Result<Vec<String>, Error>; /// Get if in linking mode fn is_linking(&self) -> bool; /// Silences `libafl_cc` output fn silence(&mut self, value: bool) -> &'_ mut Self; /// Returns `true` if `silence` was called with `true` fn is_silent(&self) -> bool; /// Run the compiler fn run(&mut self) -> Result<Option<i32>, Error> { let args = self.command()?; if !self.is_silent() { dbg!(&args); } if args.is_empty() { return Err(Error::InvalidArguments( "The number of arguments cannot be 0".into(), )); } let status = match Command::new(&args[0]).args(&args[1..]).status() { Ok(s) => s, Err(e) => return Err(Error::Io(e)), }; if !self.is_silent() { dbg!(status); } Ok(status.code()) } }
suncc.py
"""SCons.Tool.suncc Tool-specific initialization for Sun Solaris (Forte) CC and cc. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/suncc.py 2013/03/03 09:48:35 garyo" import SCons.Util import cc
def generate(env): """ Add Builders and construction variables for Forte C and C++ compilers to an Environment. """ cc.generate(env) env['CXX'] = 'CC' env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC') env['SHOBJPREFIX'] = 'so_' env['SHOBJSUFFIX'] = '.o' def exists(env): return env.Detect('CC') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
index.ts
/**
*/
* Root export for package
handles.rs
//! Handles to an operator's input and output streams. //! //! These handles are used by the generic operator interfaces to allow user closures to interact as //! the operator would with its input and output streams. use std::rc::Rc; use std::cell::RefCell; use crate::progress::Timestamp; use crate::progress::ChangeBatch; use crate::progress::frontier::MutableAntichain; use crate::dataflow::channels::pullers::Counter as PullCounter; use crate::dataflow::channels::pushers::CounterCore as PushCounter; use crate::dataflow::channels::pushers::buffer::{BufferCore, Session}; use crate::dataflow::channels::BundleCore; use crate::communication::{Push, Pull, message::RefOrMut}; use crate::Container; use crate::logging::TimelyLogger as Logger; use crate::dataflow::operators::CapabilityRef; use crate::dataflow::operators::capability::CapabilityTrait; /// Handle to an operator's input stream. pub struct InputHandleCore<T: Timestamp, D: Container, P: Pull<BundleCore<T, D>>> { pull_counter: PullCounter<T, D, P>, internal: Rc<RefCell<Vec<Rc<RefCell<ChangeBatch<T>>>>>>, logging: Option<Logger>, } /// Handle to an operator's input stream, specialized to vectors. pub type InputHandle<T, D, P> = InputHandleCore<T, Vec<D>, P>; /// Handle to an operator's input stream and frontier. pub struct FrontieredInputHandleCore<'a, T: Timestamp, D: Container+'a, P: Pull<BundleCore<T, D>>+'a> { /// The underlying input handle. pub handle: &'a mut InputHandleCore<T, D, P>, /// The frontier as reported by timely progress tracking. pub frontier: &'a MutableAntichain<T>, } /// Handle to an operator's input stream and frontier, specialized to vectors. pub type FrontieredInputHandle<'a, T, D, P> = FrontieredInputHandleCore<'a, T, Vec<D>, P>; impl<'a, T: Timestamp, D: Container, P: Pull<BundleCore<T, D>>> InputHandleCore<T, D, P> { /// Reads the next input buffer (at some timestamp `t`) and a corresponding capability for `t`. /// The timestamp `t` of the input buffer can be retrieved by invoking `.time()` on the capability. /// Returns `None` when there's no more data available. #[inline] pub fn next(&mut self) -> Option<(CapabilityRef<T>, RefOrMut<D>)> { let internal = &self.internal; self.pull_counter.next().map(|bundle| { match bundle.as_ref_or_mut() { RefOrMut::Ref(bundle) => { (CapabilityRef::new(&bundle.time, internal.clone()), RefOrMut::Ref(&bundle.data)) }, RefOrMut::Mut(bundle) => { (CapabilityRef::new(&bundle.time, internal.clone()), RefOrMut::Mut(&mut bundle.data)) }, } }) } /// Repeatedly calls `logic` till exhaustion of the available input data. /// `logic` receives a capability and an input buffer. /// /// # Examples /// ``` /// use timely::dataflow::operators::ToStream; /// use timely::dataflow::operators::generic::Operator; /// use timely::dataflow::channels::pact::Pipeline; /// /// timely::example(|scope| { /// (0..10).to_stream(scope) /// .unary(Pipeline, "example", |_cap, _info| |input, output| { /// input.for_each(|cap, data| { /// output.session(&cap).give_vec(&mut data.replace(Vec::new())); /// }); /// }); /// }); /// ``` #[inline] pub fn for_each<F: FnMut(CapabilityRef<T>, RefOrMut<D>)>(&mut self, mut logic: F) { // We inline `next()` so that we can use `self.logging` without cloning (and dropping) the logger. let internal = &self.internal; while let Some((cap, data)) = self.pull_counter.next().map(|bundle| { match bundle.as_ref_or_mut() { RefOrMut::Ref(bundle) => { (CapabilityRef::new(&bundle.time, internal.clone()), RefOrMut::Ref(&bundle.data)) }, RefOrMut::Mut(bundle) => { (CapabilityRef::new(&bundle.time, internal.clone()), RefOrMut::Mut(&mut bundle.data)) }, } }) { self.logging.as_mut().map(|l| l.log(crate::logging::GuardedMessageEvent { is_start: true })); logic(cap, data); self.logging.as_mut().map(|l| l.log(crate::logging::GuardedMessageEvent { is_start: false })); } } } impl<'a, T: Timestamp, D: Container, P: Pull<BundleCore<T, D>>+'a> FrontieredInputHandleCore<'a, T, D, P> { /// Allocate a new frontiered input handle. pub fn new(handle: &'a mut InputHandleCore<T, D, P>, frontier: &'a MutableAntichain<T>) -> Self { FrontieredInputHandleCore { handle, frontier, } } /// Reads the next input buffer (at some timestamp `t`) and a corresponding capability for `t`. /// The timestamp `t` of the input buffer can be retrieved by invoking `.time()` on the capability. /// Returns `None` when there's no more data available. #[inline] pub fn next(&mut self) -> Option<(CapabilityRef<T>, RefOrMut<D>)> { self.handle.next() } /// Repeatedly calls `logic` till exhaustion of the available input data. /// `logic` receives a capability and an input buffer. /// /// # Examples /// ``` /// use timely::dataflow::operators::ToStream; /// use timely::dataflow::operators::generic::Operator; /// use timely::dataflow::channels::pact::Pipeline; /// /// timely::example(|scope| { /// (0..10).to_stream(scope) /// .unary(Pipeline, "example", |_cap,_info| |input, output| { /// input.for_each(|cap, data| { /// output.session(&cap).give_vec(&mut data.replace(Vec::new())); /// }); /// }); /// }); /// ``` #[inline] pub fn for_each<F: FnMut(CapabilityRef<T>, RefOrMut<D>)>(&mut self, logic: F) { self.handle.for_each(logic) } /// Inspect the frontier associated with this input. #[inline] pub fn frontier(&self) -> &'a MutableAntichain<T> { self.frontier } } pub fn _access_pull_counter<T: Timestamp, D: Container, P: Pull<BundleCore<T, D>>>(input: &mut InputHandleCore<T, D, P>) -> &mut PullCounter<T, D, P> { &mut input.pull_counter } /// Constructs an input handle. /// Declared separately so that it can be kept private when `InputHandle` is re-exported. pub fn new_input_handle<T: Timestamp, D: Container, P: Pull<BundleCore<T, D>>>(pull_counter: PullCounter<T, D, P>, internal: Rc<RefCell<Vec<Rc<RefCell<ChangeBatch<T>>>>>>, logging: Option<Logger>) -> InputHandleCore<T, D, P> { InputHandleCore { pull_counter, internal, logging, } } /// An owned instance of an output buffer which ensures certain API use. /// /// An `OutputWrapper` exists to prevent anyone from using the wrapped buffer in any way other /// than with an `OutputHandle`, whose methods ensure that capabilities are used and that the /// pusher is flushed (via the `cease` method) once it is no longer used. #[derive(Debug)] pub struct
<T: Timestamp, D: Container, P: Push<BundleCore<T, D>>> { push_buffer: BufferCore<T, D, PushCounter<T, D, P>>, internal_buffer: Rc<RefCell<ChangeBatch<T>>>, } impl<T: Timestamp, D: Container, P: Push<BundleCore<T, D>>> OutputWrapper<T, D, P> { /// Creates a new output wrapper from a push buffer. pub fn new(push_buffer: BufferCore<T, D, PushCounter<T, D, P>>, internal_buffer: Rc<RefCell<ChangeBatch<T>>>) -> Self { OutputWrapper { push_buffer, internal_buffer, } } /// Borrows the push buffer into a handle, which can be used to send records. /// /// This method ensures that the only access to the push buffer is through the `OutputHandle` /// type which ensures the use of capabilities, and which calls `cease` when it is dropped. pub fn activate(&mut self) -> OutputHandleCore<T, D, P> { OutputHandleCore { push_buffer: &mut self.push_buffer, internal_buffer: &self.internal_buffer, } } } /// Handle to an operator's output stream. pub struct OutputHandleCore<'a, T: Timestamp, C: Container+'a, P: Push<BundleCore<T, C>>+'a> { push_buffer: &'a mut BufferCore<T, C, PushCounter<T, C, P>>, internal_buffer: &'a Rc<RefCell<ChangeBatch<T>>>, } /// Handle specialized to `Vec`-based container. pub type OutputHandle<'a, T, D, P> = OutputHandleCore<'a, T, Vec<D>, P>; impl<'a, T: Timestamp, C: Container, P: Push<BundleCore<T, C>>> OutputHandleCore<'a, T, C, P> { /// Obtains a session that can send data at the timestamp associated with capability `cap`. /// /// In order to send data at a future timestamp, obtain a capability for the new timestamp /// first, as show in the example. /// /// # Examples /// ``` /// use timely::dataflow::operators::ToStream; /// use timely::dataflow::operators::generic::Operator; /// use timely::dataflow::channels::pact::Pipeline; /// /// timely::example(|scope| { /// (0..10).to_stream(scope) /// .unary(Pipeline, "example", |_cap, _info| |input, output| { /// input.for_each(|cap, data| { /// let time = cap.time().clone() + 1; /// output.session(&cap.delayed(&time)) /// .give_vec(&mut data.replace(Vec::new())); /// }); /// }); /// }); /// ``` pub fn session<'b, CT: CapabilityTrait<T>>(&'b mut self, cap: &'b CT) -> Session<'b, T, C, PushCounter<T, C, P>> where 'a: 'b { assert!(cap.valid_for_output(&self.internal_buffer), "Attempted to open output session with invalid capability"); self.push_buffer.session(cap.time()) } } impl<'a, T: Timestamp, C: Container, P: Push<BundleCore<T, C>>> Drop for OutputHandleCore<'a, T, C, P> { fn drop(&mut self) { self.push_buffer.cease(); } }
OutputWrapper
hot_feed.go
package routes import ( "bytes" "encoding/gob" "encoding/hex" "encoding/json" "fmt" "io" "math" "net/http" "reflect" "sort" "time" "github.com/deso-smart/deso-core/v2/lib" "github.com/golang/glog" ) // This file defines a simple go routine that tracks "hot" posts from the specified look-back period as well // as API functionality for retrieving scored posts. The algorithm for assessing a post's // "hotness" is experimental and will likely be iterated upon depending on its results. // HotnessFeed scoring algorithm knobs. const ( // Number of blocks per halving for the scoring time decay for the global hot feed. DefaultHotFeedTimeDecayBlocks uint64 = 72 // Number of blocks per halving for the scoring time decay for a tag hot feed. DefaultHotFeedTagTimeDecayBlocks uint64 = 96 // Maximum score amount that any individual PKID can contribute before time decay. DefaultHotFeedInteractionCap uint64 = 4e12 // Maximum score amount that any individual PKID can contribute before time decay for a particular tag grouping. DefaultHotFeedTagInteractionCap uint64 = 4e12 // How many iterations of the hot feed calculation until the built-up caches should be reset. (Once per day) ResetCachesIterationLimit int = 288 ) // A single element in the server's HotFeedOrderedList. type HotFeedEntry struct { PostHash *lib.BlockHash PostHashHex string HotnessScore uint64 } // A single element in the server's HotFeedOrderedList, with the age of the post for sorting purposes. type HotFeedEntryTimeSortable struct { PostHash *lib.BlockHash PostHashHex string HotnessScore uint64 PostBlockAge int } // A key to track whether a specific public key has interacted with a post before. type HotFeedInteractionKey struct { InteractionPKID lib.PKID InteractionPostHash lib.BlockHash } // Multipliers to help a node operator boost content from PKID's relevant to their node. // For example, a sports-focused node could boost athlete PKIDs. type HotFeedPKIDMultiplier struct { // A multiplier applied to the score that each user interaction adds to a post. InteractionMultiplier float64 // A multiplier applied to all posts from this specific PKID. PostsMultiplier float64 } // A cached "HotFeedOrderedList" is stored on the server object and updated whenever a new // block is found. In addition, a "HotFeedApprovedPostMap" is maintained using hot feed // approval/removal operations stored in global state. Once started, the routine runs every // second in order to make sure hot feed removals are processed quickly. func (fes *APIServer) StartHotFeedRoutine() { glog.Info("Starting hot feed routine.") // Initialize maps used for serving tag-specific hot feeds. fes.PostTagToPostHashesMap = make(map[string]map[lib.BlockHash]bool) fes.PostTagToOrderedHotFeedEntries = make(map[string][]*HotFeedEntry) fes.PostTagToOrderedNewestEntries = make(map[string][]*HotFeedEntry) fes.PostHashToPostTagsMap = make(map[lib.BlockHash][]string) fes.HotFeedBlockCache = make(map[lib.BlockHash]*lib.MsgDeSoBlock) cacheResetCounter := 0 go func() { out: for { select { case <-time.After(30 * time.Second): resetCache := false if cacheResetCounter >= ResetCachesIterationLimit { resetCache = true cacheResetCounter = 0 } fes.UpdateHotFeed(resetCache) cacheResetCounter += 1 case <-fes.quit: break out } } }() } // The business. func (fes *APIServer) UpdateHotFeed(resetCache bool) { glog.Info("Refreshing hot feed...") if resetCache { glog.Info("Resetting hot feed cache.") fes.PostTagToPostHashesMap = make(map[string]map[lib.BlockHash]bool) fes.PostHashToPostTagsMap = make(map[lib.BlockHash][]string) fes.HotFeedBlockCache = make(map[lib.BlockHash]*lib.MsgDeSoBlock) } // We copy the HotFeedApprovedPosts map and HotFeedPKIDMultiplier maps so we can access // them safely without locking them. hotFeedApprovedPosts := fes.CopyHotFeedApprovedPostsMap() hotFeedPKIDMultipliers := fes.CopyHotFeedPKIDMultipliersMap() // Update the approved posts map and pkid multipliers map based on global state. fes.UpdateHotFeedApprovedPostsMap(hotFeedApprovedPosts) fes.UpdateHotFeedPKIDMultipliersMap(hotFeedPKIDMultipliers) // Update the HotFeedOrderedList based on the specified look-back period's blocks. hotFeedPosts := fes.UpdateHotFeedOrderedList(hotFeedApprovedPosts, hotFeedPKIDMultipliers) // The hotFeedPosts map will be nil unless we found new blocks in the call above. if hotFeedPosts != nil { fes.PruneHotFeedApprovedPostsMap(hotFeedPosts, hotFeedApprovedPosts) } // Replace the HotFeedApprovedPostsMap and HotFeedPKIDMultiplier map with the fresh ones. fes.HotFeedApprovedPostsToMultipliers = hotFeedApprovedPosts fes.HotFeedPKIDMultipliers = hotFeedPKIDMultipliers glog.Infof("Updated hot feed maps") } func (fes *APIServer) UpdateHotFeedApprovedPostsMap(hotFeedApprovedPosts map[lib.BlockHash]float64) { // Grab all of the relevant operations to update the map with. startTimestampNanos := uint64(time.Now().UTC().AddDate(0, 0, -1).UnixNano()) // 1 day ago. if fes.LastHotFeedApprovedPostOpProcessedTstampNanos != 0 { startTimestampNanos = fes.LastHotFeedApprovedPostOpProcessedTstampNanos } startPrefix := GlobalStateSeekKeyForHotFeedApprovedPostOps(startTimestampNanos + 1) opKeys, opVals, err := fes.GlobalState.Seek( startPrefix, _GlobalStatePrefixForHotFeedApprovedPostOps, /*validForPrefix*/ 0, /*maxKeyLen -- ignored since reverse is false*/ 0, /*numToFetch -- 0 is ignored*/ false, /*reverse*/ true, /*fetchValues*/ ) if err != nil { glog.Infof("UpdateHotFeedApprovedPostsMap: Seek failed: %v", err) } // Chop up the keys and process each operation. for opIdx, opKey := range opKeys { // Each key consists of: prefix, timestamp, posthash. timestampStartIdx := 1 postHashStartIdx := timestampStartIdx + 8 postHashBytes := opKey[postHashStartIdx:] postHash := &lib.BlockHash{} copy(postHash[:], postHashBytes) // Deserialize the HotFeedApprovedPostOp. hotFeedOp := HotFeedApprovedPostOp{} hotFeedOpBytes := opVals[opIdx] if len(hotFeedOpBytes) > 0 { err = gob.NewDecoder(bytes.NewReader(hotFeedOpBytes)).Decode(&hotFeedOp) if err != nil { glog.Infof("UpdateHotFeedApprovedPostsMap: ERROR decoding HotFeedApprovedPostOp: %v", err) continue } } else { // If this row doesn't actually have a HotFeedApprovedPostOp, bail. continue } if hotFeedOp.IsRemoval { delete(hotFeedApprovedPosts, *postHash) } else if hotFeedOp.Multiplier >= 0 { hotFeedApprovedPosts[*postHash] = hotFeedOp.Multiplier // Now we need to figure out if this was a multiplier update. prevMultiplier, hasPrevMultiplier := fes.HotFeedApprovedPostsToMultipliers[*postHash] if hasPrevMultiplier && prevMultiplier != hotFeedOp.Multiplier { fes.HotFeedPostMultiplierUpdated = true } else if hotFeedOp.Multiplier != 1 { fes.HotFeedPostMultiplierUpdated = true } } // If we've made it to the end of the op list, update the last op processed timestamp. if opIdx == len(opKeys)-1 { opTstampBytes := opKey[timestampStartIdx:postHashStartIdx] opTstampNanos := lib.DecodeUint64(opTstampBytes) fes.LastHotFeedApprovedPostOpProcessedTstampNanos = opTstampNanos } } } func (fes *APIServer) UpdateHotFeedPKIDMultipliersMap( hotFeedPKIDMultipliers map[lib.PKID]*HotFeedPKIDMultiplier, ) { // Grab all of the relevant operations to update the map with. startTimestampNanos := uint64(time.Now().UTC().AddDate(0, 0, -1).UnixNano()) // 1 day ago. if fes.LastHotFeedPKIDMultiplierOpProcessedTstampNanos != 0 { startTimestampNanos = fes.LastHotFeedPKIDMultiplierOpProcessedTstampNanos } startPrefix := GlobalStateSeekKeyForHotFeedPKIDMultiplierOps(startTimestampNanos + 1) opKeys, opVals, err := fes.GlobalState.Seek( startPrefix, _GlobalStatePrefixForHotFeedPKIDMultiplierOps, /*validForPrefix*/ 0, /*maxKeyLen -- ignored since reverse is false*/ 0, /*numToFetch -- 0 is ignored*/ false, /*reverse*/ true, /*fetchValues*/ ) if err != nil { glog.Infof("UpdateHotFeedPKIDMultipliersMap: Seek failed: %v", err) } // Chop up the keys and process each operation. for opIdx, opKey := range opKeys { // Each key consists of: prefix, timestamp, PKID. timestampStartIdx := 1 pkidStartIdx := timestampStartIdx + 8 opPKIDBytes := opKey[pkidStartIdx:] opPKID := &lib.PKID{} copy(opPKID[:], opPKIDBytes) // Deserialize the HotFeedPKIDMultiplierOp. hotFeedOp := HotFeedPKIDMultiplierOp{} hotFeedOpBytes := opVals[opIdx] if len(hotFeedOpBytes) > 0 { err = gob.NewDecoder(bytes.NewReader(hotFeedOpBytes)).Decode(&hotFeedOp) if err != nil { glog.Infof("UpdateHotFeedPKIDMultipliersMap: ERROR decoding HotFeedPKIDMultiplierOp: %v", err) continue } } else { // If this row doesn't actually have a HotFeedPKIDMultiplierOp, bail. continue } // Get the current multiplier and update it. Note that negatives are ignored. hotFeedPKIDMultiplier := hotFeedPKIDMultipliers[*opPKID] if hotFeedPKIDMultiplier == nil { hotFeedPKIDMultiplier = &HotFeedPKIDMultiplier{ InteractionMultiplier: 1, PostsMultiplier: 1, } } if hotFeedOp.InteractionMultiplier >= 0 { hotFeedPKIDMultiplier.InteractionMultiplier = hotFeedOp.InteractionMultiplier } else if hotFeedOp.PostsMultiplier >= 0 { hotFeedPKIDMultiplier.PostsMultiplier = hotFeedOp.PostsMultiplier } hotFeedPKIDMultipliers[*opPKID] = hotFeedPKIDMultiplier // If we've made it to the end of the op list, update trackers. if opIdx == len(opKeys)-1 { // Update the time stamp of the last op processed. opTstampBytes := opKey[timestampStartIdx:pkidStartIdx] opTstampNanos := lib.DecodeUint64(opTstampBytes) fes.LastHotFeedPKIDMultiplierOpProcessedTstampNanos = opTstampNanos // Record that the multiplier map has updates. fes.HotFeedPKIDMultiplierUpdated = true } } } func (fes *APIServer) CopyHotFeedApprovedPostsMap() map[lib.BlockHash]float64 { hotFeedApprovedPosts := make(map[lib.BlockHash]float64, len(fes.HotFeedApprovedPostsToMultipliers)) for postKey, postVal := range fes.HotFeedApprovedPostsToMultipliers { hotFeedApprovedPosts[postKey] = postVal } return hotFeedApprovedPosts } func (fes *APIServer) CopyHotFeedPKIDMultipliersMap() map[lib.PKID]*HotFeedPKIDMultiplier { hotFeedPKIDMultipliers := make(map[lib.PKID]*HotFeedPKIDMultiplier, len(fes.HotFeedPKIDMultipliers)) for pkidKey, multiplierVal := range fes.HotFeedPKIDMultipliers { multiplierValCopy := *multiplierVal hotFeedPKIDMultipliers[pkidKey] = &multiplierValCopy } return hotFeedPKIDMultipliers } type HotnessPostInfo struct { // How long ago the post was created in number of blocks PostBlockAge int HotnessScore uint64 } func (fes *APIServer) UpdateHotFeedOrderedList( postsToMultipliers map[lib.BlockHash]float64, pkidsToMultipliers map[lib.PKID]*HotFeedPKIDMultiplier, ) (_hotFeedPostsMap map[lib.BlockHash]*HotnessPostInfo, ) { // Check to see if any of the algorithm constants have changed. globalStateInteractionCap, globalStateTagInteractionCap, globalStateTimeDecayBlocks, globalStateTagTimeDecayBlocks, globalStateTxnTypeMultiplierMap, err := fes.GetHotFeedConstantsFromGlobalState() if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to get constants: %v", err) return nil } if globalStateInteractionCap == 0 || globalStateTimeDecayBlocks == 0 { // The hot feed go routine has not been run yet since constants have not been set. // Set the default constants in GlobalState and then on the server object. err := fes.GlobalState.Put( _GlobalStatePrefixForHotFeedInteractionCap, lib.EncodeUint64(DefaultHotFeedInteractionCap), ) if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to put InteractionCap: %v", err) return nil } err = fes.GlobalState.Put( _GlobalStatePrefixForHotFeedTagInteractionCap, lib.EncodeUint64(DefaultHotFeedTagInteractionCap), ) if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to put InteractionCap for tag feeds: %v", err) return nil } err = fes.GlobalState.Put( _GlobalStatePrefixForHotFeedTimeDecayBlocks, lib.EncodeUint64(DefaultHotFeedTimeDecayBlocks), ) if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to put TimeDecayBlocks: %v", err) return nil } err = fes.GlobalState.Put( _GlobalStatePrefixForHotFeedTagTimeDecayBlocks, lib.EncodeUint64(DefaultHotFeedTagTimeDecayBlocks), ) if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to put TimeDecayBlocks for tag feeds: %v", err) return nil } // Now that we've successfully updated global state, set them on the server object. fes.HotFeedInteractionCap = DefaultHotFeedInteractionCap fes.HotFeedTagInteractionCap = DefaultHotFeedTagInteractionCap fes.HotFeedTimeDecayBlocks = DefaultHotFeedTimeDecayBlocks fes.HotFeedTagTimeDecayBlocks = DefaultHotFeedTagTimeDecayBlocks fes.HotFeedTxnTypeMultiplierMap = make(map[lib.TxnType]uint64) // Check to see if only the tag-specific feed configuration variables are unset and set just those. } else if globalStateTagInteractionCap == 0 || globalStateTagTimeDecayBlocks == 0 { // The hot feed go routine has not been run yet since constants have not been set. err = fes.GlobalState.Put( _GlobalStatePrefixForHotFeedTagInteractionCap, lib.EncodeUint64(DefaultHotFeedTagInteractionCap), ) if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to put InteractionCap: %v", err) return nil } err = fes.GlobalState.Put( _GlobalStatePrefixForHotFeedTagTimeDecayBlocks, lib.EncodeUint64(DefaultHotFeedTagTimeDecayBlocks), ) if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to put TimeDecayBlocks: %v", err) return nil } // Now that we've successfully updated global state, set them on the server object. fes.HotFeedTagInteractionCap = DefaultHotFeedTagInteractionCap fes.HotFeedTagTimeDecayBlocks = DefaultHotFeedTagTimeDecayBlocks fes.HotFeedTxnTypeMultiplierMap = make(map[lib.TxnType]uint64) } else if fes.HotFeedInteractionCap != globalStateInteractionCap || fes.HotFeedTimeDecayBlocks != globalStateTimeDecayBlocks || fes.HotFeedTagInteractionCap != globalStateTagInteractionCap || fes.HotFeedTagTimeDecayBlocks != globalStateTagTimeDecayBlocks || !reflect.DeepEqual(fes.HotFeedTxnTypeMultiplierMap, globalStateTxnTypeMultiplierMap) { // New constants were found in global state. Set them and proceed. fes.HotFeedInteractionCap = globalStateInteractionCap fes.HotFeedTimeDecayBlocks = globalStateTimeDecayBlocks fes.HotFeedTagInteractionCap = globalStateTagInteractionCap fes.HotFeedTagTimeDecayBlocks = globalStateTagTimeDecayBlocks fes.HotFeedTxnTypeMultiplierMap = globalStateTxnTypeMultiplierMap } else if fes.HotFeedPostMultiplierUpdated || fes.HotFeedPKIDMultiplierUpdated { fes.HotFeedPostMultiplierUpdated = false fes.HotFeedPKIDMultiplierUpdated = false } // If the constants for the algorithm haven't changed and we have already seen the latest // block or the chain is out of sync, bail. blockTip := fes.blockchain.BlockTip() // This offset allows us to see what the hot feed would look like in the past, // which is useful for testing purposes. blockOffsetForTesting := 0 // Grab the last 60 days worth of blocks (25,920 blocks @ 5min/block). lookbackWindowBlocks := 60 * 24 * 60 / 5 // Check if the most recent blocks that we'll be considering in hot feed computation have been processed. for _, blockNode := range fes.blockchain.BestChain() { if blockNode.Height < blockTip.Height-uint32(lookbackWindowBlocks+blockOffsetForTesting) { continue } } // Log how long this routine takes, since it could be heavy. glog.Info("UpdateHotFeedOrderedList: Starting new update cycle.") start := time.Now() // Get a utxoView for lookups. utxoView, err := fes.backendServer.GetMempool().GetAugmentedUniversalView() if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to get utxo view: %v", err) return nil } // Grab the last 24 hours worth of blocks (288 blocks @ 5min/block). blockTipIndex := len(fes.blockchain.BestChain()) - 1 - blockOffsetForTesting relevantNodes := fes.blockchain.BestChain() if len(fes.blockchain.BestChain()) > (lookbackWindowBlocks + blockOffsetForTesting) { relevantNodes = fes.blockchain.BestChain()[blockTipIndex-lookbackWindowBlocks-blockOffsetForTesting : blockTipIndex] } var hotnessInfoBlocks []*HotnessInfoBlock for blockIdx, node := range relevantNodes { var block *lib.MsgDeSoBlock if cachedBlock, ok := fes.HotFeedBlockCache[*node.Hash]; ok { block = cachedBlock } else { block, _ = lib.GetBlock(node.Hash, utxoView.Handle, fes.blockchain.Snapshot()) fes.HotFeedBlockCache[*node.Hash] = block } hotnessInfoBlocks = append(hotnessInfoBlocks, &HotnessInfoBlock{ Block: block, // For time decay, we care about how many blocks away from the tip this block is. BlockAge: len(relevantNodes) - blockIdx, }) } // Fake block height for mempool transactions that haven't been mined yet var mempoolBlockHeight int if fes.blockchain.BlockTip() != nil { mempoolBlockHeight = int(fes.blockchain.BlockTip().Height + 1) } else { mempoolBlockHeight = 1 } // Create new "block" for mempool txns, give it a block age of 1 greater than the current tip // First get all MempoolTxns from mempool. mempoolTxnsOrderedByTime, _, err := fes.backendServer.GetMempool().GetTransactionsOrderedByTimeAdded() // Extract MsgDesoTxn from each MempoolTxn var txnsFromMempoolOrderedByTime []*lib.MsgDeSoTxn for _, mempoolTxn := range mempoolTxnsOrderedByTime { txnsFromMempoolOrderedByTime = append(txnsFromMempoolOrderedByTime, mempoolTxn.Tx) } if err != nil { glog.Errorf("Error getting mempool transactions: %v", err) } else if len(txnsFromMempoolOrderedByTime) > 0 { hotnessInfoBlocks = append(hotnessInfoBlocks, &HotnessInfoBlock{ Block: &lib.MsgDeSoBlock{ Txns: txnsFromMempoolOrderedByTime, }, BlockAge: mempoolBlockHeight, }) } // Iterate over the blocks and track global feed hotness scores for each post. hotnessInfoMapGlobalFeed, err := fes.PopulateHotnessInfoMap(utxoView, postsToMultipliers, pkidsToMultipliers, false, hotnessInfoBlocks) if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to put PopulateHotnessInfoMap for global feed: %v", err) return nil } // Iterate over the blocks and track tag feed hotness scores for each post. hotnessInfoMapTagFeed, err := fes.PopulateHotnessInfoMap(utxoView, postsToMultipliers, pkidsToMultipliers, true, hotnessInfoBlocks) if err != nil { glog.Infof("UpdateHotFeedOrderedList: ERROR - Failed to put PopulateHotnessInfoMap for tag feed: %v", err) return nil } // Sort the map into an ordered list and set it as the server's new HotFeedOrderedList. hotFeedOrderedList := []*HotFeedEntry{} for postHashKey, hotnessInfo := range hotnessInfoMapGlobalFeed { postHash := postHashKey hotFeedEntry := &HotFeedEntry{ PostHash: &postHash, PostHashHex: hex.EncodeToString(postHash[:]), HotnessScore: hotnessInfo.HotnessScore, } hotFeedOrderedList = append(hotFeedOrderedList, hotFeedEntry) } sort.Slice(hotFeedOrderedList, func(ii, jj int) bool { if hotFeedOrderedList[ii].HotnessScore != hotFeedOrderedList[jj].HotnessScore { return hotFeedOrderedList[ii].HotnessScore > hotFeedOrderedList[jj].HotnessScore } else { return hotFeedOrderedList[ii].PostHashHex > hotFeedOrderedList[jj].PostHashHex } }) fes.HotFeedOrderedList = hotFeedOrderedList fes.HotFeedPostHashToTagScoreMap = hotnessInfoMapTagFeed // Set the ordered lists for hot feed based on tags. postTagToOrderedHotFeedEntries := make(map[string][]*HotFeedEntry) postTagToOrderedHotFeedEntries = fes.SaveOrderedFeedForTags(true, postTagToOrderedHotFeedEntries) fes.PostTagToOrderedHotFeedEntries = postTagToOrderedHotFeedEntries // Set the ordered lists for newness based on tags. postTagToOrderedNewestEntries := map[string][]*HotFeedEntry{} postTagToOrderedNewestEntries = fes.SaveOrderedFeedForTags(false, postTagToOrderedNewestEntries) fes.PostTagToOrderedNewestEntries = postTagToOrderedNewestEntries // Update the HotFeedBlockHeight so we don't re-evaluate this set of blocks. fes.HotFeedBlockHeight = blockTip.Height elapsed := time.Since(start) glog.Infof("Successfully updated HotFeedOrderedList in %s", elapsed) return hotnessInfoMapGlobalFeed } type HotnessInfoBlock struct { Block *lib.MsgDeSoBlock BlockAge int } func (fes *APIServer) PopulateHotnessInfoMap( utxoView *lib.UtxoView, postsToMultipliers map[lib.BlockHash]float64, pkidsToMultipliers map[lib.PKID]*HotFeedPKIDMultiplier, isTagFeed bool, hotnessInfoBlocks []*HotnessInfoBlock, ) (map[lib.BlockHash]*HotnessPostInfo, error) { hotnessInfoMap := make(map[lib.BlockHash]*HotnessPostInfo) // Map of interaction key to transaction type multiplier applied. postInteractionMap := make(map[HotFeedInteractionKey]uint64) for _, hotnessInfoBlock := range hotnessInfoBlocks { block := hotnessInfoBlock.Block blockAgee := hotnessInfoBlock.BlockAge for _, txn := range block.Txns { // We only care about posts created in the specified look-back period. There should always be a // transaction that creates a given post before someone interacts with it. By only // scoring posts that meet this condition, we can restrict the HotFeedOrderedList // to posts from the specified look-back period without even looking up the post time stamp. isCreatePost, postHashCreated := CheckTxnForCreatePost(txn) if isCreatePost { hotnessInfoMap[*postHashCreated] = &HotnessPostInfo{ PostBlockAge: blockAgee, HotnessScore: 0, } continue } // If the post has been edited, remove all tags associated with that post. // This ensures that the categorization reflects the most recently edited version. isEditPost, postHashEdited := CheckTxnForEditPost(txn) if isEditPost { tags := fes.PostHashToPostTagsMap[*postHashEdited] delete(fes.PostHashToPostTagsMap, *postHashEdited) for _, tag := range tags { if postHashes, ok := fes.PostTagToPostHashesMap[tag]; ok { delete(postHashes, *postHashEdited) } } continue } // The age used in determining the score should be that of the post // that we are evaluating. The interaction's score will be discounted // by this age. postHashToScore, posterPKID := GetPostHashToScoreForTxn(txn, utxoView) if postHashToScore == nil { // If we don't have a post hash to score then this txn is not relevant // and we can continue. continue } prevHotnessInfo, inHotnessInfoMap := hotnessInfoMap[*postHashToScore] if !inHotnessInfoMap { // If the post is not in the hotnessInfoMap yet, it wasn't created // in the specified look-back period so we can continue. continue } postBlockAge := prevHotnessInfo.PostBlockAge // If we get here, we know we are dealing with a txn that interacts with a // post that was created within the specified look-back period. // Evaluate the txn and attempt to update the hotnessInfoMap. postHashScored, interactionPKID, txnHotnessScore := fes.GetHotnessScoreInfoForTxn(txn, postBlockAge, postInteractionMap, utxoView, isTagFeed) if postHashScored != nil { // Check for a post-specific multiplier. multiplier, hasMultiplier := postsToMultipliers[*postHashScored] if hasMultiplier && multiplier >= 0 { txnHotnessScore = uint64(multiplier * float64(txnHotnessScore)) } // Check for PKID-specifc multipliers for the poster and the interactor. posterPKIDMultiplier, hasPosterPKIDMultiplier := pkidsToMultipliers[*posterPKID] if hasPosterPKIDMultiplier { txnHotnessScore = uint64( posterPKIDMultiplier.PostsMultiplier * float64(txnHotnessScore)) } interactionPKIDMultiplier, hasInteractionPKIDMultiplier := pkidsToMultipliers[*interactionPKID] if hasInteractionPKIDMultiplier { txnHotnessScore = uint64( interactionPKIDMultiplier.InteractionMultiplier * float64(txnHotnessScore)) } // Check for overflow just in case. if prevHotnessInfo.HotnessScore > math.MaxInt64-txnHotnessScore { continue } // Finally, make sure the post scored isn't a comment or repost. postEntryScored := utxoView.GetPostEntryForPostHash(postHashScored) if len(postEntryScored.ParentStakeID) > 0 || lib.IsVanillaRepost(postEntryScored) { continue } // If the post has been deleted, then exclude it from the hot feed. if postEntryScored.IsHidden { continue } var tags []string var err error // Before parsing the text body, first check to see if this post has been processed and cached prior. if postTags, ok := fes.PostHashToPostTagsMap[*postHashScored]; ok { tags = postTags } else { // Parse tags from post entry. tags, err = ParseTagsFromPost(postEntryScored) if err != nil { return nil, err } // Cache processed post in map. fes.PostHashToPostTagsMap[*postHashScored] = tags // Add each tagged post to the tag:postEntries map for _, tag := range tags { // If a post hash set already exists, append to it, // otherwise create a new set and add it to the map. var postHashSet map[lib.BlockHash]bool if postHashSet, ok = fes.PostTagToPostHashesMap[tag]; !ok { postHashSet = make(map[lib.BlockHash]bool) } if _, ok = postHashSet[*postHashScored]; !ok { postHashSet[*postHashScored] = true } fes.PostTagToPostHashesMap[tag] = postHashSet } } // Update the hotness score. prevHotnessInfo.HotnessScore += txnHotnessScore } } } return hotnessInfoMap, nil } // Rank posts on a tag-by-tag basis and save them to their corresponding index in a map. // If sortByHotness is true, sort by their hotness score, otherwise sort by newness. func (fes *APIServer) SaveOrderedFeedForTags(sortByHotness bool, PostTagToOrderedEntries map[string][]*HotFeedEntry) map[string][]*HotFeedEntry { for tag, tagPostHashes := range fes.PostTagToPostHashesMap { tagHotFeedOrderedList := []*HotFeedEntry{} tagHotFeedListWithAge := []*HotFeedEntryTimeSortable{} // Loop through every tagged post for the tag in question. for tagPostHashKey := range tagPostHashes { tagPostHash := tagPostHashKey if postHotnessInfo, ok := fes.HotFeedPostHashToTagScoreMap[tagPostHash]; ok { postHotFeedEntry := &HotFeedEntryTimeSortable{ PostHash: &tagPostHash, PostHashHex: hex.EncodeToString(tagPostHash[:]), HotnessScore: postHotnessInfo.HotnessScore, PostBlockAge: postHotnessInfo.PostBlockAge, } tagHotFeedListWithAge = append(tagHotFeedListWithAge, postHotFeedEntry) } } // Sort posts based on specified criteria, either age (asc) or hotness (desc). sort.Slice(tagHotFeedListWithAge, func(ii, jj int) bool { if sortByHotness { return tagHotFeedListWithAge[ii].HotnessScore > tagHotFeedListWithAge[jj].HotnessScore } else { return tagHotFeedListWithAge[ii].PostBlockAge < tagHotFeedListWithAge[jj].PostBlockAge } }) // Remove age from entry to save space. tagHotFeedOrderedList = removeAgeFromSortedHotFeedEntries(tagHotFeedListWithAge) PostTagToOrderedEntries[tag] = tagHotFeedOrderedList } return PostTagToOrderedEntries } // This function removes the age field from a sorted list of hot feed entries. This allows us to reduce the size // of the entries created. func removeAgeFromSortedHotFeedEntries(sortedHotFeedEntries []*HotFeedEntryTimeSortable) []*HotFeedEntry { hotFeedEntriesWithoutAge := []*HotFeedEntry{} for _, hotFeedEntryWithAge := range sortedHotFeedEntries { hotFeedEntriesWithoutAge = append(hotFeedEntriesWithoutAge, &HotFeedEntry{ PostHash: hotFeedEntryWithAge.PostHash, PostHashHex: hotFeedEntryWithAge.PostHashHex, HotnessScore: hotFeedEntryWithAge.HotnessScore, }) } return hotFeedEntriesWithoutAge } func (fes *APIServer) GetHotFeedParamFromGlobalState(prefix []byte) (uint64, error) { valueBytes, err := fes.GlobalState.Get(prefix) if err != nil { return 0, err } value := uint64(0) if len(valueBytes) > 0 { value = lib.DecodeUint64(valueBytes) } return value, nil } func (fes *APIServer) GetHotFeedConstantsFromGlobalState() ( _interactionCap uint64, _interactionTagCap uint64, _timeDecayBlocks uint64, _timeDecayTagBlocks uint64, _tnxTypeMultiplierMap map[lib.TxnType]uint64, _err error, ) { interactionCap, err := fes.GetHotFeedParamFromGlobalState(_GlobalStatePrefixForHotFeedInteractionCap) if err != nil { return 0, 0, 0, 0, nil, nil } interactionCapTag, err := fes.GetHotFeedParamFromGlobalState(_GlobalStatePrefixForHotFeedTagInteractionCap) if err != nil { return 0, 0, 0, 0, nil, nil } timeDecayBlocks, err := fes.GetHotFeedParamFromGlobalState(_GlobalStatePrefixForHotFeedTimeDecayBlocks) if err != nil { return 0, 0, 0, 0, nil, nil } timeDecayBlocksTag, err := fes.GetHotFeedParamFromGlobalState(_GlobalStatePrefixForHotFeedTagTimeDecayBlocks) if err != nil { return 0, 0, 0, 0, nil, nil } txnTypeMultiplierMapBytes, err := fes.GlobalState.Get(_GlobalStatePrefixHotFeedTxnTypeMultiplierBasisPoints) if err != nil { return 0, 0, 0, 0, nil, nil } txnTypeMultiplierMap := make(map[lib.TxnType]uint64) if len(txnTypeMultiplierMapBytes) > 0 { if err = gob.NewDecoder(bytes.NewReader(txnTypeMultiplierMapBytes)).Decode(&txnTypeMultiplierMap); err != nil { return 0, 0, 0, 0, nil, fmt.Errorf("Error decoding txnTypeMultiplierMapBytes to map: %v", err) } } return interactionCap, interactionCapTag, timeDecayBlocks, timeDecayBlocksTag, txnTypeMultiplierMap, nil } func CheckTxnForCreatePost(txn *lib.MsgDeSoTxn) ( _isCreatePostTxn bool, _postHashCreated *lib.BlockHash) { if txn.TxnMeta.GetTxnType() == lib.TxnTypeSubmitPost { txMeta := txn.TxnMeta.(*lib.SubmitPostMetadata) // The post hash of a brand new post is the same as its txn hash. if len(txMeta.PostHashToModify) == 0 { return true, txn.Hash() } } return false, nil } func CheckTxnForEditPost(txn *lib.MsgDeSoTxn) ( _isEditPostTxn bool, _postHashCreated *lib.BlockHash) { if txn.TxnMeta.GetTxnType() == lib.TxnTypeSubmitPost { txMeta := txn.TxnMeta.(*lib.SubmitPostMetadata) // The post hash of a brand new post is the same as its txn hash. if len(txMeta.PostHashToModify) != 0 { blockHashToModify := lib.NewBlockHash(txMeta.PostHashToModify) return true, blockHashToModify } } return false, nil } func GetPostHashToScoreForTxn(txn *lib.MsgDeSoTxn, utxoView *lib.UtxoView) (_postHashScored *lib.BlockHash, _posterPKID *lib.PKID)
// Returns the post hash that a txn is relevant to and the amount that the txn should contribute // to that post's hotness score. The postInteractionMap is used to ensure that each PKID only // gets one interaction per post. func (fes *APIServer) GetHotnessScoreInfoForTxn( txn *lib.MsgDeSoTxn, blockAge int, // Number of blocks this txn is from the blockTip. Not block height. postInteractionMap map[HotFeedInteractionKey]uint64, utxoView *lib.UtxoView, isTagFeed bool, ) (_postHashScored *lib.BlockHash, _interactionPKID *lib.PKID, _hotnessScore uint64, ) { // Figure out who is responsible for the transaction. interactionPKIDEntry := utxoView.GetPKIDForPublicKey(txn.PublicKey) interactionPostHash, _ := GetPostHashToScoreForTxn(txn, utxoView) // Check to see if we've seen this interaction pair before. Log an interaction if not. interactionKey := HotFeedInteractionKey{ InteractionPKID: *interactionPKIDEntry.PKID, InteractionPostHash: *interactionPostHash, } // Transaction type multiplier for current transaction. multiplier := fes.getTxnMultiplier(txn) // Get previously applied multiplier for post, if post has been counted already for this user. if prevMultiplier, exists := postInteractionMap[interactionKey]; exists { // If the previously applied multiplier is greater, skip this transaction. if prevMultiplier > multiplier { return nil, nil, 0 } postInteractionMap[interactionKey] = multiplier // We want to count the difference of the new multiplier and the previously counted multiplier. multiplier = multiplier - prevMultiplier } else { postInteractionMap[interactionKey] = multiplier } // Finally return the post hash and the txn's hotness score. interactionProfile := utxoView.GetProfileEntryForPKID(interactionPKIDEntry.PKID) interactionUserBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(txn.PublicKey) if err != nil { return nil, nil, 0 } hotnessScore := interactionUserBalance // It is possible for the profile to be nil since you don't need a profile for diamonds. if interactionProfile != nil && !interactionProfile.IsDeleted() { hotnessScore += interactionProfile.CreatorCoinEntry.DeSoLockedNanos } // Apply transaction type multiplier. // Multipliers are defined in basis points, so the resulting product is divided by 10,000. hotnessScore = hotnessScore * multiplier / 10000 if hotnessScore > fes.HotFeedInteractionCap && !isTagFeed { hotnessScore = fes.HotFeedInteractionCap } else if hotnessScore > fes.HotFeedTagInteractionCap && isTagFeed { hotnessScore = fes.HotFeedTagInteractionCap } var timeDecayBlocks uint64 if isTagFeed { timeDecayBlocks = fes.HotFeedTagTimeDecayBlocks } else { timeDecayBlocks = fes.HotFeedTimeDecayBlocks } hotnessScoreTimeDecayed := uint64(float64(hotnessScore) * math.Pow(0.5, float64(blockAge)/float64(timeDecayBlocks))) return interactionPostHash, interactionPKIDEntry.PKID, hotnessScoreTimeDecayed } func (fes *APIServer) PruneHotFeedApprovedPostsMap( hotFeedPosts map[lib.BlockHash]*HotnessPostInfo, hotFeedApprovedPosts map[lib.BlockHash]float64, ) { for postHash := range fes.HotFeedApprovedPostsToMultipliers { if _, inHotFeedMap := hotFeedPosts[postHash]; !inHotFeedMap { delete(hotFeedApprovedPosts, postHash) } } } // Get the transaction type multiplier associated with a particular transaction func (fes *APIServer) getTxnMultiplier(txn *lib.MsgDeSoTxn) uint64 { if multiplier, ok := fes.HotFeedTxnTypeMultiplierMap[txn.TxnMeta.GetTxnType()]; ok { return multiplier } else { // If transaction doesn't have a multiplier defined, multiply by 1x (in basis points) return 10000 } } type HotFeedPageRequest struct { ReaderPublicKeyBase58Check string // Since the hot feed is constantly changing, we pass a list of posts that have already // been seen in order to send a more accurate next page. SeenPosts []string // Number of post entry responses to return. ResponseLimit int // If defined, only get the hot feed for posts tagged with this tag. Tag string // If true, sort by new instead of by hotness. Only applies to queries where "Tag" is defined. SortByNew bool } type HotFeedPageResponse struct { HotFeedPage []PostEntryResponse } func (fes *APIServer) AdminGetUnfilteredHotFeed(ww http.ResponseWriter, req *http.Request) { fes.HandleHotFeedPageRequest(ww, req, false /*approvedPostsOnly*/, true /*addMultiplierBool*/) } func (fes *APIServer) GetHotFeed(ww http.ResponseWriter, req *http.Request) { fes.HandleHotFeedPageRequest(ww, req, false /*approvedPostsOnly*/, false /*addMultiplierBool*/) } func (fes *APIServer) HandleHotFeedPageRequest( ww http.ResponseWriter, req *http.Request, approvedPostsOnly bool, addMultiplierBool bool, ) { decoder := json.NewDecoder(io.LimitReader(req.Body, MaxRequestBodySizeBytes)) requestData := HotFeedPageRequest{} if err := decoder.Decode(&requestData); err != nil { _AddBadRequestError(ww, fmt.Sprintf("HandleHotFeedPageRequest: Problem parsing request body: %v", err)) return } var readerPublicKeyBytes []byte var err error if requestData.ReaderPublicKeyBase58Check != "" { readerPublicKeyBytes, _, err = lib.Base58CheckDecode(requestData.ReaderPublicKeyBase58Check) if err != nil { _AddBadRequestError(ww, fmt.Sprintf("HandleHotFeedPageRequest: Problem decoding reader public key: %v", err)) return } } // Get a view. utxoView, err := fes.backendServer.GetMempool().GetAugmentedUniversalView() if err != nil { _AddBadRequestError(ww, fmt.Sprintf("HandleHotFeedPageRequest: Error getting utxoView: %v", err)) return } // Make the lists of posts a user has already seen into a map. seenPostsMap := make(map[string][]byte) for _, postHashHex := range requestData.SeenPosts { seenPostsMap[postHashHex] = []byte{} } hotFeed := []PostEntryResponse{} // The list of posts that will be iterated on var hotFeedOrderedList []*HotFeedEntry // Only process posts tagged with a particular tag if specified in the request if requestData.Tag != "" { // Choose the map with the lists sorted in the manner specified by the user (hotness or newness). var tagMap map[string][]*HotFeedEntry if requestData.SortByNew { tagMap = fes.PostTagToOrderedNewestEntries } else { tagMap = fes.PostTagToOrderedHotFeedEntries } // Check to make sure key exists in map. If not, return an empty list. if orderedEntriesForTag, ok := tagMap[requestData.Tag]; ok { hotFeedOrderedList = orderedEntriesForTag } else { hotFeedOrderedList = []*HotFeedEntry{} } } else { hotFeedOrderedList = fes.HotFeedOrderedList } for _, hotFeedEntry := range hotFeedOrderedList { if requestData.ResponseLimit != 0 && len(hotFeed) > requestData.ResponseLimit { break } // Skip posts that have already been seen. if _, alreadySeen := seenPostsMap[hotFeedEntry.PostHashHex]; alreadySeen { continue } // Skip posts that aren't approved yet, if requested. if _, isApproved := fes.HotFeedApprovedPostsToMultipliers[*hotFeedEntry.PostHash]; approvedPostsOnly && !isApproved { continue } postEntry := utxoView.GetPostEntryForPostHash(hotFeedEntry.PostHash) postEntryResponse, err := fes._postEntryToResponse( postEntry, true, fes.Params, utxoView, readerPublicKeyBytes, 1) if err != nil { continue } // Skip posts that are pinned (these will be added to the very top of the feed later) if *postEntryResponse.IsPinned { continue } profileEntry := utxoView.GetProfileEntryForPublicKey(postEntry.PosterPublicKey) postEntryResponse.ProfileEntryResponse = fes._profileEntryToResponse( profileEntry, utxoView) postEntryResponse.PostEntryReaderState = utxoView.GetPostEntryReaderState( readerPublicKeyBytes, postEntry) postEntryResponse.HotnessScore = hotFeedEntry.HotnessScore hotFeedMultiplier, inHotFeed := fes.HotFeedApprovedPostsToMultipliers[*postEntry.PostHash] if inHotFeed && addMultiplierBool { postEntryResponse.PostMultiplier = hotFeedMultiplier } hotFeed = append(hotFeed, *postEntryResponse) } { // Only add pinned posts if we are starting from the top of the feed. if len(requestData.SeenPosts) == 0 { maxBigEndianUint64Bytes := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} maxKeyLen := 1 + len(maxBigEndianUint64Bytes) + lib.HashSizeBytes // Get all pinned posts and prepend them to the list of postEntries pinnedStartKey := _GlobalStatePrefixTstampNanosPinnedPostHash // todo: how many posts can we really pin? keys, _, err := fes.GlobalState.Seek(pinnedStartKey, pinnedStartKey, maxKeyLen, 10, true, false) if err != nil { _AddBadRequestError(ww, fmt.Sprintf("HandleHotFeedPageRequest: Getting pinned posts: %v", err)) } var pinnedPostEntryRepsonses []PostEntryResponse for _, dbKeyBytes := range keys { postHash := &lib.BlockHash{} copy(postHash[:], dbKeyBytes[1+len(maxBigEndianUint64Bytes):][:]) postEntry := utxoView.GetPostEntryForPostHash(postHash) if postEntry != nil { postEntry.IsPinned = true profileEntry := utxoView.GetProfileEntryForPublicKey(postEntry.PosterPublicKey) postEntryResponse, err := fes._postEntryToResponse( postEntry, true, fes.Params, utxoView, readerPublicKeyBytes, 1) postEntryResponse.ProfileEntryResponse = fes._profileEntryToResponse( profileEntry, utxoView) postEntryResponse.PostEntryReaderState = utxoView.GetPostEntryReaderState( readerPublicKeyBytes, postEntry) if err != nil { continue } pinnedPostEntryRepsonses = append(pinnedPostEntryRepsonses, *postEntryResponse) } } hotFeed = append(pinnedPostEntryRepsonses, hotFeed...) } } res := HotFeedPageResponse{HotFeedPage: hotFeed} if err = json.NewEncoder(ww).Encode(res); err != nil { _AddBadRequestError(ww, fmt.Sprintf("HandleHotFeedPageRequest: Problem encoding response as JSON: %v", err)) return } } type AdminUpdateHotFeedAlgorithmRequest struct { // Maximum score amount that any individual PKID can contribute to the global hot feed score // before time decay. Ignored if set to zero. InteractionCap int // Maximum score amount that any individual PKID can contribute to a particular tag's hot feed score // before time decay. Ignored if set to zero. InteractionCapTag int // Number of blocks per halving for the global hot feed score time decay. Ignored if set to zero. TimeDecayBlocks int // Number of blocks per halving for a tag's hot feed score time decay. Ignored if set to zero. TimeDecayBlocksTag int // Multiplier which alters the hotness score for a particular transaction type. Multiplier is stored in basis points. TxnTypeMultiplierMap map[lib.TxnType]uint64 } type AdminUpdateHotFeedAlgorithmResponse struct{} func (fes *APIServer) AdminUpdateHotFeedAlgorithm(ww http.ResponseWriter, req *http.Request) { decoder := json.NewDecoder(io.LimitReader(req.Body, MaxRequestBodySizeBytes)) requestData := AdminUpdateHotFeedAlgorithmRequest{} if err := decoder.Decode(&requestData); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedAlgorithm: Problem parsing request body: %v", err)) return } if requestData.InteractionCap < 0 || requestData.TimeDecayBlocks < 0 || requestData.InteractionCapTag < 0 || requestData.TimeDecayBlocksTag < 0 { _AddBadRequestError(ww, fmt.Sprintf( "AdminUpdateHotFeedAlgorithm: InteractionCap (%d, %d) and TimeDecayBlocks (%d, %d) can't be negative.", requestData.InteractionCap, requestData.InteractionCapTag, requestData.TimeDecayBlocks, requestData.TimeDecayBlocksTag)) return } err := fes.AddHotFeedParamToGlobalState(_GlobalStatePrefixForHotFeedInteractionCap, requestData.InteractionCap) if err != nil { _AddInternalServerError(ww, fmt.Sprintf("AdminUpdateHotFeedAlgorithm: Error putting InteractionCap: %v", err)) return } err = fes.AddHotFeedParamToGlobalState(_GlobalStatePrefixForHotFeedTagInteractionCap, requestData.InteractionCapTag) if err != nil { _AddInternalServerError(ww, fmt.Sprintf("AdminUpdateHotFeedAlgorithm: Error putting InteractionCapTag: %v", err)) return } err = fes.AddHotFeedParamToGlobalState(_GlobalStatePrefixForHotFeedTimeDecayBlocks, requestData.TimeDecayBlocks) if err != nil { _AddInternalServerError(ww, fmt.Sprintf("AdminUpdateHotFeedAlgorithm: Error putting TimeDecayBlocks: %v", err)) return } err = fes.AddHotFeedParamToGlobalState(_GlobalStatePrefixForHotFeedTagTimeDecayBlocks, requestData.TimeDecayBlocksTag) if err != nil { _AddInternalServerError(ww, fmt.Sprintf("AdminUpdateHotFeedAlgorithm: Error putting TimeDecayBlocksTag: %v", err)) return } if len(requestData.TxnTypeMultiplierMap) > 0 { txnTypeMultiplierMapBuffer := bytes.NewBuffer([]byte{}) if err := gob.NewEncoder(txnTypeMultiplierMapBuffer).Encode(requestData.TxnTypeMultiplierMap); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedAlgorithm: Problem encoding transaction multiplier map: %v", err)) return } if err := fes.GlobalState.Put(_GlobalStatePrefixHotFeedTxnTypeMultiplierBasisPoints, txnTypeMultiplierMapBuffer.Bytes()); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedAlgorithm: Problem putting txn type multiplier map in global state: %v", err)) return } } res := AdminUpdateHotFeedAlgorithmResponse{} if err := json.NewEncoder(ww).Encode(res); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedAlgorithm: Problem encoding response as JSON: %v", err)) return } } func (fes *APIServer) AddHotFeedParamToGlobalState(prefix []byte, value int) error { if value > 0 { err := fes.GlobalState.Put( prefix, lib.EncodeUint64(uint64(value)), ) return err } return nil } type AdminGetHotFeedAlgorithmRequest struct{} type AdminGetHotFeedAlgorithmResponse struct { InteractionCap uint64 InteractionCapTag uint64 TimeDecayBlocks uint64 TimeDecayBlocksTag uint64 TxnTypeMultiplierMap map[lib.TxnType]uint64 } func (fes *APIServer) AdminGetHotFeedAlgorithm(ww http.ResponseWriter, req *http.Request) { decoder := json.NewDecoder(io.LimitReader(req.Body, MaxRequestBodySizeBytes)) requestData := AdminGetHotFeedAlgorithmRequest{} if err := decoder.Decode(&requestData); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminGetHotFeedAlgorithm: Problem parsing request body: %v", err)) return } interactionCap, interactionCapTag, timeDecayBlocks, timeDecayBlocksTag, txnTypeMultiplierMap, err := fes.GetHotFeedConstantsFromGlobalState() if err != nil { _AddInternalServerError(ww, fmt.Sprintf("AdminGetHotFeedAlgorithm: Error getting constants: %v", err)) return } res := AdminGetHotFeedAlgorithmResponse{ InteractionCap: interactionCap, InteractionCapTag: interactionCapTag, TimeDecayBlocks: timeDecayBlocks, TimeDecayBlocksTag: timeDecayBlocksTag, TxnTypeMultiplierMap: txnTypeMultiplierMap, } if err := json.NewEncoder(ww).Encode(res); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminGetHotFeedAlgorithm: Problem encoding response as JSON: %v", err)) return } } type AdminUpdateHotFeedPostMultiplierRequest struct { PostHashHex string `safeforlogging:"true"` Multiplier float64 `safeforlogging:"true"` } type AdminUpdateHotFeedPostMultiplierResponse struct{} func (fes *APIServer) AdminUpdateHotFeedPostMultiplier(ww http.ResponseWriter, req *http.Request) { decoder := json.NewDecoder(io.LimitReader(req.Body, MaxRequestBodySizeBytes)) requestData := AdminUpdateHotFeedPostMultiplierRequest{} if err := decoder.Decode(&requestData); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedPostMultiplier: Problem parsing request body: %v", err)) return } if requestData.Multiplier < 0 { _AddBadRequestError(ww, fmt.Sprintf( "AdminUpdateHotFeedPostMultiplier: Please provide non-negative multiplier: %f", requestData.Multiplier)) return } // Decode the postHash. postHash := &lib.BlockHash{} if requestData.PostHashHex != "" { postHashBytes, err := hex.DecodeString(requestData.PostHashHex) if err != nil || len(postHashBytes) != lib.HashSizeBytes { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedPostMultiplier: Error parsing post hash %v: %v", requestData.PostHashHex, err)) return } copy(postHash[:], postHashBytes) } else { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedPostMultiplier: Request missing PostHashHex")) return } // Add a new hot feed op for this post. hotFeedOp := HotFeedApprovedPostOp{ IsRemoval: false, Multiplier: requestData.Multiplier, } hotFeedOpDataBuf := bytes.NewBuffer([]byte{}) gob.NewEncoder(hotFeedOpDataBuf).Encode(hotFeedOp) opTimestamp := uint64(time.Now().UnixNano()) hotFeedOpKey := GlobalStateKeyForHotFeedApprovedPostOp(opTimestamp, postHash) err := fes.GlobalState.Put(hotFeedOpKey, hotFeedOpDataBuf.Bytes()) if err != nil { _AddInternalServerError(ww, fmt.Sprintf("AdminUpdateHotFeedPostMultiplier: Problem putting hotFeedOp: %v", err)) return } res := AdminUpdateHotFeedPostMultiplierResponse{} if err := json.NewEncoder(ww).Encode(res); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedPostMultiplier: Problem encoding response as JSON: %v", err)) return } } type AdminUpdateHotFeedUserMultiplierRequest struct { Username string `safeforlogging:"true"` InteractionMultiplier float64 `safeforlogging:"true"` PostsMultiplier float64 `safeforlogging:"true"` } type AdminUpdateHotFeedUserMultiplierResponse struct{} func (fes *APIServer) AdminUpdateHotFeedUserMultiplier(ww http.ResponseWriter, req *http.Request) { decoder := json.NewDecoder(io.LimitReader(req.Body, MaxRequestBodySizeBytes)) requestData := AdminUpdateHotFeedUserMultiplierRequest{} if err := decoder.Decode(&requestData); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedUserMultiplier: Problem parsing request body: %v", err)) return } // Verify the username adheres to the consensus username criteria. if len(requestData.Username) == 0 || len(requestData.Username) > lib.MaxUsernameLengthBytes || !lib.UsernameRegex.Match([]byte(requestData.Username)) { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedUserMultiplier: Must provide a valid username")) return } // Verify the username has an underlying profile. pubKey, err := fes.getPublicKeyFromUsernameOrPublicKeyString(requestData.Username) if err != nil { _AddBadRequestError(ww, fmt.Sprintf( "AdminUpdateHotFeedUserMultiplier: Username %s has no associated underlying publickey.", requestData.Username)) return } // Use a utxoView to get the pkid for this pub key. utxoView, err := fes.backendServer.GetMempool().GetAugmentedUniversalView() if err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedUserMultiplier: Problem getting utxoView: %v", err)) return } pkidEntry := utxoView.GetPKIDForPublicKey(pubKey) if pkidEntry == nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedUserMultiplier: PKID not found for username: %s", requestData.Username)) return } // Add a new hot feed op for this post. hotFeedOp := HotFeedPKIDMultiplierOp{ InteractionMultiplier: requestData.InteractionMultiplier, PostsMultiplier: requestData.PostsMultiplier, } hotFeedOpDataBuf := bytes.NewBuffer([]byte{}) gob.NewEncoder(hotFeedOpDataBuf).Encode(hotFeedOp) opTimestamp := uint64(time.Now().UnixNano()) hotFeedOpKey := GlobalStateKeyForHotFeedPKIDMultiplierOp(opTimestamp, pkidEntry.PKID) err = fes.GlobalState.Put(hotFeedOpKey, hotFeedOpDataBuf.Bytes()) if err != nil { _AddInternalServerError(ww, fmt.Sprintf("AdminUpdateHotFeedUserMultiplier: Problem putting hotFeedOp: %v", err)) return } res := AdminUpdateHotFeedUserMultiplierResponse{} if err := json.NewEncoder(ww).Encode(res); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminUpdateHotFeedUserMultiplier: Problem encoding response as JSON: %v", err)) return } } type AdminGetHotFeedUserMultiplierRequest struct { Username string `safeforlogging:"true"` } type AdminGetHotFeedUserMultiplierResponse struct { InteractionMultiplier float64 `safeforlogging:"true"` PostsMultiplier float64 `safeforlogging:"true"` } func (fes *APIServer) AdminGetHotFeedUserMultiplier(ww http.ResponseWriter, req *http.Request) { decoder := json.NewDecoder(io.LimitReader(req.Body, MaxRequestBodySizeBytes)) requestData := AdminGetHotFeedUserMultiplierRequest{} if err := decoder.Decode(&requestData); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminGetHotFeedUserMultiplier: Problem parsing request body: %v", err)) return } // Verify the username adheres to the consensus username criteria. if len(requestData.Username) == 0 || len(requestData.Username) > lib.MaxUsernameLengthBytes || !lib.UsernameRegex.Match([]byte(requestData.Username)) { _AddBadRequestError(ww, fmt.Sprintf("AdminGetHotFeedUserMultiplier: Must provide a valid username")) return } // Verify the username has an underlying profile. pubKey, err := fes.getPublicKeyFromUsernameOrPublicKeyString(requestData.Username) if err != nil { _AddBadRequestError(ww, fmt.Sprintf( "AdminGetHotFeedUserMultiplier: Username %s has no associated underlying publickey.", requestData.Username)) return } // Use a utxoView to get the pkid for this pub key. utxoView, err := fes.backendServer.GetMempool().GetAugmentedUniversalView() if err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminGetHotFeedUserMultiplier: Problem getting utxoView: %v", err)) return } pkidEntry := utxoView.GetPKIDForPublicKey(pubKey) if pkidEntry == nil { _AddBadRequestError(ww, fmt.Sprintf("AdminGetHotFeedUserMultiplier: PKID not found for username: %s", requestData.Username)) return } // Grab the current multiplier object for this PKID. hotFeedMultiplier := fes.HotFeedPKIDMultipliers[*pkidEntry.PKID] if hotFeedMultiplier == nil { hotFeedMultiplier = &HotFeedPKIDMultiplier{ InteractionMultiplier: 1, PostsMultiplier: 1, } } res := AdminGetHotFeedUserMultiplierResponse{ InteractionMultiplier: hotFeedMultiplier.InteractionMultiplier, PostsMultiplier: hotFeedMultiplier.PostsMultiplier, } if err := json.NewEncoder(ww).Encode(res); err != nil { _AddBadRequestError(ww, fmt.Sprintf("AdminGetHotFeedUserMultiplier: Problem encoding response as JSON: %v", err)) return } }
{ // Figure out which post this transaction should affect. interactionPostHash := &lib.BlockHash{} var interactionPostEntry *lib.PostEntry txnType := txn.TxnMeta.GetTxnType() if txnType == lib.TxnTypeLike { txMeta := txn.TxnMeta.(*lib.LikeMetadata) interactionPostHash = txMeta.LikedPostHash } else if txnType == lib.TxnTypeBasicTransfer { // Check for a post being diamonded. diamondPostHashBytes, hasDiamondPostHash := txn.ExtraData[lib.DiamondPostHashKey] if hasDiamondPostHash { copy(interactionPostHash[:], diamondPostHashBytes[:]) } else { // If this basic transfer doesn't have a diamond, it is irrelevant. return nil, nil } } else if txnType == lib.TxnTypeSubmitPost { txMeta := txn.TxnMeta.(*lib.SubmitPostMetadata) // If this is a transaction creating a brand new post, we can ignore it. if len(txMeta.PostHashToModify) == 0 { return nil, nil } postHash := &lib.BlockHash{} copy(postHash[:], txMeta.PostHashToModify[:]) interactionPostEntry = utxoView.GetPostEntryForPostHash(postHash) // For posts we must process three cases: Reposts, Quoted Reposts, and Comments. if lib.IsVanillaRepost(interactionPostEntry) || lib.IsQuotedRepost(interactionPostEntry) { repostedPostHashBytes := txn.ExtraData[lib.RepostedPostHash] copy(interactionPostHash[:], repostedPostHashBytes) } else if len(interactionPostEntry.ParentStakeID) > 0 { copy(interactionPostHash[:], interactionPostEntry.ParentStakeID[:]) } else { return nil, nil } } else { // This transaction is not relevant, bail. return nil, nil } // If we haven't gotten the post entry yet, make sure we fetch it. if interactionPostEntry == nil { interactionPostEntry = utxoView.GetPostEntryForPostHash(interactionPostHash) } // Double check that we got a valid interaction post entry. If not, bail. if interactionPostEntry == nil { return nil, nil } // At this point, we have a post hash to return so look up the posterPKID as well. posterPKIDEntry := utxoView.GetPKIDForPublicKey(interactionPostEntry.PosterPublicKey) return interactionPostHash, posterPKIDEntry.PKID }
lib.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 #![forbid(unsafe_code)] //! This provides a simple networking substrate between a client and server. It is assumed that all //! operations are blocking and return only complete blocks of data. The intended use case has the //! server blocking on read. Upon receiving a payload during a read, the server should process the //! payload, write a response, and then block on read again. The client should block on read after //! performing a write. Upon errors or remote disconnections, the call (read, write) will return an //! error to let the caller know of the event. A follow up call will result in the service //! attempting to either reconnect in the case of a client or accept a new client in the case of a //! server. //! //! Internally both the client and server leverage a NetworkStream that communications in blocks //! where a block is a length prefixed array of bytes. use diem_logger::{info, trace, warn, Schema}; use diem_secure_push_metrics::{register_int_counter_vec, IntCounterVec}; use once_cell::sync::Lazy; use serde::Serialize; use std::{ io::{Read, Write}, net::{Shutdown, SocketAddr, TcpListener, TcpStream}, thread, time, }; use thiserror::Error; #[derive(Schema)] struct SecureNetLogSchema<'a> { service: &'static str, mode: NetworkMode, event: LogEvent, #[schema(debug)] remote_peer: Option<&'a SocketAddr>, #[schema(debug)] error: Option<&'a Error>, } impl<'a> SecureNetLogSchema<'a> { fn new(service: &'static str, mode: NetworkMode, event: LogEvent) -> Self { Self { service, mode, event, remote_peer: None, error: None, } } } #[derive(Clone, Copy, Serialize)] #[serde(rename_all = "snake_case")] enum LogEvent { ConnectionAttempt, ConnectionSuccessful, ConnectionFailed, DisconnectedPeerOnRead, DisconnectedPeerOnWrite, Shutdown, } #[derive(Clone, Copy, Serialize)] #[serde(rename_all = "snake_case")] enum NetworkMode { Client, Server, } impl NetworkMode { fn as_str(&self) -> &'static str { match self { NetworkMode::Client => "client", NetworkMode::Server => "server", } } } static EVENT_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| { register_int_counter_vec!( "diem_secure_net_events", "Outcome of secure net events", &["service", "mode", "method", "result"] ) .unwrap() }); #[derive(Clone, Copy, Serialize)] #[serde(rename_all = "snake_case")] enum Method { Connect, Read, Write, } impl Method { fn as_str(&self) -> &'static str { match self { Method::Connect => "connect", Method::Read => "read", Method::Write => "write", } } } #[derive(Clone, Copy, Serialize)] #[serde(rename_all = "snake_case")] enum MethodResult { Failure, Query, Success, } impl MethodResult { fn as_str(&self) -> &'static str { match self { MethodResult::Failure => "failure", MethodResult::Query => "query", MethodResult::Success => "success", } } } fn increment_counter( service: &'static str, mode: NetworkMode, method: Method, result: MethodResult, ) { EVENT_COUNTER .with_label_values(&[service, mode.as_str(), method.as_str(), result.as_str()]) .inc() } #[derive(Debug, Error)] pub enum Error { #[error("Already called shutdown")] AlreadyShutdown, #[error("Found data that is too large to decode: {0}")] DataTooLarge(usize), #[error("Internal network error:")] NetworkError(#[from] std::io::Error), #[error("No active stream")] NoActiveStream, #[error("Overflow error: {0}")] OverflowError(String), #[error("Remote stream cleanly closed")] RemoteStreamClosed, } pub struct NetworkClient { service: &'static str, server: SocketAddr, stream: Option<NetworkStream>, /// Read, Write, Connect timeout in milliseconds. timeout_ms: u64, } impl NetworkClient { pub fn new(service: &'static str, server: SocketAddr, timeout_ms: u64) -> Self { Self { service, server, stream: None, timeout_ms, } } fn increment_counter(&self, method: Method, result: MethodResult) { increment_counter(self.service, NetworkMode::Client, method, result) } /// Blocking read until able to successfully read an entire message pub fn read(&mut self) -> Result<Vec<u8>, Error> { self.increment_counter(Method::Read, MethodResult::Query); let stream = self.server()?; let result = stream.read(); if let Err(err) = &result { self.increment_counter(Method::Read, MethodResult::Failure); warn!(SecureNetLogSchema::new( self.service, NetworkMode::Client, LogEvent::DisconnectedPeerOnRead, ) .error(&err) .remote_peer(&self.server)); self.stream = None; } else { self.increment_counter(Method::Read, MethodResult::Success); } result } /// Shutdown the internal network stream pub fn shutdown(&mut self) -> Result<(), Error> { info!(SecureNetLogSchema::new( self.service, NetworkMode::Client, LogEvent::Shutdown, )); let stream = self.stream.take().ok_or(Error::NoActiveStream)?; stream.shutdown()?; Ok(()) } /// Blocking write until able to successfully send an entire message pub fn write(&mut self, data: &[u8]) -> Result<(), Error> { self.increment_counter(Method::Write, MethodResult::Query); let stream = self.server()?; let result = stream.write(data); if let Err(err) = &result { self.increment_counter(Method::Write, MethodResult::Failure); warn!(SecureNetLogSchema::new( self.service, NetworkMode::Client, LogEvent::DisconnectedPeerOnWrite, ) .error(&err) .remote_peer(&self.server)); self.stream = None; } else { self.increment_counter(Method::Write, MethodResult::Success); } result } fn server(&mut self) -> Result<&mut NetworkStream, Error> { if self.stream.is_none() { self.increment_counter(Method::Connect, MethodResult::Query); info!(SecureNetLogSchema::new( self.service, NetworkMode::Client, LogEvent::ConnectionAttempt, ) .remote_peer(&self.server)); let timeout = std::time::Duration::from_millis(self.timeout_ms); let mut stream = TcpStream::connect_timeout(&self.server, timeout); let sleeptime = time::Duration::from_millis(100); while let Err(err) = stream { self.increment_counter(Method::Connect, MethodResult::Failure); warn!(SecureNetLogSchema::new( self.service, NetworkMode::Client, LogEvent::ConnectionFailed, ) .error(&err.into()) .remote_peer(&self.server)); thread::sleep(sleeptime); stream = TcpStream::connect_timeout(&self.server, timeout); } let stream = stream?; stream.set_nodelay(true)?; self.stream = Some(NetworkStream::new(stream, self.server, self.timeout_ms)); self.increment_counter(Method::Connect, MethodResult::Success); info!(SecureNetLogSchema::new( self.service, NetworkMode::Client, LogEvent::ConnectionSuccessful, ) .remote_peer(&self.server)); } self.stream.as_mut().ok_or(Error::NoActiveStream) } } pub struct NetworkServer { service: &'static str, listener: Option<TcpListener>, stream: Option<NetworkStream>, /// Read, Write, Connect timeout in milliseconds. timeout_ms: u64, } impl NetworkServer { pub fn new(service: &'static str, listen: SocketAddr, timeout_ms: u64) -> Self { let listener = TcpListener::bind(listen); Self { service, listener: Some(listener.unwrap()), stream: None, timeout_ms, } } fn increment_counter(&self, method: Method, result: MethodResult) { increment_counter(self.service, NetworkMode::Server, method, result) } /// If there isn't already a downstream client, it accepts. Otherwise it /// blocks until able to successfully read an entire message pub fn read(&mut self) -> Result<Vec<u8>, Error> { self.increment_counter(Method::Read, MethodResult::Query); let result = { let stream = self.client()?; stream.read().map_err(|e| (stream.remote, e)) }; if let Err((remote, err)) = &result { self.increment_counter(Method::Read, MethodResult::Failure); warn!(SecureNetLogSchema::new( self.service, NetworkMode::Server, LogEvent::DisconnectedPeerOnRead, ) .error(&err) .remote_peer(&remote)); self.stream = None; } else { self.increment_counter(Method::Read, MethodResult::Success); } result.map_err(|err| err.1) } /// Shutdown the internal network stream pub fn shutdown(&mut self) -> Result<(), Error> { info!(SecureNetLogSchema::new( self.service, NetworkMode::Server, LogEvent::Shutdown, )); self.listener.take().ok_or(Error::AlreadyShutdown)?; let stream = self.stream.take().ok_or(Error::NoActiveStream)?; stream.shutdown()?; Ok(()) } /// If there isn't already a downstream client, it accepts. Otherwise it /// blocks until it is able to successfully send an entire message. pub fn write(&mut self, data: &[u8]) -> Result<(), Error> { self.increment_counter(Method::Write, MethodResult::Query); let result = { let stream = self.client()?; stream.write(data).map_err(|e| (stream.remote, e)) }; if let Err((remote, err)) = &result { self.increment_counter(Method::Write, MethodResult::Failure); warn!(SecureNetLogSchema::new( self.service, NetworkMode::Server, LogEvent::DisconnectedPeerOnWrite, ) .error(&err) .remote_peer(&remote)); self.stream = None; } else { self.increment_counter(Method::Write, MethodResult::Success); } result.map_err(|err| err.1) } fn client(&mut self) -> Result<&mut NetworkStream, Error>
} struct NetworkStream { stream: TcpStream, remote: SocketAddr, buffer: Vec<u8>, temp_buffer: [u8; 1024], } impl NetworkStream { pub fn new(stream: TcpStream, remote: SocketAddr, timeout_ms: u64) -> Self { let timeout = Some(std::time::Duration::from_millis(timeout_ms)); // These only fail if a duration of 0 is passed in. stream.set_read_timeout(timeout).unwrap(); stream.set_write_timeout(timeout).unwrap(); Self { stream, remote, buffer: Vec::new(), temp_buffer: [0; 1024], } } /// Blocking read until able to successfully read an entire message pub fn read(&mut self) -> Result<Vec<u8>, Error> { let result = self.read_buffer(); if !result.is_empty() { return Ok(result); } loop { trace!("Attempting to read from stream"); let read = self.stream.read(&mut self.temp_buffer)?; trace!("Read {} bytes from stream", read); if read == 0 { return Err(Error::RemoteStreamClosed); } self.buffer.extend(self.temp_buffer[..read].to_vec()); let result = self.read_buffer(); if !result.is_empty() { trace!("Found a message in the stream"); return Ok(result); } trace!("Did not find a message yet, reading again"); } } /// Terminate the socket pub fn shutdown(&self) -> Result<(), Error> { Ok(self.stream.shutdown(Shutdown::Both)?) } /// Blocking write until able to successfully send an entire message pub fn write(&mut self, data: &[u8]) -> Result<(), Error> { let u32_max = u32::max_value() as usize; if u32_max <= data.len() { return Err(Error::DataTooLarge(data.len())); } let data_len = data.len() as u32; trace!("Attempting to write length, {}, to the stream", data_len); self.write_all(&data_len.to_le_bytes())?; trace!("Attempting to write data, {}, to the stream", data_len); self.write_all(data)?; trace!( "Successfully wrote length, {}, and data to the stream", data_len ); Ok(()) } /// Data sent on a TCP socket may not necessarily be delivered at the exact time. So a read may /// only include a subset of what was sent. This wraps around the TCP read buffer to ensure /// that only full messages are received. fn read_buffer(&mut self) -> Vec<u8> { if self.buffer.len() < 4 { return Vec::new(); } let mut u32_bytes = [0; 4]; u32_bytes.copy_from_slice(&self.buffer[..4]); let data_size = u32::from_le_bytes(u32_bytes) as usize; let remaining_data = &self.buffer[4..]; if remaining_data.len() < data_size { return Vec::new(); } let returnable_data = remaining_data[..data_size].to_vec(); self.buffer = remaining_data[data_size..].to_vec(); returnable_data } /// Writing to a TCP socket will take in as much data as the underlying buffer has space for. /// This wraps around that buffer and blocks until all the data has been pushed. fn write_all(&mut self, data: &[u8]) -> Result<(), Error> { let mut unwritten = data; let mut total_written: u64 = 0; while !unwritten.is_empty() { let written = self.stream.write(unwritten)?; total_written = total_written .checked_add(written as u64) .ok_or_else(|| Error::OverflowError("write_all::total_written".into()))?; unwritten = &data[total_written as usize..]; } Ok(()) } } #[cfg(test)] mod test { use super::*; use diem_config::utils; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; /// Read, Write, Connect timeout in milliseconds. const TIMEOUT: u64 = 5_000; #[test] fn test_ping() { let server_port = utils::get_available_port(); let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), server_port); let mut server = NetworkServer::new("test", server_addr, TIMEOUT); let mut client = NetworkClient::new("test", server_addr, TIMEOUT); let data = vec![0, 1, 2, 3]; client.write(&data).unwrap(); let result = server.read().unwrap(); assert_eq!(data, result); let data = vec![4, 5, 6, 7]; server.write(&data).unwrap(); let result = client.read().unwrap(); assert_eq!(data, result); } #[test] fn test_client_shutdown() { let server_port = utils::get_available_port(); let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), server_port); let mut server = NetworkServer::new("test", server_addr, TIMEOUT); let mut client = NetworkClient::new("test", server_addr, TIMEOUT); let data = vec![0, 1, 2, 3]; client.write(&data).unwrap(); let result = server.read().unwrap(); assert_eq!(data, result); client.shutdown().unwrap(); let mut client = NetworkClient::new("test", server_addr, TIMEOUT); assert!(server.read().is_err()); let data = vec![4, 5, 6, 7]; client.write(&data).unwrap(); let result = server.read().unwrap(); assert_eq!(data, result); } #[test] fn test_server_shutdown() { let server_port = utils::get_available_port(); let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), server_port); let mut server = NetworkServer::new("test", server_addr, TIMEOUT); let mut client = NetworkClient::new("test", server_addr, TIMEOUT); let data = vec![0, 1, 2, 3]; client.write(&data).unwrap(); let result = server.read().unwrap(); assert_eq!(data, result); server.shutdown().unwrap(); let mut server = NetworkServer::new("test", server_addr, TIMEOUT); let data = vec![4, 5, 6, 7]; // We aren't notified immediately that a server has shutdown, but it happens eventually while client.write(&data).is_ok() {} let data = vec![8, 9, 10, 11]; client.write(&data).unwrap(); let result = server.read().unwrap(); assert_eq!(data, result); } #[test] fn test_write_two_messages_buffered() { let server_port = utils::get_available_port(); let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), server_port); let mut server = NetworkServer::new("test", server_addr, TIMEOUT); let mut client = NetworkClient::new("test", server_addr, TIMEOUT); let data1 = vec![0, 1, 2, 3]; let data2 = vec![4, 5, 6, 7]; client.write(&data1).unwrap(); client.write(&data2).unwrap(); let result1 = server.read().unwrap(); let result2 = server.read().unwrap(); assert_eq!(data1, result1); assert_eq!(data2, result2); } #[test] fn test_server_timeout() { let server_port = utils::get_available_port(); let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), server_port); let mut server = NetworkServer::new("test", server_addr, TIMEOUT); let mut client = NetworkClient::new("test", server_addr, TIMEOUT); let data1 = vec![0, 1, 2, 3]; let data2 = vec![4, 5, 6, 7]; // First client, success client.write(&data1).unwrap(); let result1 = server.read().unwrap(); assert_eq!(data1, result1); // Timedout server.read().unwrap_err(); // New client, success, note the previous client connection is still active, the server is // actively letting it go due to lack of activity. let mut client2 = NetworkClient::new("test", server_addr, TIMEOUT); client2.write(&data2).unwrap(); let result2 = server.read().unwrap(); assert_eq!(data2, result2); } #[test] fn test_client_timeout() { let server_port = utils::get_available_port(); let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), server_port); let mut server = NetworkServer::new("test", server_addr, TIMEOUT); let mut client = NetworkClient::new("test", server_addr, TIMEOUT); let data1 = vec![0, 1, 2, 3]; let data2 = vec![4, 5, 6, 7]; // First server success client.write(&data1).unwrap(); let result1 = server.read().unwrap(); assert_eq!(data1, result1); // Timedout, it is hard to simulate a client receiving a write timeout client.read().unwrap_err(); // Clean up old Server listener but keep the stream online. Start a new server, which will // be the one the client now connects to. server.listener = None; let mut server2 = NetworkServer::new("test", server_addr, TIMEOUT); // Client starts a new stream, success client.write(&data2).unwrap(); let result2 = server2.read().unwrap(); assert_eq!(data2, result2); } }
{ if self.stream.is_none() { self.increment_counter(Method::Connect, MethodResult::Query); info!(SecureNetLogSchema::new( self.service, NetworkMode::Server, LogEvent::ConnectionAttempt, )); let listener = self.listener.as_mut().ok_or(Error::AlreadyShutdown)?; let (stream, stream_addr) = match listener.accept() { Ok(ok) => ok, Err(err) => { self.increment_counter(Method::Connect, MethodResult::Failure); let err = err.into(); warn!(SecureNetLogSchema::new( self.service, NetworkMode::Server, LogEvent::ConnectionSuccessful, ) .error(&err)); return Err(err); } }; self.increment_counter(Method::Connect, MethodResult::Success); info!(SecureNetLogSchema::new( self.service, NetworkMode::Server, LogEvent::ConnectionSuccessful, ) .remote_peer(&stream_addr)); stream.set_nodelay(true)?; self.stream = Some(NetworkStream::new(stream, stream_addr, self.timeout_ms)); } self.stream.as_mut().ok_or(Error::NoActiveStream) }
test_auto_TCat.py
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals from ..utils import TCat def test_TCat_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), ignore_exception=dict(deprecated='1.0.0', nohash=True, usedefault=True, ), in_files=dict(argstr=' %s', copyfile=False, mandatory=True, position=-1, ), num_threads=dict(nohash=True, usedefault=True, ), out_file=dict(argstr='-prefix %s', name_source='in_files', name_template='%s_tcat', ), outputtype=dict(), rlt=dict(argstr='-rlt%s', position=1, ), terminal_output=dict(deprecated='1.0.0', nohash=True, ), verbose=dict(argstr='-verb', ), ) inputs = TCat.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()):
def test_TCat_outputs(): output_map = dict(out_file=dict(), ) outputs = TCat.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): assert getattr(outputs.traits()[key], metakey) == value
assert getattr(inputs.traits()[key], metakey) == value
__init__.py
# coding=utf-8 # Copyright 2018 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Soft Actor Critic agent.""" from tf_agents.agents.sac import sac_agent
# # http://www.apache.org/licenses/LICENSE-2.0 #
mod.rs
use anyhow::{bail, Error, Result}; use diesel::r2d2::{self, ConnectionManager, PooledConnection}; use diesel::sqlite::SqliteConnection; use diesel::RunQueryDsl; use std::path::Path; mod schema; pub use self::schema::*; #[allow(dead_code)] const DB_MIGRATIONS_PATH: &str = "migrations"; embed_migrations!("migrations"); #[derive(Clone)] pub struct DB { pool: r2d2::Pool<ConnectionManager<SqliteConnection>>, } #[derive(Debug)] struct ConnectionCustomizer {} impl diesel::r2d2::CustomizeConnection<SqliteConnection, diesel::r2d2::Error> for ConnectionCustomizer { fn on_acquire(&self, connection: &mut SqliteConnection) -> Result<(), diesel::r2d2::Error> { let query = diesel::sql_query( r#" PRAGMA busy_timeout = 60000; PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL; PRAGMA foreign_keys = ON; "#, ); query .execute(connection) .map_err(diesel::r2d2::Error::QueryError)?; Ok(()) } } impl DB { pub fn new(path: &Path) -> Result<DB> { std::fs::create_dir_all(&path.parent().unwrap())?; let manager = ConnectionManager::<SqliteConnection>::new(path.to_string_lossy()); let pool = diesel::r2d2::Pool::builder() .connection_customizer(Box::new(ConnectionCustomizer {})) .build(manager)?; let db = DB { pool }; db.migrate_up()?; Ok(db) } pub fn connect(&self) -> Result<PooledConnection<ConnectionManager<SqliteConnection>>> { self.pool.get().map_err(Error::new) } #[allow(dead_code)] fn migrate_down(&self) -> Result<()> { let connection = self.connect().unwrap(); loop { match diesel_migrations::revert_latest_migration_in_directory( &connection, Path::new(DB_MIGRATIONS_PATH), ) { Ok(_) => (), Err(diesel_migrations::RunMigrationsError::MigrationError( diesel_migrations::MigrationError::NoMigrationRun, )) => break, Err(e) => bail!(e), } } Ok(()) } fn
(&self) -> Result<()> { let connection = self.connect().unwrap(); embedded_migrations::run(&connection)?; Ok(()) } } #[test] fn run_migrations() { use crate::test::*; use crate::test_name; let output_dir = prepare_test_directory(test_name!()); let db_path = output_dir.join("db.sqlite"); let db = DB::new(&db_path).unwrap(); db.migrate_down().unwrap(); db.migrate_up().unwrap(); }
migrate_up
provider.go
package etw import ( "crypto/sha1" "encoding/binary" "strings" "unicode/utf16" "github.com/Microsoft/go-winio/pkg/guid" "golang.org/x/sys/windows" ) // Provider represents an ETW event provider. It is identified by a provider // name and ID (GUID), which should always have a 1:1 mapping to each other // (e.g. don't use multiple provider names with the same ID, or vice versa). type Provider struct { ID *guid.GUID handle providerHandle metadata []byte callback EnableCallback index uint enabled bool level Level keywordAny uint64 keywordAll uint64 } // String returns the `provider`.ID as a string func (provider *Provider) String() string { if provider == nil { return "<nil>" } return provider.ID.String() } type providerHandle uint64 // ProviderState informs the provider EnableCallback what action is being // performed. type ProviderState uint32 const ( // ProviderStateDisable indicates the provider is being disabled. ProviderStateDisable ProviderState = iota // ProviderStateEnable indicates the provider is being enabled. ProviderStateEnable // ProviderStateCaptureState indicates the provider is having its current // state snap-shotted. ProviderStateCaptureState ) type eventInfoClass uint32 const ( eventInfoClassProviderBinaryTrackInfo eventInfoClass = iota eventInfoClassProviderSetReserved1 eventInfoClassProviderSetTraits eventInfoClassProviderUseDescriptorType ) // EnableCallback is the form of the callback function that receives provider // enable/disable notifications from ETW. type EnableCallback func(*guid.GUID, ProviderState, Level, uint64, uint64, uintptr) func providerCallback(sourceID *guid.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) { provider := providers.getProvider(uint(i)) switch state { case ProviderStateDisable: provider.enabled = false case ProviderStateEnable: provider.enabled = true provider.level = level provider.keywordAny = matchAnyKeyword provider.keywordAll = matchAllKeyword } if provider.callback != nil { provider.callback(sourceID, state, level, matchAnyKeyword, matchAllKeyword, filterData) } } // providerCallbackAdapter acts as the first-level callback from the C/ETW side // for provider notifications. Because Go has trouble with callback arguments of // different size, it has only pointer-sized arguments, which are then cast to // the appropriate types when calling providerCallback. func providerCallbackAdapter(sourceID *guid.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr { providerCallback(sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i) return 0 } // providerIDFromName generates a provider ID based on the provider name. It // uses the same algorithm as used by .NET's EventSource class, which is based // on RFC 4122. More information on the algorithm can be found here: // https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/ // The algorithm is roughly: // Hash = Sha1(namespace + arg.ToUpper().ToUtf16be()) // Guid = Hash[0..15], with Hash[7] tweaked according to RFC 4122 func providerIDFromName(name string) *guid.GUID { buffer := sha1.New() namespace := []byte{0x48, 0x2C, 0x2D, 0xB2, 0xC3, 0x90, 0x47, 0xC8, 0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB} buffer.Write(namespace) binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name)))) sum := buffer.Sum(nil) sum[7] = (sum[7] & 0xf) | 0x50 return &guid.GUID{ Data1: binary.LittleEndian.Uint32(sum[0:4]), Data2: binary.LittleEndian.Uint16(sum[4:6]), Data3: binary.LittleEndian.Uint16(sum[6:8]), Data4: [8]byte{sum[8], sum[9], sum[10], sum[11], sum[12], sum[13], sum[14], sum[15]}, } } // NewProvider creates and registers a new ETW provider. The provider ID is // generated based on the provider name. func NewProvider(name string, callback EnableCallback) (provider *Provider, err error) { return NewProviderWithID(name, providerIDFromName(name), callback) } // Close unregisters the provider. func (provider *Provider) Close() error { if provider == nil { return nil } providers.removeProvider(provider) return eventUnregister(provider.handle) } // IsEnabled calls IsEnabledForLevelAndKeywords with LevelAlways and all // keywords set. func (provider *Provider) IsEnabled() bool { return provider.IsEnabledForLevelAndKeywords(LevelAlways, ^uint64(0)) } // IsEnabledForLevel calls IsEnabledForLevelAndKeywords with the specified level // and all keywords set. func (provider *Provider) IsEnabledForLevel(level Level) bool { return provider.IsEnabledForLevelAndKeywords(level, ^uint64(0)) } // IsEnabledForLevelAndKeywords allows event producer code to check if there are // any event sessions that are interested in an event, based on the event level // and keywords. Although this check happens automatically in the ETW // infrastructure, it can be useful to check if an event will actually be // consumed before doing expensive work to build the event data. func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool { if provider == nil { return false } if !provider.enabled { return false } // ETW automatically sets the level to 255 if it is specified as 0, so we // don't need to worry about the level=0 (all events) case. if level > provider.level { return false } if keywords != 0 && (keywords&provider.keywordAny == 0 || keywords&provider.keywordAll != provider.keywordAll) { return false } return true } // WriteEvent writes a single ETW event from the provider. The event is // constructed based on the EventOpt and FieldOpt values that are passed as // opts. func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error { if provider == nil { return nil } options := eventOptions{descriptor: newEventDescriptor()} em := &eventMetadata{} ed := &eventData{} // We need to evaluate the EventOpts first since they might change tags, and // we write out the tags before evaluating FieldOpts. for _, opt := range eventOpts { opt(&options) } if !provider.IsEnabledForLevelAndKeywords(options.descriptor.level, options.descriptor.keyword) { return nil } em.writeEventHeader(name, options.tags) for _, opt := range fieldOpts { opt(em, ed) } // Don't pass a data blob if there is no event data. There will always be // event metadata (e.g. for the name) so we don't need to do this check for // the metadata. dataBlobs := [][]byte{} if len(ed.bytes()) > 0 { dataBlobs = [][]byte{ed.bytes()} } return provider.writeEventRaw(options.descriptor, options.activityID, options.relatedActivityID, [][]byte{em.bytes()}, dataBlobs) } // writeEventRaw writes a single ETW event from the provider. This function is // less abstracted than WriteEvent, and presents a fairly direct interface to // the event writing functionality. It expects a series of event metadata and // event data blobs to be passed in, which must conform to the TraceLogging // schema. The functions on EventMetadata and EventData can help with creating // these blobs. The blobs of each type are effectively concatenated together by
relatedActivityID *guid.GUID, metadataBlobs [][]byte, dataBlobs [][]byte) error { dataDescriptorCount := uint32(1 + len(metadataBlobs) + len(dataBlobs)) dataDescriptors := make([]eventDataDescriptor, 0, dataDescriptorCount) dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata)) for _, blob := range metadataBlobs { dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob)) } for _, blob := range dataBlobs { dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob)) } return eventWriteTransfer(provider.handle, descriptor, (*windows.GUID)(activityID), (*windows.GUID)(relatedActivityID), dataDescriptorCount, &dataDescriptors[0]) }
// the ETW infrastructure. func (provider *Provider) writeEventRaw( descriptor *eventDescriptor, activityID *guid.GUID,
unsupported.rs
use pin_project::pin_project; #[pin_project] struct Struct1 {} //~ ERROR may not be used on structs with zero fields #[pin_project] struct Struct2(); //~ ERROR may not be used on structs with zero fields #[pin_project] struct Struct3; //~ ERROR may not be used on structs with units #[pin_project] enum Enum1 {} //~ ERROR may not be used on enums without variants #[pin_project] enum Enum2 { A = 2, //~ ERROR may not be used on enums with discriminants } #[pin_project] enum Enum3 { A, //~ ERROR may not be used on enums that have no field B, } #[pin_project] union Union { //~^ ERROR may only be used on structs or enums x: u8, } fn
() {}
main
create_step.go
package create import ( "strings" "github.com/jenkins-x/jx/v2/pkg/cmd/create/options" "github.com/jenkins-x/jx/v2/pkg/cmd/helper" "github.com/jenkins-x/jx-logging/pkg/log" "github.com/jenkins-x/jx/v2/pkg/cmd/opts" "github.com/jenkins-x/jx/v2/pkg/cmd/templates" "github.com/jenkins-x/jx/v2/pkg/config" "github.com/jenkins-x/jx/v2/pkg/jenkinsfile" "github.com/jenkins-x/jx/v2/pkg/tekton/syntax" "github.com/jenkins-x/jx/v2/pkg/util" "github.com/spf13/cobra" survey "gopkg.in/AlecAivazis/survey.v1" ) const ( defaultPipeline = "release" defaultLifecycle = "build" defaultMode = jenkinsfile.CreateStepModePost ) var ( createStepLong = templates.LongDesc(` Creates a step in the Jenkins X Pipeline `) createStepExample = templates.Examples(` # Create a new step in the Jenkins X Pipeline interactively jx create step # Creates a step on the command line: adding a post step to the release build lifecycle jx create step -sh "echo hello world" # Creates a step on the command line: adding a pre step to the pullRequest promote lifecycle jx create step -p pullrequest -l promote -m pre -c "echo before promote" `) ) // NewStepDetails configures a new step type NewStepDetails struct { Pipeline string Lifecycle string Mode string Step syntax.Step } // AddToPipeline adds the step to the given pipeline configuration func (s *NewStepDetails) AddToPipeline(projectConfig *config.ProjectConfig) error { pipelines := projectConfig.GetOrCreatePipelineConfig() pipeline, err := pipelines.Pipelines.GetPipeline(s.Pipeline, true) if err != nil { return err } lifecycle, err := pipeline.GetLifecycle(s.Lifecycle, true) if err != nil { return err } return lifecycle.CreateStep(s.Mode, &s.Step) } // CreateStepOptions the options for the create spring command type CreateStepOptions struct { options.CreateOptions Dir string NewStepDetails NewStepDetails } // NewCmdCreateStep creates a command object for the "create" command func NewCmdCreateStep(commonOpts *opts.CommonOptions) *cobra.Command { options := &CreateStepOptions{ CreateOptions: options.CreateOptions{ CommonOptions: commonOpts, }, } cmd := &cobra.Command{ Use: "step", Short: "Creates a step in the Jenkins X Pipeline", Aliases: []string{"steps"}, Long: createStepLong, Example: createStepExample, Run: func(cmd *cobra.Command, args []string) { options.Cmd = cmd options.Args = args err := options.Run() helper.CheckErr(err) }, } step := &options.NewStepDetails cmd.Flags().StringVarP(&step.Pipeline, "pipeline", "p", "", "The pipeline kind to add your step. Possible values: "+strings.Join(jenkinsfile.PipelineKinds, ", ")) cmd.Flags().StringVarP(&step.Lifecycle, "lifecycle", "l", "", "The lifecycle stage to add your step. Possible values: "+strings.Join(jenkinsfile.PipelineLifecycleNames, ", ")) cmd.Flags().StringVarP(&step.Mode, "mode", "m", "", "The create mode for the new step. Possible values: "+strings.Join(jenkinsfile.CreateStepModes, ", ")) cmd.Flags().StringVarP(&step.Step.Command, "sh", "c", "", "The command to invoke for the new step") cmd.Flags().StringVarP(&options.Dir, "dir", "d", "", "The root project directory. Defaults to the current dir") return cmd } // Run implements the command func (o *CreateStepOptions) Run() error { projectConfig, fileName, err := o.AddStepToProjectConfig() if err != nil { return err } err = projectConfig.SaveConfig(fileName) if err != nil { return err } log.Logger().Infof("Updated Jenkins X Pipeline file: %s", util.ColorInfo(fileName)) return nil } // AddStepToProjectConfig creates the new step, adds it to the project config, and returns the modified project config. func (o *CreateStepOptions) AddStepToProjectConfig() (*config.ProjectConfig, string, error) { dir := o.Dir var err error if dir == "" { dir, _, err := o.Git().FindGitConfigDir(o.Dir) if err != nil { return nil, "", err } if dir == "" { dir = "." } } projectConfig, fileName, err := config.LoadProjectConfig(dir) if err != nil { return nil, "", err } s := &o.NewStepDetails err = o.configureNewStepDetails(s) if err != nil { return nil, "", err } err = s.AddToPipeline(projectConfig) if err != nil
return projectConfig, fileName, nil } func (o *CreateStepOptions) configureNewStepDetails(stepDetails *NewStepDetails) error { s := &o.NewStepDetails if o.BatchMode { if s.Pipeline == "" { s.Pipeline = defaultPipeline } if s.Lifecycle == "" { s.Lifecycle = defaultLifecycle } if s.Mode == "" { s.Mode = defaultMode } if s.Step.GetCommand() == "" { return util.MissingOption("command") } return nil } var err error if s.Pipeline == "" { s.Pipeline, err = util.PickNameWithDefault(jenkinsfile.PipelineKinds, "Pick the pipeline kind: ", defaultPipeline, "which kind of pipeline do you want to add a step", o.GetIOFileHandles()) if err != nil { return err } } if s.Lifecycle == "" { s.Lifecycle, err = util.PickNameWithDefault(jenkinsfile.PipelineLifecycleNames, "Pick the lifecycle: ", defaultLifecycle, "which lifecycle (stage) do you want to add the step", o.GetIOFileHandles()) if err != nil { return err } } if s.Mode == "" { s.Mode, err = util.PickNameWithDefault(jenkinsfile.CreateStepModes, "Pick the create mode: ", defaultMode, "which create mode do you want to use to add the step - pre (before), post (after) or replace?", o.GetIOFileHandles()) if err != nil { return err } } if s.Step.GetCommand() == "" { prompt := &survey.Input{ Message: "Command for the new step: ", Help: "The shell command executed inside the container to implement this step", } err := survey.AskOne(prompt, &s.Step.Command, survey.Required, survey.WithStdio(o.In, o.Out, o.Err)) if err != nil { return err } } return nil }
{ return nil, "", err }
heroes.py
from SDWLE.cards.base import HeroCard from SDWLE.constants import CHARACTER_CLASS, MINION_TYPE from SDWLE.powers import MagePower, DruidPower, HunterPower, PaladinPower, PriestPower, RoguePower,\ ShamanPower, WarlockPower, WarriorPower, JaraxxusPower, DieInsect class Malfurion(HeroCard): def __init__(self): super().__init__("Malfurion Stormrage", CHARACTER_CLASS.DRUID, 30, DruidPower) class Rexxar(HeroCard): def __init__(self): super().__init__("Rexxar", CHARACTER_CLASS.HUNTER, 30, HunterPower) class Jaina(HeroCard): def __init__(self): super().__init__("Jaina Proudmoore", CHARACTER_CLASS.MAGE, 30, MagePower) class
(HeroCard): def __init__(self): super().__init__("Uther the Lightbringer", CHARACTER_CLASS.PALADIN, 30, PaladinPower) class Anduin(HeroCard): def __init__(self): super().__init__("Anduin Wrynn", CHARACTER_CLASS.PRIEST, 30, PriestPower) class Valeera(HeroCard): def __init__(self): super().__init__("Valeera Sanguinar", CHARACTER_CLASS.ROGUE, 30, RoguePower) class Thrall(HeroCard): def __init__(self): super().__init__("Thrall", CHARACTER_CLASS.SHAMAN, 30, ShamanPower) class Guldan(HeroCard): def __init__(self): super().__init__("Gul'dan", CHARACTER_CLASS.WARLOCK, 30, WarlockPower) class Garrosh(HeroCard): def __init__(self): super().__init__("Garrosh Hellscream", CHARACTER_CLASS.WARRIOR, 30, WarriorPower) class Jaraxxus(HeroCard): def __init__(self): super().__init__("Lord Jaraxxus", CHARACTER_CLASS.WARLOCK, 15, JaraxxusPower, MINION_TYPE.DEMON, ref_name="Lord Jarraxus (hero)") class Ragnaros(HeroCard): def __init__(self): super().__init__("Ragnaros the Firelord (hero)", CHARACTER_CLASS.ALL, 8, DieInsect) def hero_for_class(character_class): if character_class == CHARACTER_CLASS.DRUID: return Malfurion() elif character_class == CHARACTER_CLASS.HUNTER: return Rexxar() elif character_class == CHARACTER_CLASS.MAGE: return Jaina() elif character_class == CHARACTER_CLASS.PRIEST: return Anduin() elif character_class == CHARACTER_CLASS.PALADIN: return Uther() elif character_class == CHARACTER_CLASS.ROGUE: return Valeera() elif character_class == CHARACTER_CLASS.SHAMAN: return Thrall() elif character_class == CHARACTER_CLASS.WARLOCK: return Guldan() elif character_class == CHARACTER_CLASS.WARRIOR: return Garrosh() else: return Jaina() __hero_lookup = {"Jaina": Jaina, "Malfurion": Malfurion, "Rexxar": Rexxar, "Anduin": Anduin, "Uther": Uther, "Gul'dan": Guldan, "Valeera": Valeera, "Thrall": Thrall, "Garrosh": Garrosh, "Jaraxxus": Jaraxxus, "Ragnaros": Ragnaros, } def hero_from_name(name): return __hero_lookup[name]()
Uther
influx_dashboard_queue_bytes.go
package influxdb import "time" import ( "fmt" bulkQuerygen "github.com/naivewong/influxdb-comparisons/bulk_query_gen" ) // InfluxDashboardQueueBytes produces Influx-specific queries for the dashboard single-host case. type InfluxDashboardQueueBytes struct { InfluxDashboard } func NewInfluxQLDashboardQueueBytes(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator
func NewFluxDashboardQueueBytes(dbConfig bulkQuerygen.DatabaseConfig, interval bulkQuerygen.TimeInterval, duration time.Duration, scaleVar int) bulkQuerygen.QueryGenerator { underlying := newInfluxDashboard(Flux, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) return &InfluxDashboardQueueBytes{ InfluxDashboard: *underlying, } } func (d *InfluxDashboardQueueBytes) Dispatch(i int) bulkQuerygen.Query { q, interval := d.InfluxDashboard.DispatchCommon(i) var query string //SELECT mean("queueBytes") FROM "telegraf"."default"."influxdb_hh_processor" WHERE "cluster_id" = :Cluster_Id: AND time > :dashboardTime: GROUP BY time(1m), "host" fill(0) query = fmt.Sprintf("SELECT mean(\"temp_files\") FROM postgresl WHERE cluster_id = '%s' and %s group by time(1m), hostname, fill(0)", d.GetRandomClusterId(), d.GetTimeConstraint(interval)) humanLabel := fmt.Sprintf("InfluxDB (%s) Hinted HandOff Queue Size (MB), rand cluster, %s by 1m", d.language.String(), interval.Duration()) d.getHttpQuery(humanLabel, interval.StartString(), query, q) return q }
{ underlying := newInfluxDashboard(InfluxQL, dbConfig, interval, duration, scaleVar).(*InfluxDashboard) return &InfluxDashboardQueueBytes{ InfluxDashboard: *underlying, } }
ast_validation.rs
// Validate AST before lowering it to HIR. // // This pass is supposed to catch things that fit into AST data structures, // but not permitted by the language. It runs after expansion when AST is frozen, // so it can check for erroneous constructions produced by syntax extensions. // This pass is supposed to perform only simple checks not requiring name resolution // or type checking or some other kind of complex analysis. use itertools::{Either, Itertools}; use rustc_ast::ptr::P; use rustc_ast::visit::{self, AssocCtxt, FnCtxt, FnKind, Visitor}; use rustc_ast::walk_list; use rustc_ast::*; use rustc_ast_pretty::pprust; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{error_code, pluralize, struct_span_err, Applicability}; use rustc_parse::validate_attr; use rustc_session::lint::builtin::{MISSING_ABI, PATTERNS_IN_FNS_WITHOUT_BODY}; use rustc_session::lint::{BuiltinLintDiagnostics, LintBuffer}; use rustc_session::Session; use rustc_span::source_map::Spanned; use rustc_span::symbol::{kw, sym, Ident}; use rustc_span::Span; use rustc_target::spec::abi; use std::mem; use std::ops::DerefMut; const MORE_EXTERN: &str = "for more information, visit https://doc.rust-lang.org/std/keyword.extern.html"; /// Is `self` allowed semantically as the first parameter in an `FnDecl`? enum SelfSemantic { Yes, No, } struct AstValidator<'a> { session: &'a Session, /// The span of the `extern` in an `extern { ... }` block, if any. extern_mod: Option<&'a Item>, /// Are we inside a trait impl? in_trait_impl: bool, in_const_trait_impl: bool, has_proc_macro_decls: bool, /// Used to ban nested `impl Trait`, e.g., `impl Into<impl Debug>`. /// Nested `impl Trait` _is_ allowed in associated type position, /// e.g., `impl Iterator<Item = impl Debug>`. outer_impl_trait: Option<Span>, is_tilde_const_allowed: bool, /// Used to ban `impl Trait` in path projections like `<impl Iterator>::Item` /// or `Foo::Bar<impl Trait>` is_impl_trait_banned: bool, /// Used to ban associated type bounds (i.e., `Type<AssocType: Bounds>`) in /// certain positions. is_assoc_ty_bound_banned: bool, /// Used to allow `let` expressions in certain syntactic locations. is_let_allowed: bool, lint_buffer: &'a mut LintBuffer, } impl<'a> AstValidator<'a> { fn with_in_trait_impl( &mut self, is_in: bool, constness: Option<Const>, f: impl FnOnce(&mut Self), ) { let old = mem::replace(&mut self.in_trait_impl, is_in); let old_const = mem::replace(&mut self.in_const_trait_impl, matches!(constness, Some(Const::Yes(_)))); f(self); self.in_trait_impl = old; self.in_const_trait_impl = old_const; } fn with_banned_impl_trait(&mut self, f: impl FnOnce(&mut Self)) { let old = mem::replace(&mut self.is_impl_trait_banned, true); f(self); self.is_impl_trait_banned = old; } fn with_tilde_const_allowed(&mut self, f: impl FnOnce(&mut Self)) { let old = mem::replace(&mut self.is_tilde_const_allowed, true); f(self); self.is_tilde_const_allowed = old; } fn with_banned_tilde_const(&mut self, f: impl FnOnce(&mut Self)) { let old = mem::replace(&mut self.is_tilde_const_allowed, false); f(self); self.is_tilde_const_allowed = old; } fn with_let_allowed(&mut self, allowed: bool, f: impl FnOnce(&mut Self, bool)) { let old = mem::replace(&mut self.is_let_allowed, allowed); f(self, old); self.is_let_allowed = old; } /// Emits an error banning the `let` expression provided in the given location. fn ban_let_expr(&self, expr: &'a Expr) { let sess = &self.session; if sess.opts.unstable_features.is_nightly_build() { sess.struct_span_err(expr.span, "`let` expressions are not supported here") .note("only supported directly in conditions of `if`- and `while`-expressions") .note("as well as when nested within `&&` and parentheses in those conditions") .emit(); } else { sess.struct_span_err(expr.span, "expected expression, found statement (`let`)") .note("variable declaration using `let` is a statement") .emit(); } } fn with_banned_assoc_ty_bound(&mut self, f: impl FnOnce(&mut Self)) { let old = mem::replace(&mut self.is_assoc_ty_bound_banned, true); f(self); self.is_assoc_ty_bound_banned = old; } fn with_impl_trait(&mut self, outer: Option<Span>, f: impl FnOnce(&mut Self)) { let old = mem::replace(&mut self.outer_impl_trait, outer); if outer.is_some() { self.with_banned_tilde_const(f); } else { f(self); } self.outer_impl_trait = old; } fn visit_assoc_ty_constraint_from_generic_args(&mut self, constraint: &'a AssocTyConstraint) { match constraint.kind { AssocTyConstraintKind::Equality { .. } => {} AssocTyConstraintKind::Bound { .. } => { if self.is_assoc_ty_bound_banned { self.err_handler().span_err( constraint.span, "associated type bounds are not allowed within structs, enums, or unions", ); } } } self.visit_assoc_ty_constraint(constraint); } // Mirrors `visit::walk_ty`, but tracks relevant state. fn walk_ty(&mut self, t: &'a Ty) { match t.kind { TyKind::ImplTrait(..) => { self.with_impl_trait(Some(t.span), |this| visit::walk_ty(this, t)) } TyKind::TraitObject(..) => self.with_banned_tilde_const(|this| visit::walk_ty(this, t)), TyKind::Path(ref qself, ref path) => { // We allow these: // - `Option<impl Trait>` // - `option::Option<impl Trait>` // - `option::Option<T>::Foo<impl Trait> // // But not these: // - `<impl Trait>::Foo` // - `option::Option<impl Trait>::Foo`. // // To implement this, we disallow `impl Trait` from `qself` // (for cases like `<impl Trait>::Foo>`) // but we allow `impl Trait` in `GenericArgs` // iff there are no more PathSegments. if let Some(ref qself) = *qself { // `impl Trait` in `qself` is always illegal self.with_banned_impl_trait(|this| this.visit_ty(&qself.ty)); } // Note that there should be a call to visit_path here, // so if any logic is added to process `Path`s a call to it should be // added both in visit_path and here. This code mirrors visit::walk_path. for (i, segment) in path.segments.iter().enumerate() { // Allow `impl Trait` iff we're on the final path segment if i == path.segments.len() - 1 { self.visit_path_segment(path.span, segment); } else { self.with_banned_impl_trait(|this| { this.visit_path_segment(path.span, segment) }); } } } _ => visit::walk_ty(self, t), } } fn visit_struct_field_def(&mut self, field: &'a FieldDef) { if let Some(ident) = field.ident { if ident.name == kw::Underscore { self.visit_vis(&field.vis); self.visit_ident(ident); self.visit_ty_common(&field.ty); self.walk_ty(&field.ty); walk_list!(self, visit_attribute, &field.attrs); return; } } self.visit_field_def(field); } fn err_handler(&self) -> &rustc_errors::Handler { &self.session.diagnostic() } fn check_lifetime(&self, ident: Ident) { let valid_names = [kw::UnderscoreLifetime, kw::StaticLifetime, kw::Empty]; if !valid_names.contains(&ident.name) && ident.without_first_quote().is_reserved() { self.err_handler().span_err(ident.span, "lifetimes cannot use keyword names"); } } fn check_label(&self, ident: Ident) { if ident.without_first_quote().is_reserved() { self.err_handler() .span_err(ident.span, &format!("invalid label name `{}`", ident.name)); } } fn invalid_visibility(&self, vis: &Visibility, note: Option<&str>) { if let VisibilityKind::Inherited = vis.kind { return; } let mut err = struct_span_err!(self.session, vis.span, E0449, "unnecessary visibility qualifier"); if vis.kind.is_pub() { err.span_label(vis.span, "`pub` not permitted here because it's implied"); } if let Some(note) = note { err.note(note); } err.emit(); } fn check_decl_no_pat(decl: &FnDecl, mut report_err: impl FnMut(Span, Option<Ident>, bool)) { for Param { pat, .. } in &decl.inputs { match pat.kind { PatKind::Ident(BindingMode::ByValue(Mutability::Not), _, None) | PatKind::Wild => {} PatKind::Ident(BindingMode::ByValue(Mutability::Mut), ident, None) => { report_err(pat.span, Some(ident), true) } _ => report_err(pat.span, None, false), } } } fn check_trait_fn_not_async(&self, fn_span: Span, asyncness: Async) { if let Async::Yes { span, .. } = asyncness { struct_span_err!( self.session, fn_span, E0706, "functions in traits cannot be declared `async`" ) .span_label(span, "`async` because of this") .note("`async` trait functions are not currently supported") .note("consider using the `async-trait` crate: https://crates.io/crates/async-trait") .emit(); } } fn check_trait_fn_not_const(&self, constness: Const) { if let Const::Yes(span) = constness { struct_span_err!( self.session, span, E0379, "functions in traits cannot be declared const" ) .span_label(span, "functions in traits cannot be const") .emit(); } } // FIXME(ecstaticmorse): Instead, use `bound_context` to check this in `visit_param_bound`. fn no_questions_in_bounds(&self, bounds: &GenericBounds, where_: &str, is_trait: bool) { for bound in bounds { if let GenericBound::Trait(ref poly, TraitBoundModifier::Maybe) = *bound { let mut err = self.err_handler().struct_span_err( poly.span, &format!("`?Trait` is not permitted in {}", where_), ); if is_trait { let path_str = pprust::path_to_string(&poly.trait_ref.path); err.note(&format!("traits are `?{}` by default", path_str)); } err.emit(); } } } /// Matches `'-' lit | lit (cf. parser::Parser::parse_literal_maybe_minus)`, /// or paths for ranges. // // FIXME: do we want to allow `expr -> pattern` conversion to create path expressions? // That means making this work: // // ```rust,ignore (FIXME) // struct S; // macro_rules! m { // ($a:expr) => { // let $a = S; // } // } // m!(S); // ``` fn check_expr_within_pat(&self, expr: &Expr, allow_paths: bool) { match expr.kind { ExprKind::Lit(..) | ExprKind::ConstBlock(..) | ExprKind::Err => {} ExprKind::Path(..) if allow_paths => {} ExprKind::Unary(UnOp::Neg, ref inner) if matches!(inner.kind, ExprKind::Lit(_)) => {} _ => self.err_handler().span_err( expr.span, "arbitrary expressions aren't allowed \ in patterns", ), } } fn check_late_bound_lifetime_defs(&self, params: &[GenericParam]) { // Check only lifetime parameters are present and that the lifetime // parameters that are present have no bounds. let non_lt_param_spans: Vec<_> = params .iter() .filter_map(|param| match param.kind { GenericParamKind::Lifetime { .. } => { if !param.bounds.is_empty() { let spans: Vec<_> = param.bounds.iter().map(|b| b.span()).collect(); self.err_handler() .span_err(spans, "lifetime bounds cannot be used in this context"); } None } _ => Some(param.ident.span), }) .collect(); if !non_lt_param_spans.is_empty() { self.err_handler().span_err( non_lt_param_spans, "only lifetime parameters can be used in this context", ); } } fn check_fn_decl(&self, fn_decl: &FnDecl, self_semantic: SelfSemantic) { self.check_decl_num_args(fn_decl); self.check_decl_cvaradic_pos(fn_decl); self.check_decl_attrs(fn_decl); self.check_decl_self_param(fn_decl, self_semantic); } /// Emits fatal error if function declaration has more than `u16::MAX` arguments /// Error is fatal to prevent errors during typechecking fn check_decl_num_args(&self, fn_decl: &FnDecl)
fn check_decl_cvaradic_pos(&self, fn_decl: &FnDecl) { match &*fn_decl.inputs { [Param { ty, span, .. }] => { if let TyKind::CVarArgs = ty.kind { self.err_handler().span_err( *span, "C-variadic function must be declared with at least one named argument", ); } } [ps @ .., _] => { for Param { ty, span, .. } in ps { if let TyKind::CVarArgs = ty.kind { self.err_handler().span_err( *span, "`...` must be the last argument of a C-variadic function", ); } } } _ => {} } } fn check_decl_attrs(&self, fn_decl: &FnDecl) { fn_decl .inputs .iter() .flat_map(|i| i.attrs.as_ref()) .filter(|attr| { let arr = [sym::allow, sym::cfg, sym::cfg_attr, sym::deny, sym::forbid, sym::warn]; !arr.contains(&attr.name_or_empty()) && rustc_attr::is_builtin_attr(attr) }) .for_each(|attr| { if attr.is_doc_comment() { self.err_handler() .struct_span_err( attr.span, "documentation comments cannot be applied to function parameters", ) .span_label(attr.span, "doc comments are not allowed here") .emit(); } else { self.err_handler().span_err( attr.span, "allow, cfg, cfg_attr, deny, \ forbid, and warn are the only allowed built-in attributes in function parameters", ) } }); } fn check_decl_self_param(&self, fn_decl: &FnDecl, self_semantic: SelfSemantic) { if let (SelfSemantic::No, [param, ..]) = (self_semantic, &*fn_decl.inputs) { if param.is_self() { self.err_handler() .struct_span_err( param.span, "`self` parameter is only allowed in associated functions", ) .span_label(param.span, "not semantically valid as function parameter") .note("associated functions are those in `impl` or `trait` definitions") .emit(); } } } fn check_defaultness(&self, span: Span, defaultness: Defaultness) { if let Defaultness::Default(def_span) = defaultness { let span = self.session.source_map().guess_head_span(span); self.err_handler() .struct_span_err(span, "`default` is only allowed on items in trait impls") .span_label(def_span, "`default` because of this") .emit(); } } fn error_item_without_body(&self, sp: Span, ctx: &str, msg: &str, sugg: &str) { self.err_handler() .struct_span_err(sp, msg) .span_suggestion( self.session.source_map().end_point(sp), &format!("provide a definition for the {}", ctx), sugg.to_string(), Applicability::HasPlaceholders, ) .emit(); } fn check_impl_item_provided<T>(&self, sp: Span, body: &Option<T>, ctx: &str, sugg: &str) { if body.is_none() { let msg = format!("associated {} in `impl` without body", ctx); self.error_item_without_body(sp, ctx, &msg, sugg); } } fn check_type_no_bounds(&self, bounds: &[GenericBound], ctx: &str) { let span = match bounds { [] => return, [b0] => b0.span(), [b0, .., bl] => b0.span().to(bl.span()), }; self.err_handler() .struct_span_err(span, &format!("bounds on `type`s in {} have no effect", ctx)) .emit(); } fn check_foreign_ty_genericless(&self, generics: &Generics) { let cannot_have = |span, descr, remove_descr| { self.err_handler() .struct_span_err( span, &format!("`type`s inside `extern` blocks cannot have {}", descr), ) .span_suggestion( span, &format!("remove the {}", remove_descr), String::new(), Applicability::MaybeIncorrect, ) .span_label(self.current_extern_span(), "`extern` block begins here") .note(MORE_EXTERN) .emit(); }; if !generics.params.is_empty() { cannot_have(generics.span, "generic parameters", "generic parameters"); } if !generics.where_clause.predicates.is_empty() { cannot_have(generics.where_clause.span, "`where` clauses", "`where` clause"); } } fn check_foreign_kind_bodyless(&self, ident: Ident, kind: &str, body: Option<Span>) { let body = match body { None => return, Some(body) => body, }; self.err_handler() .struct_span_err(ident.span, &format!("incorrect `{}` inside `extern` block", kind)) .span_label(ident.span, "cannot have a body") .span_label(body, "the invalid body") .span_label( self.current_extern_span(), format!( "`extern` blocks define existing foreign {0}s and {0}s \ inside of them cannot have a body", kind ), ) .note(MORE_EXTERN) .emit(); } /// An `fn` in `extern { ... }` cannot have a body `{ ... }`. fn check_foreign_fn_bodyless(&self, ident: Ident, body: Option<&Block>) { let body = match body { None => return, Some(body) => body, }; self.err_handler() .struct_span_err(ident.span, "incorrect function inside `extern` block") .span_label(ident.span, "cannot have a body") .span_suggestion( body.span, "remove the invalid body", ";".to_string(), Applicability::MaybeIncorrect, ) .help( "you might have meant to write a function accessible through FFI, \ which can be done by writing `extern fn` outside of the `extern` block", ) .span_label( self.current_extern_span(), "`extern` blocks define existing foreign functions and functions \ inside of them cannot have a body", ) .note(MORE_EXTERN) .emit(); } fn current_extern_span(&self) -> Span { self.session.source_map().guess_head_span(self.extern_mod.unwrap().span) } /// An `fn` in `extern { ... }` cannot have qualifiers, e.g. `async fn`. fn check_foreign_fn_headerless(&self, ident: Ident, span: Span, header: FnHeader) { if header.has_qualifiers() { self.err_handler() .struct_span_err(ident.span, "functions in `extern` blocks cannot have qualifiers") .span_label(self.current_extern_span(), "in this `extern` block") .span_suggestion_verbose( span.until(ident.span.shrink_to_lo()), "remove the qualifiers", "fn ".to_string(), Applicability::MaybeIncorrect, ) .emit(); } } /// An item in `extern { ... }` cannot use non-ascii identifier. fn check_foreign_item_ascii_only(&self, ident: Ident) { let symbol_str = ident.as_str(); if !symbol_str.is_ascii() { let n = 83942; self.err_handler() .struct_span_err( ident.span, "items in `extern` blocks cannot use non-ascii identifiers", ) .span_label(self.current_extern_span(), "in this `extern` block") .note(&format!( "this limitation may be lifted in the future; see issue #{} <https://github.com/rust-lang/rust/issues/{}> for more information", n, n, )) .emit(); } } /// Reject C-varadic type unless the function is foreign, /// or free and `unsafe extern "C"` semantically. fn check_c_varadic_type(&self, fk: FnKind<'a>) { match (fk.ctxt(), fk.header()) { (Some(FnCtxt::Foreign), _) => return, (Some(FnCtxt::Free), Some(header)) => match header.ext { Extern::Explicit(StrLit { symbol_unescaped: sym::C, .. }) | Extern::Implicit if matches!(header.unsafety, Unsafe::Yes(_)) => { return; } _ => {} }, _ => {} }; for Param { ty, span, .. } in &fk.decl().inputs { if let TyKind::CVarArgs = ty.kind { self.err_handler() .struct_span_err( *span, "only foreign or `unsafe extern \"C\"` functions may be C-variadic", ) .emit(); } } } fn check_item_named(&self, ident: Ident, kind: &str) { if ident.name != kw::Underscore { return; } self.err_handler() .struct_span_err(ident.span, &format!("`{}` items in this context need a name", kind)) .span_label(ident.span, format!("`_` is not a valid name for this `{}` item", kind)) .emit(); } fn check_nomangle_item_asciionly(&self, ident: Ident, item_span: Span) { if ident.name.as_str().is_ascii() { return; } let head_span = self.session.source_map().guess_head_span(item_span); struct_span_err!( self.session, head_span, E0754, "`#[no_mangle]` requires ASCII identifier" ) .emit(); } fn check_mod_file_item_asciionly(&self, ident: Ident) { if ident.name.as_str().is_ascii() { return; } struct_span_err!( self.session, ident.span, E0754, "trying to load file for module `{}` with non-ascii identifier name", ident.name ) .help("consider using `#[path]` attribute to specify filesystem path") .emit(); } fn deny_generic_params(&self, generics: &Generics, ident_span: Span) { if !generics.params.is_empty() { struct_span_err!( self.session, generics.span, E0567, "auto traits cannot have generic parameters" ) .span_label(ident_span, "auto trait cannot have generic parameters") .span_suggestion( generics.span, "remove the parameters", String::new(), Applicability::MachineApplicable, ) .emit(); } } fn emit_e0568(&self, span: Span, ident_span: Span) { struct_span_err!( self.session, span, E0568, "auto traits cannot have super traits or lifetime bounds" ) .span_label(ident_span, "auto trait cannot have super traits or lifetime bounds") .span_suggestion( span, "remove the super traits or lifetime bounds", String::new(), Applicability::MachineApplicable, ) .emit(); } fn deny_super_traits(&self, bounds: &GenericBounds, ident_span: Span) { if let [.., last] = &bounds[..] { let span = ident_span.shrink_to_hi().to(last.span()); self.emit_e0568(span, ident_span); } } fn deny_where_clause(&self, where_clause: &WhereClause, ident_span: Span) { if !where_clause.predicates.is_empty() { self.emit_e0568(where_clause.span, ident_span); } } fn deny_items(&self, trait_items: &[P<AssocItem>], ident_span: Span) { if !trait_items.is_empty() { let spans: Vec<_> = trait_items.iter().map(|i| i.ident.span).collect(); let total_span = trait_items.first().unwrap().span.to(trait_items.last().unwrap().span); struct_span_err!( self.session, spans, E0380, "auto traits cannot have associated items" ) .span_suggestion( total_span, "remove these associated items", String::new(), Applicability::MachineApplicable, ) .span_label(ident_span, "auto trait cannot have associated items") .emit(); } } fn correct_generic_order_suggestion(&self, data: &AngleBracketedArgs) -> String { // Lifetimes always come first. let lt_sugg = data.args.iter().filter_map(|arg| match arg { AngleBracketedArg::Arg(lt @ GenericArg::Lifetime(_)) => { Some(pprust::to_string(|s| s.print_generic_arg(lt))) } _ => None, }); let args_sugg = data.args.iter().filter_map(|a| match a { AngleBracketedArg::Arg(GenericArg::Lifetime(_)) | AngleBracketedArg::Constraint(_) => { None } AngleBracketedArg::Arg(arg) => Some(pprust::to_string(|s| s.print_generic_arg(arg))), }); // Constraints always come last. let constraint_sugg = data.args.iter().filter_map(|a| match a { AngleBracketedArg::Arg(_) => None, AngleBracketedArg::Constraint(c) => { Some(pprust::to_string(|s| s.print_assoc_constraint(c))) } }); format!( "<{}>", lt_sugg.chain(args_sugg).chain(constraint_sugg).collect::<Vec<String>>().join(", ") ) } /// Enforce generic args coming before constraints in `<...>` of a path segment. fn check_generic_args_before_constraints(&self, data: &AngleBracketedArgs) { // Early exit in case it's partitioned as it should be. if data.args.iter().is_partitioned(|arg| matches!(arg, AngleBracketedArg::Arg(_))) { return; } // Find all generic argument coming after the first constraint... let (constraint_spans, arg_spans): (Vec<Span>, Vec<Span>) = data.args.iter().partition_map(|arg| match arg { AngleBracketedArg::Constraint(c) => Either::Left(c.span), AngleBracketedArg::Arg(a) => Either::Right(a.span()), }); let args_len = arg_spans.len(); let constraint_len = constraint_spans.len(); // ...and then error: self.err_handler() .struct_span_err( arg_spans.clone(), "generic arguments must come before the first constraint", ) .span_label(constraint_spans[0], &format!("constraint{}", pluralize!(constraint_len))) .span_label( *arg_spans.iter().last().unwrap(), &format!("generic argument{}", pluralize!(args_len)), ) .span_labels(constraint_spans, "") .span_labels(arg_spans, "") .span_suggestion_verbose( data.span, &format!( "move the constraint{} after the generic argument{}", pluralize!(constraint_len), pluralize!(args_len) ), self.correct_generic_order_suggestion(&data), Applicability::MachineApplicable, ) .emit(); } fn visit_ty_common(&mut self, ty: &'a Ty) { match ty.kind { TyKind::BareFn(ref bfty) => { self.check_fn_decl(&bfty.decl, SelfSemantic::No); Self::check_decl_no_pat(&bfty.decl, |span, _, _| { struct_span_err!( self.session, span, E0561, "patterns aren't allowed in function pointer types" ) .emit(); }); self.check_late_bound_lifetime_defs(&bfty.generic_params); if let Extern::Implicit = bfty.ext { let sig_span = self.session.source_map().next_point(ty.span.shrink_to_lo()); self.maybe_lint_missing_abi(sig_span, ty.id); } } TyKind::TraitObject(ref bounds, ..) => { let mut any_lifetime_bounds = false; for bound in bounds { if let GenericBound::Outlives(ref lifetime) = *bound { if any_lifetime_bounds { struct_span_err!( self.session, lifetime.ident.span, E0226, "only a single explicit lifetime bound is permitted" ) .emit(); break; } any_lifetime_bounds = true; } } self.no_questions_in_bounds(bounds, "trait object types", false); } TyKind::ImplTrait(_, ref bounds) => { if self.is_impl_trait_banned { struct_span_err!( self.session, ty.span, E0667, "`impl Trait` is not allowed in path parameters" ) .emit(); } if let Some(outer_impl_trait_sp) = self.outer_impl_trait { struct_span_err!( self.session, ty.span, E0666, "nested `impl Trait` is not allowed" ) .span_label(outer_impl_trait_sp, "outer `impl Trait`") .span_label(ty.span, "nested `impl Trait` here") .emit(); } if !bounds.iter().any(|b| matches!(b, GenericBound::Trait(..))) { self.err_handler().span_err(ty.span, "at least one trait must be specified"); } } _ => {} } } fn maybe_lint_missing_abi(&mut self, span: Span, id: NodeId) { // FIXME(davidtwco): This is a hack to detect macros which produce spans of the // call site which do not have a macro backtrace. See #61963. let is_macro_callsite = self .session .source_map() .span_to_snippet(span) .map(|snippet| snippet.starts_with("#[")) .unwrap_or(true); if !is_macro_callsite { self.lint_buffer.buffer_lint_with_diagnostic( MISSING_ABI, id, span, "extern declarations without an explicit ABI are deprecated", BuiltinLintDiagnostics::MissingAbi(span, abi::Abi::FALLBACK), ) } } } /// Checks that generic parameters are in the correct order, /// which is lifetimes, then types and then consts. (`<'a, T, const N: usize>`) fn validate_generic_param_order( sess: &Session, handler: &rustc_errors::Handler, generics: &[GenericParam], span: Span, ) { let mut max_param: Option<ParamKindOrd> = None; let mut out_of_order = FxHashMap::default(); let mut param_idents = Vec::with_capacity(generics.len()); for (idx, param) in generics.iter().enumerate() { let ident = param.ident; let (kind, bounds, span) = (&param.kind, &param.bounds, ident.span); let (ord_kind, ident) = match &param.kind { GenericParamKind::Lifetime => (ParamKindOrd::Lifetime, ident.to_string()), GenericParamKind::Type { default: _ } => (ParamKindOrd::Type, ident.to_string()), GenericParamKind::Const { ref ty, kw_span: _, default: _ } => { let ty = pprust::ty_to_string(ty); let unordered = sess.features_untracked().unordered_const_ty_params(); (ParamKindOrd::Const { unordered }, format!("const {}: {}", ident, ty)) } }; param_idents.push((kind, ord_kind, bounds, idx, ident)); match max_param { Some(max_param) if max_param > ord_kind => { let entry = out_of_order.entry(ord_kind).or_insert((max_param, vec![])); entry.1.push(span); } Some(_) | None => max_param = Some(ord_kind), }; } if !out_of_order.is_empty() { let mut ordered_params = "<".to_string(); param_idents.sort_by_key(|&(_, po, _, i, _)| (po, i)); let mut first = true; for (kind, _, bounds, _, ident) in param_idents { if !first { ordered_params += ", "; } ordered_params += &ident; if !bounds.is_empty() { ordered_params += ": "; ordered_params += &pprust::bounds_to_string(&bounds); } match kind { GenericParamKind::Type { default: Some(default) } => { ordered_params += " = "; ordered_params += &pprust::ty_to_string(default); } GenericParamKind::Type { default: None } => (), GenericParamKind::Lifetime => (), GenericParamKind::Const { ty: _, kw_span: _, default: Some(default) } => { ordered_params += " = "; ordered_params += &pprust::expr_to_string(&*default.value); } GenericParamKind::Const { ty: _, kw_span: _, default: None } => (), } first = false; } ordered_params += ">"; for (param_ord, (max_param, spans)) in &out_of_order { let mut err = handler.struct_span_err( spans.clone(), &format!( "{} parameters must be declared prior to {} parameters", param_ord, max_param, ), ); err.span_suggestion( span, &format!( "reorder the parameters: lifetimes, {}", if sess.features_untracked().unordered_const_ty_params() { "then consts and types" } else { "then types, then consts" } ), ordered_params.clone(), Applicability::MachineApplicable, ); err.emit(); } } } impl<'a> Visitor<'a> for AstValidator<'a> { fn visit_attribute(&mut self, attr: &Attribute) { validate_attr::check_meta(&self.session.parse_sess, attr); } fn visit_expr(&mut self, expr: &'a Expr) { self.with_let_allowed(false, |this, let_allowed| match &expr.kind { ExprKind::If(cond, then, opt_else) => { this.visit_block(then); walk_list!(this, visit_expr, opt_else); this.with_let_allowed(true, |this, _| this.visit_expr(cond)); return; } ExprKind::Let(..) if !let_allowed => this.ban_let_expr(expr), ExprKind::LlvmInlineAsm(..) if !this.session.target.allow_asm => { struct_span_err!( this.session, expr.span, E0472, "llvm_asm! is unsupported on this target" ) .emit(); } ExprKind::Match(expr, arms) => { this.visit_expr(expr); for arm in arms { this.visit_expr(&arm.body); this.visit_pat(&arm.pat); walk_list!(this, visit_attribute, &arm.attrs); if let Some(ref guard) = arm.guard { if let ExprKind::Let(_, ref expr, _) = guard.kind { this.with_let_allowed(true, |this, _| this.visit_expr(expr)); return; } } } } ExprKind::Paren(_) | ExprKind::Binary(Spanned { node: BinOpKind::And, .. }, ..) => { this.with_let_allowed(let_allowed, |this, _| visit::walk_expr(this, expr)); return; } ExprKind::While(cond, then, opt_label) => { walk_list!(this, visit_label, opt_label); this.visit_block(then); this.with_let_allowed(true, |this, _| this.visit_expr(cond)); return; } _ => visit::walk_expr(this, expr), }); } fn visit_ty(&mut self, ty: &'a Ty) { self.visit_ty_common(ty); self.walk_ty(ty) } fn visit_label(&mut self, label: &'a Label) { self.check_label(label.ident); visit::walk_label(self, label); } fn visit_lifetime(&mut self, lifetime: &'a Lifetime) { self.check_lifetime(lifetime.ident); visit::walk_lifetime(self, lifetime); } fn visit_field_def(&mut self, s: &'a FieldDef) { visit::walk_field_def(self, s) } fn visit_item(&mut self, item: &'a Item) { if item.attrs.iter().any(|attr| self.session.is_proc_macro_attr(attr)) { self.has_proc_macro_decls = true; } if self.session.contains_name(&item.attrs, sym::no_mangle) { self.check_nomangle_item_asciionly(item.ident, item.span); } match item.kind { ItemKind::Impl(box ImplKind { unsafety, polarity, defaultness: _, constness, ref generics, of_trait: Some(ref t), ref self_ty, ref items, }) => { self.with_in_trait_impl(true, Some(constness), |this| { this.invalid_visibility(&item.vis, None); if let TyKind::Err = self_ty.kind { this.err_handler() .struct_span_err( item.span, "`impl Trait for .. {}` is an obsolete syntax", ) .help("use `auto trait Trait {}` instead") .emit(); } if let (Unsafe::Yes(span), ImplPolarity::Negative(sp)) = (unsafety, polarity) { struct_span_err!( this.session, sp.to(t.path.span), E0198, "negative impls cannot be unsafe" ) .span_label(sp, "negative because of this") .span_label(span, "unsafe because of this") .emit(); } this.visit_vis(&item.vis); this.visit_ident(item.ident); if let Const::Yes(_) = constness { this.with_tilde_const_allowed(|this| this.visit_generics(generics)); } else { this.visit_generics(generics); } this.visit_trait_ref(t); this.visit_ty(self_ty); walk_list!(this, visit_assoc_item, items, AssocCtxt::Impl); }); return; // Avoid visiting again. } ItemKind::Impl(box ImplKind { unsafety, polarity, defaultness, constness, generics: _, of_trait: None, ref self_ty, items: _, }) => { let error = |annotation_span, annotation| { let mut err = self.err_handler().struct_span_err( self_ty.span, &format!("inherent impls cannot be {}", annotation), ); err.span_label(annotation_span, &format!("{} because of this", annotation)); err.span_label(self_ty.span, "inherent impl for this type"); err }; self.invalid_visibility( &item.vis, Some("place qualifiers on individual impl items instead"), ); if let Unsafe::Yes(span) = unsafety { error(span, "unsafe").code(error_code!(E0197)).emit(); } if let ImplPolarity::Negative(span) = polarity { error(span, "negative").emit(); } if let Defaultness::Default(def_span) = defaultness { error(def_span, "`default`") .note("only trait implementations may be annotated with `default`") .emit(); } if let Const::Yes(span) = constness { error(span, "`const`") .note("only trait implementations may be annotated with `const`") .emit(); } } ItemKind::Fn(box FnKind(def, ref sig, ref generics, ref body)) => { self.check_defaultness(item.span, def); if body.is_none() { let msg = "free function without a body"; self.error_item_without_body(item.span, "function", msg, " { <body> }"); } self.visit_vis(&item.vis); self.visit_ident(item.ident); if let Const::Yes(_) = sig.header.constness { self.with_tilde_const_allowed(|this| this.visit_generics(generics)); } else { self.visit_generics(generics); } let kind = FnKind::Fn(FnCtxt::Free, item.ident, sig, &item.vis, body.as_deref()); self.visit_fn(kind, item.span, item.id); walk_list!(self, visit_attribute, &item.attrs); return; // Avoid visiting again. } ItemKind::ForeignMod(ForeignMod { abi, unsafety, .. }) => { let old_item = mem::replace(&mut self.extern_mod, Some(item)); self.invalid_visibility( &item.vis, Some("place qualifiers on individual foreign items instead"), ); if let Unsafe::Yes(span) = unsafety { self.err_handler().span_err(span, "extern block cannot be declared unsafe"); } if abi.is_none() { self.maybe_lint_missing_abi(item.span, item.id); } visit::walk_item(self, item); self.extern_mod = old_item; return; // Avoid visiting again. } ItemKind::Enum(ref def, _) => { for variant in &def.variants { self.invalid_visibility(&variant.vis, None); for field in variant.data.fields() { self.invalid_visibility(&field.vis, None); } } } ItemKind::Trait(box TraitKind( is_auto, _, ref generics, ref bounds, ref trait_items, )) => { if is_auto == IsAuto::Yes { // Auto traits cannot have generics, super traits nor contain items. self.deny_generic_params(generics, item.ident.span); self.deny_super_traits(bounds, item.ident.span); self.deny_where_clause(&generics.where_clause, item.ident.span); self.deny_items(trait_items, item.ident.span); } self.no_questions_in_bounds(bounds, "supertraits", true); // Equivalent of `visit::walk_item` for `ItemKind::Trait` that inserts a bound // context for the supertraits. self.visit_vis(&item.vis); self.visit_ident(item.ident); self.visit_generics(generics); self.with_banned_tilde_const(|this| walk_list!(this, visit_param_bound, bounds)); walk_list!(self, visit_assoc_item, trait_items, AssocCtxt::Trait); walk_list!(self, visit_attribute, &item.attrs); return; } ItemKind::Mod(unsafety, ref mod_kind) => { if let Unsafe::Yes(span) = unsafety { self.err_handler().span_err(span, "module cannot be declared unsafe"); } // Ensure that `path` attributes on modules are recorded as used (cf. issue #35584). if !matches!(mod_kind, ModKind::Loaded(_, Inline::Yes, _)) && !self.session.contains_name(&item.attrs, sym::path) { self.check_mod_file_item_asciionly(item.ident); } } ItemKind::Struct(ref vdata, ref generics) => match vdata { // Duplicating the `Visitor` logic allows catching all cases // of `Anonymous(Struct, Union)` outside of a field struct or union. // // Inside `visit_ty` the validator catches every `Anonymous(Struct, Union)` it // encounters, and only on `ItemKind::Struct` and `ItemKind::Union` // it uses `visit_ty_common`, which doesn't contain that specific check. VariantData::Struct(ref fields, ..) => { self.visit_vis(&item.vis); self.visit_ident(item.ident); self.visit_generics(generics); self.with_banned_assoc_ty_bound(|this| { walk_list!(this, visit_struct_field_def, fields); }); walk_list!(self, visit_attribute, &item.attrs); return; } _ => {} }, ItemKind::Union(ref vdata, ref generics) => { if vdata.fields().is_empty() { self.err_handler().span_err(item.span, "unions cannot have zero fields"); } match vdata { VariantData::Struct(ref fields, ..) => { self.visit_vis(&item.vis); self.visit_ident(item.ident); self.visit_generics(generics); self.with_banned_assoc_ty_bound(|this| { walk_list!(this, visit_struct_field_def, fields); }); walk_list!(self, visit_attribute, &item.attrs); return; } _ => {} } } ItemKind::Const(def, .., None) => { self.check_defaultness(item.span, def); let msg = "free constant item without body"; self.error_item_without_body(item.span, "constant", msg, " = <expr>;"); } ItemKind::Static(.., None) => { let msg = "free static item without body"; self.error_item_without_body(item.span, "static", msg, " = <expr>;"); } ItemKind::TyAlias(box TyAliasKind(def, _, ref bounds, ref body)) => { self.check_defaultness(item.span, def); if body.is_none() { let msg = "free type alias without body"; self.error_item_without_body(item.span, "type", msg, " = <type>;"); } self.check_type_no_bounds(bounds, "this context"); } _ => {} } visit::walk_item(self, item); } fn visit_foreign_item(&mut self, fi: &'a ForeignItem) { match &fi.kind { ForeignItemKind::Fn(box FnKind(def, sig, _, body)) => { self.check_defaultness(fi.span, *def); self.check_foreign_fn_bodyless(fi.ident, body.as_deref()); self.check_foreign_fn_headerless(fi.ident, fi.span, sig.header); self.check_foreign_item_ascii_only(fi.ident); } ForeignItemKind::TyAlias(box TyAliasKind(def, generics, bounds, body)) => { self.check_defaultness(fi.span, *def); self.check_foreign_kind_bodyless(fi.ident, "type", body.as_ref().map(|b| b.span)); self.check_type_no_bounds(bounds, "`extern` blocks"); self.check_foreign_ty_genericless(generics); self.check_foreign_item_ascii_only(fi.ident); } ForeignItemKind::Static(_, _, body) => { self.check_foreign_kind_bodyless(fi.ident, "static", body.as_ref().map(|b| b.span)); self.check_foreign_item_ascii_only(fi.ident); } ForeignItemKind::MacCall(..) => {} } visit::walk_foreign_item(self, fi) } // Mirrors `visit::walk_generic_args`, but tracks relevant state. fn visit_generic_args(&mut self, _: Span, generic_args: &'a GenericArgs) { match *generic_args { GenericArgs::AngleBracketed(ref data) => { self.check_generic_args_before_constraints(data); for arg in &data.args { match arg { AngleBracketedArg::Arg(arg) => self.visit_generic_arg(arg), // Type bindings such as `Item = impl Debug` in `Iterator<Item = Debug>` // are allowed to contain nested `impl Trait`. AngleBracketedArg::Constraint(constraint) => { self.with_impl_trait(None, |this| { this.visit_assoc_ty_constraint_from_generic_args(constraint); }); } } } } GenericArgs::Parenthesized(ref data) => { walk_list!(self, visit_ty, &data.inputs); if let FnRetTy::Ty(ty) = &data.output { // `-> Foo` syntax is essentially an associated type binding, // so it is also allowed to contain nested `impl Trait`. self.with_impl_trait(None, |this| this.visit_ty(ty)); } } } } fn visit_generics(&mut self, generics: &'a Generics) { let cg_defaults = self.session.features_untracked().unordered_const_ty_params(); let mut prev_param_default = None; for param in &generics.params { match param.kind { GenericParamKind::Lifetime => (), GenericParamKind::Type { default: Some(_), .. } | GenericParamKind::Const { default: Some(_), .. } => { prev_param_default = Some(param.ident.span); } GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => { if let Some(span) = prev_param_default { let mut err = self.err_handler().struct_span_err( span, "generic parameters with a default must be trailing", ); if matches!(param.kind, GenericParamKind::Const { .. }) && !cg_defaults { err.note( "using type defaults and const parameters \ in the same parameter list is currently not permitted", ); } err.emit(); break; } } } } validate_generic_param_order( self.session, self.err_handler(), &generics.params, generics.span, ); for predicate in &generics.where_clause.predicates { if let WherePredicate::EqPredicate(ref predicate) = *predicate { deny_equality_constraints(self, predicate, generics); } } walk_list!(self, visit_generic_param, &generics.params); for predicate in &generics.where_clause.predicates { match predicate { WherePredicate::BoundPredicate(bound_pred) => { // A type binding, eg `for<'c> Foo: Send+Clone+'c` self.check_late_bound_lifetime_defs(&bound_pred.bound_generic_params); // This is slightly complicated. Our representation for poly-trait-refs contains a single // binder and thus we only allow a single level of quantification. However, // the syntax of Rust permits quantification in two places in where clauses, // e.g., `T: for <'a> Foo<'a>` and `for <'a, 'b> &'b T: Foo<'a>`. If both are // defined, then error. if !bound_pred.bound_generic_params.is_empty() { for bound in &bound_pred.bounds { match bound { GenericBound::Trait(t, _) => { if !t.bound_generic_params.is_empty() { struct_span_err!( self.err_handler(), t.span, E0316, "nested quantification of lifetimes" ) .emit(); } } GenericBound::Outlives(_) => {} } } } } _ => {} } self.visit_where_predicate(predicate); } } fn visit_generic_param(&mut self, param: &'a GenericParam) { if let GenericParamKind::Lifetime { .. } = param.kind { self.check_lifetime(param.ident); } visit::walk_generic_param(self, param); } fn visit_param_bound(&mut self, bound: &'a GenericBound) { match bound { GenericBound::Trait(_, TraitBoundModifier::MaybeConst) => { if !self.is_tilde_const_allowed { self.err_handler() .struct_span_err(bound.span(), "`~const` is not allowed here") .note("only allowed on bounds on traits' associated types and functions, const fns, const impls and its associated functions") .emit(); } } GenericBound::Trait(_, TraitBoundModifier::MaybeConstMaybe) => { self.err_handler() .span_err(bound.span(), "`~const` and `?` are mutually exclusive"); } _ => {} } visit::walk_param_bound(self, bound) } fn visit_pat(&mut self, pat: &'a Pat) { match &pat.kind { PatKind::Lit(expr) => { self.check_expr_within_pat(expr, false); } PatKind::Range(start, end, _) => { if let Some(expr) = start { self.check_expr_within_pat(expr, true); } if let Some(expr) = end { self.check_expr_within_pat(expr, true); } } _ => {} } visit::walk_pat(self, pat) } fn visit_poly_trait_ref(&mut self, t: &'a PolyTraitRef, m: &'a TraitBoundModifier) { self.check_late_bound_lifetime_defs(&t.bound_generic_params); visit::walk_poly_trait_ref(self, t, m); } fn visit_variant_data(&mut self, s: &'a VariantData) { self.with_banned_assoc_ty_bound(|this| visit::walk_struct_def(this, s)) } fn visit_enum_def( &mut self, enum_definition: &'a EnumDef, generics: &'a Generics, item_id: NodeId, _: Span, ) { self.with_banned_assoc_ty_bound(|this| { visit::walk_enum_def(this, enum_definition, generics, item_id) }) } fn visit_fn(&mut self, fk: FnKind<'a>, span: Span, id: NodeId) { // Only associated `fn`s can have `self` parameters. let self_semantic = match fk.ctxt() { Some(FnCtxt::Assoc(_)) => SelfSemantic::Yes, _ => SelfSemantic::No, }; self.check_fn_decl(fk.decl(), self_semantic); self.check_c_varadic_type(fk); // Functions cannot both be `const async` if let Some(FnHeader { constness: Const::Yes(cspan), asyncness: Async::Yes { span: aspan, .. }, .. }) = fk.header() { self.err_handler() .struct_span_err( vec![*cspan, *aspan], "functions cannot be both `const` and `async`", ) .span_label(*cspan, "`const` because of this") .span_label(*aspan, "`async` because of this") .span_label(span, "") // Point at the fn header. .emit(); } if let FnKind::Fn( _, _, FnSig { span: sig_span, header: FnHeader { ext: Extern::Implicit, .. }, .. }, _, _, ) = fk { self.maybe_lint_missing_abi(*sig_span, id); } // Functions without bodies cannot have patterns. if let FnKind::Fn(ctxt, _, sig, _, None) = fk { Self::check_decl_no_pat(&sig.decl, |span, ident, mut_ident| { let (code, msg, label) = match ctxt { FnCtxt::Foreign => ( error_code!(E0130), "patterns aren't allowed in foreign function declarations", "pattern not allowed in foreign function", ), _ => ( error_code!(E0642), "patterns aren't allowed in functions without bodies", "pattern not allowed in function without body", ), }; if mut_ident && matches!(ctxt, FnCtxt::Assoc(_)) { if let Some(ident) = ident { let diag = BuiltinLintDiagnostics::PatternsInFnsWithoutBody(span, ident); self.lint_buffer.buffer_lint_with_diagnostic( PATTERNS_IN_FNS_WITHOUT_BODY, id, span, msg, diag, ) } } else { self.err_handler() .struct_span_err(span, msg) .span_label(span, label) .code(code) .emit(); } }); } visit::walk_fn(self, fk, span); } fn visit_assoc_item(&mut self, item: &'a AssocItem, ctxt: AssocCtxt) { if self.session.contains_name(&item.attrs, sym::no_mangle) { self.check_nomangle_item_asciionly(item.ident, item.span); } if ctxt == AssocCtxt::Trait || !self.in_trait_impl { self.check_defaultness(item.span, item.kind.defaultness()); } if ctxt == AssocCtxt::Impl { match &item.kind { AssocItemKind::Const(_, _, body) => { self.check_impl_item_provided(item.span, body, "constant", " = <expr>;"); } AssocItemKind::Fn(box FnKind(_, _, _, body)) => { self.check_impl_item_provided(item.span, body, "function", " { <body> }"); } AssocItemKind::TyAlias(box TyAliasKind(_, _, bounds, body)) => { self.check_impl_item_provided(item.span, body, "type", " = <type>;"); self.check_type_no_bounds(bounds, "`impl`s"); } _ => {} } } if ctxt == AssocCtxt::Trait || self.in_trait_impl { self.invalid_visibility(&item.vis, None); if let AssocItemKind::Fn(box FnKind(_, sig, _, _)) = &item.kind { self.check_trait_fn_not_const(sig.header.constness); self.check_trait_fn_not_async(item.span, sig.header.asyncness); } } if let AssocItemKind::Const(..) = item.kind { self.check_item_named(item.ident, "const"); } match item.kind { AssocItemKind::TyAlias(box TyAliasKind(_, ref generics, ref bounds, ref ty)) if ctxt == AssocCtxt::Trait => { self.visit_vis(&item.vis); self.visit_ident(item.ident); walk_list!(self, visit_attribute, &item.attrs); self.with_tilde_const_allowed(|this| { this.visit_generics(generics); walk_list!(this, visit_param_bound, bounds); }); walk_list!(self, visit_ty, ty); } AssocItemKind::Fn(box FnKind(_, ref sig, ref generics, ref body)) if self.in_const_trait_impl || ctxt == AssocCtxt::Trait || matches!(sig.header.constness, Const::Yes(_)) => { self.visit_vis(&item.vis); self.visit_ident(item.ident); self.with_tilde_const_allowed(|this| this.visit_generics(generics)); let kind = FnKind::Fn(FnCtxt::Assoc(ctxt), item.ident, sig, &item.vis, body.as_deref()); self.visit_fn(kind, item.span, item.id); } _ => self .with_in_trait_impl(false, None, |this| visit::walk_assoc_item(this, item, ctxt)), } } } /// When encountering an equality constraint in a `where` clause, emit an error. If the code seems /// like it's setting an associated type, provide an appropriate suggestion. fn deny_equality_constraints( this: &mut AstValidator<'_>, predicate: &WhereEqPredicate, generics: &Generics, ) { let mut err = this.err_handler().struct_span_err( predicate.span, "equality constraints are not yet supported in `where` clauses", ); err.span_label(predicate.span, "not supported"); // Given `<A as Foo>::Bar = RhsTy`, suggest `A: Foo<Bar = RhsTy>`. if let TyKind::Path(Some(qself), full_path) = &predicate.lhs_ty.kind { if let TyKind::Path(None, path) = &qself.ty.kind { match &path.segments[..] { [PathSegment { ident, args: None, .. }] => { for param in &generics.params { if param.ident == *ident { let param = ident; match &full_path.segments[qself.position..] { [PathSegment { ident, args, .. }] => { // Make a new `Path` from `foo::Bar` to `Foo<Bar = RhsTy>`. let mut assoc_path = full_path.clone(); // Remove `Bar` from `Foo::Bar`. assoc_path.segments.pop(); let len = assoc_path.segments.len() - 1; let gen_args = args.as_ref().map(|p| (**p).clone()); // Build `<Bar = RhsTy>`. let arg = AngleBracketedArg::Constraint(AssocTyConstraint { id: rustc_ast::node_id::DUMMY_NODE_ID, ident: *ident, gen_args, kind: AssocTyConstraintKind::Equality { ty: predicate.rhs_ty.clone(), }, span: ident.span, }); // Add `<Bar = RhsTy>` to `Foo`. match &mut assoc_path.segments[len].args { Some(args) => match args.deref_mut() { GenericArgs::Parenthesized(_) => continue, GenericArgs::AngleBracketed(args) => { args.args.push(arg); } }, empty_args => { *empty_args = AngleBracketedArgs { span: ident.span, args: vec![arg], } .into(); } } err.span_suggestion_verbose( predicate.span, &format!( "if `{}` is an associated type you're trying to set, \ use the associated type binding syntax", ident ), format!( "{}: {}", param, pprust::path_to_string(&assoc_path) ), Applicability::MaybeIncorrect, ); } _ => {} }; } } } _ => {} } } } err.note( "see issue #20041 <https://github.com/rust-lang/rust/issues/20041> for more information", ); err.emit(); } pub fn check_crate(session: &Session, krate: &Crate, lints: &mut LintBuffer) -> bool { let mut validator = AstValidator { session, extern_mod: None, in_trait_impl: false, in_const_trait_impl: false, has_proc_macro_decls: false, outer_impl_trait: None, is_tilde_const_allowed: false, is_impl_trait_banned: false, is_assoc_ty_bound_banned: false, is_let_allowed: false, lint_buffer: lints, }; visit::walk_crate(&mut validator, krate); validator.has_proc_macro_decls }
{ let max_num_args: usize = u16::MAX.into(); if fn_decl.inputs.len() > max_num_args { let Param { span, .. } = fn_decl.inputs[0]; self.err_handler().span_fatal( span, &format!("function can not have more than {} arguments", max_num_args), ); } }
nl.js
/*
CKEDITOR.lang['nl']={"wsc":{"btnIgnore":"Negeren","btnIgnoreAll":"Alles negeren","btnReplace":"Vervangen","btnReplaceAll":"Alles vervangen","btnUndo":"Ongedaan maken","changeTo":"Wijzig in","errorLoading":"Er is een fout opgetreden bij het laden van de dienst: %s.","ieSpellDownload":"De spellingscontrole is niet geïnstalleerd. Wilt u deze nu downloaden?","manyChanges":"Klaar met spellingscontrole: %1 woorden aangepast","noChanges":"Klaar met spellingscontrole: geen woorden aangepast","noMispell":"Klaar met spellingscontrole: geen fouten gevonden","noSuggestions":"- Geen suggesties -","notAvailable":"Excuses, deze dienst is momenteel niet beschikbaar.","notInDic":"Niet in het woordenboek","oneChange":"Klaar met spellingscontrole: één woord aangepast","progress":"Bezig met spellingscontrole...","title":"Spellingscontrole","toolbar":"Spellingscontrole"},"undo":{"redo":"Opnieuw uitvoeren","undo":"Ongedaan maken"},"toolbar":{"toolbarCollapse":"Werkbalk inklappen","toolbarExpand":"Werkbalk uitklappen","toolbarGroups":{"document":"Document","clipboard":"Klembord/Ongedaan maken","editing":"Bewerken","forms":"Formulieren","basicstyles":"Basisstijlen","paragraph":"Paragraaf","links":"Links","insert":"Invoegen","styles":"Stijlen","colors":"Kleuren","tools":"Toepassingen"},"toolbars":"Werkbalken"},"table":{"border":"Randdikte","caption":"Titel","cell":{"menu":"Cel","insertBefore":"Voeg cel in voor","insertAfter":"Voeg cel in na","deleteCell":"Cellen verwijderen","merge":"Cellen samenvoegen","mergeRight":"Voeg samen naar rechts","mergeDown":"Voeg samen naar beneden","splitHorizontal":"Splits cel horizontaal","splitVertical":"Splits cel vertikaal","title":"Celeigenschappen","cellType":"Celtype","rowSpan":"Rijen samenvoegen","colSpan":"Kolommen samenvoegen","wordWrap":"Automatische terugloop","hAlign":"Horizontale uitlijning","vAlign":"Verticale uitlijning","alignBaseline":"Tekstregel","bgColor":"Achtergrondkleur","borderColor":"Randkleur","data":"Gegevens","header":"Kop","yes":"Ja","no":"Nee","invalidWidth":"De celbreedte moet een getal zijn.","invalidHeight":"De celhoogte moet een getal zijn.","invalidRowSpan":"Rijen samenvoegen moet een heel getal zijn.","invalidColSpan":"Kolommen samenvoegen moet een heel getal zijn.","chooseColor":"Kies"},"cellPad":"Celopvulling","cellSpace":"Celafstand","column":{"menu":"Kolom","insertBefore":"Voeg kolom in voor","insertAfter":"Voeg kolom in na","deleteColumn":"Kolommen verwijderen"},"columns":"Kolommen","deleteTable":"Tabel verwijderen","headers":"Koppen","headersBoth":"Beide","headersColumn":"Eerste kolom","headersNone":"Geen","headersRow":"Eerste rij","invalidBorder":"De randdikte moet een getal zijn.","invalidCellPadding":"Celopvulling moet een getal zijn.","invalidCellSpacing":"Celafstand moet een getal zijn.","invalidCols":"Het aantal kolommen moet een getal zijn groter dan 0.","invalidHeight":"De tabelhoogte moet een getal zijn.","invalidRows":"Het aantal rijen moet een getal zijn groter dan 0.","invalidWidth":"De tabelbreedte moet een getal zijn.","menu":"Tabeleigenschappen","row":{"menu":"Rij","insertBefore":"Voeg rij in voor","insertAfter":"Voeg rij in na","deleteRow":"Rijen verwijderen"},"rows":"Rijen","summary":"Samenvatting","title":"Tabeleigenschappen","toolbar":"Tabel","widthPc":"procent","widthPx":"pixels","widthUnit":"eenheid breedte"},"stylescombo":{"label":"Stijl","panelTitle":"Opmaakstijlen","panelTitle1":"Blok stijlen","panelTitle2":"Inline stijlen","panelTitle3":"Object stijlen"},"specialchar":{"options":"Speciale tekens opties","title":"Selecteer speciaal teken","toolbar":"Speciaal teken invoegen"},"sourcearea":{"toolbar":"Broncode"},"scayt":{"btn_about":"Over SCAYT","btn_dictionaries":"Woordenboeken","btn_disable":"SCAYT uitschakelen","btn_enable":"SCAYT inschakelen","btn_langs":"Talen","btn_options":"Opties","text_title":"Controleer de spelling tijdens het typen"},"removeformat":{"toolbar":"Opmaak verwijderen"},"pastetext":{"button":"Plakken als platte tekst","title":"Plakken als platte tekst"},"pastefromword":{"confirmCleanup":"De tekst die u wilt plakken lijkt gekopieerd te zijn vanuit Word. Wilt u de tekst opschonen voordat deze geplakt wordt?","error":"Het was niet mogelijk om de geplakte tekst op te schonen door een interne fout","title":"Plakken vanuit Word","toolbar":"Plakken vanuit Word"},"maximize":{"maximize":"Maximaliseren","minimize":"Minimaliseren"},"magicline":{"title":"Hier paragraaf invoeren"},"list":{"bulletedlist":"Opsomming invoegen","numberedlist":"Genummerde lijst invoegen"},"link":{"acccessKey":"Toegangstoets","advanced":"Geavanceerd","advisoryContentType":"Aanbevolen content-type","advisoryTitle":"Adviserende titel","anchor":{"toolbar":"Interne link","menu":"Eigenschappen interne link","title":"Eigenschappen interne link","name":"Naam interne link","errorName":"Geef de naam van de interne link op","remove":"Interne link verwijderen"},"anchorId":"Op kenmerk interne link","anchorName":"Op naam interne link","charset":"Karakterset van gelinkte bron","cssClasses":"Stylesheet-klassen","emailAddress":"E-mailadres","emailBody":"Inhoud bericht","emailSubject":"Onderwerp bericht","id":"Id","info":"Linkomschrijving","langCode":"Taalcode","langDir":"Schrijfrichting","langDirLTR":"Links naar rechts (LTR)","langDirRTL":"Rechts naar links (RTL)","menu":"Link wijzigen","name":"Naam","noAnchors":"(Geen interne links in document gevonden)","noEmail":"Geef een e-mailadres","noUrl":"Geef de link van de URL","other":"<ander>","popupDependent":"Afhankelijk (Netscape)","popupFeatures":"Instellingen popupvenster","popupFullScreen":"Volledig scherm (IE)","popupLeft":"Positie links","popupLocationBar":"Locatiemenu","popupMenuBar":"Menubalk","popupResizable":"Herschaalbaar","popupScrollBars":"Schuifbalken","popupStatusBar":"Statusbalk","popupToolbar":"Werkbalk","popupTop":"Positie boven","rel":"Relatie","selectAnchor":"Kies een interne link","styles":"Stijl","tabIndex":"Tabvolgorde","target":"Doelvenster","targetFrame":"<frame>","targetFrameName":"Naam doelframe","targetPopup":"<popupvenster>","targetPopupName":"Naam popupvenster","title":"Link","toAnchor":"Interne link in pagina","toEmail":"E-mail","toUrl":"URL","toolbar":"Link invoegen/wijzigen","type":"Linktype","unlink":"Link verwijderen","upload":"Upload"},"indent":{"indent":"Inspringing vergroten","outdent":"Inspringing verkleinen"},"image":{"alt":"Alternatieve tekst","border":"Rand","btnUpload":"Naar server verzenden","button2Img":"Wilt u de geselecteerde afbeeldingsknop vervangen door een eenvoudige afbeelding?","hSpace":"HSpace","img2Button":"Wilt u de geselecteerde afbeelding vervangen door een afbeeldingsknop?","infoTab":"Informatie afbeelding","linkTab":"Link","lockRatio":"Afmetingen vergrendelen","menu":"Eigenschappen afbeelding","resetSize":"Afmetingen resetten","title":"Eigenschappen afbeelding","titleButton":"Eigenschappen afbeeldingsknop","upload":"Upload","urlMissing":"De URL naar de afbeelding ontbreekt.","vSpace":"VSpace","validateBorder":"Rand moet een heel nummer zijn.","validateHSpace":"HSpace moet een heel nummer zijn.","validateVSpace":"VSpace moet een heel nummer zijn."},"horizontalrule":{"toolbar":"Horizontale lijn invoegen"},"format":{"label":"Opmaak","panelTitle":"Opmaak","tag_address":"Adres","tag_div":"Normaal (DIV)","tag_h1":"Kop 1","tag_h2":"Kop 2","tag_h3":"Kop 3","tag_h4":"Kop 4","tag_h5":"Kop 5","tag_h6":"Kop 6","tag_p":"Normaal","tag_pre":"Met opmaak"},"fakeobjects":{"anchor":"Interne link","flash":"Flash animatie","hiddenfield":"Verborgen veld","iframe":"IFrame","unknown":"Onbekend object"},"elementspath":{"eleLabel":"Elementenpad","eleTitle":"%1 element"},"contextmenu":{"options":"Contextmenu opties"},"clipboard":{"copy":"Kopiëren","copyError":"De beveiligingsinstelling van de browser verhinderen het automatisch kopiëren. Gebruik de sneltoets Ctrl/Cmd+C van het toetsenbord.","cut":"Knippen","cutError":"De beveiligingsinstelling van de browser verhinderen het automatisch knippen. Gebruik de sneltoets Ctrl/Cmd+X van het toetsenbord.","paste":"Plakken","pasteArea":"Plakgebied","pasteMsg":"Plak de tekst in het volgende vak gebruikmakend van uw toetsenbord (<strong>Ctrl/Cmd+V</strong>) en klik op OK.","securityMsg":"Door de beveiligingsinstellingen van uw browser is het niet mogelijk om direct vanuit het klembord in de editor te plakken. Middels opnieuw plakken in dit venster kunt u de tekst alsnog plakken in de editor.","title":"Plakken"},"button":{"selectedLabel":"%1 (Geselecteerd)"},"blockquote":{"toolbar":"Citaatblok"},"basicstyles":{"bold":"Vet","italic":"Cursief","strike":"Doorhalen","subscript":"Subscript","superscript":"Superscript","underline":"Onderstrepen"},"about":{"copy":"Copyright &copy; $1. Alle rechten voorbehouden.","dlgTitle":"Over CKEditor","help":"Bekijk de $1 voor hulp.","moreInfo":"Bezoek onze website voor licentieinformatie:","title":"Over CKEditor","userGuide":"CKEditor gebruiksaanwijzing"},"editor":"Tekstverwerker","editorPanel":"Tekstverwerker beheerpaneel","common":{"editorHelp":"Druk ALT 0 voor hulp","browseServer":"Bladeren op server","url":"URL","protocol":"Protocol","upload":"Upload","uploadSubmit":"Naar server verzenden","image":"Afbeelding","flash":"Flash","form":"Formulier","checkbox":"Selectievinkje","radio":"Keuzerondje","textField":"Tekstveld","textarea":"Tekstvak","hiddenField":"Verborgen veld","button":"Knop","select":"Selectieveld","imageButton":"Afbeeldingsknop","notSet":"<niet ingevuld>","id":"Id","name":"Naam","langDir":"Schrijfrichting","langDirLtr":"Links naar rechts (LTR)","langDirRtl":"Rechts naar links (RTL)","langCode":"Taalcode","longDescr":"Lange URL-omschrijving","cssClass":"Stylesheet-klassen","advisoryTitle":"Adviserende titel","cssStyle":"Stijl","ok":"OK","cancel":"Annuleren","close":"Sluiten","preview":"Voorbeeld","resize":"Sleep om te herschalen","generalTab":"Algemeen","advancedTab":"Geavanceerd","validateNumberFailed":"Deze waarde is geen geldig getal.","confirmNewPage":"Alle aangebrachte wijzigingen gaan verloren. Weet u zeker dat u een nieuwe pagina wilt openen?","confirmCancel":"Enkele opties zijn gewijzigd. Weet u zeker dat u dit dialoogvenster wilt sluiten?","options":"Opties","target":"Doelvenster","targetNew":"Nieuw venster (_blank)","targetTop":"Hele venster (_top)","targetSelf":"Zelfde venster (_self)","targetParent":"Origineel venster (_parent)","langDirLTR":"Links naar rechts (LTR)","langDirRTL":"Rechts naar links (RTL)","styles":"Stijl","cssClasses":"Stylesheet-klassen","width":"Breedte","height":"Hoogte","align":"Uitlijning","alignLeft":"Links","alignRight":"Rechts","alignCenter":"Centreren","alignJustify":"Uitvullen","alignTop":"Boven","alignMiddle":"Midden","alignBottom":"Onder","alignNone":"Geen","invalidValue":"Ongeldige waarde.","invalidHeight":"De hoogte moet een getal zijn.","invalidWidth":"De breedte moet een getal zijn.","invalidCssLength":"Waarde in veld \"%1\" moet een positief nummer zijn, met of zonder een geldige CSS meeteenheid (px, %, in, cm, mm, em, ex, pt of pc).","invalidHtmlLength":"Waarde in veld \"%1\" moet een positief nummer zijn, met of zonder een geldige HTML meeteenheid (px of %).","invalidInlineStyle":"Waarde voor de online stijl moet bestaan uit een of meerdere tupels met het formaat \"naam : waarde\", gescheiden door puntkomma's.","cssLengthTooltip":"Geef een nummer in voor een waarde in pixels of geef een nummer in met een geldige CSS eenheid (px, %, in, cm, mm, em, ex, pt, of pc).","unavailable":"%1<span class=\"cke_accessibility\">, niet beschikbaar</span>"}};
Copyright (c) 2003-2016, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or http://ckeditor.com/license */
apihandler.go
// Copyright 2017 The Kubernetes Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "log" "net/http" "strconv" "strings" restful "github.com/emicklei/go-restful" "github.com/kubernetes/dashboard/src/app/backend/api" "github.com/kubernetes/dashboard/src/app/backend/auth" authApi "github.com/kubernetes/dashboard/src/app/backend/auth/api" clientapi "github.com/kubernetes/dashboard/src/app/backend/client/api" kdErrors "github.com/kubernetes/dashboard/src/app/backend/errors" "github.com/kubernetes/dashboard/src/app/backend/integration" metricapi "github.com/kubernetes/dashboard/src/app/backend/integration/metric/api" "github.com/kubernetes/dashboard/src/app/backend/resource/clusterrole" "github.com/kubernetes/dashboard/src/app/backend/resource/common" "github.com/kubernetes/dashboard/src/app/backend/resource/configmap" "github.com/kubernetes/dashboard/src/app/backend/resource/container" "github.com/kubernetes/dashboard/src/app/backend/resource/controller" "github.com/kubernetes/dashboard/src/app/backend/resource/cronjob" "github.com/kubernetes/dashboard/src/app/backend/resource/daemonset" "github.com/kubernetes/dashboard/src/app/backend/resource/dataselect" "github.com/kubernetes/dashboard/src/app/backend/resource/deployment" "github.com/kubernetes/dashboard/src/app/backend/resource/endpoint" "github.com/kubernetes/dashboard/src/app/backend/resource/event" "github.com/kubernetes/dashboard/src/app/backend/resource/horizontalpodautoscaler" "github.com/kubernetes/dashboard/src/app/backend/resource/ingress" "github.com/kubernetes/dashboard/src/app/backend/resource/job" "github.com/kubernetes/dashboard/src/app/backend/resource/logs" ns "github.com/kubernetes/dashboard/src/app/backend/resource/namespace" "github.com/kubernetes/dashboard/src/app/backend/resource/node" "github.com/kubernetes/dashboard/src/app/backend/resource/persistentvolume" "github.com/kubernetes/dashboard/src/app/backend/resource/persistentvolumeclaim" "github.com/kubernetes/dashboard/src/app/backend/resource/pod" "github.com/kubernetes/dashboard/src/app/backend/resource/replicaset" "github.com/kubernetes/dashboard/src/app/backend/resource/replicationcontroller" "github.com/kubernetes/dashboard/src/app/backend/resource/secret" resourceService "github.com/kubernetes/dashboard/src/app/backend/resource/service" "github.com/kubernetes/dashboard/src/app/backend/resource/statefulset" "github.com/kubernetes/dashboard/src/app/backend/resource/storageclass" "github.com/kubernetes/dashboard/src/app/backend/scaling" "github.com/kubernetes/dashboard/src/app/backend/settings" "github.com/kubernetes/dashboard/src/app/backend/systembanner" "github.com/kubernetes/dashboard/src/app/backend/validation" "golang.org/x/net/xsrftoken" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/remotecommand" ) const ( // RequestLogString is a template for request log message. RequestLogString = "[%s] Incoming %s %s %s request from %s: %s" // ResponseLogString is a template for response log message. ResponseLogString = "[%s] Outcoming response to %s with %d status code" ) // APIHandler is a representation of API handler. Structure contains clientapi, Heapster clientapi and clientapi configuration. type APIHandler struct { iManager integration.IntegrationManager cManager clientapi.ClientManager sManager settings.SettingsManager } // TerminalResponse is sent by handleExecShell. The Id is a random session id that binds the original REST request and the SockJS connection. // Any clientapi in possession of this Id can hijack the terminal session. type TerminalResponse struct { Id string `json:"id"` } // CreateHTTPAPIHandler creates a new HTTP handler that handles all requests to the API of the backend. func CreateHTTPAPIHandler(iManager integration.IntegrationManager, cManager clientapi.ClientManager, authManager authApi.AuthManager, sManager settings.SettingsManager, sbManager systembanner.SystemBannerManager) ( http.Handler, error) { apiHandler := APIHandler{iManager: iManager, cManager: cManager} wsContainer := restful.NewContainer() wsContainer.EnableContentEncoding(true) apiV1Ws := new(restful.WebService) InstallFilters(apiV1Ws, cManager) apiV1Ws.Path("/api/v1"). Consumes(restful.MIME_JSON). Produces(restful.MIME_JSON) wsContainer.Add(apiV1Ws) integrationHandler := integration.NewIntegrationHandler(iManager) integrationHandler.Install(apiV1Ws) authHandler := auth.NewAuthHandler(authManager) authHandler.Install(apiV1Ws) settingsHandler := settings.NewSettingsHandler(sManager) settingsHandler.Install(apiV1Ws) systemBannerHandler := systembanner.NewSystemBannerHandler(sbManager) systemBannerHandler.Install(apiV1Ws) apiV1Ws.Route( apiV1Ws.GET("csrftoken/{action}"). To(apiHandler.handleGetCsrfToken). Writes(api.CsrfToken{})) apiV1Ws.Route( apiV1Ws.POST("/appdeployment"). To(apiHandler.handleDeploy). Reads(deployment.AppDeploymentSpec{}). Writes(deployment.AppDeploymentSpec{})) apiV1Ws.Route( apiV1Ws.POST("/appdeployment/validate/name"). To(apiHandler.handleNameValidity). Reads(validation.AppNameValiditySpec{}). Writes(validation.AppNameValidity{})) apiV1Ws.Route( apiV1Ws.POST("/appdeployment/validate/imagereference"). To(apiHandler.handleImageReferenceValidity). Reads(validation.ImageReferenceValiditySpec{}). Writes(validation.ImageReferenceValidity{})) apiV1Ws.Route( apiV1Ws.POST("/appdeployment/validate/protocol"). To(apiHandler.handleProtocolValidity). Reads(validation.ProtocolValiditySpec{}). Writes(validation.ProtocolValidity{})) apiV1Ws.Route( apiV1Ws.GET("/appdeployment/protocols"). To(apiHandler.handleGetAvailableProcotols). Writes(deployment.Protocols{})) apiV1Ws.Route( apiV1Ws.POST("/appdeploymentfromfile"). To(apiHandler.handleDeployFromFile). Reads(deployment.AppDeploymentFromFileSpec{}). Writes(deployment.AppDeploymentFromFileResponse{})) apiV1Ws.Route( apiV1Ws.GET("/replicationcontroller"). To(apiHandler.handleGetReplicationControllerList). Writes(replicationcontroller.ReplicationControllerList{})) apiV1Ws.Route( apiV1Ws.GET("/replicationcontroller/{namespace}"). To(apiHandler.handleGetReplicationControllerList). Writes(replicationcontroller.ReplicationControllerList{})) apiV1Ws.Route( apiV1Ws.GET("/replicationcontroller/{namespace}/{replicationController}"). To(apiHandler.handleGetReplicationControllerDetail). Writes(replicationcontroller.ReplicationControllerDetail{})) apiV1Ws.Route( apiV1Ws.POST("/replicationcontroller/{namespace}/{replicationController}/update/pod"). To(apiHandler.handleUpdateReplicasCount). Reads(replicationcontroller.ReplicationControllerSpec{})) apiV1Ws.Route( apiV1Ws.GET("/replicationcontroller/{namespace}/{replicationController}/pod"). To(apiHandler.handleGetReplicationControllerPods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/replicationcontroller/{namespace}/{replicationController}/event"). To(apiHandler.handleGetReplicationControllerEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/replicationcontroller/{namespace}/{replicationController}/service").
apiV1Ws.GET("/replicaset"). To(apiHandler.handleGetReplicaSets). Writes(replicaset.ReplicaSetList{})) apiV1Ws.Route( apiV1Ws.GET("/replicaset/{namespace}"). To(apiHandler.handleGetReplicaSets). Writes(replicaset.ReplicaSetList{})) apiV1Ws.Route( apiV1Ws.GET("/replicaset/{namespace}/{replicaSet}"). To(apiHandler.handleGetReplicaSetDetail). Writes(replicaset.ReplicaSetDetail{})) apiV1Ws.Route( apiV1Ws.GET("/replicaset/{namespace}/{replicaSet}/pod"). To(apiHandler.handleGetReplicaSetPods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/replicaset/{namespace}/{replicaSet}/service"). To(apiHandler.handleGetReplicaSetServices). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/replicaset/{namespace}/{replicaSet}/event"). To(apiHandler.handleGetReplicaSetEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/pod"). To(apiHandler.handleGetPods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/pod/{namespace}"). To(apiHandler.handleGetPods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/pod/{namespace}/{pod}"). To(apiHandler.handleGetPodDetail). Writes(pod.PodDetail{})) apiV1Ws.Route( apiV1Ws.GET("/pod/{namespace}/{pod}/container"). To(apiHandler.handleGetPodContainers). Writes(pod.PodDetail{})) apiV1Ws.Route( apiV1Ws.GET("/pod/{namespace}/{pod}/event"). To(apiHandler.handleGetPodEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/pod/{namespace}/{pod}/shell/{container}"). To(apiHandler.handleExecShell). Writes(TerminalResponse{})) apiV1Ws.Route( apiV1Ws.GET("/pod/{namespace}/{pod}/persistentvolumeclaim"). To(apiHandler.handleGetPodPersistentVolumeClaims). Writes(persistentvolumeclaim.PersistentVolumeClaimList{})) apiV1Ws.Route( apiV1Ws.GET("/deployment"). To(apiHandler.handleGetDeployments). Writes(deployment.DeploymentList{})) apiV1Ws.Route( apiV1Ws.GET("/deployment/{namespace}"). To(apiHandler.handleGetDeployments). Writes(deployment.DeploymentList{})) apiV1Ws.Route( apiV1Ws.GET("/deployment/{namespace}/{deployment}"). To(apiHandler.handleGetDeploymentDetail). Writes(deployment.DeploymentDetail{})) apiV1Ws.Route( apiV1Ws.GET("/deployment/{namespace}/{deployment}/event"). To(apiHandler.handleGetDeploymentEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/deployment/{namespace}/{deployment}/oldreplicaset"). To(apiHandler.handleGetDeploymentOldReplicaSets). Writes(replicaset.ReplicaSetList{})) apiV1Ws.Route( apiV1Ws.PUT("/scale/{kind}/{namespace}/{name}/"). To(apiHandler.handleScaleResource). Writes(scaling.ReplicaCounts{})) apiV1Ws.Route( apiV1Ws.GET("/scale/{kind}/{namespace}/{name}"). To(apiHandler.handleGetReplicaCount). Writes(scaling.ReplicaCounts{})) apiV1Ws.Route( apiV1Ws.GET("/daemonset"). To(apiHandler.handleGetDaemonSetList). Writes(daemonset.DaemonSetList{})) apiV1Ws.Route( apiV1Ws.GET("/daemonset/{namespace}"). To(apiHandler.handleGetDaemonSetList). Writes(daemonset.DaemonSetList{})) apiV1Ws.Route( apiV1Ws.GET("/daemonset/{namespace}/{daemonSet}"). To(apiHandler.handleGetDaemonSetDetail). Writes(daemonset.DaemonSetDetail{})) apiV1Ws.Route( apiV1Ws.GET("/daemonset/{namespace}/{daemonSet}/pod"). To(apiHandler.handleGetDaemonSetPods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/daemonset/{namespace}/{daemonSet}/service"). To(apiHandler.handleGetDaemonSetServices). Writes(resourceService.ServiceList{})) apiV1Ws.Route( apiV1Ws.GET("/daemonset/{namespace}/{daemonSet}/event"). To(apiHandler.handleGetDaemonSetEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/horizontalpodautoscaler"). To(apiHandler.handleGetHorizontalPodAutoscalerList). Writes(horizontalpodautoscaler.HorizontalPodAutoscalerList{})) apiV1Ws.Route( apiV1Ws.GET("/horizontalpodautoscaler/{namespace}"). To(apiHandler.handleGetHorizontalPodAutoscalerList). Writes(horizontalpodautoscaler.HorizontalPodAutoscalerList{})) apiV1Ws.Route( apiV1Ws.GET("/horizontalpodautoscaler/{namespace}/{horizontalpodautoscaler}"). To(apiHandler.handleGetHorizontalPodAutoscalerDetail). Writes(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{})) apiV1Ws.Route( apiV1Ws.GET("/job"). To(apiHandler.handleGetJobList). Writes(job.JobList{})) apiV1Ws.Route( apiV1Ws.GET("/job/{namespace}"). To(apiHandler.handleGetJobList). Writes(job.JobList{})) apiV1Ws.Route( apiV1Ws.GET("/job/{namespace}/{name}"). To(apiHandler.handleGetJobDetail). Writes(job.JobDetail{})) apiV1Ws.Route( apiV1Ws.GET("/job/{namespace}/{name}/pod"). To(apiHandler.handleGetJobPods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/job/{namespace}/{name}/event"). To(apiHandler.handleGetJobEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/cronjob"). To(apiHandler.handleGetCronJobList). Writes(cronjob.CronJobList{})) apiV1Ws.Route( apiV1Ws.GET("/cronjob/{namespace}"). To(apiHandler.handleGetCronJobList). Writes(cronjob.CronJobList{})) apiV1Ws.Route( apiV1Ws.GET("/cronjob/{namespace}/{name}"). To(apiHandler.handleGetCronJobDetail). Writes(cronjob.CronJobDetail{})) apiV1Ws.Route( apiV1Ws.GET("/cronjob/{namespace}/{name}/job"). To(apiHandler.handleGetCronJobJobs). Writes(job.JobList{})) apiV1Ws.Route( apiV1Ws.GET("/cronjob/{namespace}/{name}/event"). To(apiHandler.handleGetCronJobEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.PUT("/cronjob/{namespace}/{name}/trigger"). To(apiHandler.handleTriggerCronJob)) apiV1Ws.Route( apiV1Ws.POST("/namespace"). To(apiHandler.handleCreateNamespace). Reads(ns.NamespaceSpec{}). Writes(ns.NamespaceSpec{})) apiV1Ws.Route( apiV1Ws.GET("/namespace"). To(apiHandler.handleGetNamespaces). Writes(ns.NamespaceList{})) apiV1Ws.Route( apiV1Ws.GET("/namespace/{name}"). To(apiHandler.handleGetNamespaceDetail). Writes(ns.NamespaceDetail{})) apiV1Ws.Route( apiV1Ws.GET("/namespace/{name}/event"). To(apiHandler.handleGetNamespaceEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/secret"). To(apiHandler.handleGetSecretList). Writes(secret.SecretList{})) apiV1Ws.Route( apiV1Ws.GET("/secret/{namespace}"). To(apiHandler.handleGetSecretList). Writes(secret.SecretList{})) apiV1Ws.Route( apiV1Ws.GET("/secret/{namespace}/{name}"). To(apiHandler.handleGetSecretDetail). Writes(secret.SecretDetail{})) apiV1Ws.Route( apiV1Ws.POST("/secret"). To(apiHandler.handleCreateImagePullSecret). Reads(secret.ImagePullSecretSpec{}). Writes(secret.Secret{})) apiV1Ws.Route( apiV1Ws.GET("/configmap"). To(apiHandler.handleGetConfigMapList). Writes(configmap.ConfigMapList{})) apiV1Ws.Route( apiV1Ws.GET("/configmap/{namespace}"). To(apiHandler.handleGetConfigMapList). Writes(configmap.ConfigMapList{})) apiV1Ws.Route( apiV1Ws.GET("/configmap/{namespace}/{configmap}"). To(apiHandler.handleGetConfigMapDetail). Writes(configmap.ConfigMapDetail{})) apiV1Ws.Route( apiV1Ws.GET("/service"). To(apiHandler.handleGetServiceList). Writes(resourceService.ServiceList{})) apiV1Ws.Route( apiV1Ws.GET("/service/{namespace}"). To(apiHandler.handleGetServiceList). Writes(resourceService.ServiceList{})) apiV1Ws.Route( apiV1Ws.GET("/service/{namespace}/{service}"). To(apiHandler.handleGetServiceDetail). Writes(resourceService.ServiceDetail{})) apiV1Ws.Route( apiV1Ws.GET("/service/{namespace}/{service}/event"). To(apiHandler.handleGetServiceEvent). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/service/{namespace}/{service}/pod"). To(apiHandler.handleGetServicePods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/ingress"). To(apiHandler.handleGetIngressList). Writes(ingress.IngressList{})) apiV1Ws.Route( apiV1Ws.GET("/ingress/{namespace}"). To(apiHandler.handleGetIngressList). Writes(ingress.IngressList{})) apiV1Ws.Route( apiV1Ws.GET("/ingress/{namespace}/{name}"). To(apiHandler.handleGetIngressDetail). Writes(ingress.IngressDetail{})) apiV1Ws.Route( apiV1Ws.GET("/statefulset"). To(apiHandler.handleGetStatefulSetList). Writes(statefulset.StatefulSetList{})) apiV1Ws.Route( apiV1Ws.GET("/statefulset/{namespace}"). To(apiHandler.handleGetStatefulSetList). Writes(statefulset.StatefulSetList{})) apiV1Ws.Route( apiV1Ws.GET("/statefulset/{namespace}/{statefulset}"). To(apiHandler.handleGetStatefulSetDetail). Writes(statefulset.StatefulSetDetail{})) apiV1Ws.Route( apiV1Ws.GET("/statefulset/{namespace}/{statefulset}/pod"). To(apiHandler.handleGetStatefulSetPods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.GET("/statefulset/{namespace}/{statefulset}/event"). To(apiHandler.handleGetStatefulSetEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/node"). To(apiHandler.handleGetNodeList). Writes(node.NodeList{})) apiV1Ws.Route( apiV1Ws.GET("/node/{name}"). To(apiHandler.handleGetNodeDetail). Writes(node.NodeDetail{})) apiV1Ws.Route( apiV1Ws.GET("/node/{name}/event"). To(apiHandler.handleGetNodeEvents). Writes(common.EventList{})) apiV1Ws.Route( apiV1Ws.GET("/node/{name}/pod"). To(apiHandler.handleGetNodePods). Writes(pod.PodList{})) apiV1Ws.Route( apiV1Ws.DELETE("/_raw/{kind}/namespace/{namespace}/name/{name}"). To(apiHandler.handleDeleteResource)) apiV1Ws.Route( apiV1Ws.GET("/_raw/{kind}/namespace/{namespace}/name/{name}"). To(apiHandler.handleGetResource)) apiV1Ws.Route( apiV1Ws.PUT("/_raw/{kind}/namespace/{namespace}/name/{name}"). To(apiHandler.handlePutResource)) apiV1Ws.Route( apiV1Ws.DELETE("/_raw/{kind}/name/{name}"). To(apiHandler.handleDeleteResource)) apiV1Ws.Route( apiV1Ws.GET("/_raw/{kind}/name/{name}"). To(apiHandler.handleGetResource)) apiV1Ws.Route( apiV1Ws.PUT("/_raw/{kind}/name/{name}"). To(apiHandler.handlePutResource)) apiV1Ws.Route( apiV1Ws.GET("/clusterrole"). To(apiHandler.handleGetClusterRoleList). Writes(clusterrole.ClusterRoleList{})) apiV1Ws.Route( apiV1Ws.GET("/clusterrole/{name}"). To(apiHandler.handleGetClusterRoleDetail). Writes(clusterrole.ClusterRoleDetail{})) apiV1Ws.Route( apiV1Ws.GET("/persistentvolume"). To(apiHandler.handleGetPersistentVolumeList). Writes(persistentvolume.PersistentVolumeList{})) apiV1Ws.Route( apiV1Ws.GET("/persistentvolume/{persistentvolume}"). To(apiHandler.handleGetPersistentVolumeDetail). Writes(persistentvolume.PersistentVolumeDetail{})) apiV1Ws.Route( apiV1Ws.GET("/persistentvolume/namespace/{namespace}/name/{persistentvolume}"). To(apiHandler.handleGetPersistentVolumeDetail). Writes(persistentvolume.PersistentVolumeDetail{})) apiV1Ws.Route( apiV1Ws.GET("/persistentvolumeclaim/"). To(apiHandler.handleGetPersistentVolumeClaimList). Writes(persistentvolumeclaim.PersistentVolumeClaimList{})) apiV1Ws.Route( apiV1Ws.GET("/persistentvolumeclaim/{namespace}"). To(apiHandler.handleGetPersistentVolumeClaimList). Writes(persistentvolumeclaim.PersistentVolumeClaimList{})) apiV1Ws.Route( apiV1Ws.GET("/persistentvolumeclaim/{namespace}/{name}"). To(apiHandler.handleGetPersistentVolumeClaimDetail). Writes(persistentvolumeclaim.PersistentVolumeClaimDetail{})) apiV1Ws.Route( apiV1Ws.GET("/storageclass"). To(apiHandler.handleGetStorageClassList). Writes(storageclass.StorageClassList{})) apiV1Ws.Route( apiV1Ws.GET("/storageclass/{storageclass}"). To(apiHandler.handleGetStorageClass). Writes(storageclass.StorageClass{})) apiV1Ws.Route( apiV1Ws.GET("/storageclass/{storageclass}/persistentvolume"). To(apiHandler.handleGetStorageClassPersistentVolumes). Writes(persistentvolume.PersistentVolumeList{})) apiV1Ws.Route( apiV1Ws.GET("/log/source/{namespace}/{resourceName}/{resourceType}"). To(apiHandler.handleLogSource). Writes(controller.LogSources{})) apiV1Ws.Route( apiV1Ws.GET("/log/{namespace}/{pod}"). To(apiHandler.handleLogs). Writes(logs.LogDetails{})) apiV1Ws.Route( apiV1Ws.GET("/log/{namespace}/{pod}/{container}"). To(apiHandler.handleLogs). Writes(logs.LogDetails{})) apiV1Ws.Route( apiV1Ws.GET("/log/file/{namespace}/{pod}/{container}"). To(apiHandler.handleLogFile). Writes(logs.LogDetails{})) return wsContainer, nil } // TODO: Handle case in which RBAC feature is not enabled in API server. Currently returns 404 resource not found func (apiHandler *APIHandler) handleGetClusterRoleList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } dataSelect := parseDataSelectPathParameter(request) result, err := clusterrole.GetClusterRoleList(k8sClient, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetClusterRoleDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("name") result, err := clusterrole.GetClusterRoleDetail(k8sClient, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleRbacStatus(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } result, err := validation.ValidateRbacStatus(k8sClient) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetCsrfToken(request *restful.Request, response *restful.Response) { action := request.PathParameter("action") token := xsrftoken.Generate(apiHandler.cManager.CSRFKey(), "none", action) response.WriteHeaderAndEntity(http.StatusOK, api.CsrfToken{Token: token}) } func (apiHandler *APIHandler) handleGetStatefulSetList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := statefulset.GetStatefulSetList(k8sClient, namespace, dataSelect, apiHandler.iManager.Metric().Client()) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetStatefulSetDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("statefulset") result, err := statefulset.GetStatefulSetDetail(k8sClient, apiHandler.iManager.Metric().Client(), namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetStatefulSetPods(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("statefulset") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := statefulset.GetStatefulSetPods(k8sClient, apiHandler.iManager.Metric().Client(), dataSelect, name, namespace) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetStatefulSetEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("statefulset") dataSelect := parseDataSelectPathParameter(request) result, err := event.GetResourceEvents(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetServiceList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) result, err := resourceService.GetServiceList(k8sClient, namespace, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetServiceEndpoints(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("service") result, err := endpoint.GetServiceEndpoints(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetServiceDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("service") result, err := resourceService.GetServiceDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetServiceEvent(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("service") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := resourceService.GetServiceEvents(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetIngressDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") result, err := ingress.GetIngressDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetIngressList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } dataSelect := parseDataSelectPathParameter(request) namespace := parseNamespacePathParameter(request) result, err := ingress.GetIngressList(k8sClient, namespace, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetServicePods(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("service") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := resourceService.GetServicePods(k8sClient, apiHandler.iManager.Metric().Client(), namespace, name, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetNodeList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := node.GetNodeList(k8sClient, dataSelect, apiHandler.iManager.Metric().Client()) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetNodeDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := node.GetNodeDetail(k8sClient, apiHandler.iManager.Metric().Client(), name, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetNodeEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := event.GetNodeEvents(k8sClient, dataSelect, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetNodePods(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := node.GetNodePods(k8sClient, apiHandler.iManager.Metric().Client(), dataSelect, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleDeploy(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } appDeploymentSpec := new(deployment.AppDeploymentSpec) if err := request.ReadEntity(appDeploymentSpec); err != nil { kdErrors.HandleInternalError(response, err) return } if err := deployment.DeployApp(appDeploymentSpec, k8sClient); err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusCreated, appDeploymentSpec) } func (apiHandler *APIHandler) handleScaleResource(request *restful.Request, response *restful.Response) { cfg, err := apiHandler.cManager.Config(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") kind := request.PathParameter("kind") name := request.PathParameter("name") count := request.QueryParameter("scaleBy") replicaCountSpec, err := scaling.ScaleResource(cfg, kind, namespace, name, count) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, replicaCountSpec) } func (apiHandler *APIHandler) handleGetReplicaCount(request *restful.Request, response *restful.Response) { cfg, err := apiHandler.cManager.Config(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") kind := request.PathParameter("kind") name := request.PathParameter("name") scaleSpec, err := scaling.GetScaleSpec(cfg, kind, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, scaleSpec) } func (apiHandler *APIHandler) handleDeployFromFile(request *restful.Request, response *restful.Response) { cfg, err := apiHandler.cManager.Config(request) if err != nil { kdErrors.HandleInternalError(response, err) return } deploymentSpec := new(deployment.AppDeploymentFromFileSpec) if err := request.ReadEntity(deploymentSpec); err != nil { kdErrors.HandleInternalError(response, err) return } isDeployed, err := deployment.DeployAppFromFile(cfg, deploymentSpec) if !isDeployed { kdErrors.HandleInternalError(response, err) return } errorMessage := "" if err != nil { errorMessage = err.Error() } response.WriteHeaderAndEntity(http.StatusCreated, deployment.AppDeploymentFromFileResponse{ Name: deploymentSpec.Name, Content: deploymentSpec.Content, Error: errorMessage, }) } func (apiHandler *APIHandler) handleNameValidity(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } spec := new(validation.AppNameValiditySpec) if err := request.ReadEntity(spec); err != nil { kdErrors.HandleInternalError(response, err) return } validity, err := validation.ValidateAppName(spec, k8sClient) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, validity) } func (APIHandler *APIHandler) handleImageReferenceValidity(request *restful.Request, response *restful.Response) { spec := new(validation.ImageReferenceValiditySpec) if err := request.ReadEntity(spec); err != nil { kdErrors.HandleInternalError(response, err) return } validity, err := validation.ValidateImageReference(spec) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, validity) } func (apiHandler *APIHandler) handleProtocolValidity(request *restful.Request, response *restful.Response) { spec := new(validation.ProtocolValiditySpec) if err := request.ReadEntity(spec); err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, validation.ValidateProtocol(spec)) } func (apiHandler *APIHandler) handleGetAvailableProcotols(request *restful.Request, response *restful.Response) { response.WriteHeaderAndEntity(http.StatusOK, deployment.GetAvailableProtocols()) } func (apiHandler *APIHandler) handleGetReplicationControllerList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := replicationcontroller.GetReplicationControllerList(k8sClient, namespace, dataSelect, apiHandler.iManager.Metric().Client()) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetReplicaSets(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := replicaset.GetReplicaSetList(k8sClient, namespace, dataSelect, apiHandler.iManager.Metric().Client()) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetReplicaSetDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") replicaSet := request.PathParameter("replicaSet") result, err := replicaset.GetReplicaSetDetail(k8sClient, apiHandler.iManager.Metric().Client(), namespace, replicaSet) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetReplicaSetPods(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") replicaSet := request.PathParameter("replicaSet") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := replicaset.GetReplicaSetPods(k8sClient, apiHandler.iManager.Metric().Client(), dataSelect, replicaSet, namespace) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetReplicaSetServices(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") replicaSet := request.PathParameter("replicaSet") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := replicaset.GetReplicaSetServices(k8sClient, dataSelect, namespace, replicaSet) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetReplicaSetEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("replicaSet") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := event.GetResourceEvents(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPodEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } log.Println("Getting events related to a pod in namespace") namespace := request.PathParameter("namespace") name := request.PathParameter("pod") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := pod.GetEventsForPod(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } // Handles execute shell API call func (apiHandler *APIHandler) handleExecShell(request *restful.Request, response *restful.Response) { sessionId, err := genTerminalSessionId() if err != nil { kdErrors.HandleInternalError(response, err) return } k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } cfg, err := apiHandler.cManager.Config(request) if err != nil { kdErrors.HandleInternalError(response, err) return } terminalSessions.Set(sessionId, TerminalSession{ id: sessionId, bound: make(chan error), sizeChan: make(chan remotecommand.TerminalSize), }) go WaitForTerminal(k8sClient, cfg, request, sessionId) response.WriteHeaderAndEntity(http.StatusOK, TerminalResponse{Id: sessionId}) } func (apiHandler *APIHandler) handleGetDeployments(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := deployment.GetDeploymentList(k8sClient, namespace, dataSelect, apiHandler.iManager.Metric().Client()) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetDeploymentDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("deployment") result, err := deployment.GetDeploymentDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetDeploymentEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("deployment") dataSelect := parseDataSelectPathParameter(request) result, err := event.GetResourceEvents(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetDeploymentOldReplicaSets(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("deployment") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := deployment.GetDeploymentOldReplicaSets(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPods(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics // download standard metrics - cpu, and memory - by default result, err := pod.GetPodList(k8sClient, apiHandler.iManager.Metric().Client(), namespace, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPodDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("pod") result, err := pod.GetPodDetail(k8sClient, apiHandler.iManager.Metric().Client(), namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetReplicationControllerDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("replicationController") result, err := replicationcontroller.GetReplicationControllerDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleUpdateReplicasCount(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("replicationController") spec := new(replicationcontroller.ReplicationControllerSpec) if err := request.ReadEntity(spec); err != nil { kdErrors.HandleInternalError(response, err) return } if err := replicationcontroller.UpdateReplicasCount(k8sClient, namespace, name, spec); err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeader(http.StatusAccepted) } func (apiHandler *APIHandler) handleGetResource(request *restful.Request, response *restful.Response) { verber, err := apiHandler.cManager.VerberClient(request) if err != nil { kdErrors.HandleInternalError(response, err) return } kind := request.PathParameter("kind") namespace, ok := request.PathParameters()["namespace"] name := request.PathParameter("name") result, err := verber.Get(kind, ok, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handlePutResource( request *restful.Request, response *restful.Response) { verber, err := apiHandler.cManager.VerberClient(request) if err != nil { kdErrors.HandleInternalError(response, err) return } kind := request.PathParameter("kind") namespace, ok := request.PathParameters()["namespace"] name := request.PathParameter("name") putSpec := &runtime.Unknown{} if err := request.ReadEntity(putSpec); err != nil { kdErrors.HandleInternalError(response, err) return } if err := verber.Put(kind, ok, namespace, name, putSpec); err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeader(http.StatusCreated) } func (apiHandler *APIHandler) handleDeleteResource( request *restful.Request, response *restful.Response) { verber, err := apiHandler.cManager.VerberClient(request) if err != nil { kdErrors.HandleInternalError(response, err) return } kind := request.PathParameter("kind") namespace, ok := request.PathParameters()["namespace"] name := request.PathParameter("name") if err := verber.Delete(kind, ok, namespace, name); err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeader(http.StatusOK) } func (apiHandler *APIHandler) handleGetReplicationControllerPods(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") rc := request.PathParameter("replicationController") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := replicationcontroller.GetReplicationControllerPods(k8sClient, apiHandler.iManager.Metric().Client(), dataSelect, rc, namespace) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleCreateNamespace(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespaceSpec := new(ns.NamespaceSpec) if err := request.ReadEntity(namespaceSpec); err != nil { kdErrors.HandleInternalError(response, err) return } if err := ns.CreateNamespace(namespaceSpec, k8sClient); err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusCreated, namespaceSpec) } func (apiHandler *APIHandler) handleGetNamespaces(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } dataSelect := parseDataSelectPathParameter(request) result, err := ns.GetNamespaceList(k8sClient, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetNamespaceDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("name") result, err := ns.GetNamespaceDetail(k8sClient, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetNamespaceEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) result, err := event.GetNamespaceEvents(k8sClient, dataSelect, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleCreateImagePullSecret(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } spec := new(secret.ImagePullSecretSpec) if err := request.ReadEntity(spec); err != nil { kdErrors.HandleInternalError(response, err) return } result, err := secret.CreateSecret(k8sClient, spec) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusCreated, result) } func (apiHandler *APIHandler) handleGetSecretDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") result, err := secret.GetSecretDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetSecretList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } dataSelect := parseDataSelectPathParameter(request) namespace := parseNamespacePathParameter(request) result, err := secret.GetSecretList(k8sClient, namespace, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetConfigMapList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) result, err := configmap.GetConfigMapList(k8sClient, namespace, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetConfigMapDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("configmap") result, err := configmap.GetConfigMapDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPersistentVolumeList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } dataSelect := parseDataSelectPathParameter(request) result, err := persistentvolume.GetPersistentVolumeList(k8sClient, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPersistentVolumeDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("persistentvolume") result, err := persistentvolume.GetPersistentVolumeDetail(k8sClient, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPersistentVolumeClaimList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) result, err := persistentvolumeclaim.GetPersistentVolumeClaimList(k8sClient, namespace, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPersistentVolumeClaimDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") result, err := persistentvolumeclaim.GetPersistentVolumeClaimDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPodContainers(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("pod") result, err := container.GetPodContainers(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetReplicationControllerEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("replicationController") dataSelect := parseDataSelectPathParameter(request) result, err := event.GetResourceEvents(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetReplicationControllerServices(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("replicationController") dataSelect := parseDataSelectPathParameter(request) result, err := replicationcontroller.GetReplicationControllerServices(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetDaemonSetList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := daemonset.GetDaemonSetList(k8sClient, namespace, dataSelect, apiHandler.iManager.Metric().Client()) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetDaemonSetDetail( request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("daemonSet") result, err := daemonset.GetDaemonSetDetail(k8sClient, apiHandler.iManager.Metric().Client(), namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetDaemonSetPods(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("daemonSet") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := daemonset.GetDaemonSetPods(k8sClient, apiHandler.iManager.Metric().Client(), dataSelect, name, namespace) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetDaemonSetServices(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") daemonSet := request.PathParameter("daemonSet") dataSelect := parseDataSelectPathParameter(request) result, err := daemonset.GetDaemonSetServices(k8sClient, dataSelect, namespace, daemonSet) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetDaemonSetEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("daemonSet") dataSelect := parseDataSelectPathParameter(request) result, err := event.GetResourceEvents(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetHorizontalPodAutoscalerList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) result, err := horizontalpodautoscaler.GetHorizontalPodAutoscalerList(k8sClient, namespace, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetHorizontalPodAutoscalerDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("horizontalpodautoscaler") result, err := horizontalpodautoscaler.GetHorizontalPodAutoscalerDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetJobList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := job.GetJobList(k8sClient, namespace, dataSelect, apiHandler.iManager.Metric().Client()) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetJobDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := job.GetJobDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetJobPods(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := job.GetJobPods(k8sClient, apiHandler.iManager.Metric().Client(), dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetJobEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) result, err := job.GetJobEvents(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetCronJobList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := parseNamespacePathParameter(request) dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := cronjob.GetCronJobList(k8sClient, namespace, dataSelect, apiHandler.iManager.Metric().Client()) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetCronJobDetail(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) dataSelect.MetricQuery = dataselect.StandardMetrics result, err := cronjob.GetCronJobDetail(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetCronJobJobs(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") active := true if request.QueryParameter("active") == "false" { active = false } dataSelect := parseDataSelectPathParameter(request) result, err := cronjob.GetCronJobJobs(k8sClient, apiHandler.iManager.Metric().Client(), dataSelect, namespace, name, active) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetCronJobEvents(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") dataSelect := parseDataSelectPathParameter(request) result, err := cronjob.GetCronJobEvents(k8sClient, dataSelect, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleTriggerCronJob(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") name := request.PathParameter("name") err = cronjob.TriggerCronJob(k8sClient, namespace, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeader(http.StatusOK) } func (apiHandler *APIHandler) handleGetStorageClassList(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } dataSelect := parseDataSelectPathParameter(request) result, err := storageclass.GetStorageClassList(k8sClient, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetStorageClass(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("storageclass") result, err := storageclass.GetStorageClass(k8sClient, name) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetStorageClassPersistentVolumes(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("storageclass") dataSelect := parseDataSelectPathParameter(request) result, err := persistentvolume.GetStorageClassPersistentVolumes(k8sClient, name, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleGetPodPersistentVolumeClaims(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } name := request.PathParameter("pod") namespace := request.PathParameter("namespace") dataSelect := parseDataSelectPathParameter(request) result, err := persistentvolumeclaim.GetPodPersistentVolumeClaims(k8sClient, namespace, name, dataSelect) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleLogSource(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } resourceName := request.PathParameter("resourceName") resourceType := request.PathParameter("resourceType") namespace := request.PathParameter("namespace") logSources, err := logs.GetLogSources(k8sClient, namespace, resourceName, resourceType) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, logSources) } func (apiHandler *APIHandler) handleLogs(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") podID := request.PathParameter("pod") containerID := request.PathParameter("container") refTimestamp := request.QueryParameter("referenceTimestamp") if refTimestamp == "" { refTimestamp = logs.NewestTimestamp } refLineNum, err := strconv.Atoi(request.QueryParameter("referenceLineNum")) if err != nil { refLineNum = 0 } usePreviousLogs := request.QueryParameter("previous") == "true" offsetFrom, err1 := strconv.Atoi(request.QueryParameter("offsetFrom")) offsetTo, err2 := strconv.Atoi(request.QueryParameter("offsetTo")) logFilePosition := request.QueryParameter("logFilePosition") logSelector := logs.DefaultSelection if err1 == nil && err2 == nil { logSelector = &logs.Selection{ ReferencePoint: logs.LogLineId{ LogTimestamp: logs.LogTimestamp(refTimestamp), LineNum: refLineNum, }, OffsetFrom: offsetFrom, OffsetTo: offsetTo, LogFilePosition: logFilePosition, } } result, err := container.GetLogDetails(k8sClient, namespace, podID, containerID, logSelector, usePreviousLogs) if err != nil { kdErrors.HandleInternalError(response, err) return } response.WriteHeaderAndEntity(http.StatusOK, result) } func (apiHandler *APIHandler) handleLogFile(request *restful.Request, response *restful.Response) { k8sClient, err := apiHandler.cManager.Client(request) if err != nil { kdErrors.HandleInternalError(response, err) return } namespace := request.PathParameter("namespace") podID := request.PathParameter("pod") containerID := request.PathParameter("container") usePreviousLogs := request.QueryParameter("previous") == "true" logStream, err := container.GetLogFile(k8sClient, namespace, podID, containerID, usePreviousLogs) if err != nil { kdErrors.HandleInternalError(response, err) return } handleDownload(response, logStream) } // parseNamespacePathParameter parses namespace selector for list pages in path parameter. // The namespace selector is a comma separated list of namespaces that are trimmed. // No namespaces means "view all user namespaces", i.e., everything except kube-system. func parseNamespacePathParameter(request *restful.Request) *common.NamespaceQuery { namespace := request.PathParameter("namespace") namespaces := strings.Split(namespace, ",") var nonEmptyNamespaces []string for _, n := range namespaces { n = strings.Trim(n, " ") if len(n) > 0 { nonEmptyNamespaces = append(nonEmptyNamespaces, n) } } return common.NewNamespaceQuery(nonEmptyNamespaces) } func parsePaginationPathParameter(request *restful.Request) *dataselect.PaginationQuery { itemsPerPage, err := strconv.ParseInt(request.QueryParameter("itemsPerPage"), 10, 0) if err != nil { return dataselect.NoPagination } page, err := strconv.ParseInt(request.QueryParameter("page"), 10, 0) if err != nil { return dataselect.NoPagination } // Frontend pages start from 1 and backend starts from 0 return dataselect.NewPaginationQuery(int(itemsPerPage), int(page-1)) } func parseFilterPathParameter(request *restful.Request) *dataselect.FilterQuery { return dataselect.NewFilterQuery(strings.Split(request.QueryParameter("filterBy"), ",")) } // Parses query parameters of the request and returns a SortQuery object func parseSortPathParameter(request *restful.Request) *dataselect.SortQuery { return dataselect.NewSortQuery(strings.Split(request.QueryParameter("sortBy"), ",")) } // Parses query parameters of the request and returns a MetricQuery object func parseMetricPathParameter(request *restful.Request) *dataselect.MetricQuery { metricNamesParam := request.QueryParameter("metricNames") var metricNames []string if metricNamesParam != "" { metricNames = strings.Split(metricNamesParam, ",") } else { metricNames = nil } aggregationsParam := request.QueryParameter("aggregations") var rawAggregations []string if aggregationsParam != "" { rawAggregations = strings.Split(aggregationsParam, ",") } else { rawAggregations = nil } aggregationModes := metricapi.AggregationModes{} for _, e := range rawAggregations { aggregationModes = append(aggregationModes, metricapi.AggregationMode(e)) } return dataselect.NewMetricQuery(metricNames, aggregationModes) } // Parses query parameters of the request and returns a DataSelectQuery object func parseDataSelectPathParameter(request *restful.Request) *dataselect.DataSelectQuery { paginationQuery := parsePaginationPathParameter(request) sortQuery := parseSortPathParameter(request) filterQuery := parseFilterPathParameter(request) metricQuery := parseMetricPathParameter(request) return dataselect.NewDataSelectQuery(paginationQuery, sortQuery, filterQuery, metricQuery) }
To(apiHandler.handleGetReplicationControllerServices). Writes(resourceService.ServiceList{})) apiV1Ws.Route(
post_architect_emergencygroups_parameters.go
// Code generated by go-swagger; DO NOT EDIT. package architect // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "context" "net/http" "time" "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" "github.com/freman/genesysapi/models" ) // NewPostArchitectEmergencygroupsParams creates a new PostArchitectEmergencygroupsParams object // with the default values initialized. func NewPostArchitectEmergencygroupsParams() *PostArchitectEmergencygroupsParams { var () return &PostArchitectEmergencygroupsParams{ timeout: cr.DefaultTimeout, } } // NewPostArchitectEmergencygroupsParamsWithTimeout creates a new PostArchitectEmergencygroupsParams object // with the default values initialized, and the ability to set a timeout on a request func NewPostArchitectEmergencygroupsParamsWithTimeout(timeout time.Duration) *PostArchitectEmergencygroupsParams { var () return &PostArchitectEmergencygroupsParams{ timeout: timeout, } } // NewPostArchitectEmergencygroupsParamsWithContext creates a new PostArchitectEmergencygroupsParams object // with the default values initialized, and the ability to set a context for a request func NewPostArchitectEmergencygroupsParamsWithContext(ctx context.Context) *PostArchitectEmergencygroupsParams { var () return &PostArchitectEmergencygroupsParams{ Context: ctx, } } // NewPostArchitectEmergencygroupsParamsWithHTTPClient creates a new PostArchitectEmergencygroupsParams object // with the default values initialized, and the ability to set a custom HTTPClient for a request func NewPostArchitectEmergencygroupsParamsWithHTTPClient(client *http.Client) *PostArchitectEmergencygroupsParams { var () return &PostArchitectEmergencygroupsParams{ HTTPClient: client, } } /*PostArchitectEmergencygroupsParams contains all the parameters to send to the API endpoint for the post architect emergencygroups operation typically these are written to a http.Request */ type PostArchitectEmergencygroupsParams struct { /*Body*/ Body *models.EmergencyGroup timeout time.Duration Context context.Context HTTPClient *http.Client }
return o } // SetTimeout adds the timeout to the post architect emergencygroups params func (o *PostArchitectEmergencygroupsParams) SetTimeout(timeout time.Duration) { o.timeout = timeout } // WithContext adds the context to the post architect emergencygroups params func (o *PostArchitectEmergencygroupsParams) WithContext(ctx context.Context) *PostArchitectEmergencygroupsParams { o.SetContext(ctx) return o } // SetContext adds the context to the post architect emergencygroups params func (o *PostArchitectEmergencygroupsParams) SetContext(ctx context.Context) { o.Context = ctx } // WithHTTPClient adds the HTTPClient to the post architect emergencygroups params func (o *PostArchitectEmergencygroupsParams) WithHTTPClient(client *http.Client) *PostArchitectEmergencygroupsParams { o.SetHTTPClient(client) return o } // SetHTTPClient adds the HTTPClient to the post architect emergencygroups params func (o *PostArchitectEmergencygroupsParams) SetHTTPClient(client *http.Client) { o.HTTPClient = client } // WithBody adds the body to the post architect emergencygroups params func (o *PostArchitectEmergencygroupsParams) WithBody(body *models.EmergencyGroup) *PostArchitectEmergencygroupsParams { o.SetBody(body) return o } // SetBody adds the body to the post architect emergencygroups params func (o *PostArchitectEmergencygroupsParams) SetBody(body *models.EmergencyGroup) { o.Body = body } // WriteToRequest writes these params to a swagger request func (o *PostArchitectEmergencygroupsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { if err := r.SetTimeout(o.timeout); err != nil { return err } var res []error if o.Body != nil { if err := r.SetBodyParam(o.Body); err != nil { return err } } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
// WithTimeout adds the timeout to the post architect emergencygroups params func (o *PostArchitectEmergencygroupsParams) WithTimeout(timeout time.Duration) *PostArchitectEmergencygroupsParams { o.SetTimeout(timeout)
randomTile.tsx
import React from "react"; import { Pressable, Text, View } from "react-native"; import FastImage from "react-native-fast-image"; import { Button, Chip, Surface } from "react-native-paper"; import { RandomMediaInfo } from "../../../../Api/types"; import { ThemeColors } from "../../../../Components/types"; import { getScoreColor } from "../../../../utils"; type Props = { item:RandomMediaInfo; colors: ThemeColors; navigation: any; } const RandomTile = ({item, colors, navigation}:Props) => { return( <Surface style={{flex:1, backgroundColor:colors.card, marginVertical:10, marginHorizontal:15}}> <FastImage fallback source={{uri: item.bannerImage ?? item.coverImage.extraLarge}} style={{width:'100%', height:80}} resizeMode='cover' /> <View style={{flex:1, marginVertical:5, width:'100%', paddingHorizontal:10, flexDirection:'row'}}> <FastImage fallback source={{uri: item.coverImage.extraLarge}} style={{width:110, height:170, borderRadius:8}} resizeMode={'cover'} /> <View style={{flex:1, paddingHorizontal:10, alignItems:'center'}}> <Text numberOfLines={2} style={{color:colors.text, textAlign:'center', fontSize:18, fontWeight:'bold'}}>{item.title.userPreferred}</Text> <Text style={{color:colors.text, textTransform:(item.format === 'TV') ? undefined : 'capitalize'}}>{item.format} | {item.status}</Text> {(item.isAdult) ? <Text style={{color:'red', fontWeight:'bold'}}>NSFW</Text> : null} {(item.averageScore || item.meanScore) ? <Text style={{color:getScoreColor(item.averageScore ?? item.meanScore), fontWeight:'bold'}}>{item.averageScore ?? item.meanScore}</Text> : null} <View style={{flex:1, flexDirection:'row', justifyContent:'center', flexWrap:'wrap'}}> {item.genres?.map((genre, idx) => <Pressable key={idx} style={{ padding: 5, height: 30, alignSelf:'center', alignItems: 'flex-start', justifyContent: 'center', paddingHorizontal: 10, marginHorizontal: 5, marginVertical: 10, backgroundColor: colors.primary, borderRadius: 12 }}> <Text style={{ color: '#FFF' }}>{genre}</Text> </Pressable> )} </View> <View style={{alignSelf:'center'}}> <Button mode="outlined" onPress={() => navigation.navigate('Info', {id:item.id})} color={colors.primary} style={{width:200, borderColor:colors.primary}}>View</Button> </View> </View> </View> </Surface> ); }
export default RandomTile;
membuf.go
package membuf import ( "fmt" "io" "github.com/skillian/errors" ) const ( // pagePow2 is the only const that can be tweaked in this // implementation. pagePow2 = 15 // pageSize is 2 to the (pagePow2) pageSize = 1 << pagePow2 // pageMask is the page size minus one. Go requires 2's complement, so // pageSize - 1 should work? pageMask = pageSize - 1 ) var ( errInvalidWhence = errors.New("invalid seek origin") errSeekOutOfRange = errors.New("seek out of range") ) type page [pageSize]byte // Buffer is like a bytes.Buffer but keeps memory around so it supports Seeking. type Buffer struct { // pages is a slice of pages in the buffer pages []*page // pagei is the index into the buffer that has been seeked to. pagei int // lasti is the index of the end of the buffer. lasti int } func (b *Buffer) String() string { return fmt.Sprintf( "(*Buffer){pages: [%d]*page, pagei: %d, lasti: %d}", len(b.pages), b.pagei, b.lasti) } // Close the buffer is a no-op. func (b *Buffer) Close() error { return nil } func (b *Buffer) logData(name string) { p := make([]byte, b.lasti) for i := range b.pages { i *= pageSize pg := getBufferIndex(i).getPage(b) copy(p[i:], pg) } logger.Debug2("%s: Data: %v", name, p) } // Read implements io.Reader func (b *Buffer) Read(p []byte) (n int, err error) { defer b.logData("Read") for t := p; len(t) > 0; t = p[n:] { logger.Debug0(b.String()) pg := getBufferIndex(b.pagei).getPage(b) if bytesZero(pg) { logger.Warn0("bytes are zero") } m := copy(t, pg) b.pagei += m n += m logger.Debug0(b.String()) if len(pg) == 0 { err = io.EOF break } } return } // Write implements io.Writer. It always succeeds unless there's a panic from // running out of memory. func (b *Buffer) Write(p []byte) (n int, err error) { defer b.logData("Write") var newPage *page for s := p; len(s) > 0; s = p[n:] { logger.Debug0(b.String()) bi := getBufferIndex(b.pagei) var pg []byte if bi.pageIndex > 0 || bi.byteIndex > 0 { pg = (*b.pages[bi.pageIndex])[bi.byteIndex:] pg = pg[:cap(pg)] } if len(pg) == 0 { logger.Debug0("adding another page") newPage = new(page) b.pages = append(b.pages, newPage) pg = (*newPage)[:] } m := copy(pg, s) b.pagei += m n += m if b.pagei > b.lasti { b.lasti = b.pagei } } logger.Debug0(b.String()) return } // Seek implements io.Seeker func (b *Buffer) Seek(offset int64, whence int) (n int64, err error) { o := int(offset) var current int switch whence { case io.SeekCurrent: current = b.pagei + o case io.SeekStart: current = o case io.SeekEnd: current = b.lasti + o default: return 0, errInvalidWhence } err = b.setPageI(current) return int64(b.pagei), err } // getNextPage is like getCurrentPage but moves the page index past the // returned page so the next call returns new data. func (b *Buffer) getNextPage(i int) ([]byte, bool) { pg := getBufferIndex(i).getPage(b) if len(pg) == 0 { return nil, false } b.pagei += len(pg) return pg, true } func (b *Buffer) setPageI(o int) error { if o < 0 || o > b.lasti { return errors.ErrorfWithCause( errSeekOutOfRange, "cannot set offset to %v. Requires 0 <= offset <= %v", o, b.lasti) } b.pagei = o return nil } // bufferIndex separates a single index integer into its page index and then // to the index of the byte within the page. type bufferIndex struct { // pageIndex holds the index of the page in the buffer. pageIndex int // byteIndex holds the index of the byte within its page. byteIndex int } // getBufferIndex creates a 2-int tuple of a buffer's scalar index value into // the page index and then the offset within the page. func getBufferIndex(i int) bufferIndex
// getPage gets the page from the buffer that the index corresponds to and then // gets a slice of the page's bytes starting at the inner-page index. func (i bufferIndex) getPage(b *Buffer) []byte { if len(b.pages) == 0 { return nil } pagePtr := b.pages[i.pageIndex] pg := (*pagePtr)[i.byteIndex:] lasti := getBufferIndex(b.lasti) lastPage := lasti.pageIndex == i.pageIndex if lastPage { pg = pg[:lasti.byteIndex-i.byteIndex] } return pg } func (i bufferIndex) value() int { return ((i.pageIndex << 12) & ^pageMask) | (i.byteIndex & pageMask) }
{ return bufferIndex{i >> pagePow2, i & pageMask} }
exporter.go
/* Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metrics import ( "fmt" "net/http" "sync" "contrib.go.opencensus.io/exporter/stackdriver" "go.opencensus.io/exporter/prometheus" "go.opencensus.io/stats/view" "go.uber.org/zap" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" ) var ( curMetricsExporter view.Exporter curMetricsConfig *metricsConfig curPromSrv *http.Server metricsMux sync.Mutex ) // newMetricsExporter gets a metrics exporter based on the config. func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) error { // If there is a Prometheus Exporter server running, stop it. resetCurPromSrv() ce := getCurMetricsExporter() if ce != nil { // UnregisterExporter is idempotent and it can be called multiple times for the same exporter // without side effects. view.UnregisterExporter(ce) } var err error var e view.Exporter switch config.backendDestination { case Stackdriver: e, err = newStackdriverExporter(config, logger) case Prometheus: e, err = newPrometheusExporter(config, logger) default: err = fmt.Errorf("Unsupported metrics backend %v", config.backendDestination) } if err != nil { return err } existingConfig := getCurMetricsConfig() setCurMetricsExporterAndConfig(e, config) logger.Infof("Successfully updated the metrics exporter; old config: %v; new config %v", existingConfig, config) return nil } func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { e, err := stackdriver.NewExporter(stackdriver.Options{ ProjectID: config.stackdriverProjectID, MetricPrefix: config.domain + "/" + config.component, Resource: &monitoredrespb.MonitoredResource{ Type: "global", }, DefaultMonitoringLabels: &stackdriver.Labels{}, }) if err != nil { logger.Error("Failed to create the Stackdriver exporter.", zap.Error(err)) return nil, err } logger.Infof("Created Opencensus Stackdriver exporter with config %v", config) return e, nil } func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { e, err := prometheus.NewExporter(prometheus.Options{Namespace: config.component}) if err != nil { logger.Error("Failed to create the Prometheus exporter.", zap.Error(err)) return nil, err } logger.Infof("Created Opencensus Prometheus exporter with config: %v. Start the server for Prometheus exporter.", config) // Start the server for Prometheus scraping go func() { srv := startNewPromSrv(e) srv.ListenAndServe() }() return e, nil } func getCurPromSrv() *http.Server { metricsMux.Lock() defer metricsMux.Unlock() return curPromSrv } func resetCurPromSrv() { metricsMux.Lock() defer metricsMux.Unlock() if curPromSrv != nil { curPromSrv.Close() curPromSrv = nil } } func startNewPromSrv(e *prometheus.Exporter) *http.Server { sm := http.NewServeMux() sm.Handle("/metrics", e) metricsMux.Lock() defer metricsMux.Unlock() if curPromSrv != nil { curPromSrv.Close() }
Handler: sm, } return curPromSrv } func getCurMetricsExporter() view.Exporter { metricsMux.Lock() defer metricsMux.Unlock() return curMetricsExporter } func setCurMetricsExporterAndConfig(e view.Exporter, c *metricsConfig) { metricsMux.Lock() defer metricsMux.Unlock() view.RegisterExporter(e) if c != nil { view.SetReportingPeriod(c.reportingPeriod) } else { // Setting to 0 enables the default behavior. view.SetReportingPeriod(0) } curMetricsExporter = e curMetricsConfig = c } func getCurMetricsConfig() *metricsConfig { metricsMux.Lock() defer metricsMux.Unlock() return curMetricsConfig }
curPromSrv = &http.Server{ Addr: ":9090",
gcs_cache.go
package gcs_cache import ( "context" "io" "io/ioutil" "log" "net/http" "path/filepath" "sync" "time" "cloud.google.com/go/storage" "github.com/buildbuddy-io/buildbuddy/server/interfaces" "github.com/buildbuddy-io/buildbuddy/server/remote_cache/digest" "github.com/buildbuddy-io/buildbuddy/server/util/cache_metrics" "github.com/buildbuddy-io/buildbuddy/server/util/prefix" "github.com/buildbuddy-io/buildbuddy/server/util/status" "golang.org/x/sync/errgroup" "google.golang.org/api/googleapi" "google.golang.org/api/option" repb "github.com/buildbuddy-io/buildbuddy/proto/remote_execution" ) const ( maxNumRetries = 3 ) var ( cacheLabels = cache_metrics.MakeCacheLabels(cache_metrics.CloudCacheTier, "gcs") ) type GCSCache struct { gcsClient *storage.Client bucketHandle *storage.BucketHandle projectID string prefix string ttlInDays int64 } func NewGCSCache(bucketName, projectID string, ageInDays int64, opts ...option.ClientOption) (*GCSCache, error) { ctx := context.Background() gcsClient, err := storage.NewClient(ctx, opts...) if err != nil { return nil, err } g := &GCSCache{ gcsClient: gcsClient, projectID: projectID, ttlInDays: ageInDays, } if err := g.createBucketIfNotExists(ctx, bucketName); err != nil { return nil, err } if err := g.setBucketTTL(ctx, bucketName, ageInDays); err != nil { return nil, err } log.Printf("Initialized GCS cache with bucket %q, ttl (days): %d", bucketName, ageInDays) return g, nil } func (g *GCSCache) createBucketIfNotExists(ctx context.Context, bucketName string) error { if _, err := g.gcsClient.Bucket(bucketName).Attrs(ctx); err != nil { log.Printf("Creating storage bucket: %s", bucketName) g.bucketHandle = g.gcsClient.Bucket(bucketName) return g.bucketHandle.Create(ctx, g.projectID, nil) } g.bucketHandle = g.gcsClient.Bucket(bucketName) return nil } func (g *GCSCache) setBucketTTL(ctx context.Context, bucketName string, ageInDays int64) error { attrs, err := g.gcsClient.Bucket(bucketName).Attrs(ctx) if err != nil { return err } for _, rule := range attrs.Lifecycle.Rules { if rule.Condition.AgeInDays == ageInDays && rule.Action.Type == storage.DeleteAction { return nil } } lc := storage.Lifecycle{ Rules: []storage.LifecycleRule{ { Condition: storage.LifecycleCondition{ AgeInDays: ageInDays, }, Action: storage.LifecycleAction{ Type: storage.DeleteAction, }, }, }, } // Update the bucket TTL, regardless of whatever value is set. _, err = g.gcsClient.Bucket(bucketName).Update(ctx, storage.BucketAttrsToUpdate{Lifecycle: &lc}) return err } func (g *GCSCache) key(ctx context.Context, d *repb.Digest) (string, error) { hash, err := digest.Validate(d) if err != nil { return "", err } userPrefix, err := prefix.UserPrefixFromContext(ctx) if err != nil { return "", err } return userPrefix + g.prefix + hash, nil } func (g *GCSCache) WithPrefix(prefix string) interfaces.Cache { newPrefix := filepath.Join(append(filepath.SplitList(g.prefix), prefix)...) if len(newPrefix) > 0 && newPrefix[len(newPrefix)-1] != '/' { newPrefix += "/" } return &GCSCache{ gcsClient: g.gcsClient, bucketHandle: g.bucketHandle, projectID: g.projectID, ttlInDays: g.ttlInDays, prefix: newPrefix, } } func (g *GCSCache) WithIsolation(ctx context.Context, cacheType interfaces.CacheType, remoteInstanceName string) (interfaces.Cache, error) { newPrefix := filepath.Join(remoteInstanceName, cacheType.Prefix()) if len(newPrefix) > 0 && newPrefix[len(newPrefix)-1] != '/' { newPrefix += "/" } return &GCSCache{ gcsClient: g.gcsClient, bucketHandle: g.bucketHandle, projectID: g.projectID, ttlInDays: g.ttlInDays, prefix: newPrefix, }, nil } func (g *GCSCache) Get(ctx context.Context, d *repb.Digest) ([]byte, error) { k, err := g.key(ctx, d) if err != nil { return nil, err } reader, err := g.bucketHandle.Object(k).NewReader(ctx) if err != nil { if err == storage.ErrObjectNotExist { return nil, status.NotFoundErrorf("Digest '%s/%d' not found in cache", d.GetHash(), d.GetSizeBytes()) } return nil, err } timer := cache_metrics.NewCacheTimer(cacheLabels) b, err := ioutil.ReadAll(reader) timer.ObserveGet(len(b), err) // Note, if we decide to retry reads in the future, be sure to // add a new metric for retry count. return b, err } func (g *GCSCache) GetMulti(ctx context.Context, digests []*repb.Digest) (map[*repb.Digest][]byte, error) { lock := sync.RWMutex{} // protects(foundMap) foundMap := make(map[*repb.Digest][]byte, len(digests)) eg, ctx := errgroup.WithContext(ctx) for _, d := range digests { fetchFn := func(d *repb.Digest) { eg.Go(func() error { data, err := g.Get(ctx, d) if err != nil { return err } lock.Lock() defer lock.Unlock() foundMap[d] = data return nil }) } fetchFn(d) } if err := eg.Wait(); err != nil { return nil, err } return foundMap, nil } func
(err error) error { if err != nil { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == http.StatusPreconditionFailed { return nil } // When building with some languages, like Java, certain // files like MANIFEST files which are identical across // all actions can be uploaded. Many concurrent actions // means that these files can be written simultaneously // which triggers http.StatusTooManyRequests. Because // the cache is a CAS, we assume that writing the same // hash over and over again is just writing the same // file, and we swallow the error here. if gerr.Code == http.StatusTooManyRequests { return nil } } } return err } func (g *GCSCache) Set(ctx context.Context, d *repb.Digest, data []byte) error { k, err := g.key(ctx, d) if err != nil { return err } numAttempts := 0 for { obj := g.bucketHandle.Object(k) writer := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx) setChunkSize(d, writer) timer := cache_metrics.NewCacheTimer(cacheLabels) if _, err = writer.Write(data); err == nil { err = swallowGCSAlreadyExistsError(writer.Close()) } timer.ObserveSet(len(data), err) numAttempts++ if !isRetryableGCSError(err) || numAttempts > maxNumRetries { break } } cache_metrics.RecordSetRetries(cacheLabels, numAttempts-1) return err } func (g *GCSCache) SetMulti(ctx context.Context, kvs map[*repb.Digest][]byte) error { eg, ctx := errgroup.WithContext(ctx) for d, data := range kvs { setFn := func(d *repb.Digest, data []byte) { eg.Go(func() error { return g.Set(ctx, d, data) }) } setFn(d, data) } if err := eg.Wait(); err != nil { return err } return nil } func (g *GCSCache) Delete(ctx context.Context, d *repb.Digest) error { k, err := g.key(ctx, d) if err != nil { return err } timer := cache_metrics.NewCacheTimer(cacheLabels) err = g.bucketHandle.Object(k).Delete(ctx) timer.ObserveDelete(err) // Note, if we decide to retry deletions in the future, be sure to // add a new metric for retry count. return err } func (g *GCSCache) bumpTTLIfStale(ctx context.Context, key string, t time.Time) bool { if int64(time.Since(t).Hours()) < 24*g.ttlInDays/2 { return true } obj := g.bucketHandle.Object(key) _, err := obj.CopierFrom(obj).Run(ctx) if err == storage.ErrObjectNotExist { return false } if err != nil { log.Printf("Error bumping TTL for key %s: %s", key, err.Error()) } return true } func (g *GCSCache) Contains(ctx context.Context, d *repb.Digest) (bool, error) { k, err := g.key(ctx, d) if err != nil { return false, err } finalErr := error(nil) numAttempts := 0 for { timer := cache_metrics.NewCacheTimer(cacheLabels) attrs, err := g.bucketHandle.Object(k).Attrs(ctx) timer.ObserveContains(err) numAttempts++ finalErr = err if err == storage.ErrObjectNotExist { return false, nil } else if err == nil { return g.bumpTTLIfStale(ctx, k, attrs.Created), nil } else if isRetryableGCSError(err) { log.Printf("Retrying GCS exists, err: %s", err.Error()) continue } break } cache_metrics.RecordSetRetries(cacheLabels, numAttempts-1) return false, finalErr } func (g *GCSCache) ContainsMulti(ctx context.Context, digests []*repb.Digest) (map[*repb.Digest]bool, error) { lock := sync.RWMutex{} // protects(foundMap) foundMap := make(map[*repb.Digest]bool, len(digests)) eg, ctx := errgroup.WithContext(ctx) for _, d := range digests { fetchFn := func(d *repb.Digest) { eg.Go(func() error { exists, err := g.Contains(ctx, d) if err != nil { return err } lock.Lock() defer lock.Unlock() foundMap[d] = exists return nil }) } fetchFn(d) } if err := eg.Wait(); err != nil { return nil, err } return foundMap, nil } func (g *GCSCache) Reader(ctx context.Context, d *repb.Digest, offset int64) (io.ReadCloser, error) { k, err := g.key(ctx, d) if err != nil { return nil, err } reader, err := g.bucketHandle.Object(k).NewReader(ctx) if err != nil { if err == storage.ErrObjectNotExist { return nil, status.NotFoundErrorf("Digest '%s/%d' not found in cache", d.GetHash(), d.GetSizeBytes()) } return nil, err } timer := cache_metrics.NewCacheTimer(cacheLabels) return io.NopCloser(timer.NewInstrumentedReader(reader, d.GetSizeBytes())), nil } func isRetryableGCSError(err error) bool { if err != nil { if gerr, ok := err.(*googleapi.Error); ok { switch gerr.Code { case http.StatusServiceUnavailable: // 503 case http.StatusBadGateway: // 502 log.Printf("Saw a retryable error: %s", err.Error()) return true default: return false } } } return false } type gcsDedupingWriteCloser struct { io.WriteCloser timer *cache_metrics.CacheTimer size int64 } func (wc *gcsDedupingWriteCloser) Write(in []byte) (int, error) { n, err := wc.WriteCloser.Write(in) numRetries := 0 for isRetryableGCSError(err) && numRetries < maxNumRetries { log.Printf("Retrying GCS write after error: %s", err.Error()) numRetries++ n, err = wc.WriteCloser.Write(in) } cache_metrics.RecordWriteRetries(cacheLabels, numRetries) return n, err } func (wc *gcsDedupingWriteCloser) Close() error { return swallowGCSAlreadyExistsError(wc.WriteCloser.Close()) } func setChunkSize(d *repb.Digest, w *storage.Writer) { switch size := d.GetSizeBytes(); { case size < 8*1000*1000: { w.ChunkSize = int(size) } default: w.ChunkSize = googleapi.DefaultUploadChunkSize } } func (g *GCSCache) Writer(ctx context.Context, d *repb.Digest) (io.WriteCloser, error) { k, err := g.key(ctx, d) if err != nil { return nil, err } obj := g.bucketHandle.Object(k) writer := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx) setChunkSize(d, writer) timer := cache_metrics.NewCacheTimer(cacheLabels) return &gcsDedupingWriteCloser{ WriteCloser: writer, timer: timer, size: d.GetSizeBytes(), }, nil } func (g *GCSCache) Start() error { return nil } func (g *GCSCache) Stop() error { return nil }
swallowGCSAlreadyExistsError
delete_faucet_test.go
// +build unit package faucets import ( "context" "testing" "github.com/consensys/orchestrate/services/api/store/models/testutils" "github.com/consensys/orchestrate/pkg/toolkit/app/multitenancy" "github.com/consensys/orchestrate/pkg/errors" "github.com/consensys/orchestrate/services/api/store/mocks" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) func
(t *testing.T) { ctx := context.Background() ctrl := gomock.NewController(t) defer ctrl.Finish() mockDB := mocks.NewMockDB(ctrl) faucetAgent := mocks.NewMockFaucetAgent(ctrl) mockDB.EXPECT().Faucet().Return(faucetAgent).AnyTimes() userInfo := multitenancy.NewUserInfo("tenantOne", "username") usecase := NewDeleteFaucetUseCase(mockDB) t.Run("should execute use case successfully", func(t *testing.T) { faucetModel := testutils.FakeFaucetModel() faucetAgent.EXPECT().FindOneByUUID(gomock.Any(), "uuid", userInfo.AllowedTenants).Return(faucetModel, nil) faucetAgent.EXPECT().Delete(gomock.Any(), faucetModel, userInfo.AllowedTenants).Return(nil) err := usecase.Execute(ctx, "uuid", userInfo) assert.NoError(t, err) }) t.Run("should fail with same error if findOne faucet fails", func(t *testing.T) { expectedErr := errors.NotFoundError("error") faucetAgent.EXPECT().FindOneByUUID(gomock.Any(), "uuid", userInfo.AllowedTenants).Return(nil, expectedErr) err := usecase.Execute(ctx, "uuid", userInfo) assert.Error(t, err) assert.Equal(t, errors.FromError(expectedErr).ExtendComponent(deleteFaucetComponent), err) }) t.Run("should fail with same error if delete faucet fails", func(t *testing.T) { expectedErr := errors.NotFoundError("error") faucetAgent.EXPECT().FindOneByUUID(gomock.Any(), "uuid", userInfo.AllowedTenants).Return(testutils.FakeFaucetModel(), nil) faucetAgent.EXPECT().Delete(gomock.Any(), gomock.Any(), userInfo.AllowedTenants).Return(expectedErr) err := usecase.Execute(ctx, "uuid", userInfo) assert.Error(t, err) assert.Equal(t, errors.FromError(expectedErr).ExtendComponent(deleteFaucetComponent), err) }) }
TestDeleteFaucet_Execute
static-glitz-core.ts
import { Style, Query } from './shared'; export function media(list: Query | string, style: Style): Style { return { [`@media ${typeof list === 'string' ? list : query(list)}`]: style }; } export function query(list: Query): string { const results = Object.keys(list).map(feature => { const value = list[feature]; return value === true ? `(${hyphenateProperty(feature)})` : `(${hyphenateProperty(feature)}: ${value})`; }); return results.join(' and '); } const hyphenateRegex = /(?:^(ms|moz|webkit))|[A-Z]/g; export function
(property: string) { return property.replace(hyphenateRegex, '-$&').toLowerCase(); } export function selector(selectors: string | string[], style: Style): Style { // TODO Pseudo validation return typeof selectors === 'string' ? { [selectors]: style } : selectors.reduce((acc, value) => ({ ...acc, [value]: style }), {}); }
hyphenateProperty
relation.go
package model import ( "encoding/json" "fmt" "sort" "github.com/google/go-cmp/cmp" jsonutils "github.com/keboola/keboola-as-code/internal/pkg/json" "github.com/keboola/keboola-as-code/internal/pkg/utils" "github.com/keboola/keboola-as-code/internal/pkg/utils/orderedmap" ) const ( VariablesForRelType = RelationType(`variablesFor`) VariablesFromRelType = RelationType(`variablesFrom`) VariablesValuesForRelType = RelationType(`variablesValuesFor`) VariablesValuesFromRelType = RelationType(`variablesValuesFrom`) SharedCodeVariablesForRelType = RelationType(`sharedCodeVariablesFor`) SharedCodeVariablesFromRelType = RelationType(`sharedCodeVariablesFrom`) SchedulerForRelType = RelationType(`schedulerFor`) UsedInOrchestratorRelType = RelationType(`usedInOrchestrator`) UsedInConfigInputMappingRelType = RelationType(`usedInConfigInputMapping`) UsedInRowInputMappingRelType = RelationType(`usedInRowInputMapping`) ) // OneToXRelations gets relations that can be defined on an object only once. func OneToXRelations() []RelationType { return []RelationType{ VariablesForRelType, VariablesFromRelType, VariablesValuesForRelType, VariablesValuesFromRelType, SchedulerForRelType, } } type RelationType string func (t RelationType) String() string { return string(t) } func (t RelationType) Type() RelationType { return t } // Relation between objects, eg. config <-> config. type Relation interface { Type() RelationType Desc() string // human-readable description Key() string // unique key within the object on which the relation is defined, for sorting and comparing ParentKey(relationDefinedOn Key) (Key, error) // if relation type is parent <-> child, then parent key is returned, otherwise nil IsDefinedInManifest() bool // if true, relation will be present in the manifest IsDefinedInApi() bool // if true, relation will be present in API calls NewOtherSideRelation(relationDefinedOn Object, allObjects Objects) (otherSide Key, relation Relation, err error) } type Relations []Relation type RelationsBySide struct { InManifest Relations InApi Relations } func (v Relations) ParentKey(source Key) (Key, error) { var parents []Key for _, r := range v { if parent, err := r.ParentKey(source); err != nil { return nil, err } else if parent != nil { parents = append(parents, parent) } } // Found parent defined via Relations if len(parents) == 1 { return parents[0], nil } // Multiple parents are forbidden if len(parents) > 1 { return nil, fmt.Errorf(`unexpected state: multiple parents defined by "relations" in %s`, source.Desc()) } return nil, nil } func (v Relations) RelationsBySide() RelationsBySide { return RelationsBySide{ InManifest: v.OnlyStoredInManifest(), InApi: v.OnlyStoredInApi(), } } func (v Relations) OnlyStoredInApi() Relations { var out Relations for _, relation := range v { if relation.IsDefinedInApi() { out = append(out, relation) } } return out } func (v Relations) OnlyStoredInManifest() Relations { var out Relations for _, relation := range v { if relation.IsDefinedInManifest() { out = append(out, relation) } } return out } func (v Relations) Equal(v2 Relations) bool { onlyIn1, onlyIn2 := v.Diff(v2) return onlyIn1 == nil && onlyIn2 == nil } func (v Relations) Diff(v2 Relations) (onlyIn1 Relations, onlyIn2 Relations) { v1Map := make(map[string]bool) v2Map := make(map[string]bool) for _, r := range v { v1Map[r.Key()] = true } for _, r := range v2 { v2Map[r.Key()] = true } for _, r := range v { if !v2Map[r.Key()] { onlyIn1 = append(onlyIn1, r) } } for _, r := range v2 { if !v1Map[r.Key()] { onlyIn2 = append(onlyIn2, r) } } onlyIn1.Sort() onlyIn2.Sort() return onlyIn1, onlyIn2 } func (v Relations) Sort() { sort.SliceStable(v, func(i, j int) bool { return v[i].Key() < v[j].Key() }) } func (v Relations) Has(t RelationType) bool { for _, relation := range v { if relation.Type() == t { return true } } return false } func (v Relations) GetByType(t RelationType) Relations { var out Relations for _, relation := range v { if relation.Type() == t { out = append(out, relation) } } return out } func (v Relations) GetOneByType(t RelationType) (Relation, error) { relations := v.GetByType(t) if len(relations) == 0 { return nil, nil } else if len(relations) > 1 { errors := utils.NewMultiError() errors.Append(fmt.Errorf(`only one relation "%s" expected, but found %d`, t, len(relations))) for _, relation := range relations { errors.Append(fmt.Errorf(` - %s`, jsonutils.MustEncodeString(relation, false))) } return nil, errors } return relations[0], nil } func (v Relations) GetAllByType() map[RelationType]Relations { out := make(map[RelationType]Relations) for _, relation := range v { out[relation.Type()] = append(out[relation.Type()], relation) } return out } func (v *Relations) Add(relation Relation) { for _, item := range *v { if cmp.Equal(item, relation) { // Relation is already present return } } *v = append(*v, relation) } func (v *Relations) Remove(toDelete Relation) { var out Relations for _, relation := range *v { if relation != toDelete { out = append(out, relation) } } *v = out } func (v *Relations) RemoveByType(t RelationType) { var out Relations for _, relation := range *v { if relation.Type() != t { out = append(out, relation) } } *v = out } func (v *Relations) UnmarshalJSON(data []byte) error { var raw []json.RawMessage if err := json.Unmarshal(data, &raw); err != nil { return err } for _, item := range raw { var obj map[string]interface{} if err := json.Unmarshal(item, &obj); err != nil { return err } // Get type value typeRaw, ok := obj["type"] if !ok { return fmt.Errorf(`missing "type" field in relation definition`) } typeStr, ok := typeRaw.(string) if !ok { return fmt.Errorf(`field "type" must be string in relation definition, "%T" given`, typeStr) } // Create instance from type value, err := newEmptyRelation(RelationType(typeStr)) if err != nil { return fmt.Errorf(`invalid "type" value "%s" in relation definition`, typeStr) } // Unmarshal to concrete sub-type of the Relation if err := json.Unmarshal(item, value); err != nil { return err } // Validate, only manifest side should be present in JSON if !value.IsDefinedInManifest() { return fmt.Errorf(`unexpected state: relation "%T" should not be present in JSON, it is not a manifest side`, value) } *v = append(*v, value) } return nil } func (v Relations) MarshalJSON() ([]byte, error) { var out []*orderedmap.OrderedMap for _, relation := range v { // Validate, only manifest side should be serialized to JSON if !relation.IsDefinedInManifest() { return nil, fmt.Errorf(`unexpected state: relation "%T" should not be serialized to JSON, it is not an manifest side`, relation) } // Convert struct -> map relationMap := orderedmap.New() if err := jsonutils.ConvertByJson(relation, &relationMap); err != nil { return nil, err } relationMap.Set(`type`, relation.Type().String()) out = append(out, relationMap) } return json.Marshal(out) } func newEmptyRelation(t RelationType) (Relation, error)
{ switch t { case VariablesForRelType: return &VariablesForRelation{}, nil case VariablesFromRelType: return &VariablesFromRelation{}, nil case VariablesValuesForRelType: return &VariablesValuesForRelation{}, nil case VariablesValuesFromRelType: return &VariablesValuesFromRelation{}, nil case SharedCodeVariablesForRelType: return &SharedCodeVariablesForRelation{}, nil case SharedCodeVariablesFromRelType: return &SharedCodeVariablesFromRelation{}, nil case SchedulerForRelType: return &SchedulerForRelation{}, nil case UsedInOrchestratorRelType: return &UsedInOrchestratorRelation{}, nil case UsedInConfigInputMappingRelType: return &UsedInConfigInputMappingRelation{}, nil case UsedInRowInputMappingRelType: return &UsedInRowInputMappingRelation{}, nil default: return nil, fmt.Errorf(`unexpected RelationType "%s"`, t) } }
subprocess.py
from __future__ import absolute_import import sys import os import errno import types import gc import signal import traceback from gevent.event import AsyncResult from gevent.hub import get_hub, linkproxy, sleep, getcurrent from gevent.fileobject import FileObject from gevent.greenlet import Greenlet, joinall spawn = Greenlet.spawn import subprocess as __subprocess__ # Standard functions and classes that this module re-implements in a gevent-aware way. __implements__ = ['Popen', 'call', 'check_call', 'check_output'] # Standard functions and classes that this module re-imports. __imports__ = ['PIPE', 'STDOUT', 'CalledProcessError', # Windows: 'CREATE_NEW_CONSOLE', 'CREATE_NEW_PROCESS_GROUP', 'STD_INPUT_HANDLE', 'STD_OUTPUT_HANDLE', 'STD_ERROR_HANDLE', 'SW_HIDE', 'STARTF_USESTDHANDLES', 'STARTF_USESHOWWINDOW'] __extra__ = ['MAXFD', '_eintr_retry_call', 'STARTUPINFO', 'pywintypes', 'list2cmdline', '_subprocess', # Python 2.5 does not have _subprocess, so we don't use it 'WAIT_OBJECT_0', 'WaitForSingleObject', 'GetExitCodeProcess', 'GetStdHandle', 'CreatePipe', 'DuplicateHandle', 'GetCurrentProcess', 'DUPLICATE_SAME_ACCESS', 'GetModuleFileName', 'GetVersion', 'CreateProcess', 'INFINITE', 'TerminateProcess'] for name in __imports__[:]: try: value = getattr(__subprocess__, name) globals()[name] = value except AttributeError: __imports__.remove(name) __extra__.append(name) if sys.version_info[:2] <= (2, 6): __implements__.remove('check_output') __extra__.append('check_output') _subprocess = getattr(__subprocess__, '_subprocess', None) _NONE = object() for name in __extra__[:]: if name in globals(): continue value = _NONE try: value = getattr(__subprocess__, name) except AttributeError: if _subprocess is not None: try: value = getattr(_subprocess, name) except AttributeError: pass if value is _NONE: __extra__.remove(name) else: globals()[name] = value __all__ = __implements__ + __imports__ mswindows = sys.platform == 'win32' if mswindows: import msvcrt else: import fcntl import pickle from gevent import monkey fork = monkey.get_original('os', 'fork') def call(*popenargs, **kwargs): """Run command with arguments. Wait for command to complete, then return the returncode attribute. The arguments are the same as for the Popen constructor. Example: retcode = call(["ls", "-l"]) """ return Popen(*popenargs, **kwargs).wait() def check_call(*popenargs, **kwargs): """Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. Example: check_call(["ls", "-l"]) """ retcode = call(*popenargs, **kwargs) if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise CalledProcessError(retcode, cmd) return 0 def check_output(*popenargs, **kwargs): r"""Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: >>> check_output(["ls", "-1", "/dev/null"]) '/dev/null\n' The stdout argument is not allowed as it is used internally. To capture standard error in the result, use stderr=STDOUT. >>> check_output(["/bin/sh", "-c", "echo hello world"], stderr=STDOUT) 'hello world\n' """ if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = Popen(stdout=PIPE, *popenargs, **kwargs) output = process.communicate()[0] retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] ex = CalledProcessError(retcode, cmd) # on Python 2.6 and older CalledProcessError does not accept 'output' argument ex.output = output raise ex return output class Popen(object): def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0, threadpool=None): """Create new Popen instance.""" if not isinstance(bufsize, (int, long)): raise TypeError("bufsize must be an integer") hub = get_hub() if mswindows: if preexec_fn is not None: raise ValueError("preexec_fn is not supported on Windows " "platforms") if close_fds and (stdin is not None or stdout is not None or stderr is not None): raise ValueError("close_fds is not supported on Windows " "platforms if you redirect stdin/stdout/stderr") if threadpool is None: threadpool = hub.threadpool self.threadpool = threadpool self._waiting = False else: # POSIX if startupinfo is not None: raise ValueError("startupinfo is only supported on Windows " "platforms") if creationflags != 0: raise ValueError("creationflags is only supported on Windows " "platforms") assert threadpool is None self._loop = hub.loop self.stdin = None self.stdout = None self.stderr = None self.pid = None self.returncode = None self.universal_newlines = universal_newlines self.result = AsyncResult() # Input and output objects. The general principle is like # this: # # Parent Child # ------ ----- # p2cwrite ---stdin---> p2cread # c2pread <--stdout--- c2pwrite # errread <--stderr--- errwrite # # On POSIX, the child objects are file descriptors. On # Windows, these are Windows file handles. The parent objects # are file descriptors on both platforms. The parent objects # are None when not using PIPEs. The child objects are None # when not redirecting. (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr) self._execute_child(args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) if mswindows: if p2cwrite is not None: p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) if c2pread is not None: c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) if errread is not None: errread = msvcrt.open_osfhandle(errread.Detach(), 0) if p2cwrite is not None: self.stdin = FileObject(p2cwrite, 'wb') if c2pread is not None: if universal_newlines: self.stdout = FileObject(c2pread, 'rU') else: self.stdout = FileObject(c2pread, 'rb') if errread is not None: if universal_newlines: self.stderr = FileObject(errread, 'rU') else: self.stderr = FileObject(errread, 'rb') def __repr__(self): return '<%s at 0x%x pid=%r returncode=%r>' % (self.__class__.__name__, id(self), self.pid, self.returncode) def _on_child(self, watcher): watcher.stop() status = watcher.rstatus if os.WIFSIGNALED(status): self.returncode = -os.WTERMSIG(status) else: self.returncode = os.WEXITSTATUS(status) self.result.set(self.returncode) def communicate(self, input=None): """Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child. communicate() returns a tuple (stdout, stderr).""" greenlets = [] if self.stdin: greenlets.append(spawn(write_and_close, self.stdin, input)) if self.stdout: stdout = spawn(self.stdout.read) greenlets.append(stdout) else: stdout = None if self.stderr: stderr = spawn(self.stderr.read) greenlets.append(stderr) else: stderr = None joinall(greenlets) if self.stdout: self.stdout.close() if self.stderr: self.stderr.close() self.wait() return (None if stdout is None else stdout.value or '', None if stderr is None else stderr.value or '') def poll(self): return self._internal_poll() if mswindows: # # Windows methods # def _get_handles(self, stdin, stdout, stderr): """Construct and return tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ if stdin is None and stdout is None and stderr is None: return (None, None, None, None, None, None) p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: p2cread = GetStdHandle(STD_INPUT_HANDLE) if p2cread is None: p2cread, _ = CreatePipe(None, 0) elif stdin == PIPE: p2cread, p2cwrite = CreatePipe(None, 0) elif isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) else: # Assuming file-like object p2cread = msvcrt.get_osfhandle(stdin.fileno()) p2cread = self._make_inheritable(p2cread) if stdout is None: c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE) if c2pwrite is None: _, c2pwrite = CreatePipe(None, 0) elif stdout == PIPE: c2pread, c2pwrite = CreatePipe(None, 0) elif isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) else: # Assuming file-like object c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) c2pwrite = self._make_inheritable(c2pwrite) if stderr is None: errwrite = GetStdHandle(STD_ERROR_HANDLE) if errwrite is None: _, errwrite = CreatePipe(None, 0) elif stderr == PIPE: errread, errwrite = CreatePipe(None, 0) elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) else: # Assuming file-like object errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _make_inheritable(self, handle): """Return a duplicate of handle, which is inheritable""" return DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(), 0, 1, DUPLICATE_SAME_ACCESS) def _find_w9xpopen(self): """Find and return absolut path to w9xpopen.exe""" w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)), "w9xpopen.exe") if not os.path.exists(w9xpopen): # Eeek - file-not-found - possibly an embedding # situation - see if we can locate it in sys.exec_prefix w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix), "w9xpopen.exe") if not os.path.exists(w9xpopen): raise RuntimeError("Cannot locate w9xpopen.exe, which is " "needed for Popen to work with your " "shell or platform.") return w9xpopen def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite):
def _internal_poll(self): """Check if child process has terminated. Returns returncode attribute. """ if self.returncode is None: if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0: self.returncode = GetExitCodeProcess(self._handle) self.result.set(self.returncode) return self.returncode def rawlink(self, callback): if not self.result.ready() and not self._waiting: self._waiting = True Greenlet.spawn(self._wait) self.result.rawlink(linkproxy(callback, self)) # XXX unlink def _blocking_wait(self): WaitForSingleObject(self._handle, INFINITE) self.returncode = GetExitCodeProcess(self._handle) return self.returncode def _wait(self): self.threadpool.spawn(self._blocking_wait).rawlink(self.result) def wait(self, timeout=None): """Wait for child process to terminate. Returns returncode attribute.""" if self.returncode is None: if not self._waiting: self._waiting = True self._wait() return self.result.wait(timeout=timeout) def send_signal(self, sig): """Send a signal to the process """ if sig == signal.SIGTERM: self.terminate() elif sig == signal.CTRL_C_EVENT: os.kill(self.pid, signal.CTRL_C_EVENT) elif sig == signal.CTRL_BREAK_EVENT: os.kill(self.pid, signal.CTRL_BREAK_EVENT) else: raise ValueError("Unsupported signal: {}".format(sig)) def terminate(self): """Terminates the process """ TerminateProcess(self._handle, 1) kill = terminate else: # # POSIX methods # def rawlink(self, callback): self.result.rawlink(linkproxy(callback, self)) # XXX unlink def _get_handles(self, stdin, stdout, stderr): """Construct and return tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: pass elif stdin == PIPE: p2cread, p2cwrite = self.pipe_cloexec() elif isinstance(stdin, int): p2cread = stdin else: # Assuming file-like object p2cread = stdin.fileno() if stdout is None: pass elif stdout == PIPE: c2pread, c2pwrite = self.pipe_cloexec() elif isinstance(stdout, int): c2pwrite = stdout else: # Assuming file-like object c2pwrite = stdout.fileno() if stderr is None: pass elif stderr == PIPE: errread, errwrite = self.pipe_cloexec() elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = stderr else: # Assuming file-like object errwrite = stderr.fileno() return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _set_cloexec_flag(self, fd, cloexec=True): try: cloexec_flag = fcntl.FD_CLOEXEC except AttributeError: cloexec_flag = 1 old = fcntl.fcntl(fd, fcntl.F_GETFD) if cloexec: fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag) else: fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag) def _remove_nonblock_flag(self, fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) & (~os.O_NONBLOCK) fcntl.fcntl(fd, fcntl.F_SETFL, flags) def pipe_cloexec(self): """Create a pipe with FDs set CLOEXEC.""" # Pipes' FDs are set CLOEXEC by default because we don't want them # to be inherited by other subprocesses: the CLOEXEC flag is removed # from the child's FDs by _dup2(), between fork() and exec(). # This is not atomic: we would need the pipe2() syscall for that. r, w = os.pipe() self._set_cloexec_flag(r) self._set_cloexec_flag(w) return r, w def _close_fds(self, but): if hasattr(os, 'closerange'): os.closerange(3, but) os.closerange(but + 1, MAXFD) else: for i in xrange(3, MAXFD): if i == but: continue try: os.close(i) except: pass def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): """Execute program (POSIX version)""" if isinstance(args, types.StringTypes): args = [args] else: args = list(args) if shell: args = ["/bin/sh", "-c"] + args if executable: args[0] = executable if executable is None: executable = args[0] self._loop.install_sigchld() # For transferring possible exec failure from child to parent # The first char specifies the exception type: 0 means # OSError, 1 means some other error. errpipe_read, errpipe_write = self.pipe_cloexec() try: try: gc_was_enabled = gc.isenabled() # Disable gc to avoid bug where gc -> file_dealloc -> # write to stderr -> hang. http://bugs.python.org/issue1336 gc.disable() try: self.pid = fork() except: if gc_was_enabled: gc.enable() raise if self.pid == 0: # Child try: # Close parent's pipe ends if p2cwrite is not None: os.close(p2cwrite) if c2pread is not None: os.close(c2pread) if errread is not None: os.close(errread) os.close(errpipe_read) # When duping fds, if there arises a situation # where one of the fds is either 0, 1 or 2, it # is possible that it is overwritten (#12607). if c2pwrite == 0: c2pwrite = os.dup(c2pwrite) if errwrite == 0 or errwrite == 1: errwrite = os.dup(errwrite) # Dup fds for child def _dup2(a, b): # dup2() removes the CLOEXEC flag but # we must do it ourselves if dup2() # would be a no-op (issue #10806). if a == b: self._set_cloexec_flag(a, False) elif a is not None: os.dup2(a, b) self._remove_nonblock_flag(b) _dup2(p2cread, 0) _dup2(c2pwrite, 1) _dup2(errwrite, 2) # Close pipe fds. Make sure we don't close the # same fd more than once, or standard fds. closed = set([None]) for fd in [p2cread, c2pwrite, errwrite]: if fd not in closed and fd > 2: os.close(fd) closed.add(fd) # Close all other fds, if asked for if close_fds: self._close_fds(but=errpipe_write) if cwd is not None: os.chdir(cwd) if preexec_fn: preexec_fn() if env is None: os.execvp(executable, args) else: os.execvpe(executable, args, env) except: exc_type, exc_value, tb = sys.exc_info() # Save the traceback and attach it to the exception object exc_lines = traceback.format_exception(exc_type, exc_value, tb) exc_value.child_traceback = ''.join(exc_lines) os.write(errpipe_write, pickle.dumps(exc_value)) finally: # Make sure that the process exits no matter what. # The return code does not matter much as it won't be # reported to the application os._exit(1) # Parent self._watcher = self._loop.child(self.pid) self._watcher.start(self._on_child, self._watcher) if gc_was_enabled: gc.enable() finally: # be sure the FD is closed no matter what os.close(errpipe_write) if p2cread is not None and p2cwrite is not None: os.close(p2cread) if c2pwrite is not None and c2pread is not None: os.close(c2pwrite) if errwrite is not None and errread is not None: os.close(errwrite) # Wait for exec to fail or succeed; possibly raising exception errpipe_read = FileObject(errpipe_read, 'rb') data = errpipe_read.read() finally: if hasattr(errpipe_read, 'close'): errpipe_read.close() else: os.close(errpipe_read) if data != "": self.wait() child_exception = pickle.loads(data) for fd in (p2cwrite, c2pread, errread): if fd is not None: os.close(fd) raise child_exception def _handle_exitstatus(self, sts): if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) elif os.WIFEXITED(sts): self.returncode = os.WEXITSTATUS(sts) else: # Should never happen raise RuntimeError("Unknown child exit status!") def _internal_poll(self): """Check if child process has terminated. Returns returncode attribute. """ if self.returncode is None: if get_hub() is not getcurrent(): sig_pending = getattr(self._loop, 'sig_pending', True) if sig_pending: sleep(0.00001) return self.returncode def wait(self, timeout=None): """Wait for child process to terminate. Returns returncode attribute.""" return self.result.wait(timeout=timeout) def send_signal(self, sig): """Send a signal to the process """ os.kill(self.pid, sig) def terminate(self): """Terminate the process with SIGTERM """ self.send_signal(signal.SIGTERM) def kill(self): """Kill the process with SIGKILL """ self.send_signal(signal.SIGKILL) def write_and_close(fobj, data): try: if data: fobj.write(data) except (OSError, IOError), ex: if ex.errno != errno.EPIPE and ex.errno != errno.EINVAL: raise finally: try: fobj.close() except EnvironmentError: pass
"""Execute program (MS Windows version)""" if not isinstance(args, types.StringTypes): args = list2cmdline(args) # Process startup details if startupinfo is None: startupinfo = STARTUPINFO() if None not in (p2cread, c2pwrite, errwrite): startupinfo.dwFlags |= STARTF_USESTDHANDLES startupinfo.hStdInput = p2cread startupinfo.hStdOutput = c2pwrite startupinfo.hStdError = errwrite if shell: startupinfo.dwFlags |= STARTF_USESHOWWINDOW startupinfo.wShowWindow = SW_HIDE comspec = os.environ.get("COMSPEC", "cmd.exe") args = '{} /c "{}"'.format(comspec, args) if GetVersion() >= 0x80000000 or os.path.basename(comspec).lower() == "command.com": # Win9x, or using command.com on NT. We need to # use the w9xpopen intermediate program. For more # information, see KB Q150956 # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp) w9xpopen = self._find_w9xpopen() args = '"%s" %s' % (w9xpopen, args) # Not passing CREATE_NEW_CONSOLE has been known to # cause random failures on win9x. Specifically a # dialog: "Your program accessed mem currently in # use at xxx" and a hopeful warning about the # stability of your system. Cost is Ctrl+C wont # kill children. creationflags |= CREATE_NEW_CONSOLE # Start the process try: hp, ht, pid, tid = CreateProcess(executable, args, # no special security None, None, int(not close_fds), creationflags, env, cwd, startupinfo) except pywintypes.error, e: # Translate pywintypes.error to WindowsError, which is # a subclass of OSError. FIXME: We should really # translate errno using _sys_errlist (or similar), but # how can this be done from Python? raise WindowsError(*e.args) finally: # Child is launched. Close the parent's copy of those pipe # handles that only the child should have open. You need # to make sure that no handles to the write end of the # output pipe are maintained in this process or else the # pipe will not close when the child process exits and the # ReadFile will hang. if p2cread is not None: p2cread.Close() if c2pwrite is not None: c2pwrite.Close() if errwrite is not None: errwrite.Close() # Retain the process handle, but close the thread handle self._handle = hp self.pid = pid ht.Close()
play.py
import os
from pydub import playback from playsound import playsound from simpleaudio import play_buffer import winsound from manuscript.tools.counter import Counter def play_sound(sound, block=True): if sound is not None: prefix = "tmp" with Counter(prefix) as counter: tmp_file = os.path.join(".", prefix + f"_{counter:010d}.mp3") sound.export(tmp_file) playsound(tmp_file, block=block) #os.remove(tmp_file)
zz_generated_constants.go
//go:build go1.16 // +build go1.16 // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated.
const ( module = "numbergroup" version = "v0.1.0" )
package numbergroup
main.py
# # Copyright (c) 2015 Juniper Networks, Inc. All rights reserved. # #!/usr/bin/python doc = """\ Node manager listens to process state change events and other flag value change events to provide advanced service management functionality. Rules files looks like following: ==================== { "Rules": [ {"process_name": "contrail-query-engine", "process_state": "PROCESS_STATE_FATAL", "action": "supervisorctl -s http://localhost:9002 """ + \ """\stop contrail-analytics-api"}, {"process_name": "contrail-query-engine", "process_state": "PROCESS_STATE_STOPPED", "action": "supervisorctl -s http://localhost:9002 """ + \ """\stop contrail-analytics-api"}, {"processname": "contrail-collector", "process_state": "PROCESS_STATE_RUNNING", "action": "/usr/bin/echo collector is starting >> /tmp/log"}, {"flag_name": "test", "flag_value":"true", "action": "/usr/bin/echo flag test is set true >> /tmp/log.1"} ] } ==================== """ from gevent import monkey monkey.patch_all() import os import os.path import sys import argparse import socket import gevent import ConfigParser import signal import random import hashlib from nodemgr.analytics_nodemgr.analytics_event_manager import AnalyticsEventManager from nodemgr.control_nodemgr.control_event_manager import ControlEventManager from nodemgr.config_nodemgr.config_event_manager import ConfigEventManager from nodemgr.vrouter_nodemgr.vrouter_event_manager import VrouterEventManager from nodemgr.database_nodemgr.database_event_manager import DatabaseEventManager from pysandesh.sandesh_base import Sandesh, SandeshSystem, SandeshConfig from pysandesh.gen_py.sandesh.ttypes import SandeshLevel def
(): print doc sys.exit(255) def main(args_str=' '.join(sys.argv[1:])): # Parse Arguments node_parser = argparse.ArgumentParser(add_help=False) node_parser.add_argument("--nodetype", default='contrail-analytics', help='Type of node which nodemgr is managing') try: args, remaining_argv = node_parser.parse_known_args(args_str.split()) except: usage() default = {'rules': '', 'collectors': [], 'hostip': '127.0.0.1', 'db_port': '9042', 'minimum_diskgb': 256, 'contrail_databases': 'config analytics', 'cassandra_repair_interval': 24, 'cassandra_repair_logdir': '/var/log/contrail/', 'log_local': False, 'log_level': SandeshLevel.SYS_DEBUG, 'log_category': '', 'log_file': Sandesh._DEFAULT_LOG_FILE, 'use_syslog': False, 'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY } default.update(SandeshConfig.get_default_options(['DEFAULTS'])) sandesh_opts = SandeshConfig.get_default_options() node_type = args.nodetype if (node_type == 'contrail-analytics'): config_file = '/etc/contrail/contrail-analytics-nodemgr.conf' elif (node_type == 'contrail-config'): config_file = '/etc/contrail/contrail-config-nodemgr.conf' elif (node_type == 'contrail-control'): config_file = '/etc/contrail/contrail-control-nodemgr.conf' elif (node_type == 'contrail-vrouter'): config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf' elif (node_type == 'contrail-database'): config_file = '/etc/contrail/contrail-database-nodemgr.conf' else: sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n") return if (os.path.exists(config_file) == False): sys.stderr.write("config file " + config_file + " is not present" + "\n") return config = ConfigParser.SafeConfigParser() config.read([config_file]) if 'DEFAULTS' in config.sections(): default.update(dict(config.items('DEFAULTS'))) if 'COLLECTOR' in config.sections(): try: collector = config.get('COLLECTOR', 'server_list') default['collectors'] = collector.split() except ConfigParser.NoOptionError as e: pass SandeshConfig.update_options(sandesh_opts, config) parser = argparse.ArgumentParser(parents=[node_parser], formatter_class=argparse.ArgumentDefaultsHelpFormatter) default.update(sandesh_opts) parser.set_defaults(**default) parser.add_argument("--rules", help='Rules file to use for processing events') parser.add_argument("--collectors", nargs='+', help='Collector addresses in format' + 'ip1:port1 ip2:port2') parser.add_argument("--log_local", action="store_true", help="Enable local logging of sandesh messages") parser.add_argument("--log_level", help="Severity level for local logging of sandesh messages") parser.add_argument("--log_category", help="Category filter for local logging of sandesh messages") parser.add_argument("--log_file", help="Filename for the logs to be written to") parser.add_argument("--use_syslog", action="store_true", help="Use syslog for logging") parser.add_argument("--syslog_facility", help="Syslog facility to receive log lines") SandeshConfig.add_parser_arguments(parser, add_dscp=True) if (node_type == 'contrail-database' or node_type == 'contrail-config'): parser.add_argument("--minimum_diskGB", type=int, dest='minimum_diskgb', help="Minimum disk space in GB's") parser.add_argument("--contrail_databases", nargs='+', help='Contrail databases on this node' + 'in format: config analytics' ) parser.add_argument("--hostip", help="IP address of host") parser.add_argument("--db_port", help="Cassandra DB cql port") parser.add_argument("--cassandra_repair_interval", type=int, help="Time in hours to periodically run " "nodetool repair for cassandra maintenance") parser.add_argument("--cassandra_repair_logdir", help="Directory for storing repair logs") try: _args = parser.parse_args(remaining_argv) except: usage() rule_file = _args.rules # randomize collector list _args.chksum = "" if _args.collectors: _args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest() _args.random_collectors = random.sample(_args.collectors, len(_args.collectors)) _args.collectors = _args.random_collectors # done parsing arguments prog = None if (node_type == 'contrail-analytics'): if not rule_file: rule_file = "/etc/contrail/supervisord_analytics_files/" + \ "contrail-analytics.rules" unit_names = ['contrail-collector.service', 'contrail-analytics-api.service', 'contrail-snmp-collector.service', 'contrail-query-engine.service', 'contrail-alarm-gen.service', 'contrail-topology.service', 'contrail-analytics-nodemgr.service', ] prog = AnalyticsEventManager(_args, rule_file, unit_names) elif (node_type == 'contrail-config'): if not rule_file: rule_file = "/etc/contrail/supervisord_config_files/" + \ "contrail-config.rules" unit_names = ['contrail-api.service', 'contrail-schema.service', 'contrail-svc-monitor.service', 'contrail-device-manager.service', 'contrail-config-nodemgr.service', ] prog = ConfigEventManager(_args, rule_file, unit_names) elif (node_type == 'contrail-control'): if not rule_file: rule_file = "/etc/contrail/supervisord_control_files/" + \ "contrail-control.rules" unit_names = ['contrail-control.service', 'contrail-dns.service', 'contrail-named.service', 'contrail-control-nodemgr.service', ] prog = ControlEventManager(_args, rule_file, unit_names) elif (node_type == 'contrail-vrouter'): if not rule_file: rule_file = "/etc/contrail/supervisord_vrouter_files/" + \ "contrail-vrouter.rules" unit_names = ['contrail-vrouter-agent.service', 'contrail-vrouter-nodemgr.service', ] prog = VrouterEventManager(_args, rule_file, unit_names) elif (node_type == 'contrail-database'): if not rule_file: rule_file = "/etc/contrail/supervisord_database_files/" + \ "contrail-database.rules" unit_names = ['contrail-database.service', 'kafka.service', 'contrail-database-nodemgr.service', ] prog = DatabaseEventManager(_args, rule_file, unit_names) else: sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n") return prog.process() prog.send_nodemgr_process_status() prog.send_process_state_db(prog.group_names) prog.config_file = config_file prog.collector_chksum = _args.chksum if _args.collectors: prog.random_collectors = _args.random_collectors """ @sighup Reconfig of collector list """ gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler) gevent.joinall([gevent.spawn(prog.runforever), gevent.spawn(prog.run_periodically(prog.do_periodic_events, 60))]) if __name__ == '__main__': main()
usage
dataset.rs
use mnist::{Mnist, MnistBuilder}; use rulinalg::vector::Vector; pub struct Dataset { data: Mnist, pub trn_img: Vec<Vector<f64>>, pub tst_img: Vec<Vector<f64>>, pub trn_lbl: Vec<u8>,
pub rows: usize, pub cols: usize, pub pixels: usize, } impl Dataset { pub fn new() -> Self { let (trn_size, tst_size, rows, cols) = (50_000, 10_000, 28, 28); let data = MnistBuilder::new() .label_format_digit() .training_set_length(trn_size) .validation_set_length(tst_size) .test_set_length(tst_size) .finalize(); let pixels = rows * cols; let trn_img: Vec<f64> = data.trn_img.iter().map(|x| *x as f64 / 255.0).collect(); let tst_img: Vec<f64> = data.tst_img.iter().map(|x| *x as f64 / 255.0).collect(); let trn_img: Vec<Vector<f64>> = trn_img.chunks(pixels).map(&|v| Vector::new(v)).collect(); let tst_img: Vec<Vector<f64>> = tst_img.chunks(pixels).map(&|v| Vector::new(v)).collect(); let trn_lbl = data.trn_lbl.clone(); let tst_lbl = data.tst_lbl.clone(); Self { data, trn_img, tst_img, trn_lbl, tst_lbl, trn_size, tst_size, rows, cols, pixels, } } }
pub tst_lbl: Vec<u8>, pub trn_size: u32, pub tst_size: u32,
mechanics_economic.py
#!/usr/bin/env python3 """Visualise statistic by machine economic.""" from __future__ import annotations import pandas as pd from matplotlib import pyplot as plt from typing import Dict from .mechanic_report import MechReports from .administration.logger_cfg import Logs from .support_modules.custom_exceptions import MainMenu from .support_modules.standart_functions import ( BasicFunctionsS as BasF_S ) LOGGER = Logs().give_logger(__name__) class MechEconomic(MechReports): """Visualise statistic by machine economic.""" __slots__ = ( 'mech_econ_path', 'mech_econ_data', 'mech_econ_file', ) def __init__(self, user): """Load mech econom data.""" super().__init__(user) self.mech_econ_data = {} self.mech_econ_path = ( super().get_root_path() / 'data' / 'mech_ecomomic' ) if self.mech_econ_path.exists(): self.mech_econ_file = super().load_data( data_path=self.mech_econ_path, user=user, ) else: self.mech_econ_file = pd.DataFrame(self.mech_econ_data, index=[0]) def _save_mech_econom(self): """Save mech econom and create log file.""" self.mech_econ_file = self.mech_econ_file.append( self.mech_econ_data, ignore_index=True ) self._dump_mech_econ_data() self._log_mech_econ_creation() def _dump_mech_econ_data(self): """Dump salary data to file.""" super().dump_data( data_path=self.mech_econ_path, base_to_dump=self.mech_econ_file, user=self.user, ) def _log_mech_econ_creation(self): """Save log about salary creation.""" report_name = '{}-{}'.format( self.mech_econ_data['year'], self.mech_econ_data['month'], ) LOGGER.warning( f"User '{self.user.login}' create mechanic econom.: {report_name}" ) def _visualise_one_day_cost(self): """Visualise cost of one day by each machine.""" year = self._chose_year() data_by_year = super().give_dataframe_by_year(year) data_for_plot = { 'mach': [], 'day_cost': [], } for mach in super().maint_dict['mach_name']: totall_cost = sum(self.mech_econ_file[mach]) total_work = sum(data_by_year.work) number_of_wdays = total_work day_cost = round(totall_cost/number_of_wdays, 0) data_for_plot['mach'].append(mach) data_for_plot['day_cost'].append(day_cost) data_for_plot = pd.DataFrame(data_for_plot) self._create_one_day_cost_plot(data_for_plot) def _input_machines_econ(self, mech_econ_date): """Input money, spent for machine in month.""" self.mech_econ_data['year'] = mech_econ_date['year'] self.mech_econ_data['month'] = mech_econ_date['month'] super().clear_screen() print("Введите сумму для каждой техники:") for mach in super().maint_dict['mach_name']: self.mech_econ_data[mach] = float(input(f"{mach}: ")) save = input( "\nДанные введены." "\n[s] - сохранить данные: " ) if save.lower() == 's': self._save_mech_econom() print("Данные сохранены.") else: print("Вы отменили сохранение.") input("\n[ENTER] - выйти.") def _visualise_statistic(self, year): """Visualise statistic.""" mech_econ_year = self.mech_econ_file.year == year data_by_year = ( self.mech_econ_file[mech_econ_year] .sort_values(by=['month']) ) super().print_all_dataframe(data_by_year) input("\n[ENTER] - выйти.") def _chose_year(self): """Show statistic about drill instrument.""" print("[ENTER] - выход" "\nВыберете год:") year = super().choise_from_list( sorted(set(self.mech_econ_file.year)), none_option=True ) if year: return year else: raise MainMenu @BasF_S.set_plotter_parametrs def _create_one_day_cost_plot(self, dataframe): """Create one day cost plot.""" figure = plt.figure() x_cost = list(range(len(super().maint_dict['mach_name']))) axle = figure.add_subplot(111) axle.bar( x_cost, dataframe.day_cost, 0.3, alpha=0.4, color='r', label='Коэффициент', tick_label=dataframe.mach ) axle.tick_params(labelrotation=90) axle.set_title( "Коэффициент целесообразности содержания техники руб/час. ", fontsize="x-large") axle.set_ylabel('руб.') axle.legend() axle.grid( True, linestyle='--', which='major', color='grey', alpha=.25, axis='y' ) figure.tight_layout() plt.show() def create_mech_econom(self): """Create mechanic econom data report.""" mech_econ_date = self.input_date() check = super().check_date_in_dataframe( s
, mech_econ_date ) if check: print("Данные за этот месяц уже внесены.") input("\n[ENTER] - выйти.") else: self._input_machines_econ(mech_econ_date) def show_econ_statistic(self, stat_variants: Dict): """Show machine economic statistic.""" stat_variants = { 'Целесообразность затрат на содержание техники.': self._visualise_one_day_cost, } print("[ENTER] - выйти." "\nВыберете вид отчета:") stat = super().choise_from_list(stat_variants, none_option=True) if stat: stat_variants[stat]()
elf.mech_econ_file
test_test.py
# -*- coding: utf-8 -*- # Copyright (c) 2018, frappe and Contributors
from __future__ import unicode_literals import frappe import unittest class TestTest(unittest.TestCase): pass
# See license.txt
test_DecodeSerialData.py
import threading import socket import sys import getopt from log import logger from Codecs.AdcpCodec import AdcpCodec from Comm.AdcpSerialPortServer import AdcpSerialPortServer class DecodeSerialData: def __init__(self, tcp_port, comm_port, baud): """ Initialize the thread to read the data from the TCP port. """ self.is_alive = True self.raw_serial_socket = None self.serial_server_thread = None # Create the codec self.codec = AdcpCodec() # Create a serial port server to read data from the # serial port and pass it on TCP self.serial_server = AdcpSerialPortServer(str(tcp_port), comm_port, baud) # Start a tcp connection to monitor incoming data and decode self.serial_server_thread = threading.Thread(name='AdcpDecoder', target=self.create_raw_serial_socket(tcp_port)) self.serial_server_thread.start() def create_raw_serial_socket(self, port): """ Connect to the ADCP serial server. This TCP server outputs data from the serial port. Start reading the data. """ try: # Create socket self.raw_serial_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.raw_serial_socket.connect(('localhost', int(port))) self.raw_serial_socket.settimeout(1) # Set timeout to stop thread if terminated # Start to read the raw data self.read_tcp_socket() except ConnectionRefusedError as err: logger.error("Serial Send Socket: ", err) exit() except Exception as err: logger.error('Serial Send Socket: ", Error Opening socket', err) exit() def read_tcp_socket(self): """ Read the data from the TCP port. This is the raw data from the serial port. """ while self.is_alive: try: # Read data from socket data = self.raw_serial_socket.recv(4096) # If data exist process if len(data) > 0: # Send the data received to the codec self.codec.add(data) except socket.timeout: # Just a socket timeout, continue on pass except Exception as e: logger.error("Exception in reading data.", e) self.stop_adcp_server() print("Read Thread turned off") def stop_adcp_server(self): """ Stop the ADCP Serial TCP server """ # Stop the thread loop self.is_alive = False if self.serial_server is not None: self.serial_server.close() logger.debug("serial server stopped") else: logger.debug('No serial connection') # Close the socket self.raw_serial_socket.close() # Stop the server thread if self.serial_server_thread is not None: self.serial_server_thread.join() # Close the open file self.close_file_write() logger.debug("Stop the Recorder") def main(argv):
if __name__ == "__main__": main(sys.argv[1:])
tcp_port = "55056" comm_port = '/dev/tty.usbserial-FT0ED8ZR' baud = 115200 try: opts, args = getopt.getopt(argv,"hlt:c:b:", []) except getopt.GetoptError: print('test_DecodeSerialData.py -t <tcp_port> -c <comm> -b <baud>') sys.exit(2) for opt, arg in opts: if opt == '-h': print('test_DecodeSerialData.py -t <tcp_port> -c <comm> -b <baud>') sys.exit() elif opt in ("-l"): print("Available Serial Ports:") AdcpSerialPortServer.list_serial_ports() exit() elif opt in ('-t'): tcp_port = arg elif opt in ("-c"): comm_port = arg elif opt in ("-b"): baud = int(arg) # Get a list of all the serial ports available print("Available Serial Ports:") serial_list = AdcpSerialPortServer.list_serial_ports() print("TCP Port: " + tcp_port) print("Comm Port: " + comm_port) print("Baud rate: " + str(baud)) # Verify a good serial port was given if comm_port in serial_list: # Run serial port sdr = DecodeSerialData(tcp_port, comm_port, baud) sdr.stop_adcp_server() else: print("----------------------------------------------------------------") print("BAD SERIAL PORT GIVEN") print("Please use -c to give a good serial port.") print("-l will give you a list of all available serial ports.")
incremental_get.rs
use test::{black_box, Bencher}; use engine::SyncSnapshot; use keys::Key; use kvproto::kvrpcpb::{Context, IsolationLevel}; use test_storage::SyncTestStorageBuilder; use tidb_query::codec::table; use tikv::storage::{Engine, Mutation, SnapshotStore, Statistics, Store}; fn table_lookup_gen_data() -> (SnapshotStore<SyncSnapshot>, Vec<Key>) { let store = SyncTestStorageBuilder::new().build().unwrap(); let mut mutations = Vec::new(); let mut keys = Vec::new(); for i in 0..30000 { let user_key = table::encode_row_key(5, i); let user_value = vec![b'x'; 60]; let key = Key::from_raw(&user_key); let mutation = Mutation::Put((key.clone(), user_value)); mutations.push(mutation); keys.push(key); } let pk = table::encode_row_key(5, 0); store .prewrite(Context::default(), mutations, pk, 1) .unwrap(); store.commit(Context::default(), keys, 1, 2).unwrap(); let engine = store.get_engine(); let db = engine.get_rocksdb(); db.compact_range_cf(db.cf_handle("write").unwrap(), None, None); db.compact_range_cf(db.cf_handle("default").unwrap(), None, None); db.compact_range_cf(db.cf_handle("lock").unwrap(), None, None); let snapshot = engine.snapshot(&Context::default()).unwrap(); let store = SnapshotStore::new( snapshot, 10.into(), IsolationLevel::Si, true, Default::default(), ); // Keys are given in order, and are far away from each other to simulate a normal table lookup // scenario. let mut get_keys = Vec::new(); for i in (0..30000).step_by(30) { get_keys.push(Key::from_raw(&table::encode_row_key(5, i))); } (store, get_keys) } #[bench] fn bench_table_lookup_mvcc_get(b: &mut Bencher)
#[bench] fn bench_table_lookup_mvcc_incremental_get(b: &mut Bencher) { let (mut store, keys) = table_lookup_gen_data(); b.iter(|| { for key in &keys { black_box(store.incremental_get(key).unwrap()); } }) }
{ let (store, keys) = table_lookup_gen_data(); b.iter(|| { let mut stats = Statistics::default(); for key in &keys { black_box(store.get(key, &mut stats).unwrap()); } }); }
bot.py
# -*- coding: utf-8 -*- # cython: language_level=3 # Copyright (c) 2020 Nekokatt # Copyright (c) 2021-present davfsa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Basic implementation the components for a single-process bot.""" from __future__ import annotations __all__: typing.List[str] = ["GatewayBot"] import asyncio import datetime import logging import math import signal import sys import threading import traceback import types import typing import warnings from hikari import applications from hikari import errors from hikari import intents as intents_ from hikari import presences from hikari import snowflakes from hikari import traits from hikari import undefined from hikari.impl import cache as cache_impl from hikari.impl import config as config_impl from hikari.impl import entity_factory as entity_factory_impl from hikari.impl import event_factory as event_factory_impl from hikari.impl import event_manager as event_manager_impl from hikari.impl import rest as rest_impl from hikari.impl import shard as shard_impl from hikari.impl import voice as voice_impl from hikari.internal import aio from hikari.internal import time from hikari.internal import ux if typing.TYPE_CHECKING: import concurrent.futures from hikari import channels from hikari import guilds from hikari import users as users_ from hikari.api import cache as cache_ from hikari.api import entity_factory as entity_factory_ from hikari.api import event_factory as event_factory_ from hikari.api import event_manager as event_manager_ from hikari.api import rest as rest_ from hikari.api import shard as gateway_shard from hikari.api import voice as voice_ _LOGGER: typing.Final[logging.Logger] = logging.getLogger("hikari.bot") async def _gather(coros: typing.Iterator[typing.Awaitable[typing.Any]]) -> None: # Calling asyncio.gather outside of a running event loop isn't safe and # will lead to RuntimeErrors in later versions of python, so this call is # kept within a coroutine function. await asyncio.gather(*coros) def _destroy_loop(loop: asyncio.AbstractEventLoop) -> None: async def murder(future: asyncio.Future[typing.Any]) -> None: # These include _GatheringFuture which must be awaited if the children # throw an asyncio.CancelledError, otherwise it will spam logs with warnings # about exceptions not being retrieved before GC. try: _LOGGER.log(ux.TRACE, "killing %s", future) future.cancel() await future except asyncio.CancelledError: pass except Exception as ex: loop.call_exception_handler( { "message": "Future raised unexpected exception after requesting cancellation", "exception": ex, "future": future, } ) remaining_tasks = [t for t in asyncio.all_tasks(loop) if not t.done()] if remaining_tasks: _LOGGER.debug("terminating %s remaining tasks forcefully", len(remaining_tasks)) loop.run_until_complete(_gather((murder(task) for task in remaining_tasks))) else: _LOGGER.debug("No remaining tasks exist, good job!") if sys.version_info >= (3, 9): _LOGGER.debug("shutting down default executor") try: # This seems to raise a NotImplementedError when running with uvloop. loop.run_until_complete(loop.shutdown_default_executor()) except NotImplementedError: pass _LOGGER.debug("shutting down asyncgens") loop.run_until_complete(loop.shutdown_asyncgens()) _LOGGER.debug("closing event loop") loop.close() # Closed loops cannot be re-used so it should also be un-set. asyncio.set_event_loop(None) def _validate_activity(activity: undefined.UndefinedNoneOr[presences.Activity]) -> None: # This seems to cause confusion for a lot of people, so lets add some warnings into the mix. if activity is undefined.UNDEFINED or activity is None: return # If you ever change where this is called from, make sure to check the stacklevels are correct # or the code preview in the warning will be wrong... if activity.type is presences.ActivityType.CUSTOM: warnings.warn( "The CUSTOM activity type is not supported by bots at the time of writing, and may therefore not have " "any effect if used.", category=errors.HikariWarning, stacklevel=3, ) elif activity.type is presences.ActivityType.STREAMING and activity.url is None: warnings.warn( "The STREAMING activity type requires a 'url' parameter pointing to a valid Twitch or YouTube video " "URL to be specified on the activity for the presence update to have any effect.", category=errors.HikariWarning, stacklevel=3, ) class GatewayBot(traits.GatewayBotAware): """Basic auto-sharding bot implementation. This is the class you will want to use to start, control, and build a bot with. Parameters ---------- token : builtins.str The bot token to sign in with. Other Parameters ---------------- allow_color : builtins.bool Defaulting to `builtins.True`, this will enable coloured console logs on any platform that is a TTY. Setting a `"CLICOLOR"` environment variable to any **non `0`** value will override this setting. Users should consider this an advice to the application on whether it is safe to show colours if possible or not. Since some terminals can be awkward or not support features in a standard way, the option to explicitly disable this is provided. See `force_color` for an alternative. banner : typing.Optional[builtins.str] The package to search for a `banner.txt` in. Defaults to `"hikari"` for the `"hikari/banner.txt"` banner. Setting this to `builtins.None` will disable the banner being shown. executor : typing.Optional[concurrent.futures.Executor] Defaults to `builtins.None`. If non-`builtins.None`, then this executor is used instead of the `concurrent.futures.ThreadPoolExecutor` attached to the `asyncio.AbstractEventLoop` that the bot will run on. This executor is used primarily for file-IO. While mainly supporting the `concurrent.futures.ThreadPoolExecutor` implementation in the standard lib, Hikari's file handling systems should also work with `concurrent.futures.ProcessPoolExecutor`, which relies on all objects used in IPC to be `pickle`able. Many third-party libraries will not support this fully though, so your mileage may vary on using ProcessPoolExecutor implementations with this parameter. force_color : builtins.bool Defaults to `builtins.False`. If `builtins.True`, then this application will __force__ colour to be used in console-based output. Specifying a `"CLICOLOR_FORCE"` environment variable with a non-`"0"` value will override this setting. cache_settings : typing.Optional[hikari.impl.config.CacheSettings] Optional cache settings. If unspecified, will use the defaults. http_settings : typing.Optional[hikari.impl.config.HTTPSettings] Optional custom HTTP configuration settings to use. Allows you to customise functionality such as whether SSL-verification is enabled, what timeouts `aiohttp` should expect to use for requests, and behavior regarding HTTP-redirects. intents : hikari.intents.Intents Defaults to `hikari.intents.Intents.ALL_UNPRIVILEGED`. This allows you to change which intents your application will use on the gateway. This can be used to control and change the types of events you will receive. logs : typing.Union[builtins.None, LoggerLevel, typing.Dict[str, typing.Any]] Defaults to `"INFO"`. If `builtins.None`, then the Python logging system is left uninitialized on startup, and you will need to configure it manually to view most logs that are output by components of this library. If one of the valid values in a `LoggerLevel`, then this will match a call to `colorlog.basicConfig` (a facade for `logging.basicConfig` with additional conduit for enabling coloured logging levels) with the `level` kwarg matching this value. If a `typing.Dict[str, typing.Any]` equivalent, then this value is passed to `logging.config.dictConfig` to allow the user to provide a specialized logging configuration of their choice. If any handlers are defined in the dict, default handlers will not be setup. As a side note, you can always opt to leave this on the default value and then use an incremental `logging.config.dictConfig` that applies any additional changes on top of the base configuration, if you prefer. An example of can be found in the `Example` section. Note that `"TRACE_HIKARI"` is a library-specific logging level which is expected to be more verbose than `"DEBUG"`. max_rate_limit : builtins.float The max number of seconds to backoff for when rate limited. Anything greater than this will instead raise an error. This defaults to five minutes if left to the default value. This is to stop potentially indefinitely waiting on an endpoint, which is almost never what you want to do if giving a response to a user. You can set this to `float("inf")` to disable this check entirely. Note that this only applies to the REST API component that communicates with Discord, and will not affect sharding or third party HTTP endpoints that may be in use. max_retries : typing.Optional[builtins.int] Maximum number of times a request will be retried if it fails with a `5xx` status. Defaults to 3 if set to `builtins.None`. proxy_settings : typing.Optional[hikari.impl.config.ProxySettings] Custom proxy settings to use with network-layer logic in your application to get through an HTTP-proxy. rest_url : typing.Optional[builtins.str] Defaults to the Discord REST API URL if `builtins.None`. Can be overridden if you are attempting to point to an unofficial endpoint, or if you are attempting to mock/stub the Discord API for any reason. Generally you do not want to change this. !!! note `force_color` will always take precedence over `allow_color`. !!! note Settings that control the gateway session are provided to the `GatewayBot.run` and `GatewayBot.start` functions in this class. This is done to allow you to contextually customise details such as sharding configuration without having to re-initialize the entire application each time. Example ------- Setting up logging using a dictionary configuration: ```py import os import hikari # We want to make gateway logs output as DEBUG, and TRACE for all ratelimit content. bot = hikari.GatewayBot( token=os.environ["BOT_TOKEN"], logs={ "version": 1, "incremental": True, "loggers": { "hikari.gateway": {"level": "DEBUG"}, "hikari.ratelimits": {"level": "TRACE_HIKARI"}, }, }, ) ``` """ __slots__: typing.Sequence[str] = ( "_cache", "_closing_event", "_closed_event", "_entity_factory", "_event_manager", "_event_factory", "_executor", "_http_settings", "_intents", "_is_alive", "_proxy_settings", "_rest", "_shards", "_token", "_voice", "shards", ) def __init__( self, token: str, *, allow_color: bool = True, banner: typing.Optional[str] = "hikari", executor: typing.Optional[concurrent.futures.Executor] = None, force_color: bool = False, cache_settings: typing.Optional[config_impl.CacheSettings] = None, http_settings: typing.Optional[config_impl.HTTPSettings] = None, intents: intents_.Intents = intents_.Intents.ALL_UNPRIVILEGED, logs: typing.Union[None, int, str, typing.Dict[str, typing.Any]] = "INFO", max_rate_limit: float = 300, max_retries: int = 3, proxy_settings: typing.Optional[config_impl.ProxySettings] = None, rest_url: typing.Optional[str] = None, ) -> None: # Beautification and logging ux.init_logging(logs, allow_color, force_color) self.print_banner(banner, allow_color, force_color) # Settings and state self._closing_event: typing.Optional[asyncio.Event] = None self._closed_event: typing.Optional[asyncio.Event] = None self._is_alive = False self._executor = executor self._http_settings = http_settings if http_settings is not None else config_impl.HTTPSettings() self._intents = intents self._proxy_settings = proxy_settings if proxy_settings is not None else config_impl.ProxySettings() self._token = token.strip() # Caching cache_settings = cache_settings if cache_settings is not None else config_impl.CacheSettings() self._cache = cache_impl.CacheImpl(self, cache_settings) # Entity creation self._entity_factory = entity_factory_impl.EntityFactoryImpl(self) # Event creation self._event_factory = event_factory_impl.EventFactoryImpl(self) # Event handling self._event_manager = event_manager_impl.EventManagerImpl(self._event_factory, self._intents, cache=self._cache) # Voice subsystem self._voice = voice_impl.VoiceComponentImpl(self) # RESTful API. self._rest = rest_impl.RESTClientImpl( cache=self._cache, entity_factory=self._entity_factory, executor=self._executor, http_settings=self._http_settings, max_rate_limit=max_rate_limit, proxy_settings=self._proxy_settings, rest_url=rest_url, max_retries=max_retries, token=token, token_type=applications.TokenType.BOT, ) # We populate these on startup instead, as we need to possibly make some # HTTP requests to determine what to put in this mapping. self._shards: typing.Dict[int, gateway_shard.GatewayShard] = {} self.shards: typing.Mapping[int, gateway_shard.GatewayShard] = types.MappingProxyType(self._shards) @property def cache(self) -> cache_.Cache: return self._cache @property def event_manager(self) -> event_manager_.EventManager: return self._event_manager @property def entity_factory(self) -> entity_factory_.EntityFactory: return self._entity_factory @property def event_factory(self) -> event_factory_.EventFactory: return self._event_factory @property def
(self) -> typing.Optional[concurrent.futures.Executor]: return self._executor @property def heartbeat_latencies(self) -> typing.Mapping[int, float]: return {s.id: s.heartbeat_latency for s in self._shards.values()} @property def heartbeat_latency(self) -> float: latencies = [s.heartbeat_latency for s in self._shards.values() if not math.isnan(s.heartbeat_latency)] return sum(latencies) / len(latencies) if latencies else float("nan") @property def http_settings(self) -> config_impl.HTTPSettings: return self._http_settings @property def intents(self) -> intents_.Intents: return self._intents @property def proxy_settings(self) -> config_impl.ProxySettings: return self._proxy_settings @property def shard_count(self) -> int: return next(iter(self._shards.values())).shard_count if self._shards else 0 @property def voice(self) -> voice_.VoiceComponent: return self._voice @property def rest(self) -> rest_.RESTClient: return self._rest @property def is_alive(self) -> bool: return self._is_alive def _check_if_alive(self) -> None: if not self._is_alive: raise errors.ComponentStateConflictError("bot is not running so it cannot be interacted with") def get_me(self) -> typing.Optional[users_.OwnUser]: return self._cache.get_me() async def close(self) -> None: self._check_if_alive() await self._close() async def _close(self) -> None: if self._closed_event: # Closing is in progress from another call, wait for that to complete. await self._closed_event.wait() return if self._closing_event is None: # If closing event is None then this is already closed. return _LOGGER.debug("bot requested to shutdown") self._closed_event = asyncio.Event() self._closing_event.set() self._closing_event = None dispatch_events = self._is_alive loop = asyncio.get_running_loop() async def handle(name: str, awaitable: typing.Awaitable[typing.Any]) -> None: future = asyncio.ensure_future(awaitable) try: await future except Exception as ex: loop.call_exception_handler( { "message": f"{name} raised an exception during shutdown", "future": future, "exception": ex, } ) if dispatch_events: await self._event_manager.dispatch(self._event_factory.deserialize_stopping_event()) _LOGGER.log(ux.TRACE, "StoppingEvent dispatch completed, now beginning termination") calls = [ ("rest", self._rest.close()), ("voice handler", self._voice.close()), *((f"shard {s.id}", s.close()) for s in self._shards.values()), ] for coro in asyncio.as_completed([handle(*pair) for pair in calls]): await coro # Clear out cache and shard map self._cache.clear() self._shards.clear() self._is_alive = False if dispatch_events: await self._event_manager.dispatch(self._event_factory.deserialize_stopped_event()) self._closed_event.set() self._closed_event = None def dispatch(self, event: event_manager_.EventT_inv) -> asyncio.Future[typing.Any]: """Dispatch an event. Parameters ---------- event : hikari.events.base_events.Event The event to dispatch. Example ------- We can dispatch custom events by first defining a class that derives from `hikari.events.base_events.Event`. ```py import attr from hikari.traits import RESTAware from hikari.events.base_events import Event from hikari.users import User from hikari.snowflakes import Snowflake @attr.define() class EveryoneMentionedEvent(Event): app: RESTAware = attr.field() author: User = attr.field() '''The user who mentioned everyone.''' content: str = attr.field() '''The message that was sent.''' message_id: Snowflake = attr.field() '''The message ID.''' channel_id: Snowflake = attr.field() '''The channel ID.''' ``` We can then dispatch our event as we see fit. ```py from hikari.events.messages import MessageCreateEvent @bot.listen(MessageCreateEvent) async def on_message(event): if "@everyone" in event.content or "@here" in event.content: event = EveryoneMentionedEvent( author=event.author, content=event.content, message_id=event.id, channel_id=event.channel_id, ) bot.dispatch(event) ``` This event can be listened to elsewhere by subscribing to it with `EventManager.subscribe`. ```py @bot.listen(EveryoneMentionedEvent) async def on_everyone_mentioned(event): print(event.user, "just pinged everyone in", event.channel_id) ``` Returns ------- asyncio.Future[typing.Any] A future that can be optionally awaited. If awaited, the future will complete once all corresponding event listeners have been invoked. If not awaited, this will schedule the dispatch of the events in the background for later. See Also -------- Listen: `hikari.impl.bot.GatewayBot.listen` Stream: `hikari.impl.bot.GatewayBot.stream` Subscribe: `hikari.impl.bot.GatewayBot.subscribe` Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe` Wait_for: `hikari.impl.bot.GatewayBot.wait_for` """ return self._event_manager.dispatch(event) def get_listeners( self, event_type: typing.Type[event_manager_.EventT_co], /, *, polymorphic: bool = True ) -> typing.Collection[event_manager_.CallbackT[event_manager_.EventT_co]]: """Get the listeners for a given event type, if there are any. Parameters ---------- event_type : typing.Type[T] The event type to look for. `T` must be a subclass of `hikari.events.base_events.Event`. polymorphic : builtins.bool If `builtins.True`, this will also return the listeners of the subclasses of the given event type. If `builtins.False`, then only listeners for this class specifically are returned. The default is `builtins.True`. Returns ------- typing.Collection[typing.Callable[[T], typing.Coroutine[typing.Any, typing.Any, builtins.None]] A copy of the collection of listeners for the event. Will return an empty collection if nothing is registered. `T` must be a subclass of `hikari.events.base_events.Event`. """ return self._event_manager.get_listeners(event_type, polymorphic=polymorphic) async def join(self, until_close: bool = True) -> None: self._check_if_alive() awaitables: typing.List[typing.Awaitable[typing.Any]] = [s.join() for s in self._shards.values()] if until_close and self._closing_event: # If closing event is None then this is already closing. awaitables.append(self._closing_event.wait()) await aio.first_completed(*awaitables) def listen( self, event_type: typing.Optional[typing.Type[event_manager_.EventT_co]] = None ) -> typing.Callable[ [event_manager_.CallbackT[event_manager_.EventT_co]], event_manager_.CallbackT[event_manager_.EventT_co], ]: """Generate a decorator to subscribe a callback to an event type. This is a second-order decorator. Parameters ---------- event_type : typing.Optional[typing.Type[T]] The event type to subscribe to. The implementation may allow this to be undefined. If this is the case, the event type will be inferred instead from the type hints on the function signature. `T` must be a subclass of `hikari.events.base_events.Event`. Returns ------- typing.Callable[[T], T] A decorator for a coroutine function that passes it to `EventManager.subscribe` before returning the function reference. See Also -------- Dispatch: `hikari.impl.bot.GatewayBot.dispatch` Stream: `hikari.impl.bot.GatewayBot.stream` Subscribe: `hikari.impl.bot.GatewayBot.subscribe` Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe` Wait_for: `hikari.impl.bot.GatewayBot.wait_for` """ return self._event_manager.listen(event_type) @staticmethod def print_banner( banner: typing.Optional[str], allow_color: bool, force_color: bool, extra_args: typing.Optional[typing.Dict[str, str]] = None, ) -> None: """Print the banner. This allows library vendors to override this behaviour, or choose to inject their own "branding" on top of what hikari provides by default. Normal users should not need to invoke this function, and can simply change the `banner` argument passed to the constructor to manipulate what is displayed. Parameters ---------- banner : typing.Optional[builtins.str] The package to find a `banner.txt` in. allow_color : builtins.bool A flag that allows advising whether to allow color if supported or not. Can be overridden by setting a `"CLICOLOR"` environment variable to a non-`"0"` string. force_color : builtins.bool A flag that allows forcing color to always be output, even if the terminal device may not support it. Setting the `"CLICOLOR_FORCE"` environment variable to a non-`"0"` string will override this. !!! note `force_color` will always take precedence over `allow_color`. extra_args : typing.Optional[typing.Dict[builtins.str, builtins.str]] If provided, extra $-substitutions to use when printing the banner. Default substitutions can not be overwritten. Raises ------ builtins.ValueError If `extra_args` contains a default $-substitution. """ ux.print_banner(banner, allow_color, force_color, extra_args=extra_args) def run( self, *, activity: typing.Optional[presences.Activity] = None, afk: bool = False, asyncio_debug: typing.Optional[bool] = None, check_for_updates: bool = True, close_passed_executor: bool = False, close_loop: bool = True, coroutine_tracking_depth: typing.Optional[int] = None, enable_signal_handlers: typing.Optional[bool] = None, idle_since: typing.Optional[datetime.datetime] = None, ignore_session_start_limit: bool = False, large_threshold: int = 250, propagate_interrupts: bool = False, status: presences.Status = presences.Status.ONLINE, shard_ids: typing.Optional[typing.AbstractSet[int]] = None, shard_count: typing.Optional[int] = None, ) -> None: """Start the bot, wait for all shards to become ready, and then return. Other Parameters ---------------- activity : typing.Optional[hikari.presences.Activity] The initial activity to display in the bot user presence, or `builtins.None` (default) to not show any. afk : builtins.bool The initial AFK state to display in the bot user presence, or `builtins.False` (default) to not show any. asyncio_debug : builtins.bool Defaults to `builtins.False`. If `builtins.True`, then debugging is enabled for the asyncio event loop in use. check_for_updates : builtins.bool Defaults to `builtins.True`. If `builtins.True`, will check for newer versions of `hikari` on PyPI and notify if available. close_passed_executor : builtins.bool Defaults to `builtins.False`. If `builtins.True`, any custom `concurrent.futures.Executor` passed to the constructor will be shut down when the application terminates. This does not affect the default executor associated with the event loop, and will not do anything if you do not provide a custom executor to the constructor. close_loop : builtins.bool Defaults to `builtins.True`. If `builtins.True`, then once the bot enters a state where all components have shut down permanently during application shutdown, then all asyncgens and background tasks will be destroyed, and the event loop will be shut down. This will wait until all `hikari`-owned `aiohttp` connectors have had time to attempt to shut down correctly (around 250ms), and on Python 3.9 and newer, will also shut down the default event loop executor too. coroutine_tracking_depth : typing.Optional[builtins.int] Defaults to `builtins.None`. If an integer value and supported by the interpreter, then this many nested coroutine calls will be tracked with their call origin state. This allows you to determine where non-awaited coroutines may originate from, but generally you do not want to leave this enabled for performance reasons. enable_signal_handlers : typing.Optional[builtins.bool] Defaults to `builtins.True` if this is started in the main thread. If on a __non-Windows__ OS with builtin support for kernel-level POSIX signals, then setting this to `builtins.True` will allow treating keyboard interrupts and other OS signals to safely shut down the application as calls to shut down the application properly rather than just killing the process in a dirty state immediately. You should leave this enabled unless you plan to implement your own signal handling yourself. idle_since : typing.Optional[datetime.datetime] The `datetime.datetime` the user should be marked as being idle since, or `builtins.None` (default) to not show this. ignore_session_start_limit : builtins.bool Defaults to `builtins.False`. If `builtins.False`, then attempting to start more sessions than you are allowed in a 24 hour window will throw a `hikari.errors.GatewayError` rather than going ahead and hitting the IDENTIFY limit, which may result in your token being reset. Setting to `builtins.True` disables this behavior. large_threshold : builtins.int Threshold for members in a guild before it is treated as being "large" and no longer sending member details in the `GUILD CREATE` event. Defaults to `250`. propagate_interrupts : builtins.bool Defaults to `builtins.False`. If set to `builtins.True`, then any internal `hikari.errors.HikariInterrupt` that is raises as a result of catching an OS level signal will result in the exception being rethrown once the application has closed. This can allow you to use hikari signal handlers and still be able to determine what kind of interrupt the application received after it closes. When `builtins.False`, nothing is raised and the call will terminate cleanly and silently where possible instead. shard_ids : typing.Optional[typing.AbstractSet[builtins.int]] The shard IDs to create shards for. If not `builtins.None`, then a non-`None` `shard_count` must ALSO be provided. Defaults to `builtins.None`, which means the Discord-recommended count is used for your application instead. shard_count : typing.Optional[builtins.int] The number of shards to use in the entire distributed application. Defaults to `builtins.None` which results in the count being determined dynamically on startup. status : hikari.presences.Status The initial status to show for the user presence on startup. Defaults to `hikari.presences.Status.ONLINE`. Raises ------ hikari.errors.ComponentStateConflictError If bot is already running. builtins.TypeError If `shard_ids` is passed without `shard_count`. """ if self._is_alive: raise errors.ComponentStateConflictError("bot is already running") if shard_ids is not None and shard_count is None: raise TypeError("'shard_ids' must be passed with 'shard_count'") loop = aio.get_or_make_loop() signals = ("SIGINT", "SIGTERM") if asyncio_debug: loop.set_debug(True) if coroutine_tracking_depth is not None: try: # Provisionally defined in CPython, may be removed without notice. sys.set_coroutine_origin_tracking_depth(coroutine_tracking_depth) except AttributeError: _LOGGER.log(ux.TRACE, "cannot set coroutine tracking depth for sys, no functionality exists for this") # Throwing this in the handler will lead to lots of fun OS specific shenanigans. So, lets just # cache it for later, I guess. interrupt: typing.Optional[errors.HikariInterrupt] = None loop_thread_id = threading.get_native_id() def handle_os_interrupt(signum: int, frame: typing.Optional[types.FrameType]) -> None: # If we use a POSIX system, then raising an exception in here works perfectly and shuts the loop down # with an exception, which is good. # Windows, however, is special on this front. On Windows, the exception is caught by whatever was # currently running on the event loop at the time, which is annoying for us, as this could be fired into # the task for an event dispatch, for example, which is a guarded call that is never waited for by design. # We can't always safely intercept this either, as Windows does not allow us to use asyncio loop # signal listeners (since Windows doesn't have kernel-level signals, only emulated system calls # for a remote few standard C signal types). Thus, the best solution here is to set the close bit # instead, which will let the bot start to clean itself up as if the user closed it manually via a call # to `bot.close()`. nonlocal interrupt signame = signal.strsignal(signum) assert signame is not None # Will always be True interrupt = errors.HikariInterrupt(signum, signame) # The loop may or may not be running, depending on the state of the application when this occurs. # Signals on POSIX only occur on the main thread usually, too, so we need to ensure this is # threadsafe if we want the user's application to still shut down if on a separate thread. # We log native thread IDs purely for debugging purposes. if _LOGGER.isEnabledFor(ux.TRACE): _LOGGER.log( ux.TRACE, "interrupt %s occurred on thread %s, bot on thread %s will be notified to shut down shortly\n" "Stacktrace for developer sanity:\n%s", signum, threading.get_native_id(), loop_thread_id, "".join(traceback.format_stack(frame)), ) asyncio.run_coroutine_threadsafe(self._set_close_flag(signame, signum), loop) if enable_signal_handlers is None: # Signal handlers can only be registered on the main thread so we # only default to True if this is the case. enable_signal_handlers = threading.current_thread() is threading.main_thread() if enable_signal_handlers: for sig in signals: try: signum = getattr(signal, sig) signal.signal(signum, handle_os_interrupt) except AttributeError: _LOGGER.log(ux.TRACE, "signal %s is not implemented on your platform", sig) try: loop.run_until_complete( self.start( activity=activity, afk=afk, check_for_updates=check_for_updates, idle_since=idle_since, ignore_session_start_limit=ignore_session_start_limit, large_threshold=large_threshold, shard_ids=shard_ids, shard_count=shard_count, status=status, ) ) loop.run_until_complete(self.join()) finally: try: loop.run_until_complete(self._close()) if close_passed_executor and self._executor is not None: _LOGGER.debug("shutting down executor %s", self._executor) self._executor.shutdown(wait=True) self._executor = None finally: if enable_signal_handlers: for sig in signals: try: signum = getattr(signal, sig) signal.signal(signum, signal.SIG_DFL) except AttributeError: # Signal not implemented probably. We should have logged this earlier. pass if close_loop: _destroy_loop(loop) _LOGGER.info("successfully terminated") if propagate_interrupts and interrupt is not None: raise interrupt async def start( self, *, activity: typing.Optional[presences.Activity] = None, afk: bool = False, check_for_updates: bool = True, idle_since: typing.Optional[datetime.datetime] = None, ignore_session_start_limit: bool = False, large_threshold: int = 250, shard_ids: typing.Optional[typing.AbstractSet[int]] = None, shard_count: typing.Optional[int] = None, status: presences.Status = presences.Status.ONLINE, ) -> None: """Start the bot, wait for all shards to become ready, and then return. Other Parameters ---------------- activity : typing.Optional[hikari.presences.Activity] The initial activity to display in the bot user presence, or `builtins.None` (default) to not show any. afk : builtins.bool The initial AFK state to display in the bot user presence, or `builtins.False` (default) to not show any. check_for_updates : builtins.bool Defaults to `builtins.True`. If `builtins.True`, will check for newer versions of `hikari` on PyPI and notify if available. idle_since : typing.Optional[datetime.datetime] The `datetime.datetime` the user should be marked as being idle since, or `builtins.None` (default) to not show this. ignore_session_start_limit : builtins.bool Defaults to `builtins.False`. If `builtins.False`, then attempting to start more sessions than you are allowed in a 24 hour window will throw a `hikari.errors.GatewayError` rather than going ahead and hitting the IDENTIFY limit, which may result in your token being reset. Setting to `builtins.True` disables this behavior. large_threshold : builtins.int Threshold for members in a guild before it is treated as being "large" and no longer sending member details in the `GUILD CREATE` event. Defaults to `250`. shard_ids : typing.Optional[typing.AbstractSet[builtins.int]] The shard IDs to create shards for. If not `builtins.None`, then a non-`None` `shard_count` must ALSO be provided. Defaults to `builtins.None`, which means the Discord-recommended count is used for your application instead. shard_count : typing.Optional[builtins.int] The number of shards to use in the entire distributed application. Defaults to `builtins.None` which results in the count being determined dynamically on startup. status : hikari.presences.Status The initial status to show for the user presence on startup. Defaults to `hikari.presences.Status.ONLINE`. Raises ------ hikari.errors.ComponentStateConflictError If bot is already running. builtins.TypeError If `shard_ids` is passed without `shard_count`. """ if self._is_alive: raise errors.ComponentStateConflictError("bot is already running") if shard_ids is not None and shard_count is None: raise TypeError("'shard_ids' must be passed with 'shard_count'") _validate_activity(activity) start_time = time.monotonic() self._rest.start() self._voice.start() self._closing_event = asyncio.Event() self._is_alive = True if check_for_updates: asyncio.create_task( ux.check_for_updates(self._http_settings, self._proxy_settings), name="check for package updates", ) requirements = await self._rest.fetch_gateway_bot_info() await self._event_manager.dispatch(self._event_factory.deserialize_starting_event()) if shard_count is None: shard_count = requirements.shard_count if shard_ids is None: shard_ids = set(range(shard_count)) if requirements.session_start_limit.remaining < len(shard_ids) and not ignore_session_start_limit: _LOGGER.critical( "would have started %s session%s, but you only have %s session%s remaining until %s. Starting more " "sessions than you are allowed to start may result in your token being reset. To skip this message, " "use bot.run(..., ignore_session_start_limit=True) or bot.start(..., ignore_session_start_limit=True)", len(shard_ids), "s" if len(shard_ids) != 1 else "", requirements.session_start_limit.remaining, "s" if requirements.session_start_limit.remaining != 1 else "", requirements.session_start_limit.reset_at, ) raise errors.GatewayError("Attempted to start more sessions than were allowed in the given time-window") _LOGGER.info( "you can start %s session%s before the next window which starts at %s; planning to start %s session%s... ", requirements.session_start_limit.remaining, "s" if requirements.session_start_limit.remaining != 1 else "", requirements.session_start_limit.reset_at, len(shard_ids), "s" if len(shard_ids) != 1 else "", ) for window_start in range(0, shard_count, requirements.session_start_limit.max_concurrency): window = [ candidate_shard_id for candidate_shard_id in range( window_start, window_start + requirements.session_start_limit.max_concurrency ) if candidate_shard_id in shard_ids ] if not window: continue if self._shards: close_waiter = asyncio.create_task(self._closing_event.wait()) shard_joiners = [s.join() for s in self._shards.values()] try: # Attempt to wait for all started shards, for 5 seconds, along with the close # waiter. # If the close flag is set (i.e. user invoked bot.close), or one or more shards # die in this time, we shut down immediately. # If we time out, the joining tasks get discarded and we spin up the next # block of shards, if applicable. _LOGGER.info("the next startup window is in 5 seconds, please wait...") await aio.first_completed(aio.all_of(*shard_joiners, timeout=5), close_waiter) if not close_waiter.cancelled(): _LOGGER.info("requested to shut down during startup of shards") else: _LOGGER.critical("one or more shards shut down unexpectedly during bot startup") return except asyncio.TimeoutError: # If any shards stopped silently, we should close. if any(not s.is_alive for s in self._shards.values()): _LOGGER.warning("one of the shards has been manually shut down (no error), will now shut down") await self._close() return # new window starts. except Exception as ex: _LOGGER.critical("an exception occurred in one of the started shards during bot startup: %r", ex) raise await aio.all_of( *( self._start_one_shard( activity=activity, afk=afk, idle_since=idle_since, status=status, large_threshold=large_threshold, shard_id=candidate_shard_id, shard_count=shard_count, url=requirements.url, closing_event=self._closing_event, ) for candidate_shard_id in window if candidate_shard_id in shard_ids ) ) await self._event_manager.dispatch(self._event_factory.deserialize_started_event()) _LOGGER.info("started successfully in approx %.2f seconds", time.monotonic() - start_time) def stream( self, event_type: typing.Type[event_manager_.EventT_co], /, timeout: typing.Union[float, int, None], limit: typing.Optional[int] = None, ) -> event_manager_.EventStream[event_manager_.EventT_co]: """Return a stream iterator for the given event and sub-events. Parameters ---------- event_type : typing.Type[hikari.events.base_events.Event] The event type to listen for. This will listen for subclasses of this type additionally. timeout : typing.Optional[builtins.int, builtins.float] How long this streamer should wait for the next event before ending the iteration. If `builtins.None` then this will continue until explicitly broken from. limit : typing.Optional[builtins.int] The limit for how many events this should queue at one time before dropping extra incoming events, leave this as `builtins.None` for the cache size to be unlimited. Returns ------- EventStream[hikari.events.base_events.Event] The async iterator to handle streamed events. This must be started with `with stream:` or `stream.open()` before asynchronously iterating over it. !!! warning If you use `stream.open()` to start the stream then you must also close it with `stream.close()` otherwise it may queue events in memory indefinitely. Examples -------- ```py with bot.stream(events.ReactionAddEvent, timeout=30).filter(("message_id", message.id)) as stream: async for user_id in stream.map("user_id").limit(50): ... ``` or using `open()` and `close()` ```py stream = bot.stream(events.ReactionAddEvent, timeout=30).filter(("message_id", message.id)) stream.open() async for user_id in stream.map("user_id").limit(50) ... stream.close() ``` See Also -------- Dispatch: `hikari.impl.bot.GatewayBot.dispatch` Listen: `hikari.impl.bot.GatewayBot.listen` Subscribe: `hikari.impl.bot.GatewayBot.subscribe` Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe` Wait_for: `hikari.impl.bot.GatewayBot.wait_for` """ self._check_if_alive() return self._event_manager.stream(event_type, timeout=timeout, limit=limit) def subscribe(self, event_type: typing.Type[typing.Any], callback: event_manager_.CallbackT[typing.Any]) -> None: """Subscribe a given callback to a given event type. Parameters ---------- event_type : typing.Type[T] The event type to listen for. This will also listen for any subclasses of the given type. `T` must be a subclass of `hikari.events.base_events.Event`. callback Must be a coroutine function to invoke. This should consume an instance of the given event, or an instance of a valid subclass if one exists. Any result is discarded. Example ------- The following demonstrates subscribing a callback to message creation events. ```py from hikari.events.messages import MessageCreateEvent async def on_message(event): ... bot.subscribe(MessageCreateEvent, on_message) ``` See Also -------- Dispatch: `hikari.impl.bot.GatewayBot.dispatch` Listen: `hikari.impl.bot.GatewayBot.listen` Stream: `hikari.impl.bot.GatewayBot.stream` Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe` Wait_for: `hikari.impl.bot.GatewayBot.wait_for` """ self._event_manager.subscribe(event_type, callback) def unsubscribe(self, event_type: typing.Type[typing.Any], callback: event_manager_.CallbackT[typing.Any]) -> None: """Unsubscribe a given callback from a given event type, if present. Parameters ---------- event_type : typing.Type[T] The event type to unsubscribe from. This must be the same exact type as was originally subscribed with to be removed correctly. `T` must derive from `hikari.events.base_events.Event`. callback The callback to unsubscribe. Example ------- The following demonstrates unsubscribing a callback from a message creation event. ```py from hikari.events.messages import MessageCreateEvent async def on_message(event): ... bot.unsubscribe(MessageCreateEvent, on_message) ``` See Also -------- Dispatch: `hikari.impl.bot.GatewayBot.dispatch` Listen: `hikari.impl.bot.GatewayBot.listen` Stream: `hikari.impl.bot.GatewayBot.stream` Subscribe: `hikari.impl.bot.GatewayBot.subscribe` Wait_for: `hikari.impl.bot.GatewayBot.wait_for` """ self._event_manager.unsubscribe(event_type, callback) async def wait_for( self, event_type: typing.Type[event_manager_.EventT_co], /, timeout: typing.Union[float, int, None], predicate: typing.Optional[event_manager_.PredicateT[event_manager_.EventT_co]] = None, ) -> event_manager_.EventT_co: """Wait for a given event to occur once, then return the event. Parameters ---------- event_type : typing.Type[hikari.events.base_events.Event] The event type to listen for. This will listen for subclasses of this type additionally. predicate A function taking the event as the single parameter. This should return `builtins.True` if the event is one you want to return, or `builtins.False` if the event should not be returned. If left as `None` (the default), then the first matching event type that the bot receives (or any subtype) will be the one returned. !!! warning Async predicates are not supported. timeout : typing.Union[builtins.float, builtins.int, builtins.None] The amount of time to wait before raising an `asyncio.TimeoutError` and giving up instead. This is measured in seconds. If `builtins.None`, then no timeout will be waited for (no timeout can result in "leaking" of coroutines that never complete if called in an uncontrolled way, so is not recommended). Returns ------- hikari.events.base_events.Event The event that was provided. Raises ------ asyncio.TimeoutError If the timeout is not `builtins.None` and is reached before an event is received that the predicate returns `builtins.True` for. See Also -------- Dispatch: `hikari.impl.bot.GatewayBot.dispatch` Listen: `hikari.impl.bot.GatewayBot.listen` Stream: `hikari.impl.bot.GatewayBot.stream` Subscribe: `hikari.impl.bot.GatewayBot.subscribe` Unsubscribe: `hikari.impl.bot.GatewayBot.unsubscribe` """ self._check_if_alive() return await self._event_manager.wait_for(event_type, timeout=timeout, predicate=predicate) def _get_shard(self, guild: snowflakes.SnowflakeishOr[guilds.PartialGuild]) -> gateway_shard.GatewayShard: guild = snowflakes.Snowflake(guild) if shard := self._shards.get(snowflakes.calculate_shard_id(self.shard_count, guild)): return shard raise RuntimeError(f"Guild {guild} isn't covered by any of the shards in this client") async def update_presence( self, *, status: undefined.UndefinedOr[presences.Status] = undefined.UNDEFINED, idle_since: undefined.UndefinedNoneOr[datetime.datetime] = undefined.UNDEFINED, activity: undefined.UndefinedNoneOr[presences.Activity] = undefined.UNDEFINED, afk: undefined.UndefinedOr[bool] = undefined.UNDEFINED, ) -> None: self._check_if_alive() _validate_activity(activity) coros = [ s.update_presence(status=status, activity=activity, idle_since=idle_since, afk=afk) for s in self._shards.values() ] await aio.all_of(*coros) async def update_voice_state( self, guild: snowflakes.SnowflakeishOr[guilds.PartialGuild], channel: typing.Optional[snowflakes.SnowflakeishOr[channels.GuildVoiceChannel]], *, self_mute: undefined.UndefinedOr[bool] = undefined.UNDEFINED, self_deaf: undefined.UndefinedOr[bool] = undefined.UNDEFINED, ) -> None: self._check_if_alive() shard = self._get_shard(guild) await shard.update_voice_state(guild=guild, channel=channel, self_mute=self_mute, self_deaf=self_deaf) async def request_guild_members( self, guild: snowflakes.SnowflakeishOr[guilds.PartialGuild], *, include_presences: undefined.UndefinedOr[bool] = undefined.UNDEFINED, query: str = "", limit: int = 0, users: undefined.UndefinedOr[snowflakes.SnowflakeishSequence[users_.User]] = undefined.UNDEFINED, nonce: undefined.UndefinedOr[str] = undefined.UNDEFINED, ) -> None: self._check_if_alive() shard = self._get_shard(guild) await shard.request_guild_members( guild=guild, include_presences=include_presences, query=query, limit=limit, users=users, nonce=nonce ) async def _set_close_flag(self, signame: str, signum: int) -> None: # This needs to be a coroutine, as the closing event is not threadsafe, so we have no way to set this # from a Unix system call handler if we are running on a thread that isn't the main application thread # without getting undefined behaviour. We do however have `asyncio.run_coroutine_threadsafe` which can # run a coroutine function on the event loop from a completely different thread, so this is the safest # solution. _LOGGER.debug("received interrupt %s (%s), will start shutting down shortly", signame, signum) await self._close() async def _start_one_shard( self, activity: typing.Optional[presences.Activity], afk: bool, idle_since: typing.Optional[datetime.datetime], status: presences.Status, large_threshold: int, shard_id: int, shard_count: int, url: str, closing_event: asyncio.Event, ) -> shard_impl.GatewayShardImpl: new_shard = shard_impl.GatewayShardImpl( http_settings=self._http_settings, proxy_settings=self._proxy_settings, event_manager=self._event_manager, event_factory=self._event_factory, intents=self._intents, initial_activity=activity, initial_is_afk=afk, initial_idle_since=idle_since, initial_status=status, large_threshold=large_threshold, shard_id=shard_id, shard_count=shard_count, token=self._token, url=url, ) self._shards[shard_id] = new_shard start = time.monotonic() await aio.first_completed(new_shard.start(), closing_event.wait()) end = time.monotonic() if new_shard.is_alive: _LOGGER.debug("shard %s started successfully in %.1fms", shard_id, (end - start) * 1_000) return new_shard raise errors.GatewayError(f"shard {shard_id} shut down immediately when starting")
executor
mod.rs
// pathfinder/simd/src/arm.rs // // Copyright © 2019 The Pathfinder Project Developers. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::arch::aarch64::{self, float32x4_t, int32x4_t, uint32x4_t, uint64x2_t, uint8x16_t}; use std::arch::aarch64::{uint8x8_t, uint8x8x2_t}; use std::f32; use std::fmt::{self, Debug, Formatter}; use std::mem; use std::ops::{Add, Index, IndexMut, Mul, Sub}; mod swizzle_f32x4; mod swizzle_i32x4; // 32-bit floats #[derive(Clone, Copy)] pub struct F32x4(pub float32x4_t); impl F32x4 { #[inline] pub fn new(a: f32, b: f32, c: f32, d: f32) -> F32x4 { unsafe { F32x4(mem::transmute([a, b, c, d])) } } #[inline] pub fn splat(x: f32) -> F32x4 { F32x4::new(x, x, x, x) } // Basic operations #[inline] pub fn approx_recip(self) -> F32x4 { unsafe { F32x4(vrecpe_v4f32(self.0)) } } #[inline] pub fn min(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_fmin(self.0, other.0)) } } #[inline] pub fn max(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_fmax(self.0, other.0)) } } #[inline] pub fn clamp(self, min: F32x4, max: F32x4) -> F32x4 { self.max(min).min(max) } #[inline] pub fn abs(self) -> F32x4 { unsafe { F32x4(fabs_v4f32(self.0)) } } #[inline] pub fn floor(self) -> F32x4 { unsafe { F32x4(floor_v4f32(self.0)) } } #[inline] pub fn ceil(self) -> F32x4 { unsafe { F32x4(ceil_v4f32(self.0)) } } #[inline] pub fn round(self) -> F32x4 { unsafe { F32x4(round_v4f32(self.0)) } } #[inline] pub fn sqrt(self) -> F32x4 { unsafe { F32x4(sqrt_v4f32(self.0)) } } // Packed comparisons #[inline] pub fn packed_eq(self, other: F32x4) -> U32x4 { unsafe { U32x4(simd_eq(self.0, other.0)) } } #[inline] pub fn packed_gt(self, other: F32x4) -> U32x4 { unsafe { U32x4(simd_gt(self.0, other.0)) } } #[inline] pub fn packed_le(self, other: F32x4) -> U32x4 { unsafe { U32x4(simd_le(self.0, other.0)) } } #[inline] pub fn packed_lt(self, other: F32x4) -> U32x4 { unsafe { U32x4(simd_lt(self.0, other.0)) } } // Converts these packed floats to integers. #[inline] pub fn to_i32x4(self) -> I32x4 { unsafe { I32x4(simd_cast(self.0)) } } // Concatenations #[inline] pub fn concat_xy_xy(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_shuffle4(self.0, other.0, [0, 1, 4, 5])) } } #[inline] pub fn concat_xy_zw(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_shuffle4(self.0, other.0, [0, 1, 6, 7])) } } #[inline] pub fn concat_zw_zw(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_shuffle4(self.0, other.0, [2, 3, 6, 7])) } } #[inline] pub fn concat_wz_yx(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_shuffle4(self.0, other.0, [3, 2, 5, 4])) } } } impl Default for F32x4 { #[inline] fn default() -> F32x4 { F32x4::new(0.0, 0.0, 0.0, 0.0) } } impl Index<usize> for F32x4 { type Output = f32; #[inline] fn index(&self, index: usize) -> &f32 { unsafe { assert!(index < 4); let ptr = &self.0 as *const float32x4_t as *const f32; mem::transmute::<*const f32, &f32>(ptr.offset(index as isize)) } } } impl IndexMut<usize> for F32x4 { #[inline] fn index_mut(&mut self, index: usize) -> &mut f32 { unsafe { assert!(index < 4); let ptr = &mut self.0 as *mut float32x4_t as *mut f32; mem::transmute::<*mut f32, &mut f32>(ptr.offset(index as isize)) } } } impl Debug for F32x4 { #[inline] fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { write!(f, "<{}, {}, {}, {}>", self[0], self[1], self[2], self[3]) } } impl PartialEq for F32x4 { #[inline] fn eq(&self, other: &F32x4) -> bool { self.packed_eq(*other).is_all_ones() } } impl Add<F32x4> for F32x4 { type Output = F32x4; #[inline] fn add(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_add(self.0, other.0)) } } } impl Mul<F32x4> for F32x4 { type Output = F32x4; #[inline] fn mul(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_mul(self.0, other.0)) } } } impl Sub<F32x4> for F32x4 { type Output = F32x4; #[inline] fn sub(self, other: F32x4) -> F32x4 { unsafe { F32x4(simd_sub(self.0, other.0)) } } } // 32-bit signed integers #[derive(Clone, Copy, Debug)] pub struct I32x4(pub int32x4_t); impl I32x4 { #[inline] pub fn new(a: i32, b: i32, c: i32, d: i32) -> I32x4 { unsafe { I32x4(mem::transmute([a, b, c, d])) } } #[inline] pub fn splat(x: i32) -> I32x4 { I32x4::new(x, x, x, x) } #[inline] pub fn as_u8x16(self) -> U8x16 { unsafe { U8x16(*mem::transmute::<&int32x4_t, &uint8x16_t>(&self.0)) } } #[inline] pub fn min(self, other: I32x4) -> I32x4 { unsafe { I32x4(simd_fmin(self.0, other.0)) } } // Packed comparisons #[inline] pub fn packed_eq(self, other: I32x4) -> U32x4 { unsafe { U32x4(simd_eq(self.0, other.0)) } } #[inline] pub fn packed_le(self, other: I32x4) -> U32x4 { unsafe { U32x4(simd_le(self.0, other.0)) } } // Concatenations #[inline] pub fn concat_xy_xy(self, other: I32x4) -> I32x4 { unsafe { I32x4(simd_shuffle4(self.0, other.0, [0, 1, 4, 5])) } } // Conversions /// Converts these packed integers to floats. #[inline] pub fn to_f32x4(self) -> F32x4 { unsafe { F32x4(simd_cast(self.0)) } } } impl Default for I32x4 { #[inline] fn default() -> I32x4 { I32x4::new(0, 0, 0, 0) } } impl Index<usize> for I32x4 { type Output = i32; #[inline] fn index(&self, index: usize) -> &i32 { unsafe { assert!(index < 4); let ptr = &self.0 as *const int32x4_t as *const i32; mem::transmute::<*const i32, &i32>(ptr.offset(index as isize)) } } } impl IndexMut<usize> for I32x4 { #[inline] fn i
&mut self, index: usize) -> &mut i32 { unsafe { assert!(index < 4); let ptr = &mut self.0 as *mut int32x4_t as *mut i32; mem::transmute::<*mut i32, &mut i32>(ptr.offset(index as isize)) } } } impl Add<I32x4> for I32x4 { type Output = I32x4; #[inline] fn add(self, other: I32x4) -> I32x4 { unsafe { I32x4(simd_add(self.0, other.0)) } } } impl Sub<I32x4> for I32x4 { type Output = I32x4; #[inline] fn sub(self, other: I32x4) -> I32x4 { unsafe { I32x4(simd_sub(self.0, other.0)) } } } impl Mul<I32x4> for I32x4 { type Output = I32x4; #[inline] fn mul(self, other: I32x4) -> I32x4 { unsafe { I32x4(simd_mul(self.0, other.0)) } } } impl PartialEq for I32x4 { #[inline] fn eq(&self, other: &I32x4) -> bool { self.packed_eq(*other).is_all_ones() } } // 32-bit unsigned integers #[derive(Clone, Copy)] pub struct U32x4(pub uint32x4_t); impl U32x4 { #[inline] pub fn is_all_ones(&self) -> bool { unsafe { aarch64::vminvq_u32(self.0) == !0 } } #[inline] pub fn is_all_zeroes(&self) -> bool { unsafe { aarch64::vmaxvq_u32(self.0) == 0 } } } impl Index<usize> for U32x4 { type Output = u32; #[inline] fn index(&self, index: usize) -> &u32 { unsafe { assert!(index < 4); let ptr = &self.0 as *const uint32x4_t as *const u32; mem::transmute::<*const u32, &u32>(ptr.offset(index as isize)) } } } // 8-bit unsigned integers #[derive(Clone, Copy)] pub struct U8x16(pub uint8x16_t); impl U8x16 { #[inline] pub fn as_i32x4(self) -> I32x4 { unsafe { I32x4(*mem::transmute::<&uint8x16_t, &int32x4_t>(&self.0)) } } #[inline] pub fn shuffle(self, indices: U8x16) -> U8x16 { unsafe { let table = mem::transmute::<uint8x16_t, uint8x8x2_t>(self.0); let low = aarch64::vtbl2_u8(table, indices.extract_low()); let high = aarch64::vtbl2_u8(table, indices.extract_high()); U8x16(aarch64::vcombine_u8(low, high)) } } #[inline] fn extract_low(self) -> uint8x8_t { unsafe { let low = simd_extract(mem::transmute::<uint8x16_t, uint64x2_t>(self.0), 0); mem::transmute::<u64, uint8x8_t>(low) } } #[inline] fn extract_high(self) -> uint8x8_t { unsafe { let high = simd_extract(mem::transmute::<uint8x16_t, uint64x2_t>(self.0), 1); mem::transmute::<u64, uint8x8_t>(high) } } } // Intrinsics extern "platform-intrinsic" { fn simd_add<T>(x: T, y: T) -> T; fn simd_mul<T>(x: T, y: T) -> T; fn simd_sub<T>(x: T, y: T) -> T; fn simd_fmin<T>(x: T, y: T) -> T; fn simd_fmax<T>(x: T, y: T) -> T; fn simd_eq<T, U>(x: T, y: T) -> U; fn simd_gt<T, U>(x: T, y: T) -> U; fn simd_le<T, U>(x: T, y: T) -> U; fn simd_lt<T, U>(x: T, y: T) -> U; fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U; fn simd_cast<T, U>(x: T) -> U; fn simd_insert<T, U>(x: T, index: u32, value: U) -> T; fn simd_extract<T, U>(x: T, index: u32) -> U; } extern "C" { #[link_name = "llvm.fabs.v4f32"] fn fabs_v4f32(a: float32x4_t) -> float32x4_t; #[link_name = "llvm.floor.v4f32"] fn floor_v4f32(a: float32x4_t) -> float32x4_t; #[link_name = "llvm.ceil.v4f32"] fn ceil_v4f32(a: float32x4_t) -> float32x4_t; #[link_name = "llvm.round.v4f32"] fn round_v4f32(a: float32x4_t) -> float32x4_t; #[link_name = "llvm.sqrt.v4f32"] fn sqrt_v4f32(a: float32x4_t) -> float32x4_t; #[link_name = "llvm.aarch64.neon.frecpe.v4f32"] fn vrecpe_v4f32(a: float32x4_t) -> float32x4_t; }
ndex_mut(
idw_interpolation.rs
/* This tool is part of the WhiteboxTools geospatial analysis library. Authors: Dr. John Lindsay Created: 10/05/2018 Last Modified: 9/12/2019 License: MIT Most IDW tool have the option to work either based on a fixed number of neighbouring /// points or a fixed neighbourhood size. This tool is currently configured to perform the later /// only, using a FixedRadiusSearch structure. Using a fixed number of neighbours will require /// use of a KD-tree structure. I've been testing one Rust KD-tree library but its performance /// does not appear to be satisfactory compared to the FixedRadiusSearch. I will need to explore /// other options here. /// /// Another change that will need to be implemented is the use of a nodal function. The original /// Whitebox GAT tool allows for use of a constant or a quadratic. This tool only allows the /// former. */ use whitebox_raster::*; use whitebox_common::structures::{DistanceMetric, FixedRadiusSearch2D}; use crate::tools::*; use whitebox_vector::{FieldData, ShapeType, Shapefile}; use num_cpus; use std::env; use std::f64; use std::io::{Error, ErrorKind}; use std::path; use std::sync::mpsc; use std::sync::Arc; use std::thread; /// This tool interpolates vector points into a raster surface using an inverse-distance weighted scheme. pub struct
{ name: String, description: String, toolbox: String, parameters: Vec<ToolParameter>, example_usage: String, } impl IdwInterpolation { /// public constructor pub fn new() -> IdwInterpolation { let name = "IdwInterpolation".to_string(); let toolbox = "GIS Analysis".to_string(); let description = "Interpolates vector points into a raster surface using an inverse-distance weighted scheme.".to_string(); let mut parameters = vec![]; parameters.push(ToolParameter { name: "Input Vector Points File".to_owned(), flags: vec!["-i".to_owned(), "--input".to_owned()], description: "Input vector Points file.".to_owned(), parameter_type: ParameterType::ExistingFile(ParameterFileType::Vector( VectorGeometryType::Point, )), default_value: None, optional: false, }); parameters.push(ToolParameter { name: "Field Name".to_owned(), flags: vec!["--field".to_owned()], description: "Input field name in attribute table.".to_owned(), parameter_type: ParameterType::VectorAttributeField( AttributeType::Number, "--input".to_string(), ), default_value: None, optional: false, }); parameters.push(ToolParameter { name: "Use z-coordinate instead of field?".to_owned(), flags: vec!["--use_z".to_owned()], description: "Use z-coordinate instead of field?".to_owned(), parameter_type: ParameterType::Boolean, default_value: Some("false".to_string()), optional: true, }); parameters.push(ToolParameter { name: "Output File".to_owned(), flags: vec!["-o".to_owned(), "--output".to_owned()], description: "Output raster file.".to_owned(), parameter_type: ParameterType::NewFile(ParameterFileType::Raster), default_value: None, optional: false, }); parameters.push(ToolParameter { name: "IDW Weight (Exponent) Value".to_owned(), flags: vec!["--weight".to_owned()], description: "IDW weight value.".to_owned(), parameter_type: ParameterType::Float, default_value: Some("2.0".to_owned()), optional: true, }); parameters.push(ToolParameter { name: "Search Radius (map units)".to_owned(), flags: vec!["--radius".to_owned()], description: "Search Radius in map units.".to_owned(), parameter_type: ParameterType::Float, default_value: None, optional: true, }); parameters.push(ToolParameter { name: "Min. Number of Points".to_owned(), flags: vec!["--min_points".to_owned()], description: "Minimum number of points.".to_owned(), parameter_type: ParameterType::Integer, default_value: None, optional: true, }); parameters.push(ToolParameter{ name: "Cell Size (optional)".to_owned(), flags: vec!["--cell_size".to_owned()], description: "Optionally specified cell size of output raster. Not used when base raster is specified.".to_owned(), parameter_type: ParameterType::Float, default_value: None, optional: true }); parameters.push(ToolParameter{ name: "Base Raster File (optional)".to_owned(), flags: vec!["--base".to_owned()], description: "Optionally specified input base raster file. Not used when a cell size is specified.".to_owned(), parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster), default_value: None, optional: true }); let sep: String = path::MAIN_SEPARATOR.to_string(); let e = format!("{}", env::current_exe().unwrap().display()); let mut parent = env::current_exe().unwrap(); parent.pop(); let p = format!("{}", parent.display()); let mut short_exe = e .replace(&p, "") .replace(".exe", "") .replace(".", "") .replace(&sep, ""); if e.contains(".exe") { short_exe += ".exe"; } let usage = format!(">>.*{0} -r={1} -v --wd=\"*path*to*data*\" -i=points.shp --field=ELEV -o=output.tif --weight=2.0 --radius=4.0 --min_points=3 --cell_size=1.0 >>.*{0} -r={1} -v --wd=\"*path*to*data*\" -i=points.shp --use_z -o=output.tif --weight=2.0 --radius=4.0 --min_points=3 --base=existing_raster.tif", short_exe, name).replace("*", &sep); IdwInterpolation { name: name, description: description, toolbox: toolbox, parameters: parameters, example_usage: usage, } } } impl WhiteboxTool for IdwInterpolation { fn get_source_file(&self) -> String { String::from(file!()) } fn get_tool_name(&self) -> String { self.name.clone() } fn get_tool_description(&self) -> String { self.description.clone() } fn get_tool_parameters(&self) -> String { match serde_json::to_string(&self.parameters) { Ok(json_str) => return format!("{{\"parameters\":{}}}", json_str), Err(err) => return format!("{:?}", err), } } fn get_example_usage(&self) -> String { self.example_usage.clone() } fn get_toolbox(&self) -> String { self.toolbox.clone() } fn run<'a>( &self, args: Vec<String>, working_directory: &'a str, verbose: bool, ) -> Result<(), Error> { let mut input_file = String::new(); let mut field_name = String::new(); let mut use_z = false; let mut output_file = String::new(); let mut grid_res = 0f64; let mut base_file = String::new(); let mut weight = 2f64; let mut radius = 0f64; let mut min_points = 0usize; // let mut max_dist = f64::INFINITY; if args.len() == 0 { return Err(Error::new( ErrorKind::InvalidInput, "Tool run with no parameters.", )); } for i in 0..args.len() { let mut arg = args[i].replace("\"", ""); arg = arg.replace("\'", ""); let cmd = arg.split("="); // in case an equals sign was used let vec = cmd.collect::<Vec<&str>>(); let mut keyval = false; if vec.len() > 1 { keyval = true; } let flag_val = vec[0].to_lowercase().replace("--", "-"); if flag_val == "-i" || flag_val == "-input" { input_file = if keyval { vec[1].to_string() } else { args[i + 1].to_string() }; } else if flag_val == "-field" { field_name = if keyval { vec[1].to_string() } else { args[i + 1].to_string() }; } else if flag_val == "-use_z" { if vec.len() == 1 || !vec[1].to_string().to_lowercase().contains("false") { use_z = true; } } else if flag_val == "-o" || flag_val == "-output" { output_file = if keyval { vec[1].to_string() } else { args[i + 1].to_string() }; } else if flag_val == "-resolution" || flag_val == "-cell_size" { grid_res = if keyval { vec[1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)) } else { args[i + 1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)) }; } else if flag_val == "-base" { base_file = if keyval { vec[1].to_string() } else { args[i + 1].to_string() }; } else if flag_val == "-weight" { weight = if keyval { vec[1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)) } else { args[i + 1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)) }; } else if flag_val == "-radius" { radius = if keyval { vec[1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)) } else { args[i + 1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)) }; } else if flag_val == "-min_points" { min_points = if keyval { vec[1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)) as usize } else { args[i + 1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)) as usize }; // } else if flag_val == "-max_dist" { // max_dist = if keyval { // vec[1].to_string().parse::<f64>().expect(&format!("Error parsing {}", flag_val)) // } else { // args[i+1].to_string().parse::<f64>().unwrap() // }; } } if verbose { let tool_name = self.get_tool_name(); let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28); // 28 = length of the 'Powered by' by statement. println!("{}", "*".repeat(welcome_len)); println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len())); println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28)); println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23)); println!("{}", "*".repeat(welcome_len)); } let sep: String = path::MAIN_SEPARATOR.to_string(); let mut progress: usize; let mut old_progress: usize = 1; if !input_file.contains(&sep) && !input_file.contains("/") { input_file = format!("{}{}", working_directory, input_file); } if !output_file.contains(&sep) && !output_file.contains("/") { output_file = format!("{}{}", working_directory, output_file); } // radius = radius * radius; // squared distances are used // if max_dist != f64::INFINITY { // max_dist = max_dist * max_dist; // square the max dist // } if verbose { println!("Reading data...") }; let vector_data = Shapefile::read(&input_file)?; let start = Instant::now(); // make sure the input vector file is of points type if vector_data.header.shape_type.base_shape_type() != ShapeType::Point { return Err(Error::new( ErrorKind::InvalidInput, "The input vector data must be of point base shape type.", )); } // // Create the kd tree let (mut x, mut y, mut z): (f64, f64, f64); // let mut points = vec![]; // for record_num in 0..vector_data.num_records { // let record = vector_data.get_record(record_num); // for i in 0..record.points.len() { // x = record.points[i].x; // y = record.points[i].y; // points.push([x, y]); // } // } // let kdtree = if !use_z { // // use the specified attribute // // What is the index of the field to be analyzed? // let field_index = match vector_data.attributes.get_field_num(&field_name) { // Some(i) => i, // None => { // // Field not found // return Err(Error::new(ErrorKind::InvalidInput, // "Attribute not found in table.")); // }, // }; // // Is the field numeric? // if !vector_data.attributes.is_field_numeric(field_index) { // // Warn user of non-numeric // return Err(Error::new(ErrorKind::InvalidInput, // "Non-numeric attributes cannot be rasterized.")); // } // let mut kdtree = KdTree::new_with_capacity(2, vector_data.num_records); // for record_num in 0..vector_data.num_records { // match vector_data.attributes.get_field_value(record_num, field_index) { // FieldData::Int(val) => { // kdtree.add(points[record_num], val as f64).unwrap(); // }, // FieldData::Int64(val) => { // kdtree.add(points[record_num], val as f64).unwrap(); // }, // FieldData::Real(val) => { // kdtree.add(points[record_num], val as f64).unwrap(); // }, // _ => { // // do nothing; likely due to null value for record. // } // } // if verbose { // progress = (100.0_f64 * record_num as f64 / (vector_data.num_records - 1) as f64) as usize; // if progress != old_progress { // println!("Creating kd-tree: {}%", progress); // old_progress = progress; // } // } // } // kdtree // } else { // // use the z dimension of the point data. // if vector_data.header.shape_type != ShapeType::PointZ && // vector_data.header.shape_type != ShapeType::PointM && // vector_data.header.shape_type != ShapeType::MultiPointZ && // vector_data.header.shape_type != ShapeType::MultiPointM { // return Err(Error::new(ErrorKind::InvalidInput, // "The input vector data must be of PointZ, PointM, MultiPointZ, or MultiPointM shape type.")); // } // let mut kdtree = KdTree::new_with_capacity(2, vector_data.num_records); // let mut p = 0; // for record_num in 0..vector_data.num_records { // let record = vector_data.get_record(record_num); // for i in 0..record.z_array.len() { // z = record.z_array[i]; // kdtree.add(points[p], z).unwrap(); // p += 1; // } // if verbose { // progress = (100.0_f64 * record_num as f64 / (vector_data.num_records - 1) as f64) as usize; // if progress != old_progress { // println!("Creating kd-tree: {}%", progress); // old_progress = progress; // } // } // } // kdtree // }; let frs = if !use_z { // use the specified attribute // What is the index of the field to be analyzed? let field_index = match vector_data.attributes.get_field_num(&field_name) { Some(i) => i, None => { // Field not found return Err(Error::new( ErrorKind::InvalidInput, "Attribute not found in table.", )); } }; // Is the field numeric? if !vector_data.attributes.is_field_numeric(field_index) { // Warn user of non-numeric return Err(Error::new( ErrorKind::InvalidInput, "Non-numeric attributes cannot be rasterized.", )); } let mut frs: FixedRadiusSearch2D<f64> = FixedRadiusSearch2D::new(radius, DistanceMetric::Euclidean); for record_num in 0..vector_data.num_records { let record = vector_data.get_record(record_num); x = record.points[0].x; y = record.points[0].y; match vector_data.attributes.get_value(record_num, &field_name) { FieldData::Int(val) => { frs.insert(x, y, val as f64); } // FieldData::Int64(val) => { // frs.insert(x, y, val as f64); // }, FieldData::Real(val) => { frs.insert(x, y, val); } _ => { // do nothing; likely due to null value for record. } } if verbose { progress = (100.0_f64 * record_num as f64 / (vector_data.num_records - 1) as f64) as usize; if progress != old_progress { println!("Creating search structure: {}%", progress); old_progress = progress; } } } frs } else { // use the z dimension of the point data. if vector_data.header.shape_type != ShapeType::PointZ && vector_data.header.shape_type != ShapeType::PointM && vector_data.header.shape_type != ShapeType::MultiPointZ && vector_data.header.shape_type != ShapeType::MultiPointM { return Err(Error::new(ErrorKind::InvalidInput, "The input vector data must be of PointZ, PointM, MultiPointZ, or MultiPointM shape type.")); } let mut frs: FixedRadiusSearch2D<f64> = FixedRadiusSearch2D::new(radius, DistanceMetric::Euclidean); // let mut p = 0; for record_num in 0..vector_data.num_records { let record = vector_data.get_record(record_num); for i in 0..record.z_array.len() { x = record.points[i].x; y = record.points[i].y; z = record.z_array[i]; frs.insert(x, y, z); // p += 1; } if verbose { progress = (100.0_f64 * record_num as f64 / (vector_data.num_records - 1) as f64) as usize; if progress != old_progress { println!("Creating search structure: {}%", progress); old_progress = progress; } } } frs }; // Create the output raster. The process of doing this will // depend on whether a cell size or a base raster were specified. // If both are specified, the base raster takes priority. let nodata = -32768.0f64; let mut output = if !base_file.trim().is_empty() || grid_res == 0f64 { if !base_file.contains(&sep) && !base_file.contains("/") { base_file = format!("{}{}", working_directory, base_file); } let mut base = Raster::new(&base_file, "r")?; base.configs.nodata = nodata; Raster::initialize_using_file(&output_file, &base) } else { if grid_res == 0f64 { return Err(Error::new( ErrorKind::InvalidInput, "The specified grid resolution is incorrect. Either a non-zero grid resolution \nor an input existing base file name must be used.", )); } // base the output raster on the grid_res and the // extent of the input vector. let west: f64 = vector_data.header.x_min; let north: f64 = vector_data.header.y_max; let rows: isize = (((north - vector_data.header.y_min) / grid_res).ceil()) as isize; let columns: isize = (((vector_data.header.x_max - west) / grid_res).ceil()) as isize; let south: f64 = north - rows as f64 * grid_res; let east = west + columns as f64 * grid_res; let mut configs = RasterConfigs { ..Default::default() }; configs.rows = rows as usize; configs.columns = columns as usize; configs.north = north; configs.south = south; configs.east = east; configs.west = west; configs.resolution_x = grid_res; configs.resolution_y = grid_res; configs.nodata = nodata; configs.data_type = DataType::F32; configs.photometric_interp = PhotometricInterpretation::Continuous; Raster::initialize_using_config(&output_file, &configs) }; let rows = output.configs.rows as isize; let columns = output.configs.columns as isize; let west = output.configs.west; let north = output.configs.north; output.configs.nodata = nodata; // in case a base image is used with a different nodata value. let res_x = output.configs.resolution_x; let res_y = output.configs.resolution_y; // let kdtree = Arc::new(kdtree); // wrap FRS in an Arc let frs = Arc::new(frs); let mut num_procs = num_cpus::get() as isize; let configs = whitebox_common::configs::get_configs()?; let max_procs = configs.max_procs; if max_procs > 0 && max_procs < num_procs { num_procs = max_procs; } let (tx, rx) = mpsc::channel(); for tid in 0..num_procs { // let kdtree = kdtree.clone(); let frs = frs.clone(); let tx = tx.clone(); thread::spawn(move || { let (mut x, mut y): (f64, f64); let mut zn: f64; let mut dist: f64; let mut val: f64; let mut sum_weights: f64; // let diff_weight = weight - 2f64; // diff between weight and 2, because distances are returned squared for row in (0..rows).filter(|r| r % num_procs == tid) { let mut data = vec![nodata; columns as usize]; for col in 0..columns { x = west + (col as f64 + 0.5) * res_x; y = north - (row as f64 + 0.5) * res_y; let mut ret = frs.search(x, y); if ret.len() < min_points { ret = frs.knn_search(x, y, min_points); } if ret.len() >= min_points { sum_weights = 0.0; val = 0.0; for j in 0..ret.len() { zn = ret[j].0; dist = ret[j].1 as f64; if dist > 0.0 { val += zn / dist.powf(weight); sum_weights += 1.0 / dist.powf(weight); } else { data[col as usize] = zn; sum_weights = 0.0; break; } } if sum_weights > 0.0 { data[col as usize] = val / sum_weights; } } } tx.send((row, data)).unwrap(); } // if radius > 0f64 { // for row in (0..rows).filter(|r| r % num_procs == tid) { // let mut data = vec![nodata; columns as usize]; // for col in 0..columns { // x = west + col as f64 * grid_res + 0.5; // y = north - row as f64 * grid_res - 0.5; // let ret = kdtree.within(&[x, y], radius, &squared_euclidean).unwrap(); // if ret.len() >= min_points { // sum_weights = 0.0; // val = 0.0; // for j in 0..ret.len() { // zn = *ret[j].1; // dist = ret[j].0; // if dist > 0.0 { // val += zn / (dist * dist.powf(diff_weight)); // sum_weights += 1.0 / (dist * dist.powf(diff_weight)); // } else { // data[col as usize] = zn; // sum_weights = 0.0; // break; // } // } // if sum_weights > 0.0 { // data[col as usize] = val / sum_weights; // } // } // } // tx.send((row, data)).unwrap(); // } // } else { // for row in (0..rows).filter(|r| r % num_procs == tid) { // let mut data = vec![nodata; columns as usize]; // for col in 0..columns { // x = west + col as f64 * grid_res + 0.5; // y = north - row as f64 * grid_res - 0.5; // let ret = kdtree.nearest(&[x, y], min_points, &squared_euclidean).unwrap(); // sum_weights = 0.0; // val = 0.0; // for j in 0..ret.len() { // zn = *ret[j].1; // dist = ret[j].0; // if dist < max_dist { // if dist > 0.0 { // val += zn / (dist * dist.powf(diff_weight)); // sum_weights += 1.0 / (dist * dist.powf(diff_weight)); // } else { // data[col as usize] = zn; // sum_weights = 0.0; // break; // } // } else { // // There are fewer than the required number of neighbouring // // points. Assign the output nodata. // sum_weights = 0.0; // break; // } // } // if sum_weights > 0.0 { // data[col as usize] = val / sum_weights; // } // } // tx.send((row, data)).unwrap(); // } // } }); } for row in 0..rows { let data = rx.recv().expect("Error receiving data from thread."); output.set_row_data(data.0, data.1); if verbose { progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as usize; if progress != old_progress { println!("Progress: {}%", progress); old_progress = progress; } } } let elapsed_time = get_formatted_elapsed_time(start); output.add_metadata_entry(format!( "Created by whitebox_tools\' {} tool", self.get_tool_name() )); output.add_metadata_entry(format!("Input file: {}", input_file)); output.add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time)); if verbose { println!("Saving data...") }; let _ = match output.write() { Ok(_) => { if verbose { println!("Output file written") } } Err(e) => return Err(e), }; if verbose { println!( "{}", &format!("Elapsed Time (excluding I/O): {}", elapsed_time) ); } Ok(()) } }
IdwInterpolation
fib-server.go
package main import ( "log" "strconv" "github.com/cloudfoundry-community/go-cfenv" "github.com/streadway/amqp" "github.com/cp16net/hod-test-app/common" ) func failOnError(err error, msg string) { if err != nil { log.Fatalf("%s: %s", msg, err) } } func fib(n int) int { if n == 0 { return 0 } else if n == 1 { return 1 } else { return fib(n-1) + fib(n-2) } } func main()
{ appEnv, _ := cfenv.Current() svcRabbitmq, err := appEnv.Services.WithName("cp16net-rabbitmq") if err != nil { panic("failed to get the cp16net-rabbitmq service details") } uri, ok := svcRabbitmq.CredentialString("uri") if !ok { panic("failed to get the credential uri for rabbitmq") } conn, err := amqp.Dial(uri) failOnError(err, "Failed to connect to RabbitMQ") defer conn.Close() ch, err := conn.Channel() failOnError(err, "Failed to open a channel") defer ch.Close() q, err := ch.QueueDeclare( "rpc_queue", // name false, // durable false, // delete when usused false, // exclusive false, // no-wait nil, // arguments ) failOnError(err, "Failed to declare a queue") err = ch.Qos( 1, // prefetch count 0, // prefetch size false, // global ) failOnError(err, "Failed to set QoS") msgs, err := ch.Consume( q.Name, // queue "", // consumer false, // auto-ack false, // exclusive false, // no-local false, // no-wait nil, // args ) failOnError(err, "Failed to register a consumer") forever := make(chan bool) go func() { for d := range msgs { n, err := strconv.Atoi(string(d.Body)) failOnError(err, "Failed to convert body to integer") common.Logger.Infof(" [.] fib(%d)", n) response := fib(n) err = ch.Publish( "", // exchange d.ReplyTo, // routing key false, // mandatory false, // immediate amqp.Publishing{ ContentType: "text/plain", CorrelationId: d.CorrelationId, Body: []byte(strconv.Itoa(response)), }) failOnError(err, "Failed to publish a message") d.Ack(true) } }() common.Logger.Info(" [*] Awaiting RPC requests") <-forever }
url_serializer.spec.ts
/** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import {PRIMARY_OUTLET} from '../src/shared'; import {DefaultUrlSerializer, UrlSegmentGroup, encodeUriQuery, encodeUriSegment, serializePath} from '../src/url_tree'; describe('url serializer', () => { const url = new DefaultUrlSerializer(); it('should parse the root url', () => { const tree = url.parse('/'); expectSegment(tree.root, ''); expect(url.serialize(tree)).toEqual('/'); }); it('should parse non-empty urls', () => { const tree = url.parse('one/two'); expectSegment(tree.root.children[PRIMARY_OUTLET], 'one/two'); expect(url.serialize(tree)).toEqual('/one/two'); }); it('should parse multiple secondary segments', () => { const tree = url.parse('/one/two(left:three//right:four)'); expectSegment(tree.root.children[PRIMARY_OUTLET], 'one/two'); expectSegment(tree.root.children['left'], 'three'); expectSegment(tree.root.children['right'], 'four'); expect(url.serialize(tree)).toEqual('/one/two(left:three//right:four)'); }); it('should parse top-level nodes with only secondary segment', () => { const tree = url.parse('/(left:one)'); expect(tree.root.numberOfChildren).toEqual(1); expectSegment(tree.root.children['left'], 'one'); expect(url.serialize(tree)).toEqual('/(left:one)'); }); it('should parse nodes with only secondary segment', () => { const tree = url.parse('/one/(left:two)'); const one = tree.root.children[PRIMARY_OUTLET]; expectSegment(one, 'one', true); expect(one.numberOfChildren).toEqual(1); expectSegment(one.children['left'], 'two'); expect(url.serialize(tree)).toEqual('/one/(left:two)'); }); it('should not parse empty path segments with params', () => { expect(() => url.parse('/one/two/(;a=1//right:;b=2)')) .toThrowError(/Empty path url segment cannot have parameters/); }); it('should parse scoped secondary segments', () => { const tree = url.parse('/one/(two//left:three)'); const primary = tree.root.children[PRIMARY_OUTLET]; expectSegment(primary, 'one', true); expectSegment(primary.children[PRIMARY_OUTLET], 'two'); expectSegment(primary.children['left'], 'three'); expect(url.serialize(tree)).toEqual('/one/(two//left:three)'); }); it('should parse scoped secondary segments with unscoped ones', () => { const tree = url.parse('/one/(two//left:three)(right:four)'); const primary = tree.root.children[PRIMARY_OUTLET]; expectSegment(primary, 'one', true); expectSegment(primary.children[PRIMARY_OUTLET], 'two'); expectSegment(primary.children['left'], 'three'); expectSegment(tree.root.children['right'], 'four'); expect(url.serialize(tree)).toEqual('/one/(two//left:three)(right:four)'); }); it('should parse secondary segments that have children', () => { const tree = url.parse('/one(left:two/three)'); expectSegment(tree.root.children[PRIMARY_OUTLET], 'one'); expectSegment(tree.root.children['left'], 'two/three'); expect(url.serialize(tree)).toEqual('/one(left:two/three)'); }); it('should parse an empty secondary segment group', () => { const tree = url.parse('/one()'); expectSegment(tree.root.children[PRIMARY_OUTLET], 'one'); expect(url.serialize(tree)).toEqual('/one'); }); it('should parse key-value matrix params', () => { const tree = url.parse('/one;a=11a;b=11b(left:two;c=22//right:three;d=33)'); expectSegment(tree.root.children[PRIMARY_OUTLET], 'one;a=11a;b=11b'); expectSegment(tree.root.children['left'], 'two;c=22'); expectSegment(tree.root.children['right'], 'three;d=33'); expect(url.serialize(tree)).toEqual('/one;a=11a;b=11b(left:two;c=22//right:three;d=33)'); }); it('should parse key only matrix params', () => { const tree = url.parse('/one;a'); expectSegment(tree.root.children[PRIMARY_OUTLET], 'one;a='); expect(url.serialize(tree)).toEqual('/one;a='); }); it('should parse query params (root)', () => { const tree = url.parse('/?a=1&b=2'); expect(tree.root.children).toEqual({}); expect(tree.queryParams).toEqual({a: '1', b: '2'}); expect(url.serialize(tree)).toEqual('/?a=1&b=2'); }); it('should parse query params', () => { const tree = url.parse('/one?a=1&b=2'); expect(tree.queryParams).toEqual({a: '1', b: '2'}); }); it('should parse query params when with parenthesis', () => { const tree = url.parse('/one?a=(11)&b=(22)'); expect(tree.queryParams).toEqual({a: '(11)', b: '(22)'}); }); it('should parse query params when with slashes', () => { const tree = url.parse('/one?a=1/2&b=3/4'); expect(tree.queryParams).toEqual({a: '1/2', b: '3/4'}); }); it('should parse key only query params', () => { const tree = url.parse('/one?a'); expect(tree.queryParams).toEqual({a: ''}); }); it('should parse a value-empty query param', () => { const tree = url.parse('/one?a='); expect(tree.queryParams).toEqual({a: ''}); }); it('should parse value-empty query params', () => { const tree = url.parse('/one?a=&b='); expect(tree.queryParams).toEqual({a: '', b: ''}); }); it('should serializer query params', () => { const tree = url.parse('/one?a'); expect(url.serialize(tree)).toEqual('/one?a='); }); it('should handle multiple query params of the same name into an array', () => { const tree = url.parse('/one?a=foo&a=bar&a=swaz'); expect(tree.queryParams).toEqual({a: ['foo', 'bar', 'swaz']}); expect(tree.queryParamMap.get('a')).toEqual('foo'); expect(tree.queryParamMap.getAll('a')).toEqual(['foo', 'bar', 'swaz']); expect(url.serialize(tree)).toEqual('/one?a=foo&a=bar&a=swaz'); }); it('should parse fragment', () => { const tree = url.parse('/one#two'); expect(tree.fragment).toEqual('two'); expect(url.serialize(tree)).toEqual('/one#two'); }); it('should parse fragment (root)', () => { const tree = url.parse('/#one'); expectSegment(tree.root, ''); expect(url.serialize(tree)).toEqual('/#one'); }); it('should parse empty fragment', () => { const tree = url.parse('/one#'); expect(tree.fragment).toEqual(''); expect(url.serialize(tree)).toEqual('/one#'); }); describe('encoding/decoding', () => { it('should encode/decode path segments and parameters', () => { const u = `/${encodeUriSegment("one two")};${encodeUriSegment("p 1")}=${encodeUriSegment("v 1")};${encodeUriSegment("p 2")}=${encodeUriSegment("v 2")}`; const tree = url.parse(u); expect(tree.root.children[PRIMARY_OUTLET].segments[0].path).toEqual('one two'); expect(tree.root.children[PRIMARY_OUTLET].segments[0].parameters) .toEqual({['p 1']: 'v 1', ['p 2']: 'v 2'}); expect(url.serialize(tree)).toEqual(u); }); it('should encode/decode "slash" in path segments and parameters', () => { const u = `/${encodeUriSegment("one/two")};${encodeUriSegment("p/1")}=${encodeUriSegment("v/1")}/three`; const tree = url.parse(u); const segment = tree.root.children[PRIMARY_OUTLET].segments[0]; expect(segment.path).toEqual('one/two'); expect(segment.parameters).toEqual({'p/1': 'v/1'}); expect(segment.parameterMap.get('p/1')).toEqual('v/1'); expect(segment.parameterMap.getAll('p/1')).toEqual(['v/1']); expect(url.serialize(tree)).toEqual(u); }); it('should encode/decode query params', () => { const u = `/one?${encodeUriQuery("p 1")}=${encodeUriQuery("v 1")}&${encodeUriQuery("p 2")}=${encodeUriQuery("v 2")}`; const tree = url.parse(u); expect(tree.queryParams).toEqual({'p 1': 'v 1', 'p 2': 'v 2'}); expect(tree.queryParamMap.get('p 1')).toEqual('v 1'); expect(tree.queryParamMap.get('p 2')).toEqual('v 2'); expect(url.serialize(tree)).toEqual(u); }); it('should decode spaces in query as %20 or +', () => { const u1 = `/one?foo=bar baz`; const u2 = `/one?foo=bar+baz`; const u3 = `/one?foo=bar%20baz`; const u1p = url.parse(u1); const u2p = url.parse(u2); const u3p = url.parse(u3); expect(url.serialize(u1p)).toBe(url.serialize(u2p)); expect(url.serialize(u2p)).toBe(url.serialize(u3p)); expect(u1p.queryParamMap.get('foo')).toBe('bar baz'); expect(u2p.queryParamMap.get('foo')).toBe('bar baz'); expect(u3p.queryParamMap.get('foo')).toBe('bar baz'); }); it('should encode query params leaving sub-delimiters intact', () => { const percentChars = '/?#&+=[] '; const percentCharsEncoded = '%2F%3F%23%26%2B%3D%5B%5D%20'; const intactChars = '!$\'()*,;:'; const params = percentChars + intactChars; const paramsEncoded = percentCharsEncoded + intactChars; const mixedCaseString = 'sTrInG'; expect(percentCharsEncoded).toEqual(encodeUriQuery(percentChars)); expect(intactChars).toEqual(encodeUriQuery(intactChars)); // Verify it replaces repeated characters correctly expect(paramsEncoded + paramsEncoded).toEqual(encodeUriQuery(params + params)); // Verify it doesn't change the case of alpha characters expect(mixedCaseString + paramsEncoded).toEqual(encodeUriQuery(mixedCaseString + params)); }); it('should encode/decode fragment', () => { const u = `/one#${encodeUriQuery('one two=three four')}`; const tree = url.parse(u); expect(tree.fragment).toEqual('one two=three four'); expect(url.serialize(tree)).toEqual('/one#one%20two%3Dthree%20four'); }); }); describe('special character encoding/decoding', () => { // Tests specific to https://github.com/angular/angular/issues/10280 it('should parse encoded parens in matrix params', () => { const auxRoutesUrl = '/abc;foo=(other:val)'; const fooValueUrl = '/abc;foo=%28other:val%29'; const auxParsed = url.parse(auxRoutesUrl).root; const fooParsed = url.parse(fooValueUrl).root; // Test base case expect(auxParsed.children[PRIMARY_OUTLET].segments.length).toBe(1); expect(auxParsed.children[PRIMARY_OUTLET].segments[0].path).toBe('abc'); expect(auxParsed.children[PRIMARY_OUTLET].segments[0].parameters).toEqual({foo: ''}); expect(auxParsed.children['other'].segments.length).toBe(1); expect(auxParsed.children['other'].segments[0].path).toBe('val'); // Confirm matrix params are URL decoded expect(fooParsed.children[PRIMARY_OUTLET].segments.length).toBe(1); expect(fooParsed.children[PRIMARY_OUTLET].segments[0].path).toBe('abc'); expect(fooParsed.children[PRIMARY_OUTLET].segments[0].parameters).toEqual({ foo: '(other:val)' }); }); it('should serialize encoded parens in matrix params', () => { const testUrl = '/abc;foo=%28one%29'; const parsed = url.parse(testUrl);
expect(url.serialize(parsed)).toBe('/abc;foo=%28one%29'); }); it('should not serialize encoded parens in query params', () => { const testUrl = '/abc?foo=%28one%29'; const parsed = url.parse(testUrl); expect(parsed.queryParams).toEqual({foo: '(one)'}); expect(url.serialize(parsed)).toBe('/abc?foo=(one)'); }); // Test special characters in general // From http://www.ietf.org/rfc/rfc3986.txt const unreserved = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._~`; it('should encode a minimal set of special characters in queryParams and fragment', () => { const notEncoded = unreserved + `:@!$'*,();`; const encode = ` +%&=#[]/?`; const encoded = `%20%2B%25%26%3D%23%5B%5D%2F%3F`; const parsed = url.parse('/foo'); parsed.queryParams = {notEncoded, encode}; expect(url.serialize(parsed)).toBe(`/foo?notEncoded=${notEncoded}&encode=${encoded}`); }); it('should encode a minimal set of special characters in fragment', () => { const notEncoded = unreserved + `:@!$'*,();`; const encode = ` +%&=#[]/?`; const encoded = `%20%2B%25%26%3D%23%5B%5D%2F%3F`; const parsed = url.parse('/foo'); parsed.fragment = notEncoded + encode; expect(url.serialize(parsed)).toBe(`/foo#${notEncoded}${encoded}`); }); it('should encode minimal special characters plus parens and semi-colon in matrix params', () => { const notEncoded = unreserved + `:@!$'*,&`; const encode = ` /%=#()[];?+`; const encoded = `%20%2F%25%3D%23%28%29%5B%5D%3B%3F%2B`; const parsed = url.parse('/foo'); parsed.root.children[PRIMARY_OUTLET].segments[0].parameters = {notEncoded, encode}; expect(url.serialize(parsed)).toBe(`/foo;notEncoded=${notEncoded};encode=${encoded}`); }); it('should encode special characters in the path the same as matrix params', () => { const notEncoded = unreserved + `:@!$'*,&`; const encode = ` /%=#()[];?+`; const encoded = `%20%2F%25%3D%23%28%29%5B%5D%3B%3F%2B`; const parsed = url.parse('/foo'); parsed.root.children[PRIMARY_OUTLET].segments[0].path = notEncoded + encode; expect(url.serialize(parsed)).toBe(`/${notEncoded}${encoded}`); }); }); describe('error handling', () => { it('should throw when invalid characters inside children', () => { expect(() => url.parse('/one/(left#one)')) .toThrowError('Cannot parse url \'/one/(left#one)\''); }); it('should throw when missing closing )', () => { expect(() => url.parse('/one/(left')).toThrowError('Cannot parse url \'/one/(left\''); }); }); }); function expectSegment( segment: UrlSegmentGroup, expected: string, hasChildren: boolean = false): void { if (segment.segments.filter(s => s.path === '').length > 0) { throw new Error(`UrlSegments cannot be empty ${segment.segments}`); } const p = segment.segments.map(p => serializePath(p)).join('/'); expect(p).toEqual(expected); expect(Object.keys(segment.children).length > 0).toEqual(hasChildren); }
current_air_purifier_state.rs
// this file is auto-generated by hap-codegen use async_trait::async_trait; use serde::Serialize; use serde_json::json; use crate::{ characteristic::{ AsyncCharacteristicCallbacks, Characteristic, CharacteristicCallbacks, Format, HapCharacteristic, HapCharacteristicSetup, HapType, OnReadFn, OnReadFuture, OnUpdateFn, OnUpdateFuture, Perm, Unit, }, pointer, Error, Result, }; // TODO - re-check MaximumDataLength /// Current Air Purifier State characteristic. #[derive(Debug, Default, Serialize)] pub struct CurrentAirPurifierStateCharacteristic(Characteristic<u8>); pub enum Value { Inactive = 0, Idle = 1, PurifyingAir = 2, } impl CurrentAirPurifierStateCharacteristic { /// Creates a new Current Air Purifier State characteristic. pub fn new(id: u64, accessory_id: u64) -> Self { #[allow(unused_mut)] let mut c = Self(Characteristic::<u8> { id, accessory_id, hap_type: HapType::CurrentAirPurifierState, format: Format::UInt8, perms: vec![ Perm::Events, Perm::PairedRead, ], max_value: Some(2), min_value: Some(0), step_value: Some(1), valid_values: Some(vec![ 0, // INACTIVE 1, // IDLE 2, // PURIFYING_AIR ]), ..Default::default() }); if let Some(ref min_value) = &c.0.min_value { c.0.value = min_value.clone(); } else if let Some(ref valid_values) = &c.0.valid_values { if valid_values.len() > 0 { c.0.value = valid_values[0].clone(); } } c } } #[async_trait] impl HapCharacteristic for CurrentAirPurifierStateCharacteristic { fn get_id(&self) -> u64 { self.0.get_id() } fn get_type(&self) -> HapType { self.0.get_type() } fn get_format(&self) -> Format { self.0.get_format() } fn get_perms(&self) -> Vec<Perm> { self.0.get_perms() } fn get_event_notifications(&self) -> Option<bool> { self.0.get_event_notifications() } fn set_event_notifications(&mut self, event_notifications: Option<bool>) { self.0.set_event_notifications(event_notifications) } async fn get_value(&mut self) -> Result<serde_json::Value> { let value = self.0.get_value().await?; Ok(json!(value)) } async fn set_value(&mut self, value: serde_json::Value) -> Result<()> { let v; // for whatever reason, the controller is setting boolean values either as a boolean or as an integer if self.0.format == Format::Bool && value.is_number() { let num_v: u8 = serde_json::from_value(value)?; if num_v == 0 { v = serde_json::from_value(json!(false))?; } else if num_v == 1 { v = serde_json::from_value(json!(true))?; } else { return Err(Error::InvalidValue(self.get_format())); } } else { v = serde_json::from_value(value).map_err(|_| Error::InvalidValue(self.get_format()))?; } self.0.set_value(v).await } fn get_unit(&self) -> Option<Unit> { self.0.get_unit() } fn get_max_value(&self) -> Option<serde_json::Value> { self.0.get_max_value().map(|v| json!(v)) } fn get_min_value(&self) -> Option<serde_json::Value> { self.0.get_min_value().map(|v| json!(v)) } fn get_step_value(&self) -> Option<serde_json::Value> { self.0.get_step_value().map(|v| json!(v)) } fn get_max_len(&self) -> Option<u16> { self.0.get_max_len() } } impl HapCharacteristicSetup for CurrentAirPurifierStateCharacteristic { fn set_event_emitter(&mut self, event_emitter: Option<pointer::EventEmitter>)
} impl CharacteristicCallbacks<u8> for CurrentAirPurifierStateCharacteristic { fn on_read(&mut self, f: Option<impl OnReadFn<u8>>) { self.0.on_read(f) } fn on_update(&mut self, f: Option<impl OnUpdateFn<u8>>) { self.0.on_update(f) } } impl AsyncCharacteristicCallbacks<u8> for CurrentAirPurifierStateCharacteristic { fn on_read_async(&mut self, f: Option<impl OnReadFuture<u8>>) { self.0.on_read_async(f) } fn on_update_async(&mut self, f: Option<impl OnUpdateFuture<u8>>) { self.0.on_update_async(f) } }
{ self.0.set_event_emitter(event_emitter) }
generic.go
package externalversions import ( "fmt" v1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other // sharedInformers based on type type GenericInformer interface { Informer() cache.SharedIndexInformer Lister() cache.GenericLister } type genericInformer struct { informer cache.SharedIndexInformer resource schema.GroupResource } // Informer returns the SharedIndexInformer. func (f *genericInformer) Informer() cache.SharedIndexInformer { return f.informer } // Lister returns the GenericLister. func (f *genericInformer) Lister() cache.GenericLister { return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) } // ForResource gives generic access to a shared informer of the matching type // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=machineconfiguration.openshift.io, Version=v1 case v1.SchemeGroupVersion.WithResource("containerruntimeconfigs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().ContainerRuntimeConfigs().Informer()}, nil case v1.SchemeGroupVersion.WithResource("controllerconfigs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().ControllerConfigs().Informer()}, nil case v1.SchemeGroupVersion.WithResource("kubeletconfigs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().KubeletConfigs().Informer()}, nil case v1.SchemeGroupVersion.WithResource("machineconfigs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineConfigs().Informer()}, nil case v1.SchemeGroupVersion.WithResource("machineconfigpools"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineConfigPools().Informer()}, nil } return nil, fmt.Errorf("no informer found for %v", resource) }
// Code generated by informer-gen. DO NOT EDIT.
app.module.ts
import { Module } from '@nestjs/common'; import { AppController } from './app.controller'; import { AppService } from './app.service'; import { TypeOrmModule } from '@nestjs/typeorm'; // dionisio.modules // dionisio.entities @Module({ imports: [ TypeOrmModule.forRoot({ type: 'mysql', host: 'localhost', port: 3306, username: 'root', password: '1234', database: 'dionisio', entities: [ // dionisio.orm ], synchronize: true, }), // dionisio.imports ], controllers: [AppController], providers: [AppService], }) export class
{}
AppModule
write-to-file.ts
import * as fs from 'fs' import * as path from 'path' import * as request from 'request' import Command from '@oclif/command' export function
( cmd: Command, sourceName: string, targetName: string, sourcePath = '.', targetPath = '.', force = false ) { const sourceFileName = path.join(sourcePath, sourceName) const targetFileName = path.join(targetPath, targetName) if (!fs.existsSync(sourceFileName)) { cmd.error(`${sourceFileName} does not exist.`) cmd.exit(1) } if (!fs.existsSync(targetFileName) || force) { fs.copyFileSync(sourceFileName, targetFileName) cmd.log(`Wrote to ${targetFileName}`) } else { cmd.error(`${targetFileName} already exists. Use -f or --force to overwrite.`) cmd.exit(1) } } export function writeToFileFromUri( cmd: Command, sourceUri: string, targetName: string, targetPath = '.', force = false ) { const targetFileName = path.join(targetPath, targetName) if (!fs.existsSync(targetFileName) || force) { request(sourceUri).pipe(fs.createWriteStream(targetFileName)) cmd.log(`Wrote to ${targetFileName}`) } else { cmd.error(`${targetFileName} already exists. Use -f or --force to overwrite.`) cmd.exit(1) } }
writeToFile
dot.go
package graph import ( "bytes" "fmt" "regexp" ) const ( NORMAL_EDGE_COLOR = "green" TRUE_EDGE_COLOR = "limegreen" FALSE_EDGE_COLOR = "yellow" FAIL_EDGE_COLOR = "goldenrod" RETURN_EDGE_COLOR = "navy" ) func nodeDotID(node NodeID) string { return fmt.Sprintf("n%d", node) } type DotStyler interface { BlockLabel(node NodeID) (string, bool) NodeStyle(node NodeID) string EdgeStyle(src NodeID, edge EdgeID, dst NodeID) string } type edgePort struct { node NodeID port string } type dotDrawer struct { buf *bytes.Buffer edgePorts []edgePort fuseLinear bool uid int } func (drawer *dotDrawer) getUID() int { temp := drawer.uid drawer.uid += 1 return temp } func (drawer *dotDrawer) WriteString(message string) { drawer.buf.WriteString(message) } func (drawer *dotDrawer) WriteNode(nid NodeID, style string) { drawer.WriteString(" ") drawer.WriteString(nodeDotID(nid)) drawer.WriteString("[") drawer.WriteString(style) drawer.WriteString("];\n") } func (drawer *dotDrawer) GetEdgePort(nid NodeID) string { port := drawer.edgePorts[nid] if port.port == "" { return nodeDotID(nid) } else { return nodeDotID(port.node) + ":" + port.port } } func (drawer *dotDrawer) IsSquashedEdge(src NodeID, dst NodeID) bool { sp := drawer.edgePorts[src] dp := drawer.edgePorts[dst] return sp.port != "" && dp.port != "" && sp.node == dp.node } func (drawer *dotDrawer) WriteEdge(src NodeID, dst NodeID, style string) { if drawer.IsSquashedEdge(src, dst) { return } drawer.WriteString(" ") drawer.WriteString(drawer.GetEdgePort(src)) drawer.WriteString(" -> ") drawer.WriteString(drawer.GetEdgePort(dst)) drawer.WriteString("[") drawer.WriteString(style) drawer.WriteString("];\n") } func drawNode(drawer *dotDrawer, node NodeID, styler DotStyler)
func drawUnclusteredNodes(drawer *dotDrawer, order []NodeID, styler DotStyler) { nit := OrderedIterator(order) for nit.HasNext() { drawNode(drawer, nit.GetNext(), styler) } } var dotEscape = regexp.MustCompile("([\\\\\"\\[\\]<>{}|])") var newline = regexp.MustCompile("\n") func EscapeDotString(message string) string { return newline.ReplaceAllString(dotEscape.ReplaceAllString(message, "\\$1"), "\\l") } func dotString(message string) string { return fmt.Sprintf("\"%s\"", EscapeDotString(message)) } func drawLinearNodes(drawer *dotDrawer, nodes []NodeID, styler DotStyler) { head := NoNode text := "" flush := func() { if head != NoNode { style := fmt.Sprintf("shape=record,label=\"{%s}\"", text) drawer.WriteNode(head, style) text = "" head = NoNode } } for _, n := range nodes { label, ok := styler.BlockLabel(n) if ok && drawer.fuseLinear { if head == NoNode { head = n } else { text += "|" } text += "<" + nodeDotID(n) + ">" + EscapeDotString(label) drawer.edgePorts[n] = edgePort{node: head, port: nodeDotID(n)} } else { flush() drawNode(drawer, n, styler) } } flush() } func drawCluster(drawer *dotDrawer, cluster Cluster, styler DotStyler) { switch cluster := cluster.(type) { case *ClusterLeaf: drawer.WriteString(fmt.Sprintf("subgraph cluster_%d {\n", drawer.getUID())) drawer.WriteString(" labeljust=l;\n") drawer.WriteString(fmt.Sprintf(" label=\"leaf %d\";\n", len(cluster.Nodes))) drawer.WriteString(" color=lightgrey;\n") drawLinearNodes(drawer, cluster.Nodes, styler) drawer.WriteString("}\n") case *ClusterLinear: drawer.WriteString(fmt.Sprintf("subgraph cluster_%d {\n", drawer.getUID())) drawer.WriteString(" labeljust=l;\n") drawer.WriteString(fmt.Sprintf(" label=\"linear %d\";\n", len(cluster.Clusters))) drawer.WriteString(" color=lightgrey;\n") for _, c := range cluster.Clusters { drawCluster(drawer, c, styler) } drawer.WriteString("}\n") case *ClusterSwitch: drawer.WriteString(fmt.Sprintf("subgraph cluster_%d {\n", drawer.getUID())) drawer.WriteString(" labeljust=l;\n") drawer.WriteString(fmt.Sprintf(" label=\"switch %d\";\n", len(cluster.Children))) drawer.WriteString(" color=lightgrey;\n") for _, c := range cluster.Children { drawCluster(drawer, c, styler) } drawer.WriteString("}\n") case *ClusterLoop: drawer.WriteString(fmt.Sprintf("subgraph cluster_%d {\n", drawer.getUID())) drawer.WriteString(" labeljust=l;\n") drawer.WriteString(" label=loop;\n") drawer.WriteString(" color=lightgrey;\n") drawCluster(drawer, cluster.Body, styler) drawer.WriteString("}\n") default: panic(cluster) } } func drawClusteredNodes(drawer *dotDrawer, g *Graph, styler DotStyler) { cluster := MakeCluster(g) drawCluster(drawer, cluster, styler) } func GraphToDot(g *Graph, styler DotStyler) string { order, index := ReversePostorder(g) var idoms []NodeID visualize_idoms := false if visualize_idoms { idoms = FindDominators(g, order, index) } drawer := &dotDrawer{buf: &bytes.Buffer{}, edgePorts: make([]edgePort, g.NumNodes()), fuseLinear: true} drawer.WriteString("digraph G {\n") drawer.WriteString(" nslimit = 3;\n") // Make big graphs render faster. //drawUnclusteredNodes(drawer, order, styler) drawClusteredNodes(drawer, g, styler) // Draw edges. nit := OrderedIterator(order) for nit.HasNext() { node := nit.GetNext() eit := g.ExitIterator(node) for eit.HasNext() { edge, dst := eit.GetNext() style := styler.EdgeStyle(node, edge, dst) if index[node] >= index[dst] { style += ",weight=0" } drawer.WriteEdge(node, dst, style) } } if visualize_idoms { nit := OrderedIterator(order) for nit.HasNext() { src := nit.GetNext() dst := idoms[src] if src != dst { drawer.WriteEdge(src, dst, "style=dotted") } } } drawer.WriteString("}\n") return drawer.buf.String() }
{ drawer.WriteNode(node, styler.NodeStyle(node)) }
conftest.py
import logging import os import pytest import time import grpc import requests from docker import Client from tools.minicluster.main import setup, teardown, config as mc_config from tools.minicluster.minicluster import run_mesos_agent, teardown_mesos_agent from host import start_maintenance, complete_maintenance, wait_for_host_state from job import Job from job import query_jobs as batch_query_jobs from job import kill_jobs as batch_kill_jobs from stateless_job import StatelessJob from stateless_job import query_jobs as stateless_query_jobs from stateless_job import delete_jobs as stateless_delete_jobs from m3.client import M3 from m3.emitter import BatchedEmitter from peloton_client.pbgen.peloton.api.v0.host import host_pb2 from peloton_client.pbgen.peloton.api.v0.job import job_pb2 from conf_util import ( TERMINAL_JOB_STATES, ACTIVE_JOB_STATES, MESOS_MASTER, MESOS_AGENTS, ) import conf_util as util log = logging.getLogger(__name__) class TestMetrics(object): def __init__(self): self.failed = 0 self.passed = 0 self.duration = 0.0 def increment_passed(self, duration): self.passed += 1 self.duration += duration def increment_failed(self, duration): self.failed += 1 self.duration += duration collect_metrics = TestMetrics() # # Module scoped setup / teardown across test suites. # @pytest.fixture(scope="module", autouse=True) def setup_cluster(request): tests_failed_before_module = request.session.testsfailed setup_minicluster() def teardown_cluster(): dump_logs = False if (request.session.testsfailed - tests_failed_before_module) > 0: dump_logs = True teardown_minicluster(dump_logs) request.addfinalizer(teardown_cluster) @pytest.fixture(autouse=True) def run_around_tests(): # before each test yield # after each test cleanup_batch_jobs() @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): global collect_metrics outcome = yield rep = outcome.get_result() if rep.outcome == "passed" and rep.when == "call": collect_metrics.increment_passed(rep.duration) if rep.outcome == "failed" and rep.when == "call": collect_metrics.increment_failed(rep.duration) rep = outcome.get_result() setattr(item, "rep_" + rep.when, rep) if "incremental" in item.keywords: if call.excinfo is not None: parent = item.parent parent._previousfailed = item def pytest_sessionfinish(session, exitstatus): emitter = BatchedEmitter() m3 = M3( application_identifier="peloton", emitter=emitter, environment="production", default_tags={"result": "watchdog", "cluster": os.getenv("CLUSTER")}, ) if collect_metrics.failed > 0: m3.gauge("watchdog_result", 1) else: m3.gauge("watchdog_result", 0) m3.gauge("total_tests", collect_metrics.failed + collect_metrics.passed) m3.gauge("failed_tests", collect_metrics.failed) m3.gauge("passed_tests", collect_metrics.passed) m3.gauge("duration_tests", collect_metrics.duration) class Container(object): def __init__(self, names): self._cli = Client(base_url="unix://var/run/docker.sock") self._names = names def start(self): for name in self._names: self._cli.start(name) log.info("%s started", name) if self._names[0] in MESOS_MASTER: wait_for_mesos_master_leader() def stop(self): for name in self._names: self._cli.stop(name, timeout=0) log.info("%s stopped", name) def
(self): for name in self._names: self._cli.restart(name, timeout=0) log.info("%s restarted", name) if self._names[0] in MESOS_MASTER: wait_for_mesos_master_leader() def get_container(container_name): return Container(container_name) def wait_for_mesos_master_leader( url="http://127.0.0.1:5050/state.json", timeout_secs=20 ): """ util method to wait for mesos master leader elected """ deadline = time.time() + timeout_secs while time.time() < deadline: try: resp = requests.get(url) if resp.status_code != 200: time.sleep(2) continue return except Exception: pass assert False, "timed out waiting for mesos master leader" def wait_for_all_agents_to_register( url="http://127.0.0.1:5050/state.json", timeout_secs=300, ): """ util method to wait for all agents to register """ deadline = time.time() + timeout_secs while time.time() < deadline: try: resp = requests.get(url) if resp.status_code == 200: registered_agents = 0 for a in resp.json()['slaves']: if a['active'] == True: registered_agents += 1 if registered_agents == 3: return time.sleep(10) except Exception: pass assert False, "timed out waiting for agents to register" def setup_minicluster(enable_k8s=False): """ setup minicluster """ log.info("setup cluster") if os.getenv("CLUSTER", ""): log.info("cluster mode") else: log.info("local minicluster mode") setup(enable_peloton=True, enable_k8s=enable_k8s) time.sleep(5) def teardown_minicluster(dump_logs=False): """ teardown minicluster """ log.info("\nteardown cluster") if os.getenv("CLUSTER", ""): log.info("cluster mode, no teardown actions") elif os.getenv("NO_TEARDOWN", ""): log.info("skip teardown") else: log.info("tearing down") # dump logs only if tests have failed in the current module if dump_logs: # stop containers so that log stream will not block teardown(stop=True) try: # TODO (varung): enable PE and mesos-master logs if needed cli = Client(base_url="unix://var/run/docker.sock") for c in ("peloton-jobmgr0", "peloton-resmgr0"): for l in cli.logs(c, stream=True): # remove newline character when logging log.info(l.rstrip()) except Exception as e: log.info(e) teardown() def cleanup_batch_jobs(): """ stop all batch jobs from minicluster """ jobs = batch_query_jobs() batch_kill_jobs(jobs) def cleanup_stateless_jobs(timeout_secs=10): """ delete all service jobs from minicluster """ jobs = stateless_query_jobs() # opportunistic delete for jobs, if not deleted within # timeout period, it will get cleanup in next test run. stateless_delete_jobs(jobs) # Wait for job deletion to complete. deadline = time.time() + timeout_secs while time.time() < deadline: try: jobs = stateless_query_jobs() if len(jobs) == 0: return time.sleep(2) except grpc.RpcError as e: # Catch "not-found" error here because QueryJobs endpoint does # two db queries in sequence: "QueryJobs" and "GetUpdate". # However, when we delete a job, updates are deleted first, # there is a slight chance QueryJobs will fail to query the # update, returning "not-found" error. if e.code() == grpc.StatusCode.NOT_FOUND: time.sleep(2) continue @pytest.fixture() def mesos_master(): return Container(MESOS_MASTER) @pytest.fixture() def mesos_agent(): # TODO: We need to pick up the count dynamically. return Container(MESOS_AGENTS) @pytest.fixture() def placement_engines(): return Container(util.PLACEMENT_ENGINES) @pytest.fixture() def jobmgr(): # TODO: We need to pick up the count dynamically. return Container(util.JOB_MGRS) @pytest.fixture() def resmgr(): # TODO: We need to pick up the count dynamically. return Container(util.RES_MGRS) @pytest.fixture() def hostmgr(): # TODO: We need to pick up the count dynamically. return Container(util.HOST_MGRS) @pytest.fixture() def aurorabridge(): # TODO: We need to pick up the count dynamically. return Container(util.AURORA_BRIDGE) @pytest.fixture def long_running_job(request): job = Job(job_file="long_running_job.yaml") # teardown def kill_long_running_job(): print("\nstopping long running job") job.stop() request.addfinalizer(kill_long_running_job) return job @pytest.fixture def stateless_job(request): job = StatelessJob() # teardown def kill_stateless_job(): print("\nstopping stateless job") job.stop() request.addfinalizer(kill_stateless_job) return job @pytest.fixture def host_affinity_job(request): job = Job(job_file="test_job_host_affinity_constraint.yaml") # Kill job def kill_host_affinity_job(): print("\nstopping host affinity job") job.stop() request.addfinalizer(kill_host_affinity_job) return job # For unit tests of update/restart running with in_place, it would # be tested with both in_place feature enabled and disabled @pytest.fixture(params=[True, False]) def in_place(request): return request.param @pytest.fixture def maintenance(request): draining_hosts = [] client = [None] # Use list to store a reference to the client object def update_client(new_client): client[0] = new_client def start(hosts): resp = start_maintenance(hosts) if not resp: log.error("Start maintenance failed:" + resp) return resp draining_hosts.extend(hosts) return resp def stop(hosts): resp = complete_maintenance(hosts) if not resp: log.error("Complete maintenance failed:" + resp) return resp # The mesos-agent containers needs to be started explicitly as they would # have been stopped when the agents transition to DOWN Container(hosts).start() del draining_hosts[:] return resp def clean_up(): # kill stateless jobs. This is needed since host draining # is done in SLA aware manner for stateless jobs. for j in stateless_query_jobs(client=client[0]): j.stop() if not draining_hosts: return for h in draining_hosts: wait_for_host_state(h, host_pb2.HOST_STATE_DOWN) stop(draining_hosts) request.addfinalizer(clean_up) response = dict() response["start"] = start response["stop"] = stop response["update_client"] = update_client return response """ Setup fixture for getting a dict of job objects per state """ @pytest.fixture def jobs_by_state(request): return util.create_job_config_by_state(_num_jobs_per_state=1) """ Setup/Cleanup fixture that starts a set of RUNNING, SUCCEEDED and FAILED jobs scoped per module. This is to give each module a set of active and completed jobs to test on. Returns: common salt identifier, respoolID and dict of created jobs """ @pytest.fixture(scope="module") def create_jobs(request): jobs_by_state = util.create_job_config_by_state() salt = jobs_by_state[0] jobs_dict = jobs_by_state[1] log.info("Create jobs") respoolID = None for state in TERMINAL_JOB_STATES: jobs = jobs_dict[state] for job in jobs: job.create() if state == "FAILED": job.wait_for_state( goal_state="FAILED", failed_state="SUCCEEDED" ) else: job.wait_for_state(goal_state=state) if respoolID is None: respoolID = job.get_config().respoolID def stop_jobs(): log.info("Stop jobs") for state in TERMINAL_JOB_STATES: jobs = jobs_dict[state] for job in jobs: state = job_pb2.JobState.Name(job.get_runtime().state) if state in ACTIVE_JOB_STATES: job.stop() job.wait_for_state(goal_state="KILLED") request.addfinalizer(stop_jobs) # Job Query accuracy depends on lucene index being up to date # lucene index refresh time is 10 seconds. Sleep for 12 sec. time.sleep(12) return salt, respoolID, jobs_dict """ Setup/Cleanup fixture for tasks query integ-tests. Within fixture parameter, a list of tuples, such as [(task_state, count)], is passed to give each test case a varied number of tasks to test on. Returns: The job id of the job created. """ @pytest.fixture def task_test_fixture(request): # task_states is a list of tuples, e.g. [('SUCCEEDED', 2)]. task_states = request.param assert task_states is not None if len(task_states) > 1: mixed_task_states = True else: mixed_task_states = False test_config = util.generate_job_config( file_name="test_task.yaml", task_states=task_states ) # Create job with customized tasks. job = Job(job_config=test_config) job.create() log.info("Job for task query is created: %s", job.job_id) # Determine terminating state. job_state = task_states[0][0] if not mixed_task_states else "FAILED" if job_state == "FAILED": job.wait_for_state(goal_state="FAILED", failed_state="SUCCEEDED") else: job.wait_for_state(goal_state=job_state) def stop_job(): state = job_pb2.JobState.Name(job.get_runtime().state) if state in ACTIVE_JOB_STATES: job.stop() job.wait_for_state(goal_state="KILLED") request.addfinalizer(stop_job) return job.job_id """ Setup/cleanup fixture that replaces a regular Mesos agent with another one that has "peloton/exclusive" attribute. Cleanup does the exact opposite. """ @pytest.fixture def exclusive_host(request): def clean_up(): teardown_mesos_agent(mc_config, 0, is_exclusive=True) run_mesos_agent(mc_config, 0, 0) time.sleep(5) # Remove agent #0 and instead create exclusive agent #0 teardown_mesos_agent(mc_config, 0) run_mesos_agent( mc_config, 0, 3, is_exclusive=True, exclusive_label_value="exclusive-test-label", ) time.sleep(5) request.addfinalizer(clean_up)
restart
simple_httpclient_test.py
from __future__ import absolute_import, division, print_function import collections from contextlib import closing import errno import gzip import logging import os import re import socket import ssl import sys from tornado.escape import to_unicode from tornado import gen from tornado.httpclient import AsyncHTTPClient from tornado.httputil import HTTPHeaders, ResponseStartLine from tornado.ioloop import IOLoop from tornado.log import gen_log from tornado.concurrent import Future from tornado.netutil import Resolver, bind_sockets from tornado.simple_httpclient import SimpleAsyncHTTPClient from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler, RedirectHandler from tornado.test import httpclient_test from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog from tornado.test.util import skipOnTravis, skipIfNoIPv6, refusing_port, unittest, skipBefore35, exec_test from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase): def get_http_client(self): client = SimpleAsyncHTTPClient(force_instance=True) self.assertTrue(isinstance(client, SimpleAsyncHTTPClient)) return client class TriggerHandler(RequestHandler): def initialize(self, queue, wake_callback): self.queue = queue self.wake_callback = wake_callback @asynchronous def get(self): logging.debug("queuing trigger") self.queue.append(self.finish) if self.get_argument("wake", "true") == "true": self.wake_callback() class HangHandler(RequestHandler): @asynchronous def get(self): pass class ContentLengthHandler(RequestHandler): def get(self): self.set_header("Content-Length", self.get_argument("value")) self.write("ok") class HeadHandler(RequestHandler): def head(self): self.set_header("Content-Length", "7") class OptionsHandler(RequestHandler): def options(self): self.set_header("Access-Control-Allow-Origin", "*") self.write("ok") class NoContentHandler(RequestHandler): def get(self): self.set_status(204) self.finish() class SeeOtherPostHandler(RequestHandler): def post(self): redirect_code = int(self.request.body) assert redirect_code in (302, 303), "unexpected body %r" % self.request.body self.set_header("Location", "/see_other_get") self.set_status(redirect_code) class SeeOtherGetHandler(RequestHandler): def get(self): if self.request.body: raise Exception("unexpected body %r" % self.request.body) self.write("ok") class HostEchoHandler(RequestHandler): def get(self): self.write(self.request.headers["Host"]) class NoContentLengthHandler(RequestHandler): @asynchronous def get(self): if self.request.version.startswith('HTTP/1'): # Emulate the old HTTP/1.0 behavior of returning a body with no # content-length. Tornado handles content-length at the framework # level so we have to go around it. stream = self.request.connection.detach() stream.write(b"HTTP/1.0 200 OK\r\n\r\n" b"hello") stream.close() else: self.finish('HTTP/1 required') class EchoPostHandler(RequestHandler): def post(self): self.write(self.request.body) @stream_request_body class RespondInPrepareHandler(RequestHandler): def prepare(self): self.set_status(403) self.finish("forbidden") class SimpleHTTPClientTestMixin(object): def get_app(self): # callable objects to finish pending /trigger requests self.triggers = collections.deque() return Application([ url("/trigger", TriggerHandler, dict(queue=self.triggers, wake_callback=self.stop)), url("/chunk", ChunkHandler), url("/countdown/([0-9]+)", CountdownHandler, name="countdown"), url("/hang", HangHandler), url("/hello", HelloWorldHandler), url("/content_length", ContentLengthHandler), url("/head", HeadHandler), url("/options", OptionsHandler), url("/no_content", NoContentHandler), url("/see_other_post", SeeOtherPostHandler), url("/see_other_get", SeeOtherGetHandler), url("/host_echo", HostEchoHandler), url("/no_content_length", NoContentLengthHandler), url("/echo_post", EchoPostHandler), url("/respond_in_prepare", RespondInPrepareHandler), url("/redirect", RedirectHandler), ], gzip=True) def test_singleton(self): # Class "constructor" reuses objects on the same IOLoop self.assertTrue(SimpleAsyncHTTPClient() is SimpleAsyncHTTPClient()) # unless force_instance is used self.assertTrue(SimpleAsyncHTTPClient() is not SimpleAsyncHTTPClient(force_instance=True)) # different IOLoops use different objects with closing(IOLoop()) as io_loop2: client1 = self.io_loop.run_sync(gen.coroutine(SimpleAsyncHTTPClient)) client2 = io_loop2.run_sync(gen.coroutine(SimpleAsyncHTTPClient)) self.assertTrue(client1 is not client2) def test_connection_limit(self): with closing(self.create_client(max_clients=2)) as client: self.assertEqual(client.max_clients, 2) seen = [] # Send 4 requests. Two can be sent immediately, while the others # will be queued for i in range(4): client.fetch(self.get_url("/trigger"), lambda response, i=i: (seen.append(i), self.stop())) self.wait(condition=lambda: len(self.triggers) == 2) self.assertEqual(len(client.queue), 2) # Finish the first two requests and let the next two through self.triggers.popleft()() self.triggers.popleft()() self.wait(condition=lambda: (len(self.triggers) == 2 and len(seen) == 2)) self.assertEqual(set(seen), set([0, 1])) self.assertEqual(len(client.queue), 0) # Finish all the pending requests self.triggers.popleft()() self.triggers.popleft()() self.wait(condition=lambda: len(seen) == 4) self.assertEqual(set(seen), set([0, 1, 2, 3])) self.assertEqual(len(self.triggers), 0) def test_redirect_connection_limit(self): # following redirects should not consume additional connections with closing(self.create_client(max_clients=1)) as client: client.fetch(self.get_url('/countdown/3'), self.stop, max_redirects=3) response = self.wait() response.rethrow() def test_gzip(self): # All the tests in this file should be using gzip, but this test # ensures that it is in fact getting compressed. # Setting Accept-Encoding manually bypasses the client's # decompression so we can see the raw data. response = self.fetch("/chunk", use_gzip=False, headers={"Accept-Encoding": "gzip"}) self.assertEqual(response.headers["Content-Encoding"], "gzip") self.assertNotEqual(response.body, b"asdfqwer") # Our test data gets bigger when gzipped. Oops. :) # Chunked encoding bypasses the MIN_LENGTH check. self.assertEqual(len(response.body), 34) f = gzip.GzipFile(mode="r", fileobj=response.buffer) self.assertEqual(f.read(), b"asdfqwer") def test_max_redirects(self): response = self.fetch("/countdown/5", max_redirects=3) self.assertEqual(302, response.code) # We requested 5, followed three redirects for 4, 3, 2, then the last # unfollowed redirect is to 1. self.assertTrue(response.request.url.endswith("/countdown/5")) self.assertTrue(response.effective_url.endswith("/countdown/2")) self.assertTrue(response.headers["Location"].endswith("/countdown/1")) def test_header_reuse(self): # Apps may reuse a headers object if they are only passing in constant # headers like user-agent. The header object should not be modified. headers = HTTPHeaders({'User-Agent': 'Foo'}) self.fetch("/hello", headers=headers) self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')]) def test_see_other_redirect(self): for code in (302, 303): response = self.fetch("/see_other_post", method="POST", body="%d" % code) self.assertEqual(200, response.code) self.assertTrue(response.request.url.endswith("/see_other_post")) self.assertTrue(response.effective_url.endswith("/see_other_get")) # request is the original request, is a POST still self.assertEqual("POST", response.request.method) @skipOnTravis def test_connect_timeout(self): timeout = 0.1 timeout_min, timeout_max = 0.099, 1.0 class TimeoutResolver(Resolver): def resolve(self, *args, **kwargs): return Future() # never completes with closing(self.create_client(resolver=TimeoutResolver())) as client: client.fetch(self.get_url('/hello'), self.stop, connect_timeout=timeout) response = self.wait() self.assertEqual(response.code, 599) self.assertTrue(timeout_min < response.request_time < timeout_max, response.request_time) self.assertEqual(str(response.error), "HTTP 599: Timeout while connecting") @skipOnTravis def test_request_timeout(self): timeout = 0.1 timeout_min, timeout_max = 0.099, 0.15 if os.name == 'nt': timeout = 0.5 timeout_min, timeout_max = 0.4, 0.6 response = self.fetch('/trigger?wake=false', request_timeout=timeout) self.assertEqual(response.code, 599) self.assertTrue(timeout_min < response.request_time < timeout_max, response.request_time) self.assertEqual(str(response.error), "HTTP 599: Timeout during request") # trigger the hanging request to let it clean up after itself self.triggers.popleft()() @skipIfNoIPv6 def test_ipv6(self): try: [sock] = bind_sockets(None, '::1', family=socket.AF_INET6) port = sock.getsockname()[1] self.http_server.add_socket(sock) except socket.gaierror as e: if e.args[0] == socket.EAI_ADDRFAMILY: # python supports ipv6, but it's not configured on the network # interface, so skip this test. return raise url = '%s://[::1]:%d/hello' % (self.get_protocol(), port) # ipv6 is currently enabled by default but can be disabled self.http_client.fetch(url, self.stop, allow_ipv6=False) response = self.wait() self.assertEqual(response.code, 599) self.http_client.fetch(url, self.stop) response = self.wait() self.assertEqual(response.body, b"Hello world!") def xtest_multiple_content_length_accepted(self): response = self.fetch("/content_length?value=2,2") self.assertEqual(response.body, b"ok") response = self.fetch("/content_length?value=2,%202,2") self.assertEqual(response.body, b"ok") response = self.fetch("/content_length?value=2,4") self.assertEqual(response.code, 599) response = self.fetch("/content_length?value=2,%202,3") self.assertEqual(response.code, 599) def test_head_request(self): response = self.fetch("/head", method="HEAD") self.assertEqual(response.code, 200) self.assertEqual(response.headers["content-length"], "7") self.assertFalse(response.body) def test_options_request(self): response = self.fetch("/options", method="OPTIONS") self.assertEqual(response.code, 200) self.assertEqual(response.headers["content-length"], "2") self.assertEqual(response.headers["access-control-allow-origin"], "*") self.assertEqual(response.body, b"ok") def test_no_content(self): response = self.fetch("/no_content") self.assertEqual(response.code, 204) # 204 status shouldn't have a content-length # # Tests with a content-length header are included below # in HTTP204NoContentTestCase. self.assertNotIn("Content-Length", response.headers) def test_host_header(self): host_re = re.compile(b"^localhost:[0-9]+$") response = self.fetch("/host_echo") self.assertTrue(host_re.match(response.body)) url = self.get_url("/host_echo").replace("http://", "http://me:secret@") self.http_client.fetch(url, self.stop) response = self.wait() self.assertTrue(host_re.match(response.body), response.body) def test_connection_refused(self): cleanup_func, port = refusing_port() self.addCleanup(cleanup_func) with ExpectLog(gen_log, ".*", required=False): self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop) response = self.wait() self.assertEqual(599, response.code) if sys.platform != 'cygwin': # cygwin returns EPERM instead of ECONNREFUSED here contains_errno = str(errno.ECONNREFUSED) in str(response.error) if not contains_errno and hasattr(errno, "WSAECONNREFUSED"): contains_errno = str(errno.WSAECONNREFUSED) in str(response.error) self.assertTrue(contains_errno, response.error) # This is usually "Connection refused". # On windows, strerror is broken and returns "Unknown error". expected_message = os.strerror(errno.ECONNREFUSED) self.assertTrue(expected_message in str(response.error), response.error) def test_queue_timeout(self): with closing(self.create_client(max_clients=1)) as client: client.fetch(self.get_url('/trigger'), self.stop, request_timeout=10) # Wait for the trigger request to block, not complete. self.wait() client.fetch(self.get_url('/hello'), self.stop, connect_timeout=0.1) response = self.wait() self.assertEqual(response.code, 599) self.assertTrue(response.request_time < 1, response.request_time) self.assertEqual(str(response.error), "HTTP 599: Timeout in request queue") self.triggers.popleft()() self.wait() def test_no_content_length(self): response = self.fetch("/no_content_length") if response.body == b"HTTP/1 required": self.skipTest("requires HTTP/1.x") else: self.assertEquals(b"hello", response.body) def sync_body_producer(self, write): write(b'1234') write(b'5678') @gen.coroutine def async_body_producer(self, write): yield write(b'1234') yield gen.Task(IOLoop.current().add_callback) yield write(b'5678') def test_sync_body_producer_chunked(self): response = self.fetch("/echo_post", method="POST", body_producer=self.sync_body_producer) response.rethrow() self.assertEqual(response.body, b"12345678") def test_sync_body_producer_content_length(self): response = self.fetch("/echo_post", method="POST", body_producer=self.sync_body_producer, headers={'Content-Length': '8'}) response.rethrow() self.assertEqual(response.body, b"12345678") def test_async_body_producer_chunked(self): response = self.fetch("/echo_post", method="POST", body_producer=self.async_body_producer) response.rethrow() self.assertEqual(response.body, b"12345678") def test_async_body_producer_content_length(self): response = self.fetch("/echo_post", method="POST", body_producer=self.async_body_producer, headers={'Content-Length': '8'}) response.rethrow() self.assertEqual(response.body, b"12345678") @skipBefore35 def test_native_body_producer_chunked(self): namespace = exec_test(globals(), locals(), """ async def body_producer(write): await write(b'1234') await gen.Task(IOLoop.current().add_callback) await write(b'5678') """) response = self.fetch("/echo_post", method="POST", body_producer=namespace["body_producer"]) response.rethrow() self.assertEqual(response.body, b"12345678") @skipBefore35 def test_native_body_producer_content_length(self): namespace = exec_test(globals(), locals(), """ async def body_producer(write): await write(b'1234') await gen.Task(IOLoop.current().add_callback) await write(b'5678') """) response = self.fetch("/echo_post", method="POST", body_producer=namespace["body_producer"], headers={'Content-Length': '8'}) response.rethrow() self.assertEqual(response.body, b"12345678") def test_100_continue(self): response = self.fetch("/echo_post", method="POST", body=b"1234", expect_100_continue=True) self.assertEqual(response.body, b"1234") def test_100_continue_early_response(self): def body_producer(write): raise Exception("should not be called") response = self.fetch("/respond_in_prepare", method="POST", body_producer=body_producer, expect_100_continue=True) self.assertEqual(response.code, 403) def test_streaming_follow_redirects(self): # When following redirects, header and streaming callbacks # should only be called for the final result. # TODO(bdarnell): this test belongs in httpclient_test instead of # simple_httpclient_test, but it fails with the version of libcurl # available on travis-ci. Move it when that has been upgraded # or we have a better framework to skip tests based on curl version. headers = [] chunks = [] self.fetch("/redirect?url=/hello", header_callback=headers.append, streaming_callback=chunks.append) chunks = list(map(to_unicode, chunks)) self.assertEqual(chunks, ['Hello world!']) # Make sure we only got one set of headers. num_start_lines = len([h for h in headers if h.startswith("HTTP/")]) self.assertEqual(num_start_lines, 1) class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase): def setUp(self): super(SimpleHTTPClientTestCase, self).setUp() self.http_client = self.create_client() def create_client(self, **kwargs): return SimpleAsyncHTTPClient(force_instance=True, **kwargs) class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase): def setUp(self): super(SimpleHTTPSClientTestCase, self).setUp() self.http_client = self.create_client() def create_client(self, **kwargs): return SimpleAsyncHTTPClient(force_instance=True, defaults=dict(validate_cert=False), **kwargs) def test_ssl_options(self): resp = self.fetch("/hello", ssl_options={}) self.assertEqual(resp.body, b"Hello world!") @unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present') def test_ssl_context(self): resp = self.fetch("/hello", ssl_options=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) self.assertEqual(resp.body, b"Hello world!") def test_ssl_options_handshake_fail(self): with ExpectLog(gen_log, "SSL Error|Uncaught exception", required=False): resp = self.fetch( "/hello", ssl_options=dict(cert_reqs=ssl.CERT_REQUIRED)) self.assertRaises(ssl.SSLError, resp.rethrow) @unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present') def test_ssl_context_handshake_fail(self): with ExpectLog(gen_log, "SSL Error|Uncaught exception"): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED resp = self.fetch("/hello", ssl_options=ctx) self.assertRaises(ssl.SSLError, resp.rethrow) def test_error_logging(self): # No stack traces are logged for SSL errors (in this case, # failure to validate the testing self-signed cert). # The SSLError is exposed through ssl.SSLError. with ExpectLog(gen_log, '.*') as expect_log: response = self.fetch("/", validate_cert=True) self.assertEqual(response.code, 599) self.assertIsInstance(response.error, ssl.SSLError) self.assertFalse(expect_log.logged_stack) class CreateAsyncHTTPClientTestCase(AsyncTestCase): def setUp(self): super(CreateAsyncHTTPClientTestCase, self).setUp() self.saved = AsyncHTTPClient._save_configuration() def tearDown(self): AsyncHTTPClient._restore_configuration(self.saved) super(CreateAsyncHTTPClientTestCase, self).tearDown() def test_max_clients(self):
class HTTP100ContinueTestCase(AsyncHTTPTestCase): def respond_100(self, request): self.http1 = request.version.startswith('HTTP/1.') if not self.http1: request.connection.write_headers(ResponseStartLine('', 200, 'OK'), HTTPHeaders()) request.connection.finish() return self.request = request self.request.connection.stream.write( b"HTTP/1.1 100 CONTINUE\r\n\r\n", self.respond_200) def respond_200(self): self.request.connection.stream.write( b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA", self.request.connection.stream.close) def get_app(self): # Not a full Application, but works as an HTTPServer callback return self.respond_100 def test_100_continue(self): res = self.fetch('/') if not self.http1: self.skipTest("requires HTTP/1.x") self.assertEqual(res.body, b'A') class HTTP204NoContentTestCase(AsyncHTTPTestCase): def respond_204(self, request): self.http1 = request.version.startswith('HTTP/1.') if not self.http1: # Close the request cleanly in HTTP/2; it will be skipped anyway. request.connection.write_headers(ResponseStartLine('', 200, 'OK'), HTTPHeaders()) request.connection.finish() return # A 204 response never has a body, even if doesn't have a content-length # (which would otherwise mean read-until-close). We simulate here a # server that sends no content length and does not close the connection. # # Tests of a 204 response with no Content-Length header are included # in SimpleHTTPClientTestMixin. stream = request.connection.detach() stream.write(b"HTTP/1.1 204 No content\r\n") if request.arguments.get("error", [False])[-1]: stream.write(b"Content-Length: 5\r\n") else: stream.write(b"Content-Length: 0\r\n") stream.write(b"\r\n") stream.close() def get_app(self): return self.respond_204 def test_204_no_content(self): resp = self.fetch('/') if not self.http1: self.skipTest("requires HTTP/1.x") self.assertEqual(resp.code, 204) self.assertEqual(resp.body, b'') def test_204_invalid_content_length(self): # 204 status with non-zero content length is malformed with ExpectLog(gen_log, ".*Response with code 204 should not have body"): response = self.fetch("/?error=1") if not self.http1: self.skipTest("requires HTTP/1.x") if self.http_client.configured_class != SimpleAsyncHTTPClient: self.skipTest("curl client accepts invalid headers") self.assertEqual(response.code, 599) class HostnameMappingTestCase(AsyncHTTPTestCase): def setUp(self): super(HostnameMappingTestCase, self).setUp() self.http_client = SimpleAsyncHTTPClient( hostname_mapping={ 'www.example.com': '127.0.0.1', ('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()), }) def get_app(self): return Application([url("/hello", HelloWorldHandler), ]) def test_hostname_mapping(self): self.http_client.fetch( 'http://www.example.com:%d/hello' % self.get_http_port(), self.stop) response = self.wait() response.rethrow() self.assertEqual(response.body, b'Hello world!') def test_port_mapping(self): self.http_client.fetch('http://foo.example.com:8000/hello', self.stop) response = self.wait() response.rethrow() self.assertEqual(response.body, b'Hello world!') class ResolveTimeoutTestCase(AsyncHTTPTestCase): def setUp(self): # Dummy Resolver subclass that never invokes its callback. class BadResolver(Resolver): def resolve(self, *args, **kwargs): pass super(ResolveTimeoutTestCase, self).setUp() self.http_client = SimpleAsyncHTTPClient( resolver=BadResolver()) def get_app(self): return Application([url("/hello", HelloWorldHandler), ]) def test_resolve_timeout(self): response = self.fetch('/hello', connect_timeout=0.1) self.assertEqual(response.code, 599) class MaxHeaderSizeTest(AsyncHTTPTestCase): def get_app(self): class SmallHeaders(RequestHandler): def get(self): self.set_header("X-Filler", "a" * 100) self.write("ok") class LargeHeaders(RequestHandler): def get(self): self.set_header("X-Filler", "a" * 1000) self.write("ok") return Application([('/small', SmallHeaders), ('/large', LargeHeaders)]) def get_http_client(self): return SimpleAsyncHTTPClient(max_header_size=1024) def test_small_headers(self): response = self.fetch('/small') response.rethrow() self.assertEqual(response.body, b'ok') def test_large_headers(self): with ExpectLog(gen_log, "Unsatisfiable read"): response = self.fetch('/large') self.assertEqual(response.code, 599) class MaxBodySizeTest(AsyncHTTPTestCase): def get_app(self): class SmallBody(RequestHandler): def get(self): self.write("a" * 1024 * 64) class LargeBody(RequestHandler): def get(self): self.write("a" * 1024 * 100) return Application([('/small', SmallBody), ('/large', LargeBody)]) def get_http_client(self): return SimpleAsyncHTTPClient(max_body_size=1024 * 64) def test_small_body(self): response = self.fetch('/small') response.rethrow() self.assertEqual(response.body, b'a' * 1024 * 64) def test_large_body(self): with ExpectLog(gen_log, "Malformed HTTP message from None: Content-Length too long"): response = self.fetch('/large') self.assertEqual(response.code, 599) class MaxBufferSizeTest(AsyncHTTPTestCase): def get_app(self): class LargeBody(RequestHandler): def get(self): self.write("a" * 1024 * 100) return Application([('/large', LargeBody)]) def get_http_client(self): # 100KB body with 64KB buffer return SimpleAsyncHTTPClient(max_body_size=1024 * 100, max_buffer_size=1024 * 64) def test_large_body(self): response = self.fetch('/large') response.rethrow() self.assertEqual(response.body, b'a' * 1024 * 100) class ChunkedWithContentLengthTest(AsyncHTTPTestCase): def get_app(self): class ChunkedWithContentLength(RequestHandler): def get(self): # Add an invalid Transfer-Encoding to the response self.set_header('Transfer-Encoding', 'chunked') self.write("Hello world") return Application([('/chunkwithcl', ChunkedWithContentLength)]) def get_http_client(self): return SimpleAsyncHTTPClient() def test_chunked_with_content_length(self): # Make sure the invalid headers are detected with ExpectLog(gen_log, ("Malformed HTTP message from None: Response " "with both Transfer-Encoding and Content-Length")): response = self.fetch('/chunkwithcl') self.assertEqual(response.code, 599)
AsyncHTTPClient.configure(SimpleAsyncHTTPClient) with closing(AsyncHTTPClient(force_instance=True)) as client: self.assertEqual(client.max_clients, 10) with closing(AsyncHTTPClient( max_clients=11, force_instance=True)) as client: self.assertEqual(client.max_clients, 11) # Now configure max_clients statically and try overriding it # with each way max_clients can be passed AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12) with closing(AsyncHTTPClient(force_instance=True)) as client: self.assertEqual(client.max_clients, 12) with closing(AsyncHTTPClient( max_clients=13, force_instance=True)) as client: self.assertEqual(client.max_clients, 13) with closing(AsyncHTTPClient( max_clients=14, force_instance=True)) as client: self.assertEqual(client.max_clients, 14)
set_groups_ad2cp.py
from typing import List, Optional import numpy as np import xarray as xr from .parse_ad2cp import Ad2cpDataPacket, Field, HeaderOrDataRecordFormats from .set_groups_base import SetGroupsBase, set_encodings def merge_attrs(datasets: List[xr.Dataset]) -> List[xr.Dataset]: """ Merges attrs from a list of datasets. Prioritizes keys from later datsets. """ total_attrs = dict() for ds in datasets: total_attrs.update(ds.attrs) for ds in datasets: ds.attrs = total_attrs return datasets class SetGroupsAd2cp(SetGroupsBase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.pulse_compressed = self.parser_obj.get_pulse_compressed() self.combine_packets() def combine_packets(self): self.ds = None # # TODO: where to put string data in output? # pad raw samples so that "sample" dimenion has same length max_samples = 0 for packet in self.parser_obj.echosounder_raw_packets: # both _r and _i have same dimensions max_samples = max( max_samples, packet.data["echosounder_raw_samples_i"].shape[0] ) for packet in self.parser_obj.echosounder_raw_packets: packet.data["echosounder_raw_samples_i"] = np.pad( packet.data["echosounder_raw_samples_i"], ((0, max_samples - packet.data["echosounder_raw_samples_i"].shape[0])), ) packet.data["echosounder_raw_samples_q"] = np.pad( packet.data["echosounder_raw_samples_q"], ((0, max_samples - packet.data["echosounder_raw_samples_q"].shape[0])), ) def make_dataset( packets: List[Ad2cpDataPacket], ping_time_dim: str ) -> Optional[xr.Dataset]: for i in range(len(packets)): packet = packets[i] data_vars = dict() for field_name, field_value in packet.data.items(): # add dimension names to data vars for xarray # TODO might not work with altimeter_spare field = HeaderOrDataRecordFormats.data_record_format( packet.data_record_type ).get_field(field_name) if field is not None: dims = field.dimensions(packet.data_record_type) units = field.units() else: dims = Field.default_dimensions() units = None if units: data_vars[field_name] = ( tuple(dim.value for dim in dims), [field_value], {"Units": units}, ) else: data_vars[field_name] = ( tuple(dim.value for dim in dims), [field_value], ) coords = { "ping_time": [packet.timestamp], ping_time_dim: [packet.timestamp], } if "beams" in packet.data_exclude: coords["beam"] = packet.data_exclude["beams"] new_packet = xr.Dataset(data_vars=data_vars, coords=coords) # modify in place to reduce memory consumption packets[i] = new_packet if len(packets) > 0: packets = merge_attrs(packets) return xr.combine_by_coords( packets, data_vars="minimal", coords="minimal", combine_attrs="override", ) else: return None burst_ds = make_dataset( self.parser_obj.burst_packets, ping_time_dim="ping_time_burst" ) average_ds = make_dataset( self.parser_obj.average_packets, ping_time_dim="ping_time_average" ) echosounder_ds = make_dataset( self.parser_obj.echosounder_packets, ping_time_dim="ping_time_echosounder" ) echosounder_raw_ds = make_dataset( self.parser_obj.echosounder_raw_packets, ping_time_dim="ping_time_echosounder_raw", ) echosounder_raw_transmit_ds = make_dataset( self.parser_obj.echosounder_raw_transmit_packets, ping_time_dim="ping_time_echosounder_raw_transmit", ) datasets = [ ds for ds in ( burst_ds, average_ds, echosounder_ds, echosounder_raw_ds, echosounder_raw_transmit_ds, ) if ds ] for dataset in datasets: if "offset_of_data" in dataset: print(dataset["offset_of_data"]) datasets = merge_attrs(datasets) self.ds = xr.merge(datasets) def
(self) -> xr.Dataset: ds = xr.Dataset( data_vars={ "sound_speed_indicative": self.ds.get("speed_of_sound"), "temperature": self.ds.get("temperature"), "pressure": self.ds.get("pressure"), }, coords={ "ping_time": self.ds.get("ping_time"), "ping_time_burst": self.ds.get("ping_time_burst", []), "ping_time_average": self.ds.get("ping_time_average", []), "ping_time_echosounder": self.ds.get("ping_time_echosounder", []), }, ) # FIXME: this is a hack because the current file saving # mechanism requires that the env group have ping_time as a dimension, # but ping_time might not be a dimension if the dataset is completely # empty if "ping_time" not in ds.dims: ds = ds.expand_dims(dim="ping_time") return set_encodings(ds) def set_platform(self) -> xr.Dataset: ds = xr.Dataset( data_vars={ "heading": self.ds.get("heading"), "pitch": self.ds.get("pitch"), "roll": self.ds.get("roll"), "magnetometer_raw_x": self.ds.get("magnetometer_raw_x"), "magnetometer_raw_y": self.ds.get("magnetometer_raw_y"), "magnetometer_raw_z": self.ds.get("magnetometer_raw_z"), }, coords={ "ping_time": self.ds.get("ping_time"), "ping_time_burst": self.ds.get("ping_time_burst"), "ping_time_average": self.ds.get("ping_time_average"), "ping_time_echosounder": self.ds.get("ping_time_echosounder"), "beam": self.ds.get("beam"), "range_bin_burst": self.ds.get("range_bin_burst"), "range_bin_average": self.ds.get("range_bin_average"), "range_bin_echosounder": self.ds.get("range_bin_echosounder"), }, attrs={ "platform_name": self.ui_param["platform_name"], "platform_type": self.ui_param["platform_type"], "platform_code_ICES": self.ui_param["platform_code_ICES"], }, ) return set_encodings(ds) def set_beam(self) -> xr.Dataset: # TODO: should we divide beam into burst/average (e.g., beam_burst, beam_average) # like was done for range_bin (we have range_bin_burst, range_bin_average, # and range_bin_echosounder)? data_vars = { "number_of_beams": self.ds.get("num_beams"), "coordinate_system": self.ds.get("coordinate_system"), "number_of_cells": self.ds.get("num_cells"), "blanking": self.ds.get("blanking"), "cell_size": self.ds.get("cell_size"), "velocity_range": self.ds.get("velocity_range"), "echosounder_frequency": self.ds.get("echosounder_frequency"), "ambiguity_velocity": self.ds.get("ambiguity_velocity"), "data_set_description": self.ds.get("dataset_description"), "transmit_energy": self.ds.get("transmit_energy"), "velocity_scaling": self.ds.get("velocity_scaling"), "velocity_burst": self.ds.get("velocity_data_burst"), "velocity_average": self.ds.get("velocity_data_average"), # "velocity_echosounder": self.ds.get("velocity_data_echosounder"), "amplitude_burst": self.ds.get("amplitude_data_burst"), "amplitude_average": self.ds.get("amplitude_data_average"), # "amplitude_echosounder": self.ds.get("amplitude_data_echosounder"), "correlation_burst": self.ds.get("correlation_data_burst"), "correlation_average": self.ds.get("correlation_data_average"), "correlation_echosounder": self.ds.get("correlation_data_echosounder"), # "echosounder": self.ds.get("echosounder_data"), "amplitude_echosounder": self.ds.get("echosounder_data"), "figure_of_merit": self.ds.get("figure_of_merit_data"), "altimeter_distance": self.ds.get("altimeter_distance"), "altimeter_quality": self.ds.get("altimeter_quality"), "ast_distance": self.ds.get("ast_distance"), "ast_quality": self.ds.get("ast_quality"), "ast_offset_100us": self.ds.get("ast_offset_100us"), "ast_pressure": self.ds.get("ast_pressure"), "altimeter_spare": self.ds.get("altimeter_spare"), "altimeter_raw_data_num_samples": self.ds.get( "altimeter_raw_data_num_samples" ), "altimeter_raw_data_sample_distance": self.ds.get( "altimeter_raw_data_sample_distance" ), "altimeter_raw_data_samples": self.ds.get("altimeter_raw_data_samples"), } ds = xr.Dataset( data_vars=data_vars, coords={ "ping_time": self.ds.get("ping_time"), "ping_time_burst": self.ds.get("ping_time_burst"), "ping_time_average": self.ds.get("ping_time_average"), "ping_time_echosounder": self.ds.get("ping_time_echosounder"), "beam": self.ds.get("beam"), "range_bin_burst": self.ds.get("range_bin_burst"), "range_bin_average": self.ds.get("range_bin_average"), "range_bin_echosounder": self.ds.get("range_bin_echosounder"), "altimeter_sample_bin": self.ds.get("altimeter_sample_bin"), }, attrs={"pulse_compressed": self.pulse_compressed}, ) # FIXME: this is a hack because the current file saving # mechanism requires that the beam group have ping_time as a dimension, # but ping_time might not be a dimension if the dataset is completely # empty if "ping_time" not in ds.dims: ds = ds.expand_dims(dim="ping_time") return set_encodings(ds) def set_vendor(self) -> xr.Dataset: attrs = { "pressure_sensor_valid": self.ds.get("pressure_sensor_valid"), "temperature_sensor_valid": self.ds.get("temperature_sensor_valid"), "compass_sensor_valid": self.ds.get("compass_sensor_valid"), "tilt_sensor_valid": self.ds.get("tilt_sensor_valid"), } attrs = { field_name: field_value.data[0] for field_name, field_value in attrs.items() if field_value is not None } ds = xr.Dataset( data_vars={ "data_record_version": self.ds.get("version"), "error": self.ds.get("error"), "status": self.ds.get("status"), "status0": self.ds.get("status0"), "battery_voltage": self.ds.get("battery_voltage"), "power_level": self.ds.get("power_level"), "temperature_of_pressure_sensor": self.ds.get( "temperature_from_pressure_sensor" ), "nominal_correlation": self.ds.get("nominal_correlation"), "magnetometer_temperature": self.ds.get("magnetometer_temperature"), "real_ping_time_clock_temperature": self.ds.get( "real_ping_time_clock_temperature" ), "ensemble_counter": self.ds.get("ensemble_counter"), "ahrs_rotation_matrix_mij": ( ("mij", "ping_time") if "ahrs_rotation_matrix_m11" in self.ds else "mij", [ self.ds.get("ahrs_rotation_matrix_m11"), self.ds.get("ahrs_rotation_matrix_m12"), self.ds.get("ahrs_rotation_matrix_m13"), self.ds.get("ahrs_rotation_matrix_m21"), self.ds.get("ahrs_rotation_matrix_m22"), self.ds.get("ahrs_rotation_matrix_m23"), self.ds.get("ahrs_rotation_matrix_m31"), self.ds.get("ahrs_rotation_matrix_m32"), self.ds.get("ahrs_rotation_matrix_m33"), ], ), "ahrs_quaternions_wxyz": ( ("wxyz", "ping_time") if "ahrs_quaternions_w" in self.ds else "wxyz", [ self.ds.get("ahrs_quaternions_w"), self.ds.get("ahrs_quaternions_x"), self.ds.get("ahrs_quaternions_y"), self.ds.get("ahrs_quaternions_z"), ], ), "ahrs_gyro_xyz": ( ("xyz", "ping_time") if "ahrs_gyro_x" in self.ds else "xyz", [ self.ds.get("ahrs_gyro_x"), self.ds.get("ahrs_gyro_y"), self.ds.get("ahrs_gyro_z"), ], ), "percentage_good_data": self.ds.get("percentage_good_data"), "std_dev_pitch": self.ds.get("std_dev_pitch"), "std_dev_roll": self.ds.get("std_dev_roll"), "std_dev_heading": self.ds.get("std_dev_heading"), "std_dev_pressure": self.ds.get("std_dev_pressure"), "echosounder_raw_samples_i": self.ds.get("echosounder_raw_samples_i"), "echosounder_raw_samples_q": self.ds.get("echosounder_raw_samples_q"), "echosounder_raw_transmit_samples_i": self.ds.get( "echosounder_raw_transmit_samples_i" ), "echosounder_raw_transmit_samples_q": self.ds.get( "echosounder_raw_transmit_samples_q" ), "echosounder_raw_beam": self.ds.get("echosounder_raw_beam"), "echosounder_raw_echogram": self.ds.get("echosounder_raw_echogram"), }, coords={ "ping_time": self.ds.get("ping_time"), "ping_time_burst": self.ds.get("ping_time_burst"), "ping_time_average": self.ds.get("ping_time_average"), "ping_time_echosounder": self.ds.get("ping_time_echosounder"), "ping_time_echosounder_raw": self.ds.get("ping_time_echosounder_raw"), "ping_time_echosounder_raw_transmit": self.ds.get( "ping_time_echosounder_raw_transmit" ), "sample": self.ds.get("sample"), "sample_transmit": self.ds.get("sample_transmit"), "beam": self.ds.get("beam"), "range_bin_average": self.ds.get("range_bin_average"), "range_bin_burst": self.ds.get("range_bin_burst"), "range_bin_echosounder": self.ds.get("range_bin_echosounder"), }, attrs={**attrs, "pulse_compressed": self.pulse_compressed}, ) ds = ds.reindex( { "mij": np.array(["11", "12", "13", "21", "22", "23", "31", "32", "33"]), "wxyz": np.array(["w", "x", "y", "z"]), "xyz": np.array(["x", "y", "z"]), } ) # FIXME: this is a hack because the current file saving # mechanism requires that the vendor group have ping_time as a dimension, # but ping_time might not be a dimension if the dataset is completely # empty if "ping_time" not in ds.dims: ds = ds.expand_dims(dim="ping_time") return set_encodings(ds) def set_sonar(self) -> xr.Dataset: ds = xr.Dataset( attrs={ "sonar_manufacturer": "Nortek", "sonar_model": "AD2CP", "sonar_serial_number": "", "sonar_software_name": "", "sonar_software_version": "", "sonar_firmware_version": "", "sonar_type": "acoustic Doppler current profiler (ADCP)", } ) if "serial_number" in self.ds: ds.attrs["sonar_serial_number"] = int(self.ds["serial_number"].data[0]) firmware_version = self.parser_obj.get_firmware_version() if firmware_version is not None: ds.attrs["sonar_firmware_version"] = ", ".join( [f"{k}:{v}" for k, v in firmware_version.items()] ) return ds
set_env
pq.go
package ds // PqItem is our task object // // This implementation is NOT thread-safe type PqItem struct { ScheduledAt int64 Key string
// PriorityQueue is our main priority queue implementation // the sort order is determined by ScheduledAt // with smaller value returned earlier // // This implementation is NOT thread-safe type PriorityQueue struct { heapArray []*PqItem size int } // NewPriorityQueue setups our priorityqueue with the config func NewPriorityQueue() *PriorityQueue { maxheap := &PriorityQueue{ heapArray: []*PqItem{}, size: 0, } return maxheap } // HeapSize returns our priorityqueue size func (m *PriorityQueue) HeapSize() int { return m.size } func (m *PriorityQueue) leaf(index int) bool { return (index >= (m.size/2) && index <= m.size) } func (m *PriorityQueue) parent(index int) int { return (index - 1) / 2 } func (m *PriorityQueue) leftchild(index int) int { return 2*index + 1 } func (m *PriorityQueue) rightchild(index int) int { return 2*index + 2 } // Insert an item into the priorityqueue // and reorder its internal // // in theory, if the later work always scheduled earlier // this gonna be bit slower, cause lots of swapping (log2(m.HeapSize())) func (m *PriorityQueue) Insert(item *PqItem) error { m.heapArray = append(m.heapArray, item) m.size++ m.upHeapify(m.size - 1) return nil } func (m *PriorityQueue) swap(first, second int) { temp := m.heapArray[first] m.heapArray[first] = m.heapArray[second] m.heapArray[second] = temp } func (m *PriorityQueue) greater(first, second int) bool { return m.heapArray[first].ScheduledAt < m.heapArray[second].ScheduledAt } func (m *PriorityQueue) upHeapify(index int) { for m.greater(index, m.parent(index)) { m.swap(index, m.parent(index)) index = m.parent(index) } } func (m *PriorityQueue) downHeapify(current int) { if m.leaf(current) { return } largest := current leftChildIndex := m.leftchild(current) rightChildIndex := m.rightchild(current) //If current is smallest then return if leftChildIndex < m.size && m.greater(leftChildIndex, largest) { largest = leftChildIndex } if rightChildIndex < m.size && m.greater(rightChildIndex, largest) { largest = rightChildIndex } if largest != current { m.swap(current, largest) m.downHeapify(largest) } } // Pop returns one item from the priorityqueue // and removing it func (m *PriorityQueue) Pop() *PqItem { top := m.heapArray[0] m.heapArray[0] = m.heapArray[m.size-1] m.heapArray = m.heapArray[:(m.size)-1] m.size-- m.downHeapify(0) return top } // Peek returns one item from the priorityqueue // but not removing it func (m *PriorityQueue) Peek() *PqItem { if m.HeapSize() > 0 { return m.heapArray[0] } return nil }
Value []byte Retries int16 }
test_build.py
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import importlib import json import os import sys import unittest import unittest.mock from collections import Counter from common_testing import get_pytorch3d_dir # This file groups together tests which look at the code without running it. in_conda_build = os.environ.get("CONDA_BUILD_STATE", "") == "TEST" in_re_worker = os.environ.get("INSIDE_RE_WORKER") is not None class TestBuild(unittest.TestCase): def test_name_clash(self): # For setup.py, all translation units need distinct names, so we # cannot have foo.cu and foo.cpp, even in different directories. source_dir = get_pytorch3d_dir() / "pytorch3d" stems = [] for extension in [".cu", ".cpp"]: files = source_dir.glob(f"**/*{extension}") stems.extend(f.stem for f in files) counter = Counter(stems) for k, v in counter.items(): self.assertEqual(v, 1, f"Too many files with stem {k}.") @unittest.skipIf(in_re_worker, "In RE worker") def
(self): root_dir = get_pytorch3d_dir() extensions = ("py", "cu", "cuh", "cpp", "h", "hpp", "sh") expect = "Copyright (c) Facebook, Inc. and its affiliates.\n" files_missing_copyright_header = [] for extension in extensions: for path in root_dir.glob(f"**/*.{extension}"): excluded_files = ( "pytorch3d/transforms/external/kornia_angle_axis_to_rotation_matrix.py", "pytorch3d/csrc/pulsar/include/fastermath.h", ) if in_conda_build: excluded_files += ( "run_test.py", "run_test.sh", "conda_test_runner.sh", "conda_test_env_vars.sh", ) if str(path).endswith(excluded_files): continue with open(path) as f: firstline = f.readline() if firstline.startswith(("# -*-", "#!", "/*")): firstline = f.readline() if not firstline.endswith(expect): files_missing_copyright_header.append(str(path)) if len(files_missing_copyright_header) != 0: self.fail("\n".join(files_missing_copyright_header)) @unittest.skipIf(in_re_worker, "In RE worker") def test_valid_ipynbs(self): # Check that the ipython notebooks are valid json root_dir = get_pytorch3d_dir() tutorials_dir = root_dir / "docs" / "tutorials" tutorials = sorted(tutorials_dir.glob("*.ipynb")) for tutorial in tutorials: with open(tutorial) as f: json.load(f) @unittest.skipIf(in_conda_build or in_re_worker, "In conda build, or RE worker") def test_enumerated_ipynbs(self): # Check that the tutorials are all referenced in tutorials.json. root_dir = get_pytorch3d_dir() tutorials_dir = root_dir / "docs" / "tutorials" tutorials_on_disk = sorted(i.stem for i in tutorials_dir.glob("*.ipynb")) json_file = root_dir / "website" / "tutorials.json" with open(json_file) as f: cfg_dict = json.load(f) listed_in_json = [] for section in cfg_dict.values(): listed_in_json.extend(item["id"] for item in section) self.assertListEqual(sorted(listed_in_json), tutorials_on_disk) @unittest.skipIf(in_conda_build or in_re_worker, "In conda build, or RE worker") def test_enumerated_notes(self): # Check that the notes are all referenced in sidebars.json. root_dir = get_pytorch3d_dir() notes_dir = root_dir / "docs" / "notes" notes_on_disk = sorted(i.stem for i in notes_dir.glob("*.md")) json_file = root_dir / "website" / "sidebars.json" with open(json_file) as f: cfg_dict = json.load(f) listed_in_json = [] for section in cfg_dict["docs"].values(): listed_in_json.extend(section) self.assertListEqual(sorted(listed_in_json), notes_on_disk) def test_no_import_cycles(self): # Check each module of pytorch3d imports cleanly, # which may fail if there are import cycles. # First check the setup of the test. If any of pytorch3d # was already imported the test would be pointless. for module in sys.modules: self.assertFalse(module.startswith("pytorch3d"), module) root_dir = get_pytorch3d_dir() / "pytorch3d" for module_file in root_dir.glob("**/*.py"): if module_file.stem == "__init__": continue relative_module = str(module_file.relative_to(root_dir))[:-3] module = "pytorch3d." + relative_module.replace("/", ".") with self.subTest(name=module): with unittest.mock.patch.dict(sys.modules): importlib.import_module(module)
test_copyright
Bookmark.ts
import { IBase, setAttrForObject2 } from '../common'; import { inPlaceConvertToSaved, newEmptyBase } from '../Base'; import { NotTabSpaceId } from '../chromeSession/ChromeSession'; import produce from 'immer'; export interface Bookmark extends IBase { tabSpaceId: string; name: string; url: string; favIconUrl: string; } export type BookmarkLocalStorage = Pick< Bookmark, 'name' | 'url' | 'favIconUrl' >;
export const BOOKMARK_DB_SCHEMA = 'id, createdAt, tabSpaceId, name, url'; export function newEmptyBookmark(): Bookmark { return { ...newEmptyBase(), tabSpaceId: NotTabSpaceId, name: '', url: '', favIconUrl: '', }; } export const setTabSpaceId = setAttrForObject2<string, Bookmark>('tabSpaceId'); export const setName = setAttrForObject2<string, Bookmark>('name'); export const setUrl = setAttrForObject2<string, Bookmark>('url'); export const setFavIconUrl = setAttrForObject2<string, Bookmark>('favIconUrl'); export function convertToSavedBookmark(targetBookmark: Bookmark): Bookmark { return produce(targetBookmark, (draft) => { inPlaceConvertToSaved(draft); }); }
export const BOOKMARK_DB_TABLE_NAME = 'SavedBookmark';
grant_update.go
// Code generated by entc, DO NOT EDIT. package ent import ( "context" "fmt" "time" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/open-privacy/opv/pkg/ent/grant" "github.com/open-privacy/opv/pkg/ent/predicate" ) // GrantUpdate is the builder for updating Grant entities. type GrantUpdate struct { config hooks []Hook mutation *GrantMutation } // Where adds a new predicate for the GrantUpdate builder. func (gu *GrantUpdate) Where(ps ...predicate.Grant) *GrantUpdate { gu.mutation.predicates = append(gu.mutation.predicates, ps...) return gu } // SetDeletedAt sets the "deleted_at" field. func (gu *GrantUpdate) SetDeletedAt(t time.Time) *GrantUpdate { gu.mutation.SetDeletedAt(t) return gu } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. func (gu *GrantUpdate) SetNillableDeletedAt(t *time.Time) *GrantUpdate { if t != nil { gu.SetDeletedAt(*t) } return gu } // ClearDeletedAt clears the value of the "deleted_at" field. func (gu *GrantUpdate) ClearDeletedAt() *GrantUpdate { gu.mutation.ClearDeletedAt() return gu } // SetHashedGrantToken sets the "hashed_grant_token" field. func (gu *GrantUpdate) SetHashedGrantToken(s string) *GrantUpdate { gu.mutation.SetHashedGrantToken(s) return gu } // SetDomain sets the "domain" field. func (gu *GrantUpdate) SetDomain(s string) *GrantUpdate { gu.mutation.SetDomain(s) return gu } // SetVersion sets the "version" field. func (gu *GrantUpdate) SetVersion(s string) *GrantUpdate { gu.mutation.SetVersion(s) return gu } // SetAllowedHTTPMethods sets the "allowed_http_methods" field. func (gu *GrantUpdate) SetAllowedHTTPMethods(s string) *GrantUpdate { gu.mutation.SetAllowedHTTPMethods(s) return gu } // SetPaths sets the "paths" field. func (gu *GrantUpdate) SetPaths(s []string) *GrantUpdate { gu.mutation.SetPaths(s) return gu } // Mutation returns the GrantMutation object of the builder. func (gu *GrantUpdate) Mutation() *GrantMutation { return gu.mutation } // Save executes the query and returns the number of nodes affected by the update operation. func (gu *GrantUpdate) Save(ctx context.Context) (int, error) { var ( err error affected int ) gu.defaults() if len(gu.hooks) == 0 { affected, err = gu.sqlSave(ctx) } else { var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*GrantMutation) if !ok { return nil, fmt.Errorf("unexpected mutation type %T", m) } gu.mutation = mutation affected, err = gu.sqlSave(ctx) mutation.done = true return affected, err }) for i := len(gu.hooks) - 1; i >= 0; i-- { mut = gu.hooks[i](mut) } if _, err := mut.Mutate(ctx, gu.mutation); err != nil { return 0, err } } return affected, err } // SaveX is like Save, but panics if an error occurs. func (gu *GrantUpdate) SaveX(ctx context.Context) int { affected, err := gu.Save(ctx) if err != nil { panic(err) } return affected } // Exec executes the query. func (gu *GrantUpdate) Exec(ctx context.Context) error { _, err := gu.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs.
if err := gu.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. func (gu *GrantUpdate) defaults() { if _, ok := gu.mutation.UpdatedAt(); !ok { v := grant.UpdateDefaultUpdatedAt() gu.mutation.SetUpdatedAt(v) } } func (gu *GrantUpdate) sqlSave(ctx context.Context) (n int, err error) { _spec := &sqlgraph.UpdateSpec{ Node: &sqlgraph.NodeSpec{ Table: grant.Table, Columns: grant.Columns, ID: &sqlgraph.FieldSpec{ Type: field.TypeString, Column: grant.FieldID, }, }, } if ps := gu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } if value, ok := gu.mutation.UpdatedAt(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeTime, Value: value, Column: grant.FieldUpdatedAt, }) } if value, ok := gu.mutation.DeletedAt(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeTime, Value: value, Column: grant.FieldDeletedAt, }) } if gu.mutation.DeletedAtCleared() { _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ Type: field.TypeTime, Column: grant.FieldDeletedAt, }) } if value, ok := gu.mutation.HashedGrantToken(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, Value: value, Column: grant.FieldHashedGrantToken, }) } if value, ok := gu.mutation.Domain(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, Value: value, Column: grant.FieldDomain, }) } if value, ok := gu.mutation.Version(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, Value: value, Column: grant.FieldVersion, }) } if value, ok := gu.mutation.AllowedHTTPMethods(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, Value: value, Column: grant.FieldAllowedHTTPMethods, }) } if value, ok := gu.mutation.Paths(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeJSON, Value: value, Column: grant.FieldPaths, }) } if n, err = sqlgraph.UpdateNodes(ctx, gu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{grant.Label} } else if cerr, ok := isSQLConstraintError(err); ok { err = cerr } return 0, err } return n, nil } // GrantUpdateOne is the builder for updating a single Grant entity. type GrantUpdateOne struct { config hooks []Hook mutation *GrantMutation } // SetDeletedAt sets the "deleted_at" field. func (guo *GrantUpdateOne) SetDeletedAt(t time.Time) *GrantUpdateOne { guo.mutation.SetDeletedAt(t) return guo } // SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. func (guo *GrantUpdateOne) SetNillableDeletedAt(t *time.Time) *GrantUpdateOne { if t != nil { guo.SetDeletedAt(*t) } return guo } // ClearDeletedAt clears the value of the "deleted_at" field. func (guo *GrantUpdateOne) ClearDeletedAt() *GrantUpdateOne { guo.mutation.ClearDeletedAt() return guo } // SetHashedGrantToken sets the "hashed_grant_token" field. func (guo *GrantUpdateOne) SetHashedGrantToken(s string) *GrantUpdateOne { guo.mutation.SetHashedGrantToken(s) return guo } // SetDomain sets the "domain" field. func (guo *GrantUpdateOne) SetDomain(s string) *GrantUpdateOne { guo.mutation.SetDomain(s) return guo } // SetVersion sets the "version" field. func (guo *GrantUpdateOne) SetVersion(s string) *GrantUpdateOne { guo.mutation.SetVersion(s) return guo } // SetAllowedHTTPMethods sets the "allowed_http_methods" field. func (guo *GrantUpdateOne) SetAllowedHTTPMethods(s string) *GrantUpdateOne { guo.mutation.SetAllowedHTTPMethods(s) return guo } // SetPaths sets the "paths" field. func (guo *GrantUpdateOne) SetPaths(s []string) *GrantUpdateOne { guo.mutation.SetPaths(s) return guo } // Mutation returns the GrantMutation object of the builder. func (guo *GrantUpdateOne) Mutation() *GrantMutation { return guo.mutation } // Save executes the query and returns the updated Grant entity. func (guo *GrantUpdateOne) Save(ctx context.Context) (*Grant, error) { var ( err error node *Grant ) guo.defaults() if len(guo.hooks) == 0 { node, err = guo.sqlSave(ctx) } else { var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*GrantMutation) if !ok { return nil, fmt.Errorf("unexpected mutation type %T", m) } guo.mutation = mutation node, err = guo.sqlSave(ctx) mutation.done = true return node, err }) for i := len(guo.hooks) - 1; i >= 0; i-- { mut = guo.hooks[i](mut) } if _, err := mut.Mutate(ctx, guo.mutation); err != nil { return nil, err } } return node, err } // SaveX is like Save, but panics if an error occurs. func (guo *GrantUpdateOne) SaveX(ctx context.Context) *Grant { node, err := guo.Save(ctx) if err != nil { panic(err) } return node } // Exec executes the query on the entity. func (guo *GrantUpdateOne) Exec(ctx context.Context) error { _, err := guo.Save(ctx) return err } // ExecX is like Exec, but panics if an error occurs. func (guo *GrantUpdateOne) ExecX(ctx context.Context) { if err := guo.Exec(ctx); err != nil { panic(err) } } // defaults sets the default values of the builder before save. func (guo *GrantUpdateOne) defaults() { if _, ok := guo.mutation.UpdatedAt(); !ok { v := grant.UpdateDefaultUpdatedAt() guo.mutation.SetUpdatedAt(v) } } func (guo *GrantUpdateOne) sqlSave(ctx context.Context) (_node *Grant, err error) { _spec := &sqlgraph.UpdateSpec{ Node: &sqlgraph.NodeSpec{ Table: grant.Table, Columns: grant.Columns, ID: &sqlgraph.FieldSpec{ Type: field.TypeString, Column: grant.FieldID, }, }, } id, ok := guo.mutation.ID() if !ok { return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Grant.ID for update")} } _spec.Node.ID.Value = id if ps := guo.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { ps[i](selector) } } } if value, ok := guo.mutation.UpdatedAt(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeTime, Value: value, Column: grant.FieldUpdatedAt, }) } if value, ok := guo.mutation.DeletedAt(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeTime, Value: value, Column: grant.FieldDeletedAt, }) } if guo.mutation.DeletedAtCleared() { _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ Type: field.TypeTime, Column: grant.FieldDeletedAt, }) } if value, ok := guo.mutation.HashedGrantToken(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, Value: value, Column: grant.FieldHashedGrantToken, }) } if value, ok := guo.mutation.Domain(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, Value: value, Column: grant.FieldDomain, }) } if value, ok := guo.mutation.Version(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, Value: value, Column: grant.FieldVersion, }) } if value, ok := guo.mutation.AllowedHTTPMethods(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeString, Value: value, Column: grant.FieldAllowedHTTPMethods, }) } if value, ok := guo.mutation.Paths(); ok { _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ Type: field.TypeJSON, Value: value, Column: grant.FieldPaths, }) } _node = &Grant{config: guo.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues if err = sqlgraph.UpdateNode(ctx, guo.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{grant.Label} } else if cerr, ok := isSQLConstraintError(err); ok { err = cerr } return nil, err } return _node, nil }
func (gu *GrantUpdate) ExecX(ctx context.Context) {
model_bandwidth_resp.go
package model import ( "github.com/huaweicloud/huaweicloud-sdk-go-v3/core/utils" "errors" "github.com/huaweicloud/huaweicloud-sdk-go-v3/core/converter" "strings" ) // 带宽对象 type BandwidthResp struct { // 功能说明:带宽类型,共享带宽默认为share。 取值范围:share,bgp,telcom,sbgp等。 share:共享带宽 bgp:动态bgp telcom :联通 sbgp:静态bgp BandwidthType *string `json:"bandwidth_type,omitempty"` // 功能说明:账单信息 如果billinginfo不为空,说明是包周期的带宽 BillingInfo *string `json:"billing_info,omitempty"` // 功能说明:按流量计费,按带宽计费还是按增强型95计费。 取值范围:bandwidth,traffic,95peak_plus(按增强型95计费)不返回或者为空时表示是bandwidth。 约束:只有共享带宽支持95peak_plus(按增强型95计费),按增强型95计费时需要指定保底百分比,默认是20%。 ChargeMode *BandwidthRespChargeMode `json:"charge_mode,omitempty"` // 功能说明:带宽唯一标识 Id *string `json:"id,omitempty"` // 功能说明:带宽名称 取值范围:1-64个字符,支持数字、字母、中文、_(下划线)、-(中划线)、.(点) Name *string `json:"name,omitempty"` // 功能说明:带宽对应的弹性公网IP信息 约束:WHOLE类型的带宽支持多个弹性公网IP,PER类型的带宽只能对应一个弹性公网IP PublicipInfo *[]PublicipInfoResp `json:"publicip_info,omitempty"` // 功能说明:带宽类型,标识是否是共享带宽 取值范围:WHOLE,PER WHOLE表示共享带宽;PER,表示独享带宽 ShareType *BandwidthRespShareType `json:"share_type,omitempty"` // 功能说明:带宽大小 取值范围:默认5Mbit/s~2000Mbit/s(具体范围以各区域配置为准,请参见控制台对应页面显示)。 Size *int32 `json:"size,omitempty"` // 功能说明:用户所属租户ID TenantId *string `json:"tenant_id,omitempty"` // 企业项目ID。最大长度36字节,带“-”连字符的UUID格式,或者是字符串“0”。 创建带宽时,给带宽绑定企业项目ID。 EnterpriseProjectId *string `json:"enterprise_project_id,omitempty"` // 功能说明:带宽的状态 取值范围: FREEZED:冻结 NORMAL:正常 Status *BandwidthRespStatus `json:"status,omitempty"` // 功能说明:是否开启企业级qos,仅共享带宽支持开启。(该字段仅在上海1局点返回) EnableBandwidthRules *bool `json:"enable_bandwidth_rules,omitempty"` // 功能说明:带宽支持的最大分组规则数。(该字段仅在上海1局点返回) RuleQuota *int32 `json:"rule_quota,omitempty"` // 功能说明:带宽规则对象(该字段仅在上海1局点返回) BandwidthRules *[]BandWidthRules `json:"bandwidth_rules,omitempty"` // 功能说明:资源创建时间,UTC时间 格式: yyyy-MM-ddTHH:mm:ss CreatedAt *string `json:"created_at,omitempty"` // 功能说明:资源更新时间,UTC时间 格式: yyyy-MM-ddTHH:mm:ss UpdatedAt *string `json:"updated_at,omitempty"` // 功能说明:表示中心站点资源或者边缘站点资源 取值范围: center、边缘站点名称 约束:共享带宽只能绑定与该字段相同的publicip PublicBorderGroup *string `json:"public_border_group,omitempty"` } func (o BandwidthResp) String() string { data, err := utils.Marshal(o) if err != nil { return "BandwidthResp struct{}" } return strings.Join([]string{"BandwidthResp", string(data)}, " ") } type BandwidthRespChargeMode struct { value string } type BandwidthRespChargeModeEnum struct { BANDWIDTH BandwidthRespChargeMode TRAFFIC BandwidthRespChargeMode E_95PEAK_PLUS BandwidthRespChargeMode } func GetBandwidthRespChargeModeEnum() BandwidthRespChargeModeEnum { return BandwidthRespChargeModeEnum{ BANDWIDTH: BandwidthRespChargeMode{ value: "bandwidth", }, TRAFFIC: BandwidthRespChargeMode{ value: "traffic", }, E_95PEAK_PLUS: BandwidthRespChargeMode{ value: "95peak_plus", }, } } func (c BandwidthRespChargeMode) MarshalJSON() ([]byte, error) { return utils.Marshal(c.value) } func (c *BandwidthRespChargeMode) UnmarshalJSON(b []byte) error { myConverter := converter.StringConverterFactory("string") if myConverter != nil { val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\"")) if err == nil { c.value = val.(string) return nil } return err } else { return errors.New("convert enum data to string error") } } type BandwidthRespShareType struct { value string } type BandwidthRespShareTypeEnum struct { WHOLE BandwidthRespShareType PER BandwidthRespShareType } func GetBandwidthRespShareTypeEnum() BandwidthRespShareTypeEnum { return BandwidthRespShareTypeEnum{ WHOLE: BandwidthRespShareType{ value: "WHOLE", }, PER: BandwidthRespShareType{ value: "PER", }, } } func (c BandwidthRespShareType) MarshalJSON() ([]byte, error) { return utils.Marshal(c.value) }
== nil { c.value = val.(string) return nil } return err } else { return errors.New("convert enum data to string error") } } type BandwidthRespStatus struct { value string } type BandwidthRespStatusEnum struct { FREEZED BandwidthRespStatus NORMAL BandwidthRespStatus } func GetBandwidthRespStatusEnum() BandwidthRespStatusEnum { return BandwidthRespStatusEnum{ FREEZED: BandwidthRespStatus{ value: "FREEZED", }, NORMAL: BandwidthRespStatus{ value: "NORMAL", }, } } func (c BandwidthRespStatus) MarshalJSON() ([]byte, error) { return utils.Marshal(c.value) } func (c *BandwidthRespStatus) UnmarshalJSON(b []byte) error { myConverter := converter.StringConverterFactory("string") if myConverter != nil { val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\"")) if err == nil { c.value = val.(string) return nil } return err } else { return errors.New("convert enum data to string error") } }
func (c *BandwidthRespShareType) UnmarshalJSON(b []byte) error { myConverter := converter.StringConverterFactory("string") if myConverter != nil { val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\"")) if err
v1beta1.ts
/** * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { OAuth2Client, JWT, Compute, UserRefreshClient, } from 'google-auth-library'; import { GoogleConfigurable, createAPIRequest, MethodOptions, GlobalOptions, BodyResponseCallback, APIRequestContext, } from 'googleapis-common'; import {GaxiosPromise} from 'gaxios'; // tslint:disable: no-any // tslint:disable: class-name // tslint:disable: variable-name // tslint:disable: jsdoc-format // tslint:disable: no-namespace export namespace containeranalysis_v1beta1 { export interface Options extends GlobalOptions { version: 'v1beta1'; } interface StandardParameters { /** * V1 error format. */ '$.xgafv'?: string; /** * OAuth access token. */ access_token?: string; /** * Data format for response. */ alt?: string; /** * JSONP */ callback?: string; /** * Selector specifying which fields to include in a partial response. */ fields?: string; /** * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */ key?: string; /** * OAuth 2.0 token for the current user. */ oauth_token?: string; /** * Returns response with indentations and line breaks. */ prettyPrint?: boolean; /** * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */ quotaUser?: string; /** * Legacy upload protocol for media (e.g. "media", "multipart"). */ uploadType?: string; /** * Upload protocol for media (e.g. "raw", "multipart"). */ upload_protocol?: string; } /** * Container Analysis API * * An implementation of the Grafeas API, which stores, and enables querying and retrieval of critical metadata about all of your software artifacts. * * @example * const {google} = require('googleapis'); * const containeranalysis = google.containeranalysis('v1beta1'); * * @namespace containeranalysis * @type {Function} * @version v1beta1 * @variation v1beta1 * @param {object=} options Options for Containeranalysis */ export class Containeranalysis { context: APIRequestContext; projects: Resource$Projects; constructor(options: GlobalOptions, google?: GoogleConfigurable) { this.context = { _options: options || {}, google, }; this.projects = new Resource$Projects(this.context); } } /** * An alias to a repo revision. */ export interface Schema$AliasContext { /** * The alias kind. */ kind?: string | null; /** * The alias name. */ name?: string | null; } /** * Artifact describes a build product. */ export interface Schema$Artifact { /** * Hash or checksum value of a binary, or Docker Registry 2.0 digest of a container. */ checksum?: string | null; /** * Artifact ID, if any; for container images, this will be a URL by digest like `gcr.io/projectID/imagename@sha256:123456`. */ id?: string | null; /** * Related artifact names. This may be the path to a binary or jar file, or in the case of a container build, the name used to push the container image to Google Container Registry, as presented to `docker push`. Note that a single Artifact ID can have multiple names, for example if two tags are applied to one image. */ names?: string[] | null; } /** * Occurrence that represents a single &quot;attestation&quot;. The authenticity of an attestation can be verified using the attached signature. If the verifier trusts the public key of the signer, then verifying the signature is sufficient to establish trust. In this circumstance, the authority to which this attestation is attached is primarily useful for look-up (how to find this attestation if you already know the authority and artifact to be verified) and intent (which authority was this attestation intended to sign for). */ export interface Schema$Attestation { genericSignedAttestation?: Schema$GenericSignedAttestation; /** * A PGP signed attestation. */ pgpSignedAttestation?: Schema$PgpSignedAttestation; } /** * Note kind that represents a logical attestation &quot;role&quot; or &quot;authority&quot;. For example, an organization might have one `Authority` for &quot;QA&quot; and one for &quot;build&quot;. This note is intended to act strictly as a grouping mechanism for the attached occurrences (Attestations). This grouping mechanism also provides a security boundary, since IAM ACLs gate the ability for a principle to attach an occurrence to a given note. It also provides a single point of lookup to find all attached attestation occurrences, even if they don&#39;t all live in the same project. */ export interface Schema$Authority { /** * Hint hints at the purpose of the attestation authority. */ hint?: Schema$Hint; } /** * Basis describes the base image portion (Note) of the DockerImage relationship. Linked occurrences are derived from this or an equivalent image via: FROM &lt;Basis.resource_url&gt; Or an equivalent reference, e.g. a tag of the resource_url. */ export interface Schema$Basis { /** * Required. Immutable. The fingerprint of the base image. */ fingerprint?: Schema$Fingerprint; /** * Required. Immutable. The resource_url for the resource representing the basis of associated occurrence images. */ resourceUrl?: string | null; } /** * Request to create notes in batch. */ export interface Schema$BatchCreateNotesRequest { /** * The notes to create. Max allowed length is 1000. */ notes?: {[key: string]: Schema$Note} | null; } /** * Response for creating notes in batch. */ export interface Schema$BatchCreateNotesResponse { /** * The notes that were created. */ notes?: Schema$Note[]; } /** * Request to create occurrences in batch. */ export interface Schema$BatchCreateOccurrencesRequest { /** * The occurrences to create. Max allowed length is 1000. */ occurrences?: Schema$Occurrence[]; } /** * Response for creating occurrences in batch. */ export interface Schema$BatchCreateOccurrencesResponse { /** * The occurrences that were created. */ occurrences?: Schema$Occurrence[]; } /** * Associates `members` with a `role`. */ export interface Schema$Binding { /** * The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently. */ condition?: Schema$Expr; /** * Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. */ members?: string[] | null; /** * Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. */ role?: string | null; } /** * Note holding the version of the provider&#39;s builder and the signature of the provenance message in the build details occurrence. */ export interface Schema$Build { /** * Required. Immutable. Version of the builder which produced this build. */ builderVersion?: string | null; /** * Signature of the build in occurrences pointing to this build note containing build details. */ signature?: Schema$BuildSignature; } /** * Provenance of a build. Contains all information needed to verify the full details about the build from source to completion. */ export interface Schema$BuildProvenance { /** * Version string of the builder at the time this build was executed. */ builderVersion?: string | null; /** * Special options applied to this build. This is a catch-all field where build providers can enter any desired additional details. */ buildOptions?: {[key: string]: string} | null; /** * Output of the build. */ builtArtifacts?: Schema$Artifact[]; /** * Commands requested by the build. */ commands?: Schema$Command[]; /** * Time at which the build was created. */ createTime?: string | null; /** * E-mail address of the user who initiated this build. Note that this was the user&#39;s e-mail address at the time the build was initiated; this address may not represent the same end-user for all time. */ creator?: string | null; /** * Time at which execution of the build was finished. */ endTime?: string | null; /** * Required. Unique identifier of the build. */ id?: string | null; /** * URI where any logs for this provenance were written. */ logsUri?: string | null; /** * ID of the project. */ projectId?: string | null; /** * Details of the Source input to the build. */ sourceProvenance?: Schema$Source; /** * Time at which execution of the build was started. */ startTime?: string | null; /** * Trigger identifier if the build was triggered automatically; empty if not. */ triggerId?: string | null; } /** * Message encapsulating the signature of the verified build. */ export interface Schema$BuildSignature { /** * An ID for the key used to sign. This could be either an ID for the key stored in `public_key` (such as the ID or fingerprint for a PGP key, or the CN for a cert), or a reference to an external key (such as a reference to a key in Cloud Key Management Service). */ keyId?: string | null; /** * The type of the key, either stored in `public_key` or referenced in `key_id`. */ keyType?: string | null; /** * Public key of the builder which can be used to verify that the related findings are valid and unchanged. If `key_type` is empty, this defaults to PEM encoded public keys. This field may be empty if `key_id` references an external key. For Cloud Build based signatures, this is a PEM encoded public key. To verify the Cloud Build signature, place the contents of this field into a file (public.pem). The signature field is base64-decoded into its binary representation in signature.bin, and the provenance bytes from `BuildDetails` are base64-decoded into a binary representation in signed.bin. OpenSSL can then verify the signature: `openssl sha256 -verify public.pem -signature signature.bin signed.bin` */ publicKey?: string | null; /** * Required. Signature of the related `BuildProvenance`. In JSON, this is base-64 encoded. */ signature?: string | null; } /** * A CloudRepoSourceContext denotes a particular revision in a Google Cloud Source Repo. */ export interface Schema$CloudRepoSourceContext { /** * An alias, which may be a branch or tag. */ aliasContext?: Schema$AliasContext; /** * The ID of the repo. */ repoId?: Schema$RepoId; /** * A revision ID. */ revisionId?: string | null; } /** * Command describes a step performed as part of the build pipeline. */ export interface Schema$Command { /** * Command-line arguments used when executing this command. */ args?: string[] | null; /** * Working directory (relative to project source root) used when running this command. */ dir?: string | null; /** * Environment variables set before running this command. */ env?: string[] | null; /** * Optional unique identifier for this command, used in wait_for to reference this command as a dependency. */ id?: string | null; /** * Required. Name of the command, as presented on the command line, or if the command is packaged as a Docker container, as presented to `docker pull`. */ name?: string | null; /** * The ID(s) of the command(s) that this command depends on. */ waitFor?: string[] | null; } /** * Common Vulnerability Scoring System version 3. For details, see https://www.first.org/cvss/specification-document */ export interface Schema$CVSSv3 { attackComplexity?: string | null; /** * Base Metrics Represents the intrinsic characteristics of a vulnerability that are constant over time and across user environments. */ attackVector?: string | null; availabilityImpact?: string | null; /** * The base score is a function of the base metric scores. */ baseScore?: number | null; confidentialityImpact?: string | null; exploitabilityScore?: number | null; impactScore?: number | null; integrityImpact?: string | null; privilegesRequired?: string | null; scope?: string | null; userInteraction?: string | null; } /** * An artifact that can be deployed in some runtime. */ export interface Schema$Deployable { /** * Required. Resource URI for the artifact being deployed. */ resourceUri?: string[] | null; } /** * The period during which some deployable was active in a runtime. */ export interface Schema$Deployment { /** * Address of the runtime element hosting this deployment. */ address?: string | null; /** * Configuration used to create this deployment. */ config?: string | null; /** * Required. Beginning of the lifetime of this deployment. */ deployTime?: string | null; /** * Platform hosting this deployment. */ platform?: string | null; /** * Output only. Resource URI for the artifact being deployed taken from the deployable field with the same name. */ resourceUri?: string[] | null; /** * End of the lifetime of this deployment. */ undeployTime?: string | null; /** * Identity of the user that triggered this deployment. */ userEmail?: string | null; } /** * Derived describes the derived image portion (Occurrence) of the DockerImage relationship. This image would be produced from a Dockerfile with FROM &lt;DockerImage.Basis in attached Note&gt;. */ export interface Schema$Derived { /** * Output only. This contains the base image URL for the derived image occurrence. */ baseResourceUrl?: string | null; /** * Output only. The number of layers by which this image differs from the associated image basis. */ distance?: number | null; /** * Required. The fingerprint of the derived image. */ fingerprint?: Schema$Fingerprint; /** * This contains layer-specific metadata, if populated it has length &quot;distance&quot; and is ordered with [distance] being the layer immediately following the base image and [1] being the final layer. */ layerInfo?: Schema$Layer[]; } /** * Identifies all appearances of this vulnerability in the package for a specific distro/location. For example: glibc in cpe:/o:debian:debian_linux:8 for versions 2.1 - 2.2 */ export interface Schema$Detail { /** * Required. The CPE URI in [cpe format](https://cpe.mitre.org/specification/) in which the vulnerability manifests. Examples include distro or storage location for vulnerable jar. */ cpeUri?: string | null; /** * A vendor-specific description of this note. */ description?: string | null; /** * The fix for this specific package version. */ fixedLocation?: Schema$VulnerabilityLocation; /** * Whether this detail is obsolete. Occurrences are expected not to point to obsolete details. */ isObsolete?: boolean | null; /** * The max version of the package in which the vulnerability exists. */ maxAffectedVersion?: Schema$Version; /** * The min version of the package in which the vulnerability exists. */ minAffectedVersion?: Schema$Version; /** * Required. The name of the package where the vulnerability was found. */ package?: string | null; /** * The type of package; whether native or non native(ruby gems, node.js packages etc). */ packageType?: string | null; /** * The severity (eg: distro assigned severity) for this vulnerability. */ severityName?: string | null; /** * The time this information was last changed at the source. This is an upstream timestamp from the underlying information source - e.g. Ubuntu security tracker. */ sourceUpdateTime?: string | null; } /** * Details of an attestation occurrence. */ export interface Schema$Details { /** * Required. Attestation for the resource. */ attestation?: Schema$Attestation; } /** * Provides information about the analysis status of a discovered resource. */ export interface Schema$Discovered { /** * The status of discovery for the resource. */ analysisStatus?: string | null; /** * When an error is encountered this will contain a LocalizedMessage under details to show to the user. The LocalizedMessage is output only and populated by the API. */ analysisStatusError?: Schema$Status; /** * Whether the resource is continuously analyzed. */ continuousAnalysis?: string | null; /** * The last time continuous analysis was done for this resource. Deprecated, do not use. */ lastAnalysisTime?: string | null; } /** * A note that indicates a type of analysis a provider would perform. This note exists in a provider&#39;s project. A `Discovery` occurrence is created in a consumer&#39;s project at the start of analysis. */ export interface Schema$Discovery { /** * Required. Immutable. The kind of analysis that is handled by this discovery. */ analysisKind?: string | null; } /** * This represents a particular channel of distribution for a given package. E.g., Debian&#39;s jessie-backports dpkg mirror. */ export interface Schema$Distribution { /** * The CPU architecture for which packages in this distribution channel were built. */ architecture?: string | null; /** * Required. The cpe_uri in [CPE format](https://cpe.mitre.org/specification/) denoting the package manager version distributing a package. */ cpeUri?: string | null; /** * The distribution channel-specific description of this package. */ description?: string | null; /** * The latest available version of this package in this distribution channel. */ latestVersion?: Schema$Version; /** * A freeform string denoting the maintainer of this package. */ maintainer?: string | null; /** * The distribution channel-specific homepage for this package. */ url?: string | null; } /** * A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. */ export interface Schema$Empty {} /** * Represents an expression text. Example: title: &quot;User account presence&quot; description: &quot;Determines whether the request has a user account&quot; expression: &quot;size(request.user) &gt; 0&quot; */ export interface Schema$Expr { /** * An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. */ description?: string | null; /** * Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. */ expression?: string | null; /** * An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file. */ location?: string | null; /** * An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string | null; } /** * Container message for hashes of byte content of files, used in source messages to verify integrity of source input to the build. */ export interface Schema$FileHashes { /** * Required. Collection of file hashes. */ fileHash?: Schema$Hash[]; } /** * A set of properties that uniquely identify a given Docker image. */ export interface Schema$Fingerprint { /** * Required. The layer ID of the final layer in the Docker image&#39;s v1 representation. */ v1Name?: string | null; /** * Required. The ordered list of v2 blobs that represent a given image. */ v2Blob?: string[] | null; /** * Output only. The name of the image&#39;s v2 blobs computed via: [bottom] := v2_blobbottom := sha256(v2_blob[N] + &quot; &quot; + v2_name[N+1]) Only the name of the final blob is kept. */ v2Name?: string | null; } /** * Per resource and severity counts of fixable and total vulnerabilities. */ export interface Schema$FixableTotalByDigest { /** * The number of fixable vulnerabilities associated with this resource. */ fixableCount?: string | null; /** * The affected resource. */ resource?: Schema$Resource; /** * The severity for this count. SEVERITY_UNSPECIFIED indicates total across all severities. */ severity?: string | null; /** * The total number of vulnerabilities associated with this resource. */ totalCount?: string | null; } /** * An attestation wrapper that uses the Grafeas `Signature` message. This attestation must define the `serialized_payload` that the `signatures` verify and any metadata necessary to interpret that plaintext. The signatures should always be over the `serialized_payload` bytestring. */ export interface Schema$GenericSignedAttestation { /** * Type (for example schema) of the attestation payload that was signed. The verifier must ensure that the provided type is one that the verifier supports, and that the attestation payload is a valid instantiation of that type (for example by validating a JSON schema). */ contentType?: string | null; /** * The serialized payload that is verified by one or more `signatures`. The encoding and semantic meaning of this payload must match what is set in `content_type`. */ serializedPayload?: string | null; /** * One or more signatures over `serialized_payload`. Verifier implementations should consider this attestation message verified if at least one `signature` verifies `serialized_payload`. See `Signature` in common.proto for more details on signature structure and verification. */ signatures?: Schema$Signature[]; } /** * A SourceContext referring to a Gerrit project. */ export interface Schema$GerritSourceContext { /** * An alias, which may be a branch or tag. */ aliasContext?: Schema$AliasContext; /** * The full project name within the host. Projects may be nested, so &quot;project/subproject&quot; is a valid project name. The &quot;repo name&quot; is the hostURI/project. */ gerritProject?: string | null; /** * The URI of a running Gerrit instance. */ hostUri?: string | null; /** * A revision (commit) ID. */ revisionId?: string | null; } /** * Request message for `GetIamPolicy` method. */ export interface Schema$GetIamPolicyRequest { /** * OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`. This field is only used by Cloud IAM. */ options?: Schema$GetPolicyOptions; } /** * Encapsulates settings provided to GetIamPolicy. */ export interface Schema$GetPolicyOptions { /** * Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. */ requestedPolicyVersion?: number | null; } /** * A GitSourceContext denotes a particular revision in a third party Git repository (e.g., GitHub). */ export interface Schema$GitSourceContext { /** * Git commit hash. */ revisionId?: string | null; /** * Git repository URL. */ url?: string | null; } /** * Metadata for all operations used and required for all operations that created by Container Analysis Providers */ export interface Schema$GoogleDevtoolsContaineranalysisV1alpha1OperationMetadata { /** * Output only. The time this operation was created. */ createTime?: string | null; /** * Output only. The time that this operation was marked completed or failed. */ endTime?: string | null; } /** * Details of a build occurrence. */ export interface Schema$GrafeasV1beta1BuildDetails { /** * Required. The actual provenance for the build. */ provenance?: Schema$BuildProvenance; /** * Serialized JSON representation of the provenance, used in generating the build signature in the corresponding build note. After verifying the signature, `provenance_bytes` can be unmarshalled and compared to the provenance to confirm that it is unchanged. A base64-encoded string representation of the provenance bytes is used for the signature in order to interoperate with openssl which expects this format for signature verification. The serialized form is captured both to avoid ambiguity in how the provenance is marshalled to json as well to prevent incompatibilities with future changes. */ provenanceBytes?: string | null; } /** * Details of a deployment occurrence. */ export interface Schema$GrafeasV1beta1DeploymentDetails { /** * Required. Deployment history for the resource. */ deployment?: Schema$Deployment; } /** * Details of a discovery occurrence. */ export interface Schema$GrafeasV1beta1DiscoveryDetails { /** * Required. Analysis status for the discovered resource. */ discovered?: Schema$Discovered; } /** * Details of an image occurrence. */ export interface Schema$GrafeasV1beta1ImageDetails { /** * Required. Immutable. The child image derived from the base image. */ derivedImage?: Schema$Derived; } /** * Details of a package occurrence. */ export interface Schema$GrafeasV1beta1PackageDetails { /** * Required. Where the package was installed. */ installation?: Schema$Installation; } /** * Details of a vulnerability Occurrence. */ export interface Schema$GrafeasV1beta1VulnerabilityDetails { /** * Output only. The CVSS score of this vulnerability. CVSS score is on a scale of 0-10 where 0 indicates low severity and 10 indicates high severity. */ cvssScore?: number | null; /** * The distro assigned severity for this vulnerability when it is available, and note provider assigned severity when distro has not yet assigned a severity for this vulnerability. */ effectiveSeverity?: string | null; /** * Output only. A detailed description of this vulnerability. */ longDescription?: string | null; /** * Required. The set of affected locations and their fixes (if available) within the associated resource. */ packageIssue?: Schema$PackageIssue[]; /** * Output only. URLs related to this vulnerability. */ relatedUrls?: Schema$RelatedUrl[]; /** * Output only. The note provider assigned Severity of the vulnerability. */ severity?: string | null; /** * Output only. A one sentence description of this vulnerability. */ shortDescription?: string | null; /** * The type of package; whether native or non native(ruby gems, node.js packages etc) */ type?: string | null; } /** * Container message for hash values. */ export interface Schema$Hash { /** * Required. The type of hash that was performed. */ type?: string | null; /** * Required. The hash value. */ value?: string | null; } /** * This submessage provides human-readable hints about the purpose of the authority. Because the name of a note acts as its resource reference, it is important to disambiguate the canonical name of the Note (which might be a UUID for security purposes) from &quot;readable&quot; names more suitable for debug output. Note that these hints should not be used to look up authorities in security sensitive contexts, such as when looking up attestations to verify. */ export interface Schema$Hint { /** * Required. The human readable name of this attestation authority, for example &quot;qa&quot;. */ humanReadableName?: string | null; } /** * This represents how a particular software package may be installed on a system. */ export interface Schema$Installation { /** * Required. All of the places within the filesystem versions of this package have been found. */ location?: Schema$Location[]; /** * Output only. The name of the installed package. */ name?: string | null; } export interface Schema$KnowledgeBase { /** * The KB name (generally of the form KB[0-9]+ i.e. KB123456). */ name?: string | null; /** * A link to the KB in the Windows update catalog - https://www.catalog.update.microsoft.com/ */ url?: string | null; } /** * Layer holds metadata specific to a layer of a Docker image. */ export interface Schema$Layer { /** * The recovered arguments to the Dockerfile directive. */ arguments?: string | null; /** * Required. The recovered Dockerfile directive used to construct this layer. */ directive?: string | null; } /** * Response for listing occurrences for a note. */ export interface Schema$ListNoteOccurrencesResponse { /** * Token to provide to skip to a particular spot in the list. */ nextPageToken?: string | null; /** * The occurrences attached to the specified note. */ occurrences?: Schema$Occurrence[]; } /** * Response for listing notes. */ export interface Schema$ListNotesResponse { /** * The next pagination token in the list response. It should be used as `page_token` for the following request. An empty value means no more results. */ nextPageToken?: string | null; /** * The notes requested. */ notes?: Schema$Note[]; } /** * Response for listing occurrences. */ export interface Schema$ListOccurrencesResponse { /** * The next pagination token in the list response. It should be used as `page_token` for the following request. An empty value means no more results. */ nextPageToken?: string | null; /** * The occurrences requested. */ occurrences?: Schema$Occurrence[]; } /** * Response for listing scan configurations. */ export interface Schema$ListScanConfigsResponse { /** * The next pagination token in the list response. It should be used as `page_token` for the following request. An empty value means no more results. */ nextPageToken?: string | null; /** * The scan configurations requested. */ scanConfigs?: Schema$ScanConfig[]; } /** * An occurrence of a particular package installation found within a system&#39;s filesystem. E.g., glibc was found in `/var/lib/dpkg/status`. */ export interface Schema$Location { /** * Required. The CPE URI in [CPE format](https://cpe.mitre.org/specification/) denoting the package manager version distributing a package. */ cpeUri?: string | null; /** * The path from which we gathered that this package/version is installed. */ path?: string | null; /** * The version installed at this location. */ version?: Schema$Version; } /** * A type of analysis that can be done for a resource. */ export interface Schema$Note { /** * A note describing an attestation role. */ attestationAuthority?: Schema$Authority; /** * A note describing a base image. */ baseImage?: Schema$Basis; /** * A note describing build provenance for a verifiable build. */ build?: Schema$Build; /** * Output only. The time this note was created. This field can be used as a filter in list requests. */ createTime?: string | null; /** * A note describing something that can be deployed. */ deployable?: Schema$Deployable; /** * A note describing the initial analysis of a resource. */ discovery?: Schema$Discovery; /** * Time of expiration for this note. Empty if note does not expire. */ expirationTime?: string | null; /** * Output only. The type of analysis. This field can be used as a filter in list requests. */ kind?: string | null; /** * A detailed description of this note. */ longDescription?: string | null; /** * Output only. The name of the note in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. */ name?: string | null; /** * A note describing a package hosted by various package managers. */ package?: Schema$Package; /** * Other notes related to this note. */ relatedNoteNames?: string[] | null; /** * URLs associated with this note. */ relatedUrl?: Schema$RelatedUrl[]; /** * A one sentence description of this note. */ shortDescription?: string | null; /** * Output only. The time this note was last updated. This field can be used as a filter in list requests. */ updateTime?: string | null; /** * A note describing a package vulnerability. */ vulnerability?: Schema$Vulnerability; } /** * An instance of an analysis type that has been found on a resource. */ export interface Schema$Occurrence { /** * Describes an attestation of an artifact. */ attestation?: Schema$Details; /** * Describes a verifiable build. */ build?: Schema$GrafeasV1beta1BuildDetails; /** * Output only. The time this occurrence was created. */ createTime?: string | null; /** * Describes the deployment of an artifact on a runtime. */ deployment?: Schema$GrafeasV1beta1DeploymentDetails; /** * Describes how this resource derives from the basis in the associated note. */ derivedImage?: Schema$GrafeasV1beta1ImageDetails; /** * Describes when a resource was discovered. */ discovered?: Schema$GrafeasV1beta1DiscoveryDetails; /** * Describes the installation of a package on the linked resource. */ installation?: Schema$GrafeasV1beta1PackageDetails; /** * Output only. This explicitly denotes which of the occurrence details are specified. This field can be used as a filter in list requests. */ kind?: string | null; /** * Output only. The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. */ name?: string | null; /** * Required. Immutable. The analysis note associated with this occurrence, in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. This field can be used as a filter in list requests. */ noteName?: string | null; /** * A description of actions that can be taken to remedy the note. */ remediation?: string | null; /** * Required. Immutable. The resource for which the occurrence applies. */ resource?: Schema$Resource; /** * Output only. The time this occurrence was last updated. */ updateTime?: string | null; /** * Describes a security vulnerability. */ vulnerability?: Schema$GrafeasV1beta1VulnerabilityDetails; } /** * This represents a particular package that is distributed over various channels. E.g., glibc (aka libc6) is distributed by many, at various versions. */ export interface Schema$Package { /** * The various channels by which a package is distributed. */ distribution?: Schema$Distribution[]; /** * Required. Immutable. The name of the package. */ name?: string | null; } /** * This message wraps a location affected by a vulnerability and its associated fix (if one is available). */ export interface Schema$PackageIssue { /** * Required. The location of the vulnerability. */ affectedLocation?: Schema$VulnerabilityLocation; /** * The location of the available fix for vulnerability. */ fixedLocation?: Schema$VulnerabilityLocation; /** * Deprecated, use Details.effective_severity instead The severity (e.g., distro assigned severity) for this vulnerability. */ severityName?: string | null; } /** * An attestation wrapper with a PGP-compatible signature. This message only supports `ATTACHED` signatures, where the payload that is signed is included alongside the signature itself in the same file. */ export interface Schema$PgpSignedAttestation { /** * Type (for example schema) of the attestation payload that was signed. The verifier must ensure that the provided type is one that the verifier supports, and that the attestation payload is a valid instantiation of that type (for example by validating a JSON schema). */ contentType?: string | null; /** * The cryptographic fingerprint of the key used to generate the signature, as output by, e.g. `gpg --list-keys`. This should be the version 4, full 160-bit fingerprint, expressed as a 40 character hexidecimal string. See https://tools.ietf.org/html/rfc4880#section-12.2 for details. Implementations may choose to acknowledge &quot;LONG&quot;, &quot;SHORT&quot;, or other abbreviated key IDs, but only the full fingerprint is guaranteed to work. In gpg, the full fingerprint can be retrieved from the `fpr` field returned when calling --list-keys with --with-colons. For example: ``` gpg --with-colons --with-fingerprint --force-v4-certs \ --list-keys [email protected] tru::1:1513631572:0:3:1:5 pub:...&lt;SNIP&gt;... fpr:::::::::24FF6481B76AC91E66A00AC657A93A81EF3AE6FB: ``` Above, the fingerprint is `24FF6481B76AC91E66A00AC657A93A81EF3AE6FB`. */ pgpKeyId?: string | null; /** * Required. The raw content of the signature, as output by GNU Privacy Guard (GPG) or equivalent. Since this message only supports attached signatures, the payload that was signed must be attached. While the signature format supported is dependent on the verification implementation, currently only ASCII-armored (`--armor` to gpg), non-clearsigned (`--sign` rather than `--clearsign` to gpg) are supported. Concretely, `gpg --sign --armor --output=signature.gpg payload.json` will create the signature content expected in this field in `signature.gpg` for the `payload.json` attestation payload. */ signature?: string | null; } /** * Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions (defined by IAM or configured by users). A `binding` can optionally specify a `condition`, which is a logic expression that further constrains the role binding based on attributes about the request and/or target resource. **JSON Example** { &quot;bindings&quot;: [ { &quot;role&quot;: &quot;roles/resourcemanager.organizationAdmin&quot;, &quot;members&quot;: [ &quot;user:[email protected]&quot;, &quot;group:[email protected]&quot;, &quot;domain:google.com&quot;, &quot;serviceAccount:[email protected]&quot; ] }, { &quot;role&quot;: &quot;roles/resourcemanager.organizationViewer&quot;, &quot;members&quot;: [&quot;user:[email protected]&quot;], &quot;condition&quot;: { &quot;title&quot;: &quot;expirable access&quot;, &quot;description&quot;: &quot;Does not grant access after Sep 2020&quot;, &quot;expression&quot;: &quot;request.time &lt; timestamp(&#39;2020-10-01T00:00:00.000Z&#39;)&quot;, } } ] } **YAML Example** bindings: - members: - user:[email protected] - group:[email protected] - domain:google.com - serviceAccount:[email protected] role: roles/resourcemanager.organizationAdmin - members: - user:[email protected] role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time &lt; timestamp(&#39;2020-10-01T00:00:00.000Z&#39;) For a description of IAM and its features, see the [IAM developer&#39;s guide](https://cloud.google.com/iam/docs). */ export interface Schema$Policy { /** * Associates a list of `members` to a `role`. Optionally may specify a `condition` that determines when binding is in effect. `bindings` with no members will result in an error. */ bindings?: Schema$Binding[]; /** * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. If no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten. Due to blind-set semantics of an etag-less policy, &#39;setIamPolicy&#39; will not fail even if either of incoming or stored policy does not meet the version requirements. */ etag?: string | null; /** * Specifies the format of the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Operations affecting conditional bindings must specify version 3. This can be either setting a conditional policy, modifying a conditional binding, or removing a conditional binding from the stored conditional policy. Operations on non-conditional policies may specify any valid value or leave the field unset. If no etag is provided in the call to `setIamPolicy`, any version compliance checks on the incoming and/or stored policy is skipped. */ version?: number | null; } /** * Selects a repo using a Google Cloud Platform project ID (e.g., winged-cargo-31) and a repo name within that project. */ export interface Schema$ProjectRepoId { /** * The ID of the project. */ projectId?: string | null; /** * The name of the repo. Leave empty for the default repo. */ repoName?: string | null; } /** * Metadata for any related URL information. */ export interface Schema$RelatedUrl { /** * Label to describe usage of the URL. */ label?: string | null; /** * Specific URL associated with the resource. */ url?: string | null; } /** * A unique identifier for a Cloud Repo. */ export interface Schema$RepoId { /** * A combination of a project ID and a repo name. */ projectRepoId?: Schema$ProjectRepoId; /** * A server-assigned, globally unique identifier. */ uid?: string | null; } /** * An entity that can have metadata. For example, a Docker image. */ export interface Schema$Resource { /** * Deprecated, do not use. Use uri instead. The hash of the resource content. For example, the Docker digest. */ contentHash?: Schema$Hash; /** * Deprecated, do not use. Use uri instead. The name of the resource. For example, the name of a Docker image - &quot;Debian&quot;. */ name?: string | null; /** * Required. The unique URI of the resource. For example, `https://gcr.io/project/image@sha256:foo` for a Docker image. */ uri?: string | null; } /** * A scan configuration specifies whether Cloud components in a project have a particular type of analysis being run. For example, it can configure whether vulnerability scanning is being done on Docker images or not. */ export interface Schema$ScanConfig { /** * Output only. The time this scan config was created. */ createTime?: string | null; /** * Output only. A human-readable description of what the scan configuration does. */ description?: string | null; /** * Whether the scan is enabled. */ enabled?: boolean | null; /** * Output only. The name of the scan configuration in the form of `projects/[PROJECT_ID]/scanConfigs/[SCAN_CONFIG_ID]`. */ name?: string | null; /** * Output only. The time this scan config was last updated. */ updateTime?: string | null; } /** * Request message for `SetIamPolicy` method. */ export interface Schema$SetIamPolicyRequest { /** * REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. */ policy?: Schema$Policy; } /** * Verifiers (e.g. Kritis implementations) MUST verify signatures with respect to the trust anchors defined in policy (e.g. a Kritis policy). Typically this means that the verifier has been configured with a map from `public_key_id` to public key material (and any required parameters, e.g. signing algorithm). In particular, verification implementations MUST NOT treat the signature `public_key_id` as anything more than a key lookup hint. The `public_key_id` DOES NOT validate or authenticate a public key; it only provides a mechanism for quickly selecting a public key ALREADY CONFIGURED on the verifier through a trusted channel. Verification implementations MUST reject signatures in any of the following circumstances: * The `public_key_id` is not recognized by the verifier. * The public key that `public_key_id` refers to does not verify the signature with respect to the payload. The `signature` contents SHOULD NOT be &quot;attached&quot; (where the payload is included with the serialized `signature` bytes). Verifiers MUST ignore any &quot;attached&quot; payload and only verify signatures with respect to explicitly provided payload (e.g. a `payload` field on the proto message that holds this Signature, or the canonical serialization of the proto message that holds this signature). */ export interface Schema$Signature { /** * The identifier for the public key that verifies this signature. * The `public_key_id` is required. * The `public_key_id` MUST be an RFC3986 conformant URI. * When possible, the `public_key_id` SHOULD be an immutable reference, such as a cryptographic digest. Examples of valid `public_key_id`s: OpenPGP V4 public key fingerprint: * &quot;openpgp4fpr:74FAF3B861BDA0870C7B6DEF607E48D2A663AEEA&quot; See https://www.iana.org/assignments/uri-schemes/prov/openpgp4fpr for more details on this scheme. RFC6920 digest-named SubjectPublicKeyInfo (digest of the DER serialization): * &quot;ni:///sha-256;cD9o9Cq6LG3jD0iKXqEi_vdjJGecm_iXkbqVoScViaU&quot; * &quot;nih:///sha-256;703f68f42aba2c6de30f488a5ea122fef76324679c9bf89791ba95a1271589a5&quot; */ publicKeyId?: string | null; /** * The content of the signature, an opaque bytestring. The payload that this signature verifies MUST be unambiguously provided with the Signature during verification. A wrapper message might provide the payload explicitly. Alternatively, a message might have a canonical serialization that can always be unambiguously computed to derive the payload. */ signature?: string | null; } /** * Source describes the location of the source used for the build. */ export interface Schema$Source { /** * If provided, some of the source code used for the build may be found in these locations, in the case where the source repository had multiple remotes or submodules. This list will not include the context specified in the context field. */ additionalContexts?: Schema$SourceContext[]; /** * If provided, the input binary artifacts for the build came from this location. */ artifactStorageSourceUri?: string | null; /** * If provided, the source code used for the build came from this location. */ context?: Schema$SourceContext; /** * Hash(es) of the build source, which can be used to verify that the original source integrity was maintained in the build. The keys to this map are file paths used as build source and the values contain the hash values for those files. If the build source came in a single package such as a gzipped tarfile (.tar.gz), the FileHash will be for the single path to that file. */ fileHashes?: {[key: string]: Schema$FileHashes} | null; } /** * A SourceContext is a reference to a tree of files. A SourceContext together with a path point to a unique revision of a single file or directory. */ export interface Schema$SourceContext { /** * A SourceContext referring to a revision in a Google Cloud Source Repo. */ cloudRepo?: Schema$CloudRepoSourceContext; /** * A SourceContext referring to a Gerrit project. */ gerrit?: Schema$GerritSourceContext; /** * A SourceContext referring to any third party Git repo (e.g., GitHub). */ git?: Schema$GitSourceContext; /** * Labels with user defined metadata. */ labels?: {[key: string]: string} | null; } /** * The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). */ export interface Schema$Status { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number | null; /** * A list of messages that carry the error details. There is a common set of message types for APIs to use. */ details?: Array<{[key: string]: any}> | null; /** * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. */ message?: string | null; } /** * Request message for `TestIamPermissions` method. */ export interface Schema$TestIamPermissionsRequest { /** * The set of permissions to check for the `resource`. Permissions with wildcards (such as &#39;*&#39; or &#39;storage.*&#39;) are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). */ permissions?: string[] | null; } /** * Response message for `TestIamPermissions` method. */ export interface Schema$TestIamPermissionsResponse { /** * A subset of `TestPermissionsRequest.permissions` that the caller is allowed. */ permissions?: string[] | null; } /** * Version contains structured information about the version of a package. */ export interface Schema$Version { /** * Used to correct mistakes in the version numbering scheme. */ epoch?: number | null; /** * Required. Distinguishes between sentinel MIN/MAX versions and normal versions. */ kind?: string | null; /** * Required only when version kind is NORMAL. The main part of the version name. */ name?: string | null; /** * The iteration of the package build from the above version. */ revision?: string | null; } /** * Vulnerability provides metadata about a security vulnerability in a Note. */ export interface Schema$Vulnerability { /** * The CVSS score for this vulnerability. */ cvssScore?: number | null; /** * The full description of the CVSSv3. */ cvssV3?: Schema$CVSSv3; /** * All information about the package to specifically identify this vulnerability. One entry per (version range and cpe_uri) the package vulnerability has manifested in. */ details?: Schema$Detail[]; /** * Note provider assigned impact of the vulnerability. */ severity?: string | null; /** * The time this information was last changed at the source. This is an upstream timestamp from the underlying information source - e.g. Ubuntu security tracker. */ sourceUpdateTime?: string | null; /** * Windows details get their own format because the information format and model don&#39;t match a normal detail. Specifically Windows updates are done as patches, thus Windows vulnerabilities really are a missing package, rather than a package being at an incorrect version. */ windowsDetails?: Schema$WindowsDetail[]; } /** * The location of the vulnerability. */ export interface Schema$VulnerabilityLocation { /** * Required. The CPE URI in [cpe format](https://cpe.mitre.org/specification/) format. Examples include distro or storage location for vulnerable jar. */ cpeUri?: string | null; /** * Required. The package being described. */ package?: string | null; /** * Required. The version of the package being described. */ version?: Schema$Version; } /** * A summary of how many vulnerability occurrences there are per resource and severity type. */ export interface Schema$VulnerabilityOccurrencesSummary { /** * A listing by resource of the number of fixable and total vulnerabilities. */ counts?: Schema$FixableTotalByDigest[]; } export interface Schema$WindowsDetail { /** * Required. The CPE URI in [cpe format](https://cpe.mitre.org/specification/) in which the vulnerability manifests. Examples include distro or storage location for vulnerable jar. */ cpeUri?: string | null; /** * The description of the vulnerability. */ description?: string | null; /** * Required. The names of the KBs which have hotfixes to mitigate this vulnerability. Note that there may be multiple hotfixes (and thus multiple KBs) that mitigate a given vulnerability. Currently any listed kb&#39;s presence is considered a fix. */ fixingKbs?: Schema$KnowledgeBase[]; /** * Required. The name of the vulnerability. */ name?: string | null; } export class
{ context: APIRequestContext; notes: Resource$Projects$Notes; occurrences: Resource$Projects$Occurrences; scanConfigs: Resource$Projects$Scanconfigs; constructor(context: APIRequestContext) { this.context = context; this.notes = new Resource$Projects$Notes(this.context); this.occurrences = new Resource$Projects$Occurrences(this.context); this.scanConfigs = new Resource$Projects$Scanconfigs(this.context); } } export class Resource$Projects$Notes { context: APIRequestContext; occurrences: Resource$Projects$Notes$Occurrences; constructor(context: APIRequestContext) { this.context = context; this.occurrences = new Resource$Projects$Notes$Occurrences(this.context); } /** * containeranalysis.projects.notes.batchCreate * @desc Creates new notes in batch. * @alias containeranalysis.projects.notes.batchCreate * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.parent The name of the project in the form of `projects/[PROJECT_ID]`, under which the notes are to be created. * @param {().BatchCreateNotesRequest} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ batchCreate( params?: Params$Resource$Projects$Notes$Batchcreate, options?: MethodOptions ): GaxiosPromise<Schema$BatchCreateNotesResponse>; batchCreate( params: Params$Resource$Projects$Notes$Batchcreate, options: | MethodOptions | BodyResponseCallback<Schema$BatchCreateNotesResponse>, callback: BodyResponseCallback<Schema$BatchCreateNotesResponse> ): void; batchCreate( params: Params$Resource$Projects$Notes$Batchcreate, callback: BodyResponseCallback<Schema$BatchCreateNotesResponse> ): void; batchCreate( callback: BodyResponseCallback<Schema$BatchCreateNotesResponse> ): void; batchCreate( paramsOrCallback?: | Params$Resource$Projects$Notes$Batchcreate | BodyResponseCallback<Schema$BatchCreateNotesResponse>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$BatchCreateNotesResponse>, callback?: BodyResponseCallback<Schema$BatchCreateNotesResponse> ): void | GaxiosPromise<Schema$BatchCreateNotesResponse> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Batchcreate; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Batchcreate; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+parent}/notes:batchCreate').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['parent'], pathParams: ['parent'], context: this.context, }; if (callback) { createAPIRequest<Schema$BatchCreateNotesResponse>(parameters, callback); } else { return createAPIRequest<Schema$BatchCreateNotesResponse>(parameters); } } /** * containeranalysis.projects.notes.create * @desc Creates a new note. * @alias containeranalysis.projects.notes.create * @memberOf! () * * @param {object} params Parameters for request * @param {string=} params.noteId The ID to use for this note. * @param {string} params.parent The name of the project in the form of `projects/[PROJECT_ID]`, under which the note is to be created. * @param {().Note} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ create( params?: Params$Resource$Projects$Notes$Create, options?: MethodOptions ): GaxiosPromise<Schema$Note>; create( params: Params$Resource$Projects$Notes$Create, options: MethodOptions | BodyResponseCallback<Schema$Note>, callback: BodyResponseCallback<Schema$Note> ): void; create( params: Params$Resource$Projects$Notes$Create, callback: BodyResponseCallback<Schema$Note> ): void; create(callback: BodyResponseCallback<Schema$Note>): void; create( paramsOrCallback?: | Params$Resource$Projects$Notes$Create | BodyResponseCallback<Schema$Note>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Note>, callback?: BodyResponseCallback<Schema$Note> ): void | GaxiosPromise<Schema$Note> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Create; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Create; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+parent}/notes').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['parent'], pathParams: ['parent'], context: this.context, }; if (callback) { createAPIRequest<Schema$Note>(parameters, callback); } else { return createAPIRequest<Schema$Note>(parameters); } } /** * containeranalysis.projects.notes.delete * @desc Deletes the specified note. * @alias containeranalysis.projects.notes.delete * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the note in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ delete( params?: Params$Resource$Projects$Notes$Delete, options?: MethodOptions ): GaxiosPromise<Schema$Empty>; delete( params: Params$Resource$Projects$Notes$Delete, options: MethodOptions | BodyResponseCallback<Schema$Empty>, callback: BodyResponseCallback<Schema$Empty> ): void; delete( params: Params$Resource$Projects$Notes$Delete, callback: BodyResponseCallback<Schema$Empty> ): void; delete(callback: BodyResponseCallback<Schema$Empty>): void; delete( paramsOrCallback?: | Params$Resource$Projects$Notes$Delete | BodyResponseCallback<Schema$Empty>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Empty>, callback?: BodyResponseCallback<Schema$Empty> ): void | GaxiosPromise<Schema$Empty> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Delete; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Delete; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}').replace(/([^:]\/)\/+/g, '$1'), method: 'DELETE', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$Empty>(parameters, callback); } else { return createAPIRequest<Schema$Empty>(parameters); } } /** * containeranalysis.projects.notes.get * @desc Gets the specified note. * @alias containeranalysis.projects.notes.get * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the note in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ get( params?: Params$Resource$Projects$Notes$Get, options?: MethodOptions ): GaxiosPromise<Schema$Note>; get( params: Params$Resource$Projects$Notes$Get, options: MethodOptions | BodyResponseCallback<Schema$Note>, callback: BodyResponseCallback<Schema$Note> ): void; get( params: Params$Resource$Projects$Notes$Get, callback: BodyResponseCallback<Schema$Note> ): void; get(callback: BodyResponseCallback<Schema$Note>): void; get( paramsOrCallback?: | Params$Resource$Projects$Notes$Get | BodyResponseCallback<Schema$Note>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Note>, callback?: BodyResponseCallback<Schema$Note> ): void | GaxiosPromise<Schema$Note> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Get; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Get; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}').replace(/([^:]\/)\/+/g, '$1'), method: 'GET', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$Note>(parameters, callback); } else { return createAPIRequest<Schema$Note>(parameters); } } /** * containeranalysis.projects.notes.getIamPolicy * @desc Gets the access control policy for a note or an occurrence resource. Requires `containeranalysis.notes.setIamPolicy` or `containeranalysis.occurrences.setIamPolicy` permission if the resource is a note or occurrence, respectively. The resource takes the format `projects/[PROJECT_ID]/notes/[NOTE_ID]` for notes and `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]` for occurrences. * @alias containeranalysis.projects.notes.getIamPolicy * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.resource_ REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. * @param {().GetIamPolicyRequest} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ getIamPolicy( params?: Params$Resource$Projects$Notes$Getiampolicy, options?: MethodOptions ): GaxiosPromise<Schema$Policy>; getIamPolicy( params: Params$Resource$Projects$Notes$Getiampolicy, options: MethodOptions | BodyResponseCallback<Schema$Policy>, callback: BodyResponseCallback<Schema$Policy> ): void; getIamPolicy( params: Params$Resource$Projects$Notes$Getiampolicy, callback: BodyResponseCallback<Schema$Policy> ): void; getIamPolicy(callback: BodyResponseCallback<Schema$Policy>): void; getIamPolicy( paramsOrCallback?: | Params$Resource$Projects$Notes$Getiampolicy | BodyResponseCallback<Schema$Policy>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Policy>, callback?: BodyResponseCallback<Schema$Policy> ): void | GaxiosPromise<Schema$Policy> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Getiampolicy; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Getiampolicy; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+resource}:getIamPolicy').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['resource'], pathParams: ['resource'], context: this.context, }; if (callback) { createAPIRequest<Schema$Policy>(parameters, callback); } else { return createAPIRequest<Schema$Policy>(parameters); } } /** * containeranalysis.projects.notes.list * @desc Lists notes for the specified project. * @alias containeranalysis.projects.notes.list * @memberOf! () * * @param {object} params Parameters for request * @param {string=} params.filter The filter expression. * @param {integer=} params.pageSize Number of notes to return in the list. Must be positive. Max allowed page size is 1000. If not specified, page size defaults to 20. * @param {string=} params.pageToken Token to provide to skip to a particular spot in the list. * @param {string} params.parent The name of the project to list notes for in the form of `projects/[PROJECT_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ list( params?: Params$Resource$Projects$Notes$List, options?: MethodOptions ): GaxiosPromise<Schema$ListNotesResponse>; list( params: Params$Resource$Projects$Notes$List, options: MethodOptions | BodyResponseCallback<Schema$ListNotesResponse>, callback: BodyResponseCallback<Schema$ListNotesResponse> ): void; list( params: Params$Resource$Projects$Notes$List, callback: BodyResponseCallback<Schema$ListNotesResponse> ): void; list(callback: BodyResponseCallback<Schema$ListNotesResponse>): void; list( paramsOrCallback?: | Params$Resource$Projects$Notes$List | BodyResponseCallback<Schema$ListNotesResponse>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$ListNotesResponse>, callback?: BodyResponseCallback<Schema$ListNotesResponse> ): void | GaxiosPromise<Schema$ListNotesResponse> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$List; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$List; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+parent}/notes').replace( /([^:]\/)\/+/g, '$1' ), method: 'GET', }, options ), params, requiredParams: ['parent'], pathParams: ['parent'], context: this.context, }; if (callback) { createAPIRequest<Schema$ListNotesResponse>(parameters, callback); } else { return createAPIRequest<Schema$ListNotesResponse>(parameters); } } /** * containeranalysis.projects.notes.patch * @desc Updates the specified note. * @alias containeranalysis.projects.notes.patch * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the note in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. * @param {string=} params.updateMask The fields to update. * @param {().Note} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ patch( params?: Params$Resource$Projects$Notes$Patch, options?: MethodOptions ): GaxiosPromise<Schema$Note>; patch( params: Params$Resource$Projects$Notes$Patch, options: MethodOptions | BodyResponseCallback<Schema$Note>, callback: BodyResponseCallback<Schema$Note> ): void; patch( params: Params$Resource$Projects$Notes$Patch, callback: BodyResponseCallback<Schema$Note> ): void; patch(callback: BodyResponseCallback<Schema$Note>): void; patch( paramsOrCallback?: | Params$Resource$Projects$Notes$Patch | BodyResponseCallback<Schema$Note>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Note>, callback?: BodyResponseCallback<Schema$Note> ): void | GaxiosPromise<Schema$Note> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Patch; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Patch; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}').replace(/([^:]\/)\/+/g, '$1'), method: 'PATCH', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$Note>(parameters, callback); } else { return createAPIRequest<Schema$Note>(parameters); } } /** * containeranalysis.projects.notes.setIamPolicy * @desc Sets the access control policy on the specified note or occurrence. Requires `containeranalysis.notes.setIamPolicy` or `containeranalysis.occurrences.setIamPolicy` permission if the resource is a note or an occurrence, respectively. The resource takes the format `projects/[PROJECT_ID]/notes/[NOTE_ID]` for notes and `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]` for occurrences. * @alias containeranalysis.projects.notes.setIamPolicy * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.resource_ REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. * @param {().SetIamPolicyRequest} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ setIamPolicy( params?: Params$Resource$Projects$Notes$Setiampolicy, options?: MethodOptions ): GaxiosPromise<Schema$Policy>; setIamPolicy( params: Params$Resource$Projects$Notes$Setiampolicy, options: MethodOptions | BodyResponseCallback<Schema$Policy>, callback: BodyResponseCallback<Schema$Policy> ): void; setIamPolicy( params: Params$Resource$Projects$Notes$Setiampolicy, callback: BodyResponseCallback<Schema$Policy> ): void; setIamPolicy(callback: BodyResponseCallback<Schema$Policy>): void; setIamPolicy( paramsOrCallback?: | Params$Resource$Projects$Notes$Setiampolicy | BodyResponseCallback<Schema$Policy>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Policy>, callback?: BodyResponseCallback<Schema$Policy> ): void | GaxiosPromise<Schema$Policy> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Setiampolicy; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Setiampolicy; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+resource}:setIamPolicy').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['resource'], pathParams: ['resource'], context: this.context, }; if (callback) { createAPIRequest<Schema$Policy>(parameters, callback); } else { return createAPIRequest<Schema$Policy>(parameters); } } /** * containeranalysis.projects.notes.testIamPermissions * @desc Returns the permissions that a caller has on the specified note or occurrence. Requires list permission on the project (for example, `containeranalysis.notes.list`). The resource takes the format `projects/[PROJECT_ID]/notes/[NOTE_ID]` for notes and `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]` for occurrences. * @alias containeranalysis.projects.notes.testIamPermissions * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.resource_ REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. * @param {().TestIamPermissionsRequest} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ testIamPermissions( params?: Params$Resource$Projects$Notes$Testiampermissions, options?: MethodOptions ): GaxiosPromise<Schema$TestIamPermissionsResponse>; testIamPermissions( params: Params$Resource$Projects$Notes$Testiampermissions, options: | MethodOptions | BodyResponseCallback<Schema$TestIamPermissionsResponse>, callback: BodyResponseCallback<Schema$TestIamPermissionsResponse> ): void; testIamPermissions( params: Params$Resource$Projects$Notes$Testiampermissions, callback: BodyResponseCallback<Schema$TestIamPermissionsResponse> ): void; testIamPermissions( callback: BodyResponseCallback<Schema$TestIamPermissionsResponse> ): void; testIamPermissions( paramsOrCallback?: | Params$Resource$Projects$Notes$Testiampermissions | BodyResponseCallback<Schema$TestIamPermissionsResponse>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$TestIamPermissionsResponse>, callback?: BodyResponseCallback<Schema$TestIamPermissionsResponse> ): void | GaxiosPromise<Schema$TestIamPermissionsResponse> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Testiampermissions; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Testiampermissions; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+resource}:testIamPermissions').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['resource'], pathParams: ['resource'], context: this.context, }; if (callback) { createAPIRequest<Schema$TestIamPermissionsResponse>( parameters, callback ); } else { return createAPIRequest<Schema$TestIamPermissionsResponse>(parameters); } } } export interface Params$Resource$Projects$Notes$Batchcreate extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the project in the form of `projects/[PROJECT_ID]`, under which the notes are to be created. */ parent?: string; /** * Request body metadata */ requestBody?: Schema$BatchCreateNotesRequest; } export interface Params$Resource$Projects$Notes$Create extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The ID to use for this note. */ noteId?: string; /** * The name of the project in the form of `projects/[PROJECT_ID]`, under which the note is to be created. */ parent?: string; /** * Request body metadata */ requestBody?: Schema$Note; } export interface Params$Resource$Projects$Notes$Delete extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the note in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. */ name?: string; } export interface Params$Resource$Projects$Notes$Get extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the note in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. */ name?: string; } export interface Params$Resource$Projects$Notes$Getiampolicy extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. */ resource?: string; /** * Request body metadata */ requestBody?: Schema$GetIamPolicyRequest; } export interface Params$Resource$Projects$Notes$List extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The filter expression. */ filter?: string; /** * Number of notes to return in the list. Must be positive. Max allowed page size is 1000. If not specified, page size defaults to 20. */ pageSize?: number; /** * Token to provide to skip to a particular spot in the list. */ pageToken?: string; /** * The name of the project to list notes for in the form of `projects/[PROJECT_ID]`. */ parent?: string; } export interface Params$Resource$Projects$Notes$Patch extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the note in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. */ name?: string; /** * The fields to update. */ updateMask?: string; /** * Request body metadata */ requestBody?: Schema$Note; } export interface Params$Resource$Projects$Notes$Setiampolicy extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. */ resource?: string; /** * Request body metadata */ requestBody?: Schema$SetIamPolicyRequest; } export interface Params$Resource$Projects$Notes$Testiampermissions extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. */ resource?: string; /** * Request body metadata */ requestBody?: Schema$TestIamPermissionsRequest; } export class Resource$Projects$Notes$Occurrences { context: APIRequestContext; constructor(context: APIRequestContext) { this.context = context; } /** * containeranalysis.projects.notes.occurrences.list * @desc Lists occurrences referencing the specified note. Provider projects can use this method to get all occurrences across consumer projects referencing the specified note. * @alias containeranalysis.projects.notes.occurrences.list * @memberOf! () * * @param {object} params Parameters for request * @param {string=} params.filter The filter expression. * @param {string} params.name The name of the note to list occurrences for in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. * @param {integer=} params.pageSize Number of occurrences to return in the list. * @param {string=} params.pageToken Token to provide to skip to a particular spot in the list. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ list( params?: Params$Resource$Projects$Notes$Occurrences$List, options?: MethodOptions ): GaxiosPromise<Schema$ListNoteOccurrencesResponse>; list( params: Params$Resource$Projects$Notes$Occurrences$List, options: | MethodOptions | BodyResponseCallback<Schema$ListNoteOccurrencesResponse>, callback: BodyResponseCallback<Schema$ListNoteOccurrencesResponse> ): void; list( params: Params$Resource$Projects$Notes$Occurrences$List, callback: BodyResponseCallback<Schema$ListNoteOccurrencesResponse> ): void; list( callback: BodyResponseCallback<Schema$ListNoteOccurrencesResponse> ): void; list( paramsOrCallback?: | Params$Resource$Projects$Notes$Occurrences$List | BodyResponseCallback<Schema$ListNoteOccurrencesResponse>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$ListNoteOccurrencesResponse>, callback?: BodyResponseCallback<Schema$ListNoteOccurrencesResponse> ): void | GaxiosPromise<Schema$ListNoteOccurrencesResponse> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Notes$Occurrences$List; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Notes$Occurrences$List; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}/occurrences').replace( /([^:]\/)\/+/g, '$1' ), method: 'GET', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$ListNoteOccurrencesResponse>( parameters, callback ); } else { return createAPIRequest<Schema$ListNoteOccurrencesResponse>(parameters); } } } export interface Params$Resource$Projects$Notes$Occurrences$List extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The filter expression. */ filter?: string; /** * The name of the note to list occurrences for in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`. */ name?: string; /** * Number of occurrences to return in the list. */ pageSize?: number; /** * Token to provide to skip to a particular spot in the list. */ pageToken?: string; } export class Resource$Projects$Occurrences { context: APIRequestContext; constructor(context: APIRequestContext) { this.context = context; } /** * containeranalysis.projects.occurrences.batchCreate * @desc Creates new occurrences in batch. * @alias containeranalysis.projects.occurrences.batchCreate * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.parent The name of the project in the form of `projects/[PROJECT_ID]`, under which the occurrences are to be created. * @param {().BatchCreateOccurrencesRequest} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ batchCreate( params?: Params$Resource$Projects$Occurrences$Batchcreate, options?: MethodOptions ): GaxiosPromise<Schema$BatchCreateOccurrencesResponse>; batchCreate( params: Params$Resource$Projects$Occurrences$Batchcreate, options: | MethodOptions | BodyResponseCallback<Schema$BatchCreateOccurrencesResponse>, callback: BodyResponseCallback<Schema$BatchCreateOccurrencesResponse> ): void; batchCreate( params: Params$Resource$Projects$Occurrences$Batchcreate, callback: BodyResponseCallback<Schema$BatchCreateOccurrencesResponse> ): void; batchCreate( callback: BodyResponseCallback<Schema$BatchCreateOccurrencesResponse> ): void; batchCreate( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Batchcreate | BodyResponseCallback<Schema$BatchCreateOccurrencesResponse>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$BatchCreateOccurrencesResponse>, callback?: BodyResponseCallback<Schema$BatchCreateOccurrencesResponse> ): void | GaxiosPromise<Schema$BatchCreateOccurrencesResponse> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Batchcreate; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Batchcreate; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: ( rootUrl + '/v1beta1/{+parent}/occurrences:batchCreate' ).replace(/([^:]\/)\/+/g, '$1'), method: 'POST', }, options ), params, requiredParams: ['parent'], pathParams: ['parent'], context: this.context, }; if (callback) { createAPIRequest<Schema$BatchCreateOccurrencesResponse>( parameters, callback ); } else { return createAPIRequest<Schema$BatchCreateOccurrencesResponse>( parameters ); } } /** * containeranalysis.projects.occurrences.create * @desc Creates a new occurrence. * @alias containeranalysis.projects.occurrences.create * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.parent The name of the project in the form of `projects/[PROJECT_ID]`, under which the occurrence is to be created. * @param {().Occurrence} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ create( params?: Params$Resource$Projects$Occurrences$Create, options?: MethodOptions ): GaxiosPromise<Schema$Occurrence>; create( params: Params$Resource$Projects$Occurrences$Create, options: MethodOptions | BodyResponseCallback<Schema$Occurrence>, callback: BodyResponseCallback<Schema$Occurrence> ): void; create( params: Params$Resource$Projects$Occurrences$Create, callback: BodyResponseCallback<Schema$Occurrence> ): void; create(callback: BodyResponseCallback<Schema$Occurrence>): void; create( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Create | BodyResponseCallback<Schema$Occurrence>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$Occurrence>, callback?: BodyResponseCallback<Schema$Occurrence> ): void | GaxiosPromise<Schema$Occurrence> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Create; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Create; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+parent}/occurrences').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['parent'], pathParams: ['parent'], context: this.context, }; if (callback) { createAPIRequest<Schema$Occurrence>(parameters, callback); } else { return createAPIRequest<Schema$Occurrence>(parameters); } } /** * containeranalysis.projects.occurrences.delete * @desc Deletes the specified occurrence. For example, use this method to delete an occurrence when the occurrence is no longer applicable for the given resource. * @alias containeranalysis.projects.occurrences.delete * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ delete( params?: Params$Resource$Projects$Occurrences$Delete, options?: MethodOptions ): GaxiosPromise<Schema$Empty>; delete( params: Params$Resource$Projects$Occurrences$Delete, options: MethodOptions | BodyResponseCallback<Schema$Empty>, callback: BodyResponseCallback<Schema$Empty> ): void; delete( params: Params$Resource$Projects$Occurrences$Delete, callback: BodyResponseCallback<Schema$Empty> ): void; delete(callback: BodyResponseCallback<Schema$Empty>): void; delete( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Delete | BodyResponseCallback<Schema$Empty>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Empty>, callback?: BodyResponseCallback<Schema$Empty> ): void | GaxiosPromise<Schema$Empty> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Delete; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Delete; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}').replace(/([^:]\/)\/+/g, '$1'), method: 'DELETE', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$Empty>(parameters, callback); } else { return createAPIRequest<Schema$Empty>(parameters); } } /** * containeranalysis.projects.occurrences.get * @desc Gets the specified occurrence. * @alias containeranalysis.projects.occurrences.get * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ get( params?: Params$Resource$Projects$Occurrences$Get, options?: MethodOptions ): GaxiosPromise<Schema$Occurrence>; get( params: Params$Resource$Projects$Occurrences$Get, options: MethodOptions | BodyResponseCallback<Schema$Occurrence>, callback: BodyResponseCallback<Schema$Occurrence> ): void; get( params: Params$Resource$Projects$Occurrences$Get, callback: BodyResponseCallback<Schema$Occurrence> ): void; get(callback: BodyResponseCallback<Schema$Occurrence>): void; get( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Get | BodyResponseCallback<Schema$Occurrence>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$Occurrence>, callback?: BodyResponseCallback<Schema$Occurrence> ): void | GaxiosPromise<Schema$Occurrence> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Get; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Get; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}').replace(/([^:]\/)\/+/g, '$1'), method: 'GET', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$Occurrence>(parameters, callback); } else { return createAPIRequest<Schema$Occurrence>(parameters); } } /** * containeranalysis.projects.occurrences.getIamPolicy * @desc Gets the access control policy for a note or an occurrence resource. Requires `containeranalysis.notes.setIamPolicy` or `containeranalysis.occurrences.setIamPolicy` permission if the resource is a note or occurrence, respectively. The resource takes the format `projects/[PROJECT_ID]/notes/[NOTE_ID]` for notes and `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]` for occurrences. * @alias containeranalysis.projects.occurrences.getIamPolicy * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.resource_ REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. * @param {().GetIamPolicyRequest} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ getIamPolicy( params?: Params$Resource$Projects$Occurrences$Getiampolicy, options?: MethodOptions ): GaxiosPromise<Schema$Policy>; getIamPolicy( params: Params$Resource$Projects$Occurrences$Getiampolicy, options: MethodOptions | BodyResponseCallback<Schema$Policy>, callback: BodyResponseCallback<Schema$Policy> ): void; getIamPolicy( params: Params$Resource$Projects$Occurrences$Getiampolicy, callback: BodyResponseCallback<Schema$Policy> ): void; getIamPolicy(callback: BodyResponseCallback<Schema$Policy>): void; getIamPolicy( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Getiampolicy | BodyResponseCallback<Schema$Policy>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Policy>, callback?: BodyResponseCallback<Schema$Policy> ): void | GaxiosPromise<Schema$Policy> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Getiampolicy; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Getiampolicy; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+resource}:getIamPolicy').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['resource'], pathParams: ['resource'], context: this.context, }; if (callback) { createAPIRequest<Schema$Policy>(parameters, callback); } else { return createAPIRequest<Schema$Policy>(parameters); } } /** * containeranalysis.projects.occurrences.getNotes * @desc Gets the note attached to the specified occurrence. Consumer projects can use this method to get a note that belongs to a provider project. * @alias containeranalysis.projects.occurrences.getNotes * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ getNotes( params?: Params$Resource$Projects$Occurrences$Getnotes, options?: MethodOptions ): GaxiosPromise<Schema$Note>; getNotes( params: Params$Resource$Projects$Occurrences$Getnotes, options: MethodOptions | BodyResponseCallback<Schema$Note>, callback: BodyResponseCallback<Schema$Note> ): void; getNotes( params: Params$Resource$Projects$Occurrences$Getnotes, callback: BodyResponseCallback<Schema$Note> ): void; getNotes(callback: BodyResponseCallback<Schema$Note>): void; getNotes( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Getnotes | BodyResponseCallback<Schema$Note>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Note>, callback?: BodyResponseCallback<Schema$Note> ): void | GaxiosPromise<Schema$Note> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Getnotes; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Getnotes; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}/notes').replace( /([^:]\/)\/+/g, '$1' ), method: 'GET', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$Note>(parameters, callback); } else { return createAPIRequest<Schema$Note>(parameters); } } /** * containeranalysis.projects.occurrences.getVulnerabilitySummary * @desc Gets a summary of the number and severity of occurrences. * @alias containeranalysis.projects.occurrences.getVulnerabilitySummary * @memberOf! () * * @param {object} params Parameters for request * @param {string=} params.filter The filter expression. * @param {string} params.parent The name of the project to get a vulnerability summary for in the form of `projects/[PROJECT_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ getVulnerabilitySummary( params?: Params$Resource$Projects$Occurrences$Getvulnerabilitysummary, options?: MethodOptions ): GaxiosPromise<Schema$VulnerabilityOccurrencesSummary>; getVulnerabilitySummary( params: Params$Resource$Projects$Occurrences$Getvulnerabilitysummary, options: | MethodOptions | BodyResponseCallback<Schema$VulnerabilityOccurrencesSummary>, callback: BodyResponseCallback<Schema$VulnerabilityOccurrencesSummary> ): void; getVulnerabilitySummary( params: Params$Resource$Projects$Occurrences$Getvulnerabilitysummary, callback: BodyResponseCallback<Schema$VulnerabilityOccurrencesSummary> ): void; getVulnerabilitySummary( callback: BodyResponseCallback<Schema$VulnerabilityOccurrencesSummary> ): void; getVulnerabilitySummary( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Getvulnerabilitysummary | BodyResponseCallback<Schema$VulnerabilityOccurrencesSummary>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$VulnerabilityOccurrencesSummary>, callback?: BodyResponseCallback<Schema$VulnerabilityOccurrencesSummary> ): void | GaxiosPromise<Schema$VulnerabilityOccurrencesSummary> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Getvulnerabilitysummary; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Getvulnerabilitysummary; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: ( rootUrl + '/v1beta1/{+parent}/occurrences:vulnerabilitySummary' ).replace(/([^:]\/)\/+/g, '$1'), method: 'GET', }, options ), params, requiredParams: ['parent'], pathParams: ['parent'], context: this.context, }; if (callback) { createAPIRequest<Schema$VulnerabilityOccurrencesSummary>( parameters, callback ); } else { return createAPIRequest<Schema$VulnerabilityOccurrencesSummary>( parameters ); } } /** * containeranalysis.projects.occurrences.list * @desc Lists occurrences for the specified project. * @alias containeranalysis.projects.occurrences.list * @memberOf! () * * @param {object} params Parameters for request * @param {string=} params.filter The filter expression. * @param {integer=} params.pageSize Number of occurrences to return in the list. Must be positive. Max allowed page size is 1000. If not specified, page size defaults to 20. * @param {string=} params.pageToken Token to provide to skip to a particular spot in the list. * @param {string} params.parent The name of the project to list occurrences for in the form of `projects/[PROJECT_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ list( params?: Params$Resource$Projects$Occurrences$List, options?: MethodOptions ): GaxiosPromise<Schema$ListOccurrencesResponse>; list( params: Params$Resource$Projects$Occurrences$List, options: | MethodOptions | BodyResponseCallback<Schema$ListOccurrencesResponse>, callback: BodyResponseCallback<Schema$ListOccurrencesResponse> ): void; list( params: Params$Resource$Projects$Occurrences$List, callback: BodyResponseCallback<Schema$ListOccurrencesResponse> ): void; list(callback: BodyResponseCallback<Schema$ListOccurrencesResponse>): void; list( paramsOrCallback?: | Params$Resource$Projects$Occurrences$List | BodyResponseCallback<Schema$ListOccurrencesResponse>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$ListOccurrencesResponse>, callback?: BodyResponseCallback<Schema$ListOccurrencesResponse> ): void | GaxiosPromise<Schema$ListOccurrencesResponse> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$List; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$List; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+parent}/occurrences').replace( /([^:]\/)\/+/g, '$1' ), method: 'GET', }, options ), params, requiredParams: ['parent'], pathParams: ['parent'], context: this.context, }; if (callback) { createAPIRequest<Schema$ListOccurrencesResponse>(parameters, callback); } else { return createAPIRequest<Schema$ListOccurrencesResponse>(parameters); } } /** * containeranalysis.projects.occurrences.patch * @desc Updates the specified occurrence. * @alias containeranalysis.projects.occurrences.patch * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. * @param {string=} params.updateMask The fields to update. * @param {().Occurrence} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ patch( params?: Params$Resource$Projects$Occurrences$Patch, options?: MethodOptions ): GaxiosPromise<Schema$Occurrence>; patch( params: Params$Resource$Projects$Occurrences$Patch, options: MethodOptions | BodyResponseCallback<Schema$Occurrence>, callback: BodyResponseCallback<Schema$Occurrence> ): void; patch( params: Params$Resource$Projects$Occurrences$Patch, callback: BodyResponseCallback<Schema$Occurrence> ): void; patch(callback: BodyResponseCallback<Schema$Occurrence>): void; patch( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Patch | BodyResponseCallback<Schema$Occurrence>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$Occurrence>, callback?: BodyResponseCallback<Schema$Occurrence> ): void | GaxiosPromise<Schema$Occurrence> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Patch; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Patch; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}').replace(/([^:]\/)\/+/g, '$1'), method: 'PATCH', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$Occurrence>(parameters, callback); } else { return createAPIRequest<Schema$Occurrence>(parameters); } } /** * containeranalysis.projects.occurrences.setIamPolicy * @desc Sets the access control policy on the specified note or occurrence. Requires `containeranalysis.notes.setIamPolicy` or `containeranalysis.occurrences.setIamPolicy` permission if the resource is a note or an occurrence, respectively. The resource takes the format `projects/[PROJECT_ID]/notes/[NOTE_ID]` for notes and `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]` for occurrences. * @alias containeranalysis.projects.occurrences.setIamPolicy * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.resource_ REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. * @param {().SetIamPolicyRequest} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ setIamPolicy( params?: Params$Resource$Projects$Occurrences$Setiampolicy, options?: MethodOptions ): GaxiosPromise<Schema$Policy>; setIamPolicy( params: Params$Resource$Projects$Occurrences$Setiampolicy, options: MethodOptions | BodyResponseCallback<Schema$Policy>, callback: BodyResponseCallback<Schema$Policy> ): void; setIamPolicy( params: Params$Resource$Projects$Occurrences$Setiampolicy, callback: BodyResponseCallback<Schema$Policy> ): void; setIamPolicy(callback: BodyResponseCallback<Schema$Policy>): void; setIamPolicy( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Setiampolicy | BodyResponseCallback<Schema$Policy>, optionsOrCallback?: MethodOptions | BodyResponseCallback<Schema$Policy>, callback?: BodyResponseCallback<Schema$Policy> ): void | GaxiosPromise<Schema$Policy> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Setiampolicy; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Setiampolicy; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+resource}:setIamPolicy').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['resource'], pathParams: ['resource'], context: this.context, }; if (callback) { createAPIRequest<Schema$Policy>(parameters, callback); } else { return createAPIRequest<Schema$Policy>(parameters); } } /** * containeranalysis.projects.occurrences.testIamPermissions * @desc Returns the permissions that a caller has on the specified note or occurrence. Requires list permission on the project (for example, `containeranalysis.notes.list`). The resource takes the format `projects/[PROJECT_ID]/notes/[NOTE_ID]` for notes and `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]` for occurrences. * @alias containeranalysis.projects.occurrences.testIamPermissions * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.resource_ REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. * @param {().TestIamPermissionsRequest} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ testIamPermissions( params?: Params$Resource$Projects$Occurrences$Testiampermissions, options?: MethodOptions ): GaxiosPromise<Schema$TestIamPermissionsResponse>; testIamPermissions( params: Params$Resource$Projects$Occurrences$Testiampermissions, options: | MethodOptions | BodyResponseCallback<Schema$TestIamPermissionsResponse>, callback: BodyResponseCallback<Schema$TestIamPermissionsResponse> ): void; testIamPermissions( params: Params$Resource$Projects$Occurrences$Testiampermissions, callback: BodyResponseCallback<Schema$TestIamPermissionsResponse> ): void; testIamPermissions( callback: BodyResponseCallback<Schema$TestIamPermissionsResponse> ): void; testIamPermissions( paramsOrCallback?: | Params$Resource$Projects$Occurrences$Testiampermissions | BodyResponseCallback<Schema$TestIamPermissionsResponse>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$TestIamPermissionsResponse>, callback?: BodyResponseCallback<Schema$TestIamPermissionsResponse> ): void | GaxiosPromise<Schema$TestIamPermissionsResponse> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Occurrences$Testiampermissions; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Occurrences$Testiampermissions; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+resource}:testIamPermissions').replace( /([^:]\/)\/+/g, '$1' ), method: 'POST', }, options ), params, requiredParams: ['resource'], pathParams: ['resource'], context: this.context, }; if (callback) { createAPIRequest<Schema$TestIamPermissionsResponse>( parameters, callback ); } else { return createAPIRequest<Schema$TestIamPermissionsResponse>(parameters); } } } export interface Params$Resource$Projects$Occurrences$Batchcreate extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the project in the form of `projects/[PROJECT_ID]`, under which the occurrences are to be created. */ parent?: string; /** * Request body metadata */ requestBody?: Schema$BatchCreateOccurrencesRequest; } export interface Params$Resource$Projects$Occurrences$Create extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the project in the form of `projects/[PROJECT_ID]`, under which the occurrence is to be created. */ parent?: string; /** * Request body metadata */ requestBody?: Schema$Occurrence; } export interface Params$Resource$Projects$Occurrences$Delete extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. */ name?: string; } export interface Params$Resource$Projects$Occurrences$Get extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. */ name?: string; } export interface Params$Resource$Projects$Occurrences$Getiampolicy extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. */ resource?: string; /** * Request body metadata */ requestBody?: Schema$GetIamPolicyRequest; } export interface Params$Resource$Projects$Occurrences$Getnotes extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. */ name?: string; } export interface Params$Resource$Projects$Occurrences$Getvulnerabilitysummary extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The filter expression. */ filter?: string; /** * The name of the project to get a vulnerability summary for in the form of `projects/[PROJECT_ID]`. */ parent?: string; } export interface Params$Resource$Projects$Occurrences$List extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The filter expression. */ filter?: string; /** * Number of occurrences to return in the list. Must be positive. Max allowed page size is 1000. If not specified, page size defaults to 20. */ pageSize?: number; /** * Token to provide to skip to a particular spot in the list. */ pageToken?: string; /** * The name of the project to list occurrences for in the form of `projects/[PROJECT_ID]`. */ parent?: string; } export interface Params$Resource$Projects$Occurrences$Patch extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the occurrence in the form of `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`. */ name?: string; /** * The fields to update. */ updateMask?: string; /** * Request body metadata */ requestBody?: Schema$Occurrence; } export interface Params$Resource$Projects$Occurrences$Setiampolicy extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. */ resource?: string; /** * Request body metadata */ requestBody?: Schema$SetIamPolicyRequest; } export interface Params$Resource$Projects$Occurrences$Testiampermissions extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. */ resource?: string; /** * Request body metadata */ requestBody?: Schema$TestIamPermissionsRequest; } export class Resource$Projects$Scanconfigs { context: APIRequestContext; constructor(context: APIRequestContext) { this.context = context; } /** * containeranalysis.projects.scanConfigs.get * @desc Gets the specified scan configuration. * @alias containeranalysis.projects.scanConfigs.get * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the scan configuration in the form of `projects/[PROJECT_ID]/scanConfigs/[SCAN_CONFIG_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ get( params?: Params$Resource$Projects$Scanconfigs$Get, options?: MethodOptions ): GaxiosPromise<Schema$ScanConfig>; get( params: Params$Resource$Projects$Scanconfigs$Get, options: MethodOptions | BodyResponseCallback<Schema$ScanConfig>, callback: BodyResponseCallback<Schema$ScanConfig> ): void; get( params: Params$Resource$Projects$Scanconfigs$Get, callback: BodyResponseCallback<Schema$ScanConfig> ): void; get(callback: BodyResponseCallback<Schema$ScanConfig>): void; get( paramsOrCallback?: | Params$Resource$Projects$Scanconfigs$Get | BodyResponseCallback<Schema$ScanConfig>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$ScanConfig>, callback?: BodyResponseCallback<Schema$ScanConfig> ): void | GaxiosPromise<Schema$ScanConfig> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Scanconfigs$Get; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Scanconfigs$Get; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}').replace(/([^:]\/)\/+/g, '$1'), method: 'GET', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$ScanConfig>(parameters, callback); } else { return createAPIRequest<Schema$ScanConfig>(parameters); } } /** * containeranalysis.projects.scanConfigs.list * @desc Lists scan configurations for the specified project. * @alias containeranalysis.projects.scanConfigs.list * @memberOf! () * * @param {object} params Parameters for request * @param {string=} params.filter The filter expression. * @param {integer=} params.pageSize The number of scan configs to return in the list. * @param {string=} params.pageToken Token to provide to skip to a particular spot in the list. * @param {string} params.parent The name of the project to list scan configurations for in the form of `projects/[PROJECT_ID]`. * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ list( params?: Params$Resource$Projects$Scanconfigs$List, options?: MethodOptions ): GaxiosPromise<Schema$ListScanConfigsResponse>; list( params: Params$Resource$Projects$Scanconfigs$List, options: | MethodOptions | BodyResponseCallback<Schema$ListScanConfigsResponse>, callback: BodyResponseCallback<Schema$ListScanConfigsResponse> ): void; list( params: Params$Resource$Projects$Scanconfigs$List, callback: BodyResponseCallback<Schema$ListScanConfigsResponse> ): void; list(callback: BodyResponseCallback<Schema$ListScanConfigsResponse>): void; list( paramsOrCallback?: | Params$Resource$Projects$Scanconfigs$List | BodyResponseCallback<Schema$ListScanConfigsResponse>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$ListScanConfigsResponse>, callback?: BodyResponseCallback<Schema$ListScanConfigsResponse> ): void | GaxiosPromise<Schema$ListScanConfigsResponse> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Scanconfigs$List; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Scanconfigs$List; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+parent}/scanConfigs').replace( /([^:]\/)\/+/g, '$1' ), method: 'GET', }, options ), params, requiredParams: ['parent'], pathParams: ['parent'], context: this.context, }; if (callback) { createAPIRequest<Schema$ListScanConfigsResponse>(parameters, callback); } else { return createAPIRequest<Schema$ListScanConfigsResponse>(parameters); } } /** * containeranalysis.projects.scanConfigs.update * @desc Updates the specified scan configuration. * @alias containeranalysis.projects.scanConfigs.update * @memberOf! () * * @param {object} params Parameters for request * @param {string} params.name The name of the scan configuration in the form of `projects/[PROJECT_ID]/scanConfigs/[SCAN_CONFIG_ID]`. * @param {().ScanConfig} params.resource Request body data * @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`. * @param {callback} callback The callback that handles the response. * @return {object} Request object */ update( params?: Params$Resource$Projects$Scanconfigs$Update, options?: MethodOptions ): GaxiosPromise<Schema$ScanConfig>; update( params: Params$Resource$Projects$Scanconfigs$Update, options: MethodOptions | BodyResponseCallback<Schema$ScanConfig>, callback: BodyResponseCallback<Schema$ScanConfig> ): void; update( params: Params$Resource$Projects$Scanconfigs$Update, callback: BodyResponseCallback<Schema$ScanConfig> ): void; update(callback: BodyResponseCallback<Schema$ScanConfig>): void; update( paramsOrCallback?: | Params$Resource$Projects$Scanconfigs$Update | BodyResponseCallback<Schema$ScanConfig>, optionsOrCallback?: | MethodOptions | BodyResponseCallback<Schema$ScanConfig>, callback?: BodyResponseCallback<Schema$ScanConfig> ): void | GaxiosPromise<Schema$ScanConfig> { let params = (paramsOrCallback || {}) as Params$Resource$Projects$Scanconfigs$Update; let options = (optionsOrCallback || {}) as MethodOptions; if (typeof paramsOrCallback === 'function') { callback = paramsOrCallback; params = {} as Params$Resource$Projects$Scanconfigs$Update; options = {}; } if (typeof optionsOrCallback === 'function') { callback = optionsOrCallback; options = {}; } const rootUrl = options.rootUrl || 'https://containeranalysis.googleapis.com/'; const parameters = { options: Object.assign( { url: (rootUrl + '/v1beta1/{+name}').replace(/([^:]\/)\/+/g, '$1'), method: 'PUT', }, options ), params, requiredParams: ['name'], pathParams: ['name'], context: this.context, }; if (callback) { createAPIRequest<Schema$ScanConfig>(parameters, callback); } else { return createAPIRequest<Schema$ScanConfig>(parameters); } } } export interface Params$Resource$Projects$Scanconfigs$Get extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the scan configuration in the form of `projects/[PROJECT_ID]/scanConfigs/[SCAN_CONFIG_ID]`. */ name?: string; } export interface Params$Resource$Projects$Scanconfigs$List extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The filter expression. */ filter?: string; /** * The number of scan configs to return in the list. */ pageSize?: number; /** * Token to provide to skip to a particular spot in the list. */ pageToken?: string; /** * The name of the project to list scan configurations for in the form of `projects/[PROJECT_ID]`. */ parent?: string; } export interface Params$Resource$Projects$Scanconfigs$Update extends StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient; /** * The name of the scan configuration in the form of `projects/[PROJECT_ID]/scanConfigs/[SCAN_CONFIG_ID]`. */ name?: string; /** * Request body metadata */ requestBody?: Schema$ScanConfig; } }
Resource$Projects
simple_schedule.go
package schedule import ( "time" ) // SimpleSchedule is a schedule that only implements an endless repeating interval type SimpleSchedule struct { Interval time.Duration state ScheduleState } // NewSimpleSchedule returns the SimpleSchedule given the time interval func NewSimpleSchedule(i time.Duration) *SimpleSchedule
// GetState returns the schedule state func (s *SimpleSchedule) GetState() ScheduleState { return s.state } // Validate returns an error if the interval of schedule is less // or equals zero func (s *SimpleSchedule) Validate() error { if s.Interval <= 0 { return ErrInvalidInterval } return nil } // Wait returns the SimpleSchedule state, misses and the last schedule ran func (s *SimpleSchedule) Wait(last time.Time) Response { m, t := waitOnInterval(last, s.Interval) return &SimpleScheduleResponse{state: s.GetState(), missed: m, lastTime: t} } // SimpleScheduleResponse a response from SimpleSchedule conforming to ScheduleResponse interface type SimpleScheduleResponse struct { state ScheduleState missed uint lastTime time.Time } // State returns the state of the Schedule func (s *SimpleScheduleResponse) State() ScheduleState { return s.state } // Error returns last error func (s *SimpleScheduleResponse) Error() error { return nil } // Missed returns any missed intervals func (s *SimpleScheduleResponse) Missed() uint { return s.missed } // LastTime returns the last response time func (s *SimpleScheduleResponse) LastTime() time.Time { return s.lastTime }
{ return &SimpleSchedule{ Interval: i, } }
exec.go
package exec import ( "fmt" "os" "os/exec" "path/filepath" "strings" "syscall" "github.com/google/uuid" "github.com/pkg/errors" ) // Run runs a command, outputting to terminal and returning the full output and/or error // a channel is returned which, when sent on, will terminate the process that was started func Run(cmd string, env ...string) (string, int, error) { // you can uncomment this below if you want to see exactly the commands being run fmt.Println("▶️", cmd) parts := strings.Split(cmd, " ") // add an environment variable with a UUID // if the command is `sat`, then the var will be // SAT_UUID=asdfghjkl procUUID := uuid.New().String() uuidEnv := fmt.Sprintf("%s_UUID=%s", strings.ToUpper(parts[0]), procUUID) env = append(env, uuidEnv) logPath, err := logfilePath(procUUID) if err != nil { return "", 0, errors.Wrap(err, "failed to logFilePath") } logEnv := fmt.Sprintf("%s_LOG_FILE=%s", strings.ToUpper(parts[0]), logPath) env = append(env, logEnv) // augment the provided env with the env of the parent env = append(env, os.Environ()...) binPath, err := exec.LookPath(parts[0]) if err != nil { return "", 0, errors.Wrap(err, "failed to LookPath") } info := &syscall.ProcAttr{ Env: env, Files: []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()}, } pid, err := syscall.ForkExec(binPath, parts, info) if err != nil { return "", 0, errors.Wrap(err, "failed to ForkExec") } return procUUID, pid, nil } // logfilePath returns the directory that Info files should be written to func logfilePath(uuid string) (string, error) { c
onfig, err := os.UserConfigDir() if err != nil { return "", errors.Wrap(err, "failed to UserConfigDir") } dir := filepath.Join(config, "suborbital", "log") if err := os.MkdirAll(dir, 0755); err != nil { return "", errors.Wrap(err, "failed to MkdirAll") } filePath := filepath.Join(dir, fmt.Sprintf("%s.log", uuid)) return filePath, nil }