file_name (3-137 chars) | prefix (0-918k chars) | suffix (0-962k chars) | middle (0-812k chars)
---|---|---|---|
z2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == "__main__":
def midlgeom(a):
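# Note: despite the name, the body below computes the harmonic mean: len(a) / sum(1/x for x in a)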
|
raw = input('Enter a sequence of numbers separated by spaces: ')
mas = [int(i) for i in raw.split(' ') if i.isdigit()]
print(midlgeom(mas))
| if len(a) != 0:
res = 0
for i in range(len(a)):
res += 1/a[i]
return len(a) / res
else:
return None |
storage.py | import torch
import numpy as np
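# The store_value* helpers buffer byte-cast tensors along dim 0 and dump a .npy shard every
# 100 calls, dropping the leading placeholder row before saving and resetting the buffer.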
def store_value(main_array,cu_fl,i,name):
cu_uint8 = cu_fl.type(torch.ByteTensor)
main_array = torch.cat((main_array,cu_uint8),0)
#print(i)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:,:,:,:])
main_array = torch.ByteTensor(1,np.shape(main_array)[1],np.shape(main_array)[2],np.shape(main_array)[3])
return main_array
def store_value_3d(main_array,cu_fl,i,name):
|
def store_value_2d(main_array,cu_fl,i,name):
cu_uint8 = cu_fl.type(torch.ByteTensor)
main_array = torch.cat((main_array,cu_uint8),0)
#print(i)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:,:])
main_array = torch.ByteTensor(1,np.shape(main_array)[1])
return main_array
def store_value2(main_array,cu_fl,i,name):
cu_uint8 = cu_fl.type(torch.ByteTensor)
main_array = torch.cat((main_array,cu_uint8),0)
#print(i)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:])
main_array = torch.ByteTensor(1)
return main_array
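# Concatenates every 'weight' and 'bias' tensor in the given state dict and saves them as
# weight_matrix.npy / bias_matrix.npy, dropping the leading placeholder entries
# (assumes each weight tensor is N x 8 so it can be concatenated with the (1, 8) placeholder).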
def store_all_weights(dict_wb):
weight_matrix = torch.Tensor(1,8).type(torch.cuda.FloatTensor)
bias_matrix = torch.Tensor(1).type(torch.cuda.FloatTensor)
for items in dict_wb:
print(weight_matrix.size())
if 'weight' in items:
print(dict_wb[items].size())
weight_matrix = torch.cat((weight_matrix,dict_wb[items]),0)
if 'bias' in items:
bias_matrix = torch.cat((bias_matrix,dict_wb[items]),0)
np.save('weight_matrix.npy',weight_matrix[1:,:].cpu().numpy())
np.save('bias_matrix.npy',bias_matrix[1:].cpu().numpy()) | cu_uint8 = cu_fl.type(torch.ByteTensor)
cu_uint8 = torch.reshape(cu_uint8,(cu_fl.size()[0],cu_fl.size()[2],cu_fl.size()[3]))
main_array = torch.cat((main_array,cu_uint8),0)
#print(i)
if (i + 1)%100 == 0:
main_array_np = main_array.cpu().numpy()
np.save(name + str(int(i/100)) + '.npy',main_array[1:,:,:])
main_array = torch.ByteTensor(1,np.shape(main_array)[1],np.shape(main_array)[2])
return main_array |
Boat.js | class Boat {
constructor(x, y, width, height, boatPos, boatAnimation) {
var options = {
restitution: 0.8,
friction: 1.0,
density: 1.0,
label: "boat"
};
this.animation = boatAnimation;
this.speed = 0.05;
this.body = Bodies.rectangle(x, y, width, height, options);
this.width = width;
this.height = height;
this.boatPosition = boatPos;
this.image = loadImage("assets/boat.png");
World.add(world, this.body);
}
animate() {
this.speed += 0.05 % 1.1;
}
remove(index) {
this.animation=brokenBoatAnimation;
this.speed = 0.05;
| boats.splice(index, 1);}, 2000);
}
display() {
var angle = this.body.angle;
var pos = this.body.position;
var index = floor(this.speed % this.animation.length);
push();
translate(pos.x, pos.y);
rotate(angle);
imageMode(CENTER);
image(this.animation[index], 0, this.boatPosition, this.width, this.height);
noTint();
pop();
}
} | this.width = 300;
this.height = 300;
setTimeout(()=>{Matter.World.remove(world, boats[index].body);
|
chain.go | package ibctesting
import (
"bytes"
"fmt"
"strconv"
"testing"
"time"
abci "github.com/line/ostracon/abci/types"
"github.com/line/ostracon/crypto"
"github.com/line/ostracon/crypto/tmhash"
ocproto "github.com/line/ostracon/proto/ostracon/types"
ocprotoversion "github.com/line/ostracon/proto/ostracon/version"
octypes "github.com/line/ostracon/types"
tmversion "github.com/line/ostracon/version"
"github.com/stretchr/testify/require"
"github.com/line/lbm-sdk/client"
"github.com/line/lbm-sdk/codec"
"github.com/line/lbm-sdk/crypto/keys/secp256k1"
cryptotypes "github.com/line/lbm-sdk/crypto/types"
"github.com/line/lbm-sdk/simapp"
sdk "github.com/line/lbm-sdk/types"
sdkerrors "github.com/line/lbm-sdk/types/errors"
authtypes "github.com/line/lbm-sdk/x/auth/types"
banktypes "github.com/line/lbm-sdk/x/bank/types"
capabilitytypes "github.com/line/lbm-sdk/x/capability/types"
ibctransfertypes "github.com/line/lbm-sdk/x/ibc/applications/transfer/types"
clienttypes "github.com/line/lbm-sdk/x/ibc/core/02-client/types"
connectiontypes "github.com/line/lbm-sdk/x/ibc/core/03-connection/types"
channeltypes "github.com/line/lbm-sdk/x/ibc/core/04-channel/types"
commitmenttypes "github.com/line/lbm-sdk/x/ibc/core/23-commitment/types"
host "github.com/line/lbm-sdk/x/ibc/core/24-host"
"github.com/line/lbm-sdk/x/ibc/core/exported"
"github.com/line/lbm-sdk/x/ibc/core/types"
ibctmtypes "github.com/line/lbm-sdk/x/ibc/light-clients/99-ostracon/types"
"github.com/line/lbm-sdk/x/ibc/testing/mock"
"github.com/line/lbm-sdk/x/staking/teststaking"
stakingtypes "github.com/line/lbm-sdk/x/staking/types"
)
const (
// Default params constants used to create a TM client
TrustingPeriod time.Duration = time.Hour * 24 * 7 * 2
UnbondingPeriod time.Duration = time.Hour * 24 * 7 * 3
MaxClockDrift time.Duration = time.Second * 10
DefaultDelayPeriod uint64 = 0
DefaultChannelVersion = ibctransfertypes.Version
InvalidID = "IDisInvalid"
ConnectionIDPrefix = "conn"
ChannelIDPrefix = "chan"
TransferPort = ibctransfertypes.ModuleName
MockPort = mock.ModuleName
// used for testing UpdateClientProposal
Title = "title"
Description = "description"
)
var (
DefaultOpenInitVersion *connectiontypes.Version
// Default params variables used to create a TM client
DefaultTrustLevel ibctmtypes.Fraction = ibctmtypes.DefaultTrustLevel
TestHash = tmhash.Sum([]byte("TESTING HASH"))
TestCoin = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
UpgradePath = []string{"upgrade", "upgradedIBCState"}
ConnectionVersion = connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions())[0]
MockAcknowledgement = mock.MockAcknowledgement
MockCommitment = mock.MockCommitment
)
// TestChain is a testing struct that wraps a simapp with the last TM Header, the current ABCI
// header and the validators of the TestChain. It also contains a field called ChainID. This
// is the clientID that *other* chains use to refer to this TestChain. The SenderAccount
// is used for delivering transactions through the application state.
// NOTE: the actual application uses an empty chain-id for ease of testing.
type TestChain struct {
t *testing.T
App *simapp.SimApp
ChainID string
LastHeader *ibctmtypes.Header // header for last block height committed
CurrentHeader ocproto.Header // header for current block height
QueryServer types.QueryServer
TxConfig client.TxConfig
Codec codec.Codec
Vals *octypes.ValidatorSet
Voters *octypes.VoterSet
Signers []octypes.PrivValidator
senderPrivKey cryptotypes.PrivKey
SenderAccount authtypes.AccountI
// IBC specific helpers
ClientIDs []string // ClientID's used on this chain
Connections []*TestConnection // track connectionID's created for this chain
}
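// NewTestValidator creates an octypes validator and sets its voting power equal to its staking power.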
func NewTestValidator(pubkey crypto.PubKey, stakingPower int64) *octypes.Validator {
val := octypes.NewValidator(pubkey, stakingPower)
val.VotingPower = val.StakingPower
return val
}
// NewTestChain initializes a new TestChain instance with a single validator set using a
// generated private key. It also creates a sender account to be used for delivering transactions.
//
// The first block height is committed to state in order to allow for client creations on
// counterparty chains. The TestChain will return with a block height starting at 2.
//
// Time management is handled by the Coordinator in order to ensure synchrony between chains.
// Each update of any chain increments the block header time for all chains by 5 seconds.
func NewTestChain(t *testing.T, chainID string) *TestChain {
// generate validator private/public key
privVal := mock.NewPV()
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
// create validator set with single validator
validator := NewTestValidator(pubKey, 1)
valSet := octypes.NewValidatorSet([]*octypes.Validator{validator})
signers := []octypes.PrivValidator{privVal}
// generate genesis account
senderPrivKey := secp256k1.GenPrivKey()
acc := authtypes.NewBaseAccount(sdk.BytesToAccAddress(senderPrivKey.PubKey().Address()), senderPrivKey.PubKey(), 0, 0)
balance := banktypes.Balance{
Address: acc.GetAddress().String(),
Coins: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100000000000000))),
}
app := simapp.SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)
// create current header and call begin block
header := ocproto.Header{
ChainID: chainID,
Height: 1,
Time: globalStartTime,
}
txConfig := simapp.MakeTestEncodingConfig().TxConfig
// create an account to send transactions from
chain := &TestChain{
t: t,
ChainID: chainID,
App: app,
CurrentHeader: header,
QueryServer: app.IBCKeeper,
TxConfig: txConfig,
Codec: app.AppCodec(),
Vals: valSet,
Voters: octypes.WrapValidatorsToVoterSet(valSet.Validators),
Signers: signers,
senderPrivKey: senderPrivKey,
SenderAccount: acc,
ClientIDs: make([]string, 0),
Connections: make([]*TestConnection, 0),
}
cap := chain.App.IBCKeeper.PortKeeper.BindPort(chain.GetContext(), MockPort)
err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(MockPort))
require.NoError(t, err)
chain.NextBlock()
return chain
}
// GetContext returns the current context for the application.
func (chain *TestChain) GetContext() sdk.Context {
return chain.App.BaseApp.NewContext(false, chain.CurrentHeader)
}
// QueryProof performs an abci query with the given key and returns the proto encoded merkle proof
// for the query and the height at which the proof will succeed on a tendermint verifier.
func (chain *TestChain) QueryProof(key []byte) ([]byte, clienttypes.Height) {
res := chain.App.Query(abci.RequestQuery{
Path: fmt.Sprintf("store/%s/key", host.StoreKey),
Height: chain.App.LastBlockHeight() - 1,
Data: key,
Prove: true,
})
merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
require.NoError(chain.t, err)
proof, err := chain.App.AppCodec().Marshal(&merkleProof)
require.NoError(chain.t, err)
revision := clienttypes.ParseChainID(chain.ChainID)
// proof height + 1 is returned as the proof created corresponds to the height the proof
// was created in the IAVL tree. Ostracon and subsequently the clients that rely on it
// have heights 1 above the IAVL tree. Thus we return proof height + 1
return proof, clienttypes.NewHeight(revision, uint64(res.Height)+1)
}
// QueryUpgradeProof performs an abci query with the given key and returns the proto encoded merkle proof
// for the query and the height at which the proof will succeed on a tendermint verifier.
func (chain *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, clienttypes.Height) {
res := chain.App.Query(abci.RequestQuery{
Path: "store/upgrade/key",
Height: int64(height - 1),
Data: key,
Prove: true,
})
merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
require.NoError(chain.t, err)
proof, err := chain.App.AppCodec().Marshal(&merkleProof)
require.NoError(chain.t, err)
revision := clienttypes.ParseChainID(chain.ChainID)
// proof height + 1 is returned as the proof created corresponds to the height the proof
// was created in the IAVL tree. Ostracon and subsequently the clients that rely on it
// have heights 1 above the IAVL tree. Thus we return proof height + 1
return proof, clienttypes.NewHeight(revision, uint64(res.Height+1))
}
// QueryClientStateProof performs an abci query for a client state
// stored with a given clientID and returns the ClientState along with the proof
func (chain *TestChain) QueryClientStateProof(clientID string) (exported.ClientState, []byte) {
// retrieve client state to provide proof for
clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
require.True(chain.t, found)
clientKey := host.FullClientStateKey(clientID)
proofClient, _ := chain.QueryProof(clientKey)
return clientState, proofClient
}
// QueryConsensusStateProof performs an abci query for a consensus state
// stored on the given clientID. The proof and consensusHeight are returned.
func (chain *TestChain) QueryConsensusStateProof(clientID string) ([]byte, clienttypes.Height) {
clientState := chain.GetClientState(clientID)
consensusHeight := clientState.GetLatestHeight().(clienttypes.Height)
consensusKey := host.FullConsensusStateKey(clientID, consensusHeight)
proofConsensus, _ := chain.QueryProof(consensusKey)
return proofConsensus, consensusHeight
}
// NextBlock sets the last header to the current header and increments the current header to be
// at the next block height. It does not update the time as that is handled by the Coordinator.
//
// CONTRACT: this function must only be called after app.Commit() occurs
func (chain *TestChain) NextBlock() {
// set the last header to the current header
// use nil trusted fields
chain.LastHeader = chain.CurrentOCClientHeader()
// increment the current header
chain.CurrentHeader = ocproto.Header{
ChainID: chain.ChainID,
Height: chain.App.LastBlockHeight() + 1,
AppHash: chain.App.LastCommitID().Hash,
// NOTE: the time is increased by the coordinator to maintain time synchrony amongst
// chains.
Time: chain.CurrentHeader.Time,
ValidatorsHash: chain.Vals.Hash(),
NextValidatorsHash: chain.Vals.Hash(),
}
chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
}
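// CommitBlock ends the current block, commits the application state, and runs the Ostracon
// recheck-tx phase for the current header.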
func (chain *TestChain) CommitBlock() {
chain.App.EndBlock(abci.RequestEndBlock{Height: chain.CurrentHeader.Height})
chain.App.Commit()
chain.App.BeginRecheckTx(abci.RequestBeginRecheckTx{Header: chain.CurrentHeader})
chain.App.EndRecheckTx(abci.RequestEndRecheckTx{Height: chain.CurrentHeader.Height})
}
// sendMsgs delivers a transaction through the application without returning the result.
func (chain *TestChain) sendMsgs(msgs ...sdk.Msg) error {
_, err := chain.SendMsgs(msgs...)
return err
}
// SendMsgs delivers a transaction through the application. It updates the sender's sequence
// number and updates the TestChain's headers. It returns the result and error if one
// occurred.
func (chain *TestChain) SendMsgs(msgs ...sdk.Msg) (*sdk.Result, error) {
_, r, err := simapp.SignCheckDeliver(
chain.t,
chain.TxConfig,
chain.App.BaseApp,
chain.GetContext().BlockHeader(),
msgs,
chain.ChainID,
[]uint64{chain.SenderAccount.GetAccountNumber()},
[]uint64{chain.SenderAccount.GetSequence()},
true, true, chain.senderPrivKey,
)
if err != nil {
return nil, err
}
// SignCheckDeliver calls app.Commit()
chain.NextBlock()
// increment sequence for successful transaction execution
chain.SenderAccount.SetSequence(chain.SenderAccount.GetSequence() + 1)
return r, nil
}
// GetClientState retrieves the client state for the provided clientID. The client is
// expected to exist otherwise testing will fail.
func (chain *TestChain) GetClientState(clientID string) exported.ClientState {
clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
require.True(chain.t, found)
return clientState
}
// GetConsensusState retrieves the consensus state for the provided clientID and height.
// It returns a success boolean indicating whether the consensus state exists.
func (chain *TestChain) GetConsensusState(clientID string, height exported.Height) (exported.ConsensusState, bool) {
return chain.App.IBCKeeper.ClientKeeper.GetClientConsensusState(chain.GetContext(), clientID, height)
}
// GetValsAtHeight will return the validator set of the chain at a given height. It will return
// a success boolean indicating whether the validator set exists at that height.
func (chain *TestChain) GetValsAtHeight(height int64) (*octypes.ValidatorSet, bool) {
histInfo, ok := chain.App.StakingKeeper.GetHistoricalInfo(chain.GetContext(), height)
if !ok {
return nil, false
}
valSet := stakingtypes.Validators(histInfo.Valset)
ocValidators, err := teststaking.ToOcValidators(valSet, sdk.DefaultPowerReduction)
if err != nil {
panic(err)
}
return octypes.NewValidatorSet(ocValidators), true
}
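// GetVotersAtHeight returns the voter set of the chain at a given height, along with a success
// boolean indicating whether the historical info exists at that height.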
func (chain *TestChain) GetVotersAtHeight(height int64) (*octypes.VoterSet, bool) {
histInfo, ok := chain.App.StakingKeeper.GetHistoricalInfo(chain.GetContext(), height)
if !ok {
return nil, false
}
// Voters of the test chain are always the same as the validator set
voters := stakingtypes.Validators(histInfo.Valset)
ocVoters, err := teststaking.ToOcValidators(voters, sdk.DefaultPowerReduction)
if err != nil {
panic(err)
}
// Validators saved in HistoricalInfo store have no voting power.
// We set voting power equal to staking power for testing.
for i := 0; i < len(ocVoters); i++ {
ocVoters[i].VotingPower = ocVoters[i].StakingPower
}
return octypes.WrapValidatorsToVoterSet(ocVoters), true
}
// GetConnection retrieves an IBC Connection for the provided TestConnection. The
// connection is expected to exist otherwise testing will fail.
func (chain *TestChain) GetConnection(testConnection *TestConnection) connectiontypes.ConnectionEnd {
connection, found := chain.App.IBCKeeper.ConnectionKeeper.GetConnection(chain.GetContext(), testConnection.ID)
require.True(chain.t, found)
return connection
}
// GetChannel retrieves an IBC Channel for the provided TestChannel. The channel
// is expected to exist otherwise testing will fail.
func (chain *TestChain) GetChannel(testChannel TestChannel) channeltypes.Channel {
channel, found := chain.App.IBCKeeper.ChannelKeeper.GetChannel(chain.GetContext(), testChannel.PortID, testChannel.ID)
require.True(chain.t, found)
return channel
}
// GetAcknowledgement retrieves an acknowledgement for the provided packet. If the
// acknowledgement does not exist then testing will fail.
func (chain *TestChain) GetAcknowledgement(packet exported.PacketI) []byte {
ack, found := chain.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(chain.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
require.True(chain.t, found)
return ack
}
// GetPrefix returns the prefix used by a chain in connection creation
func (chain *TestChain) GetPrefix() commitmenttypes.MerklePrefix {
return commitmenttypes.NewMerklePrefix(chain.App.IBCKeeper.ConnectionKeeper.GetCommitmentPrefix().Bytes())
}
// NewClientID constructs and appends a new clientID string in the format:
// <client-type>-<index>
func (chain *TestChain) NewClientID(clientType string) string {
clientID := fmt.Sprintf("%s-%s", clientType, strconv.Itoa(len(chain.ClientIDs)))
chain.ClientIDs = append(chain.ClientIDs, clientID)
return clientID
}
// AddTestConnection appends a new TestConnection which contains references
// to the connection id, client id and counterparty client id.
func (chain *TestChain) AddTestConnection(clientID, counterpartyClientID string) *TestConnection {
conn := chain.ConstructNextTestConnection(clientID, counterpartyClientID)
chain.Connections = append(chain.Connections, conn)
return conn
}
// ConstructNextTestConnection constructs the next test connection to be
// created given a clientID and counterparty clientID. The connection id
// format: <chainID>-conn<index>
func (chain *TestChain) ConstructNextTestConnection(clientID, counterpartyClientID string) *TestConnection {
connectionID := connectiontypes.FormatConnectionIdentifier(uint64(len(chain.Connections)))
return &TestConnection{
ID: connectionID,
ClientID: clientID,
NextChannelVersion: DefaultChannelVersion,
CounterpartyClientID: counterpartyClientID,
}
}
// GetFirstTestConnection returns the first test connection for a given clientID.
// The connection may or may not exist in the chain state.
func (chain *TestChain) GetFirstTestConnection(clientID, counterpartyClientID string) *TestConnection {
if len(chain.Connections) > 0 {
return chain.Connections[0]
}
return chain.ConstructNextTestConnection(clientID, counterpartyClientID)
}
// AddTestChannel appends a new TestChannel which contains references to the port and channel ID
// used for channel creation and interaction. See 'NextTestChannel' for channel ID naming format.
func (chain *TestChain) AddTestChannel(conn *TestConnection, portID string) TestChannel {
channel := chain.NextTestChannel(conn, portID)
conn.Channels = append(conn.Channels, channel)
return channel
}
// NextTestChannel returns the next test channel to be created on this connection, but does not
// add it to the list of created channels. This function is expected to be used when the caller
// has not created the associated channel in app state, but would still like to refer to the
// non-existent channel usually to test for its non-existence.
//
// channel ID format: <connectionid>-chan<channel-index>
//
// The port is passed in by the caller.
func (chain *TestChain) NextTestChannel(conn *TestConnection, portID string) TestChannel {
nextChanSeq := chain.App.IBCKeeper.ChannelKeeper.GetNextChannelSequence(chain.GetContext())
channelID := channeltypes.FormatChannelIdentifier(nextChanSeq)
return TestChannel{
PortID: portID,
ID: channelID,
ClientID: conn.ClientID,
CounterpartyClientID: conn.CounterpartyClientID,
Version: conn.NextChannelVersion,
}
}
// ConstructMsgCreateClient constructs a message to create a new client state (ostracon or solomachine).
// NOTE: a solo machine client will be created with an empty diversifier.
func (chain *TestChain) ConstructMsgCreateClient(counterparty *TestChain, clientID string, clientType string) *clienttypes.MsgCreateClient {
var (
clientState exported.ClientState
consensusState exported.ConsensusState
)
switch clientType {
case exported.Ostracon:
height := counterparty.LastHeader.GetHeight().(clienttypes.Height)
clientState = ibctmtypes.NewClientState(
counterparty.ChainID, DefaultTrustLevel, TrustingPeriod, UnbondingPeriod, MaxClockDrift,
height, commitmenttypes.GetSDKSpecs(), UpgradePath, false, false,
)
consensusState = counterparty.LastHeader.ConsensusState()
case exported.Solomachine:
solo := NewSolomachine(chain.t, chain.Codec, clientID, "", 1)
clientState = solo.ClientState()
consensusState = solo.ConsensusState()
default:
chain.t.Fatalf("unsupported client state type %s", clientType)
}
msg, err := clienttypes.NewMsgCreateClient(
clientState, consensusState, chain.SenderAccount.GetAddress(),
)
require.NoError(chain.t, err)
return msg
}
// CreateOCClient will construct and execute a 99-ostracon MsgCreateClient. A counterparty
// client will be created on the (target) chain.
func (chain *TestChain) CreateOCClient(counterparty *TestChain, clientID string) error {
// construct MsgCreateClient using counterparty
msg := chain.ConstructMsgCreateClient(counterparty, clientID, exported.Ostracon)
return chain.sendMsgs(msg)
}
// UpdateOCClient will construct and execute a 99-ostracon MsgUpdateClient. The counterparty
// client will be updated on the (target) chain. UpdateOCClient mocks the relayer flow
// necessary for updating an Ostracon client.
func (chain *TestChain) UpdateOCClient(counterparty *TestChain, clientID string) error {
header, err := chain.ConstructUpdateOCClientHeader(counterparty, clientID)
require.NoError(chain.t, err)
msg, err := clienttypes.NewMsgUpdateClient(
clientID, header,
chain.SenderAccount.GetAddress(),
)
require.NoError(chain.t, err)
return chain.sendMsgs(msg)
}
// ConstructUpdateOCClientHeader will construct a valid 99-ostracon Header to update the
// light client on the source chain.
func (chain *TestChain) ConstructUpdateOCClientHeader(counterparty *TestChain, clientID string) (*ibctmtypes.Header, error) {
header := counterparty.LastHeader
// Relayer must query for LatestHeight on client to get TrustedHeight
trustedHeight := chain.GetClientState(clientID).GetLatestHeight().(clienttypes.Height)
var (
ocTrustedVals *octypes.ValidatorSet
ocTrustedVoters *octypes.VoterSet
ok bool
)
// Once we get TrustedHeight from client, we must query the validators from the counterparty chain
// If the LatestHeight == LastHeader.Height, then TrustedValidators are current validators
// If LatestHeight < LastHeader.Height, we can query the historical validator set from HistoricalInfo
if trustedHeight == counterparty.LastHeader.GetHeight() {
ocTrustedVals = counterparty.Vals
ocTrustedVoters = counterparty.Voters
} else {
// NOTE: We need to get validators from counterparty at height: trustedHeight+1
// since the last trusted validators for a header at height h
// is the NextValidators at h+1 committed to in header h by
// NextValidatorsHash
ocTrustedVals, ok = counterparty.GetValsAtHeight(int64(trustedHeight.RevisionHeight + 1))
if !ok {
return nil, sdkerrors.Wrapf(ibctmtypes.ErrInvalidHeaderHeight, "could not retrieve trusted validators at trustedHeight: %d", trustedHeight)
}
ocTrustedVoters, ok = counterparty.GetVotersAtHeight(int64(trustedHeight.RevisionHeight + 1))
if !ok |
}
// inject trusted fields into last header
// for now assume revision number is 0
header.TrustedHeight = trustedHeight
trustedVals, err := ocTrustedVals.ToProto()
if err != nil {
return nil, err
}
trustedVoters, err := ocTrustedVoters.ToProto()
if err != nil {
return nil, err
}
header.TrustedValidators = trustedVals
header.TrustedVoters = trustedVoters
return header, nil
}
// ExpireClient fast forwards the chain's block time by the provided amount of time which will
// expire any clients with a trusting period less than or equal to this amount of time.
func (chain *TestChain) ExpireClient(amount time.Duration) {
chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(amount)
}
// CurrentOCClientHeader creates an OC header using the current header parameters
// on the chain. The trusted fields in the header are set to nil.
func (chain *TestChain) CurrentOCClientHeader() *ibctmtypes.Header {
return chain.CreateOCClientHeader(chain.ChainID, chain.CurrentHeader.Height, clienttypes.Height{}, chain.CurrentHeader.Time, chain.Vals, nil, chain.Voters, nil, chain.Signers)
}
// CreateOCClientHeader creates an OC header to update the OC client. Args are passed in to allow
// caller flexibility to use params that differ from the chain.
func (chain *TestChain) CreateOCClientHeader(chainID string, blockHeight int64, trustedHeight clienttypes.Height, timestamp time.Time, ocValSet, ocTrustedVals *octypes.ValidatorSet, ocVoterSet, ocTrustedVoterSet *octypes.VoterSet, signers []octypes.PrivValidator) *ibctmtypes.Header {
require.NotNil(chain.t, ocValSet)
require.NotNil(chain.t, ocVoterSet)
vsetHash := ocValSet.Hash()
proposer := ocValSet.SelectProposer([]byte{}, blockHeight, 0)
ocHeader := octypes.Header{
Version: ocprotoversion.Consensus{Block: tmversion.BlockProtocol, App: 2},
ChainID: chainID,
Height: blockHeight,
Time: timestamp,
LastBlockID: MakeBlockID(make([]byte, tmhash.Size), 10_000, make([]byte, tmhash.Size)),
LastCommitHash: chain.App.LastCommitID().Hash,
DataHash: tmhash.Sum([]byte("data_hash")),
ValidatorsHash: vsetHash,
VotersHash: ocVoterSet.Hash(),
NextValidatorsHash: vsetHash,
ConsensusHash: tmhash.Sum([]byte("consensus_hash")),
AppHash: chain.CurrentHeader.AppHash,
LastResultsHash: tmhash.Sum([]byte("last_results_hash")),
EvidenceHash: tmhash.Sum([]byte("evidence_hash")),
ProposerAddress: proposer.Address,
}
hhash := ocHeader.Hash()
blockID := MakeBlockID(hhash, 3, tmhash.Sum([]byte("part_set")))
voteSet := octypes.NewVoteSet(chainID, blockHeight, 1, ocproto.PrecommitType, ocVoterSet)
commit, err := octypes.MakeCommit(blockID, blockHeight, 1, voteSet, signers, timestamp)
require.NoError(chain.t, err)
signedHeader := &ocproto.SignedHeader{
Header: ocHeader.ToProto(),
Commit: commit.ToProto(),
}
valSet, err := ocValSet.ToProto()
if err != nil {
panic(err)
}
voterSet, err := ocVoterSet.ToProto()
if err != nil {
panic(err)
}
var trustedVals *ocproto.ValidatorSet
if ocTrustedVals != nil {
trustedVals, err = ocTrustedVals.ToProto()
if err != nil {
panic(err)
}
}
var trustedVoters *ocproto.VoterSet
if ocTrustedVoterSet != nil {
trustedVoters, err = ocTrustedVoterSet.ToProto()
if err != nil {
panic(err)
}
}
// The trusted fields may be nil. They may be filled before relaying messages to a client.
// The relayer is responsible for querying client and injecting appropriate trusted fields.
return &ibctmtypes.Header{
SignedHeader: signedHeader,
ValidatorSet: valSet,
VoterSet: voterSet,
TrustedHeight: trustedHeight,
TrustedValidators: trustedVals,
TrustedVoters: trustedVoters,
}
}
// MakeBlockID is copied from unexported octypes test helpers so it can be used here
func MakeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) octypes.BlockID {
return octypes.BlockID{
Hash: hash,
PartSetHeader: octypes.PartSetHeader{
Total: partSetSize,
Hash: partSetHash,
},
}
}
// CreateSortedSignerArray takes two PrivValidators, and the corresponding Validator structs
// (including voting power). It returns a signer array of PrivValidators that matches the
// sorting of ValidatorSet.
// The sorting is first by .VotingPower (descending), with secondary index of .Address (ascending).
func CreateSortedSignerArray(altPrivVal, suitePrivVal octypes.PrivValidator,
altVal, suiteVal *octypes.Validator) []octypes.PrivValidator {
switch {
case altVal.VotingPower > suiteVal.VotingPower:
return []octypes.PrivValidator{altPrivVal, suitePrivVal}
case altVal.VotingPower < suiteVal.VotingPower:
return []octypes.PrivValidator{suitePrivVal, altPrivVal}
default:
if bytes.Compare(altVal.Address, suiteVal.Address) == -1 {
return []octypes.PrivValidator{altPrivVal, suitePrivVal}
}
return []octypes.PrivValidator{suitePrivVal, altPrivVal}
}
}
// ConnectionOpenInit will construct and execute a MsgConnectionOpenInit.
func (chain *TestChain) ConnectionOpenInit(
counterparty *TestChain,
connection, counterpartyConnection *TestConnection,
) error {
msg := connectiontypes.NewMsgConnectionOpenInit(
connection.ClientID,
connection.CounterpartyClientID,
counterparty.GetPrefix(), DefaultOpenInitVersion, DefaultDelayPeriod,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// ConnectionOpenTry will construct and execute a MsgConnectionOpenTry.
func (chain *TestChain) ConnectionOpenTry(
counterparty *TestChain,
connection, counterpartyConnection *TestConnection,
) error {
counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID)
connectionKey := host.ConnectionKey(counterpartyConnection.ID)
proofInit, proofHeight := counterparty.QueryProof(connectionKey)
proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID)
msg := connectiontypes.NewMsgConnectionOpenTry(
"", connection.ClientID, // does not support handshake continuation
counterpartyConnection.ID, counterpartyConnection.ClientID,
counterpartyClient, counterparty.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, DefaultDelayPeriod,
proofInit, proofClient, proofConsensus,
proofHeight, consensusHeight,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// ConnectionOpenAck will construct and execute a MsgConnectionOpenAck.
func (chain *TestChain) ConnectionOpenAck(
counterparty *TestChain,
connection, counterpartyConnection *TestConnection,
) error {
counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID)
connectionKey := host.ConnectionKey(counterpartyConnection.ID)
proofTry, proofHeight := counterparty.QueryProof(connectionKey)
proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID)
msg := connectiontypes.NewMsgConnectionOpenAck(
connection.ID, counterpartyConnection.ID, counterpartyClient, // testing doesn't use flexible selection
proofTry, proofClient, proofConsensus,
proofHeight, consensusHeight,
ConnectionVersion,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// ConnectionOpenConfirm will construct and execute a MsgConnectionOpenConfirm.
func (chain *TestChain) ConnectionOpenConfirm(
counterparty *TestChain,
connection, counterpartyConnection *TestConnection,
) error {
connectionKey := host.ConnectionKey(counterpartyConnection.ID)
proof, height := counterparty.QueryProof(connectionKey)
msg := connectiontypes.NewMsgConnectionOpenConfirm(
connection.ID,
proof, height,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// CreatePortCapability binds and claims a capability for the given portID if it does not
// already exist. This function will fail testing on any resulting error.
// NOTE: only creation of a capability for a transfer or mock port is supported
// Other applications must bind to the port in InitGenesis or modify this code.
func (chain *TestChain) CreatePortCapability(portID string) {
// check if the portID is already bound; if not, bind it
_, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID))
if !ok {
// create capability using the IBC capability keeper
cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), host.PortPath(portID))
require.NoError(chain.t, err)
switch portID {
case MockPort:
// claim capability using the mock capability keeper
err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
require.NoError(chain.t, err)
case TransferPort:
// claim capability using the transfer capability keeper
err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
require.NoError(chain.t, err)
default:
panic(fmt.Sprintf("unsupported ibc testing package port ID %s", portID))
}
}
chain.CommitBlock()
chain.NextBlock()
}
// GetPortCapability returns the port capability for the given portID. The capability must
// exist, otherwise testing will fail.
func (chain *TestChain) GetPortCapability(portID string) *capabilitytypes.Capability {
cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID))
require.True(chain.t, ok)
return cap
}
// CreateChannelCapability binds and claims a capability for the given portID and channelID
// if it does not already exist. This function will fail testing on any resulting error.
func (chain *TestChain) CreateChannelCapability(portID, channelID string) {
capName := host.ChannelCapabilityPath(portID, channelID)
// check if the portID is already bound; if not, bind it
_, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), capName)
if !ok {
cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), capName)
require.NoError(chain.t, err)
err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, capName)
require.NoError(chain.t, err)
}
chain.CommitBlock()
chain.NextBlock()
}
// GetChannelCapability returns the channel capability for the given portID and channelID.
// The capability must exist, otherwise testing will fail.
func (chain *TestChain) GetChannelCapability(portID, channelID string) *capabilitytypes.Capability {
cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.ChannelCapabilityPath(portID, channelID))
require.True(chain.t, ok)
return cap
}
// ChanOpenInit will construct and execute a MsgChannelOpenInit.
func (chain *TestChain) ChanOpenInit(
ch, counterparty TestChannel,
order channeltypes.Order,
connectionID string,
) error {
msg := channeltypes.NewMsgChannelOpenInit(
ch.PortID,
ch.Version, order, []string{connectionID},
counterparty.PortID,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// ChanOpenTry will construct and execute a MsgChannelOpenTry.
func (chain *TestChain) ChanOpenTry(
counterparty *TestChain,
ch, counterpartyCh TestChannel,
order channeltypes.Order,
connectionID string,
) error {
proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
msg := channeltypes.NewMsgChannelOpenTry(
ch.PortID, "", // does not support handshake continuation
ch.Version, order, []string{connectionID},
counterpartyCh.PortID, counterpartyCh.ID, counterpartyCh.Version,
proof, height,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// ChanOpenAck will construct and execute a MsgChannelOpenAck.
func (chain *TestChain) ChanOpenAck(
counterparty *TestChain,
ch, counterpartyCh TestChannel,
) error {
proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
msg := channeltypes.NewMsgChannelOpenAck(
ch.PortID, ch.ID,
counterpartyCh.ID, counterpartyCh.Version, // testing doesn't use flexible selection
proof, height,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// ChanOpenConfirm will construct and execute a MsgChannelOpenConfirm.
func (chain *TestChain) ChanOpenConfirm(
counterparty *TestChain,
ch, counterpartyCh TestChannel,
) error {
proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
msg := channeltypes.NewMsgChannelOpenConfirm(
ch.PortID, ch.ID,
proof, height,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// ChanCloseInit will construct and execute a MsgChannelCloseInit.
//
// NOTE: does not work with ibc-transfer module
func (chain *TestChain) ChanCloseInit(
counterparty *TestChain,
channel TestChannel,
) error {
msg := channeltypes.NewMsgChannelCloseInit(
channel.PortID, channel.ID,
chain.SenderAccount.GetAddress(),
)
return chain.sendMsgs(msg)
}
// GetPacketData returns an ibc-transfer marshalled packet to be used for
// callback testing.
func (chain *TestChain) GetPacketData(counterparty *TestChain) []byte {
packet := ibctransfertypes.FungibleTokenPacketData{
Denom: TestCoin.Denom,
Amount: TestCoin.Amount.Uint64(),
Sender: chain.SenderAccount.GetAddress().String(),
Receiver: counterparty.SenderAccount.GetAddress().String(),
}
return packet.GetBytes()
}
// SendPacket simulates sending a packet through the channel keeper. No message needs to be
// passed since this call is made from a module.
func (chain *TestChain) SendPacket(
packet exported.PacketI,
) error {
channelCap := chain.GetChannelCapability(packet.GetSourcePort(), packet.GetSourceChannel())
// no need to send message, acting as a module
err := chain.App.IBCKeeper.ChannelKeeper.SendPacket(chain.GetContext(), channelCap, packet)
if err != nil {
return err
}
// commit changes
chain.CommitBlock()
chain.NextBlock()
return nil
}
// WriteAcknowledgement simulates writing an acknowledgement to the chain.
func (chain *TestChain) WriteAcknowledgement(
packet exported.PacketI,
) error {
channelCap := chain.GetChannelCapability(packet.GetDestPort(), packet.GetDestChannel())
// no need to send message, acting as a handler
err := chain.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(chain.GetContext(), channelCap, packet, TestHash)
if err != nil {
return err
}
// commit changes
chain.CommitBlock()
chain.NextBlock()
return nil
}
| {
return nil, sdkerrors.Wrapf(ibctmtypes.ErrInvalidHeaderHeight, "could not retrieve trusted voters at trustedHeight: %d", trustedHeight)
} |
Mts20140618QueryAnalysisJobListRequest.py | '''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class | (RestApi):
def __init__(self,domain='mts.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.AnalysisJobIds = None
def getapiname(self):
return 'mts.aliyuncs.com.QueryAnalysisJobList.2014-06-18'
| Mts20140618QueryAnalysisJobListRequest |
problem.py | # coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for problem/dataset definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
# Dependency imports
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
class SpaceID(object):
"""Input and target space ids. Add more as needed."""
# Generic / unknown output space (default)
GENERIC = 0
# Image labels
IMAGE_LABEL = 1
# English characters
EN_CHR = 2
# English tokens
EN_TOK = 3
# English bpe tokens
EN_BPE_TOK = 4
# French characters
FR_CHR = 5
# French tokens
FR_TOK = 6
# German characters
DE_CHR = 7
# German tokens
DE_TOK = 8
# German bpe tokens
DE_BPE_TOK = 9
# Digit cipher lexicon 0
DIGIT_0 = 10
# Digit cipher lexicon 1
DIGIT_1 = 11
# Audio waveform domain
AUDIO_WAV = 12
# Audio spectral domain
AUDIO_SPECTRAL = 13
# Parse characters
PARSE_CHR = 14
# Parse tokens
PARSE_TOK = 15
# Chinese tokens
ZH_TOK = 16
# Icelandic characters
ICE_CHAR = 17
# Icelandic tokens
ICE_TOK = 18
# Icelandic parse tokens
ICE_PARSE_TOK = 19
# Macedonian tokens
MK_TOK = 20
# Czech tokens
CS_TOK = 21
# Czech characters
CS_CHR = 22
# Genetic bases (ACTG)
DNA = 23
# Real numbers
REAL = 24
# Images
IMAGE = 25
# Peptide
PEPTIDE = 26
# Python
PY_TOK = 27
# C++
CPP_TOK = 28
# Strokes
STROKES = 29
# Pickled Python
PICKLED_PYTHON = 30
def | ():
return tf.contrib.training.HParams(
max_input_seq_length=0,
max_target_seq_length=0,
prepend_mode="none",
data_dir=None)
def preprocess_example_common(example, hparams, mode):
"""Preprocessing steps common to all models."""
if hparams.max_input_seq_length > 0:
example["inputs"] = example["inputs"][:hparams.max_input_seq_length]
if hparams.max_target_seq_length > 0:
example["targets"] = example["targets"][:hparams.max_target_seq_length]
if hparams.prepend_mode != "none":
if mode == tf.estimator.ModeKeys.PREDICT:
example["partial_targets"] = tf.concat([example["inputs"], [0]], 0)
else:
example["targets"] = tf.concat(
[example["inputs"], [0], example["targets"]], 0)
return example
class Problem(object):
"""Problem base class. Specifies a T2T problem.
Problems unify the specification of a problem for data generation, training,
and inference.
New problems are specified by the following methods:
Data generation:
* generate_data(data_dir, tmp_dir)
- Generate training and dev datasets into data_dir.
- Additional files, e.g. vocabulary files, should also be written to
data_dir. Vocab files are newline-separated files with each line
containing a token. The standard convention for the filename is to
set it to be
${Problem.vocab_name}.${Problem.targeted_vocab_size}
- Downloads and other files can be written to tmp_dir
- If you have a training and dev generator, you can generate the
training and dev datasets with
generator_utils.generate_dataset_and_shuffle.
- Use the self.training_filepaths and self.dev_filepaths functions to
get sharded filenames. If shuffled=False, the filenames will contain
an "unshuffled" suffix; you should then shuffle the data
shard-by-shard with generator_utils.shuffle_dataset.
- The number of shards can optionally be specified (it can be omitted).
- Subclasses must override
* dataset_filename()
- Base filename for problem.
- Defaults to registered name (self.name).
Training:
* hparams(defaults, model_hparams)
- Specify the problem hyperparameters (see _default_hparams)
- Mutate defaults as needed
* example_reading_spec
- Specify the names and types of the features on disk.
- Specify tf.contrib.slim.tfexample_decoder
* preprocess_example(example, mode)
- Preprocess the example feature dict from feature name to Tensor or
SparseTensor.
- Used in training, eval, and inference (specified by mode).
Eval:
* eval_metrics
- Specify the set of evaluation metrics for this problem.
Inference:
* feature_encoders(data_dir)
- Return a dict of <feature name, TextEncoder> for encoding and decoding
inference input/output.
- Defaults to TextEncoder for inputs and targets.
"""
# ============================================================================
# BEGIN SUBCLASS INTERFACE
# ============================================================================
def generate_data(self, data_dir, tmp_dir, task_id=-1):
raise NotImplementedError()
def hparams(self, defaults, model_hparams):
pass
def dataset_filename(self):
return self.name
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.TextEncoder()
}
def example_reading_spec(self):
data_fields = {
"inputs": tf.VarLenFeature(tf.int64),
"targets": tf.VarLenFeature(tf.int64)
}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
def preprocess_example(self, example, mode, hparams):
return preprocess_example_common(example, hparams, mode)
def eval_metrics(self):
return [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
# ============================================================================
# END SUBCLASS INTERFACE
# ============================================================================
def training_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.train_data_filenames(file_basename, data_dir,
num_shards)
def dev_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.dev_data_filenames(file_basename, data_dir,
num_shards)
def test_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.test_data_filenames(file_basename, data_dir,
num_shards)
def filepattern(self, data_dir, mode, shard=None):
"""Get filepattern for data files for mode.
Matches mode to a suffix.
* TRAIN: train
* EVAL: dev
* PREDICT: dev
* test: test
Args:
data_dir: str, data directory.
mode: tf.estimator.ModeKeys or "test".
shard: int, if provided, will only read data from the specified shard.
Returns:
filepattern str
"""
path = os.path.join(data_dir, self.dataset_filename())
shard_str = "-%05d" % shard if shard is not None else ""
if mode == tf.estimator.ModeKeys.TRAIN:
suffix = "train"
elif mode in [tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT]:
suffix = "dev"
else:
assert mode == "test"
suffix = "test"
return "%s-%s%s*" % (path, suffix, shard_str)
def __init__(self, was_reversed=False, was_copy=False):
"""Create a Problem.
Args:
was_reversed: bool, whether to reverse inputs and targets.
was_copy: bool, whether to copy inputs to targets. Can be composed with
was_reversed so that if both are true, the targets become the inputs,
which are then copied to targets so that the task is targets->targets.
"""
self._was_reversed = was_reversed
self._was_copy = was_copy
self._encoders = None
self._hparams = None
self._feature_info = None
def get_feature_encoders(self, data_dir=None):
if self._encoders is None:
self._encoders = self.feature_encoders(data_dir)
return self._encoders
def get_hparams(self, model_hparams=None):
"""Returns problem_hparams."""
if self._hparams is not None:
return self._hparams
if self._encoders is None:
data_dir = (model_hparams and model_hparams.data_dir) or None
self.get_feature_encoders(data_dir)
hp = _default_hparams()
ret = self.hparams(hp, model_hparams)
if ret is not None:
raise ValueError("The Problem subclass hparams function should mutate "
"the defaults passed in and return None.")
hp.add_hparam("vocabulary", self._encoders)
hp.add_hparam("was_reversed", self._was_reversed)
hp.add_hparam("was_copy", self._was_copy)
if self._was_reversed:
_reverse_problem_hparams(hp)
if self._was_copy:
_copy_problem_hparams(hp)
self._hparams = hp
return self._hparams
def maybe_reverse_features(self, feature_map):
if not self._was_reversed:
return
inputs, targets = feature_map["inputs"], feature_map["targets"]
feature_map["inputs"], feature_map["targets"] = targets, inputs
def maybe_copy_features(self, feature_map):
if not self._was_copy:
return
feature_map["targets"] = feature_map["inputs"]
def dataset(self,
mode,
data_dir=None,
num_threads=None,
output_buffer_size=None,
shuffle_files=None,
hparams=None,
preprocess=True,
dataset_split=None,
shard=None):
"""Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch in Dataset.map
calls.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: tf.contrib.training.HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: tf.estimator.ModeKeys + ["test"], which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
Returns:
Dataset containing dict<feature name, Tensor>.
"""
dataset_split = dataset_split or mode
assert data_dir
if hparams is None:
hparams = default_model_hparams()
if not hasattr(hparams, "data_dir"):
hparams.add_hparam("data_dir", data_dir)
if not hparams.data_dir:
hparams.data_dir = data_dir
# Construct the Problem's hparams so that items within it are accessible
_ = self.get_hparams(hparams)
data_fields, data_items_to_decoders = self.example_reading_spec()
if data_items_to_decoders is None:
data_items_to_decoders = {
field: tf.contrib.slim.tfexample_decoder.Tensor(field)
for field in data_fields
}
is_training = mode == tf.estimator.ModeKeys.TRAIN
data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
tf.logging.info("Reading data files from %s", data_filepattern)
data_files = tf.contrib.slim.parallel_reader.get_data_files(
data_filepattern)
if shuffle_files or shuffle_files is None and is_training:
random.shuffle(data_files)
dataset = tf.contrib.data.TFRecordDataset(data_files)
def decode_record(record):
"""Serialized Example to dict of <feature name, Tensor>."""
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(data_items_to_decoders)
decoded = decoder.decode(record, items=decode_items)
return dict(zip(decode_items, decoded))
def _preprocess(example):
example = self.preprocess_example(example, mode, hparams)
self.maybe_reverse_features(example)
self.maybe_copy_features(example)
return example
dataset = dataset.map(decode_record, num_threads=num_threads)
if preprocess:
dataset = dataset.map(
_preprocess,
num_threads=num_threads,
output_buffer_size=output_buffer_size)
return dataset
@property
def has_inputs(self):
return "inputs" in self.get_feature_encoders()
@property
def feature_info(self):
"""Retrieve dict<feature name, FeatureInfo>.
Must first call Problem.get_hparams or Problem.dataset to have the problem's
internal hparams already constructed.
Returns:
dict<feature name, FeatureInfo>
"""
if self._feature_info is not None:
return self._feature_info
assert self._hparams is not None
hp = self.get_hparams()
input_mods = hp.input_modality
target_mod = hp.target_modality
vocabs = hp.vocabulary
if self.has_inputs:
in_id = hp.input_space_id
out_id = hp.target_space_id
features = collections.defaultdict(FeatureInfo)
for name, mod_spec in six.iteritems(input_mods):
mod, vocab_size = mod_spec
finfo = features[name]
finfo.modality = mod
finfo.vocab_size = vocab_size
mod, vocab_size = target_mod
features["targets"].modality = mod
features["targets"].vocab_size = vocab_size
for name, encoder in six.iteritems(vocabs):
features[name].encoder = encoder
if self.has_inputs:
features["inputs"].space_id = in_id
features["targets"].space_id = out_id
self._feature_info = features
return features
class FeatureInfo(object):
def __init__(self,
encoder=None,
modality=None,
vocab_size=None,
space_id=None):
self.encoder = encoder
self.modality = modality
self.vocab_size = vocab_size
self.space_id = space_id
def _copy_problem_hparams(p_hparams):
"""Use input modality, vocab, and space id for target."""
p = p_hparams
# Duplicate input modality.
p.target_modality = p.input_modality["inputs"]
# Duplicate input vocabulary.
p.vocabulary["targets"] = p.vocabulary["inputs"]
# Duplicate input space ids.
p.target_space_id = p.input_space_id
# Mark that p was copied.
p.was_copy = True
def _reverse_problem_hparams(p_hparams):
"""Swap input/output modalities, vocab, and space ids."""
p = p_hparams
# Swap modalities.
input_modality = p.input_modality["inputs"]
target_modality = p.target_modality
p.input_modality["inputs"] = target_modality
p.target_modality = input_modality
# Swap vocabularies.
input_vocabulary = p.vocabulary["inputs"]
target_vocabulary = p.vocabulary["targets"]
p.vocabulary["inputs"] = target_vocabulary
p.vocabulary["targets"] = input_vocabulary
# Swap input/target space ids.
input_space_id = p.input_space_id
target_space_id = p.target_space_id
p.input_space_id = target_space_id
p.target_space_id = input_space_id
# Mark that p was reversed.
p.was_reversed = True
def _default_hparams():
"""A set of basic model hyperparameters."""
return tf.contrib.training.HParams(
# Use this parameter to get comparable perplexity numbers with different
# tokenizations. This value should be set to the ratio of the number of
# tokens in the test set according to the tokenization used to the number
# of tokens in the test set in the "official" tokenization. For
# example, if we are using a word-piece based model and we want to
# compute per-word perplexity, then we set loss_multiplier to the number
# of wordpieces per word in the test set.
loss_multiplier=1.0,
# Use this parameter to allow for larger sequences in the batch. Without
# the use of this parameter, the size of the inner two dimensions will
# be used to judge the sequence length.
batch_size_multiplier=1,
# To make queues of the right capacity, it's good to know the maximal
# expected batch size, as it can vary a lot. It only affects performance
# of input readers and memory use. The defaults should be safe and fast,
# but decrease if your reader uses a lot of memory and increase if slow.
max_expected_batch_size_per_shard=64,
# During inference for autoregressive problems, if the batch_size is 1,
# the inference will stop when the model predicts a text_encoder.EOS_ID
# token.
stop_at_eos=False,
# Modalities used to map from input features to a space compatible with
# chosen model architecture. One modality spec (which is a 2-tuple,
# (modality_full_name, vocab_size)) per feature key. modality_full_name
# is a string type:name, e.g. class_label:class_label_2d. Leaving off
# the name uses the default modality for that type (e.g. class_label ==
# class_label:default).
input_modality={},
# Modality used to map from hidden representation to the target space.
# Specified as a modality spec, a 2-tuple described above.
target_modality=None,
# Identifiers used to tell the model which input/target space will be
# expected. For example, it can tell that we expect French as characters
# as output, or Spanish as sound. Spaces defined as constants in SpaceID
# class.
input_space_id=SpaceID.GENERIC,
target_space_id=SpaceID.GENERIC)
class Text2TextProblem(Problem):
"""Base class for text-to-text problems."""
@property
def is_character_level(self):
"""Whether the inputs and targets are sequences of characters."""
raise NotImplementedError()
@property
def targeted_vocab_size(self):
raise NotImplementedError() # Not needed if self.is_character_level.
def generator(self, data_dir, tmp_dir, is_training):
"""Generator for the training and evaluation data.
Args:
data_dir: The directory in which to write assets, e.g. the vocab file.
tmp_dir: A scratch directory (if needed).
is_training: A boolean indicating if we should generate training data
(True) or dev set data (False).
Yields:
dicts with keys "inputs" and "targets", with values being lists of token
ids.
"""
raise NotImplementedError()
@property
def use_train_shards_for_dev(self):
"""If true, we only generate training data and hold out shards for dev."""
return False
@property
def input_space_id(self):
raise NotImplementedError()
@property
def target_space_id(self):
raise NotImplementedError()
@property
def num_shards(self):
raise NotImplementedError()
@property
def num_dev_shards(self):
return 1
@property
def vocab_name(self):
raise NotImplementedError()
@property
def vocab_file(self):
return "%s.%d" % (self.vocab_name, self.targeted_vocab_size)
@property
def use_subword_tokenizer(self):
raise NotImplementedError()
@property
def has_inputs(self):
return True # Set to False for language models.
def generate_data(self, data_dir, tmp_dir, task_id=-1):
train_paths = self.training_filepaths(
data_dir, self.num_shards, shuffled=False)
dev_paths = self.dev_filepaths(
data_dir, self.num_dev_shards, shuffled=False)
if self.use_train_shards_for_dev:
all_paths = train_paths + dev_paths
generator_utils.generate_files(
self.generator(data_dir, tmp_dir, True), all_paths)
generator_utils.shuffle_dataset(all_paths)
else:
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True), train_paths,
self.generator(data_dir, tmp_dir, False), dev_paths)
def feature_encoders(self, data_dir):
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
elif self.use_subword_tokenizer:
vocab_filename = os.path.join(data_dir, self.vocab_file)
encoder = text_encoder.SubwordTextEncoder(vocab_filename)
else:
vocab_filename = os.path.join(data_dir, self.vocab_file)
encoder = text_encoder.TokenTextEncoder(vocab_filename)
if self.has_inputs:
return {"inputs": encoder, "targets": encoder}
return {"targets": encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.stop_at_eos = int(True)
if self.has_inputs:
source_vocab_size = self._encoders["inputs"].vocab_size
p.input_modality = {
"inputs": (registry.Modalities.SYMBOL, source_vocab_size)
}
target_vocab_size = self._encoders["targets"].vocab_size
p.target_modality = (registry.Modalities.SYMBOL, target_vocab_size)
if self.has_inputs:
p.input_space_id = self.input_space_id
p.target_space_id = self.target_space_id
if self.is_character_level:
p.loss_multiplier = 2.0
def eval_metrics(self):
return [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY,
metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F,
metrics.Metrics.ROUGE_L_F
]
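# A minimal concrete subclass (an illustrative sketch only; the class name,
# vocab size and space IDs below are invented placeholders, not from the
# original source). It shows which abstract members a text-to-text problem
# typically fills in.
class TinyCopyProblem(Text2TextProblem):
  """Toy problem whose targets are exact copies of the inputs."""

  @property
  def is_character_level(self):
    return False

  @property
  def targeted_vocab_size(self):
    return 256

  @property
  def vocab_name(self):
    return "vocab.tiny_copy"

  @property
  def use_subword_tokenizer(self):
    return True

  @property
  def num_shards(self):
    return 1

  @property
  def input_space_id(self):
    return SpaceID.GENERIC

  @property
  def target_space_id(self):
    return SpaceID.GENERIC

  def generator(self, data_dir, tmp_dir, is_training):
    del data_dir, tmp_dir, is_training  # unused in this toy example
    for ids in ([1, 2, 3], [4, 5]):
      yield {"inputs": ids, "targets": ids}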
| default_model_hparams |
items.py | import scrapy
class StarAcmSpiderItem(scrapy.Item):
username = scrapy.Field()
source = scrapy.Field()
run_id = scrapy.Field() | data = scrapy.Field() |
|
math.service.ts | import { Hook, Logger } from '../../../mod.ts';
export class MathService {
public add(a: number, b: number): number {
return a + b;
}
@Hook({
application: '*',
container: '*',
type: MathService, | public onInit(): void {
Logger.info('Hello, math service is ready!');
}
@Hook({
application: '*',
scope: 'post',
})
public onApplicationInit(): void {
Logger.info('Math service was notified, that the application is ready');
}
} | scope: 'post',
}) |
directory_list.rs | // Take a look at the license at the top of the repository in the LICENSE file.
use crate::DirectoryList;
use glib::translate::*;
use glib::{Cast, IsA, ToValue};
impl DirectoryList {
#[doc(alias = "gtk_directory_list_get_io_priority")]
#[doc(alias = "get_io_priority")]
pub fn io_priority(&self) -> glib::Priority {
unsafe {
from_glib(ffi::gtk_directory_list_get_io_priority(
self.to_glib_none().0,
))
}
}
#[doc(alias = "gtk_directory_list_set_io_priority")]
pub fn set_io_priority(&self, io_priority: glib::Priority) {
unsafe {
ffi::gtk_directory_list_set_io_priority(self.to_glib_none().0, io_priority.into_glib());
}
}
}
#[derive(Clone, Default)]
pub struct DirectoryListBuilder {
attributes: Option<String>,
file: Option<gio::File>,
io_priority: Option<i32>,
monitored: Option<bool>,
}
impl DirectoryListBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn build(self) -> DirectoryList {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref attributes) = self.attributes {
properties.push(("attributes", attributes));
}
if let Some(ref file) = self.file {
properties.push(("file", file));
}
if let Some(ref io_priority) = self.io_priority {
properties.push(("io-priority", io_priority));
}
if let Some(ref monitored) = self.monitored {
properties.push(("monitored", monitored));
}
glib::Object::new::<DirectoryList>(&properties)
.expect("Failed to create an instance of DirectoryList")
}
| self.attributes = Some(attributes.to_string());
self
}
pub fn file<P: IsA<gio::File>>(mut self, file: &P) -> Self {
self.file = Some(file.clone().upcast());
self
}
pub fn io_priority(mut self, io_priority: glib::Priority) -> Self {
self.io_priority = Some(io_priority.into_glib());
self
}
pub fn monitored(mut self, monitored: bool) -> Self {
self.monitored = Some(monitored);
self
}
} | pub fn attributes(mut self, attributes: &str) -> Self { |
test_parser.py | # All content Copyright (C) 2018 Genomics plc
import os
import re
import unittest
from wecall.genomics.variant import Variant
from wecall.vcfutils.genotype_call import GenotypeCall
from wecall.vcfutils.parser import VCFReader, VCFReaderContextManager, decode_VCF_string, \
parse_VCF_comma_separated_pair_value
from wecall.vcfutils.schema import Schema
from wecall.vcfutils.writer import VCFWriterContextManager
from wecall_test_drivers.base_test import BaseTest
class ParserTest(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.data_dir = os.path.join(os.path.dirname(__file__), "example_data")
def variant_is_equal(self, var1, var2):
self.assertEqual(var1.chrom, var2[0])
self.assertEqual(var1.pos_from, var2[1])
self.assertEqual(var1.ids, var2[2])
self.assertEqual(var1.ref, var2[3])
self.assertEqual(var1.alt, var2[4])
def test_read_VCF_line(self):
with open(os.path.join(self.data_dir, "vcf_example.vcf"), "r") as vcf_file:
vcf_handler = VCFReader(vcf_file)
vcf_handler.read_header()
self.assertEqual(len(vcf_handler.header.file_metadata), 7)
self.assertEqual(len(vcf_handler.header.samples), 2)
records = list(vcf_handler.read_records())
self.assertEqual(len(records), 2)
# test first record fully
self.variant_is_equal(records[0], ("20", 9, set(), "CT", "C")) # zero=based representation
self.assertEqual(records[0].filters, set())
self.assertEqual(records[0].passes_filter, True)
self.assertEqual(len(records[0].info), 12)
self.assertEqual(records[0].info["PP"], [3000])
self.assertEqual(records[0].info["DP"], [250])
self.assertEqual(records[0].info["DPR"], [140])
self.assertEqual(records[0].info["DPF"], [110])
self.assertEqual(records[0].info["VC"], [100])
self.assertEqual(records[0].info["VCR"], [49])
self.assertEqual(records[0].info["VCF"], [51])
self.assertEqual(records[0].info["ABPV"], [0.2])
self.assertEqual(records[0].info["SBPV"], [0.3])
self.assertEqual(records[0].info["MQ"], [70])
self.assertEqual(records[0].info["BR"], [31])
self.assertEqual(records[0].info["QD"], [None])
self.assertEqual(records[0].samples, ['sample1', 'sample2'])
self.assertEqual(records[0].sample_info.get_field('sample1', "GT"), GenotypeCall("0/1"))
self.assertEqual(records[0].sample_info.get_field('sample2', "GT"), GenotypeCall("1/1"))
self.assertEqual(records[0].sample_info.get_field('sample1', 'PL'), [3000, 0, 3000])
self.assertEqual(records[0].sample_info.get_field('sample2', 'PL'), [114, 0, 0])
self.assertEqual(records[0].sample_info.get_field('sample1', 'GQ'), [1000])
self.assertEqual(records[0].sample_info.get_field('sample2', 'GQ'), [None])
# check that ordering in the dictionaries is preserved
expected_keys = ["PP", "DP", "DPR", "DPF", "VC", "VCR",
"VCF", "ABPV", "SBPV", "MQ", "BR", "QD"]
self.assertEqual(list(records[0].info.keys()), expected_keys)
# ensure last record is still being read correctly
self.variant_is_equal(records[-1], ("20", 10, set(), "T", "G"))
def test_reads_simple_file(self):
filename = os.path.join(self.work_dir, "test.vcf")
with VCFWriterContextManager(filename) as left_vcf:
left_vcf.write_variant(Variant("1", 1, "A", "T"))
left_vcf.write_variant(Variant("2", 1, "A", "T"))
left_vcf.write_variant(Variant("10", 1, "A", "T"))
expected_variants = [
Variant("1", 1, "A", "T"),
Variant("2", 1, "A", "T"),
Variant("10", 1, "A", "T"),
]
with VCFReaderContextManager(filename) as vcf_reader:
actual_variants = [record.variant for record in vcf_reader.read_records()]
self.assertEqual(expected_variants, actual_variants)
class TestVCFStringParsing(unittest.TestCase):
def test_should_decode_empty_VCF_string(self):
self.assertEqual('', decode_VCF_string('""'))
def test_should_decode_simple_VCF_string(self):
self.assertEqual('foo', decode_VCF_string('"foo"'))
def test_should_decode_VCF_string_with_single_double_quote(self):
self.assertEqual('"', decode_VCF_string('"\\""'))
def test_should_decode_VCF_string_with_single_backslash(self):
self.assertEqual('\\', decode_VCF_string('"\\\\"'))
def | (self):
self.assertEqual(
'abc\\def"ghi',
decode_VCF_string('"abc\\\\def\\\"ghi"'))
def test_should_fail_to_decode_unquoted_string(self):
with self.assertRaisesRegex(Exception, 'expected a VCF encoded string: \'foo\''):
print(decode_VCF_string('foo'))
def test_should_fail_to_decode_string_with_stray_backslash(self):
with self.assertRaisesRegex(Exception, re.escape('expected a VCF encoded string: \'"\\\\"\'')):
print(decode_VCF_string('"\\"'))
def test_should_fail_to_decode_string_with_unencoded_double_quote(self):
with self.assertRaisesRegex(Exception, 'expected a VCF encoded string: \'"\""\''):
print(decode_VCF_string('"\""'))
class TestCommaSeparatedPairParser(unittest.TestCase):
def test_should_parse_simple_comma_separated_pairs(self):
parsed = parse_VCF_comma_separated_pair_value('<first=foo,second=bar>')
expected = {'first': 'foo', 'second': 'bar'}
self.assertEqual(expected, parsed)
def test_should_parse_empty_simple_value(self):
parsed = parse_VCF_comma_separated_pair_value('<first=,second=bar>')
expected = {'first': '', 'second': 'bar'}
self.assertEqual(expected, parsed)
def test_should_fail_to_parse_non_bracketed_string(self):
with self.assertRaisesRegex(Exception, 'expected braced key-value pairs: \'first=foo\''):
print(parse_VCF_comma_separated_pair_value('first=foo'))
def test_should_parse_quoted_comma_separated_pairs(self):
parsed = parse_VCF_comma_separated_pair_value(
'<first="foo",second="bar">')
expected = {'first': '"foo"', 'second': '"bar"'}
self.assertEqual(expected, parsed)
def test_should_parse_empty_quoted_value(self):
parsed = parse_VCF_comma_separated_pair_value('<first="">')
expected = {'first': '""'}
self.assertEqual(expected, parsed)
def test_should_parse_values_with_quoted_commas(self):
parsed = parse_VCF_comma_separated_pair_value('<first="foo,bar">')
expected = {'first': '"foo,bar"'}
self.assertEqual(expected, parsed)
def test_should_parse_values_with_quoted_double_quote(self):
parsed = parse_VCF_comma_separated_pair_value('<first="foo\\\"bar">')
expected = {'first': '"foo\\\"bar"'}
self.assertEqual(expected, parsed)
def test_should_fail_with_badly_quoted_double_quote(self):
with self.assertRaisesRegex(Exception, 'failed to parse key-value pairs from \'<first="foo\"bar">\''):
print(parse_VCF_comma_separated_pair_value('<first="foo\"bar">'))
class TestHeaderParsing(unittest.TestCase):
# version parsing
def test_should_parse_well_formatted_version(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
self.assertEqual(expected, header)
def test_should_store_header_as_attribute_of_parser(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
self.assertEqual(header, reader.header)
def test_should_fail_with_unexpected_version(self):
lines = [
'##fileformat=VCFv0.0\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
with self.assertRaisesRegex(Exception, 'unexpected version: \'0.0\''):
print(reader.read_header())
def test_should_fail_to_parse_malformed_header_line(self):
lines = [
'##fileformat=VCFv4.2\n',
'##malformed line!\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
with self.assertRaisesRegex(Exception, 'failed to parse header line: \'##malformed line!\''):
print(reader.read_header())
def test_should_fail_if_version_is_not_defined(self):
lines = [
'##notFileformat=foo\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
with self.assertRaisesRegex(Exception, 'unrecognised file format line: \'##notFileformat=foo\''):
print(reader.read_header())
# file metadata parsing
def test_should_parse_well_formatted_file_metadata(self):
lines = [
'##fileformat=VCFv4.2\n',
'##fileDate=2013-07-08\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
expected.file_metadata['fileDate'] = '2013-07-08'
self.assertEqual(expected, header)
# info data parsing
def test_should_parse_minimal_info_header_fields(self):
lines = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=key,Number=1,Type=String,Description="description">\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
expected.set_info_data('key', '1', 'String', 'description')
self.assertEqual(expected, header)
def test_should_parse_all_info_header_fields(self):
lines = [
'##fileformat=VCFv4.2\n',
'##INFO=<ID=key,Number=1,Type=String,Description="description",Source="foo",Version="bar">\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
expected.set_info_data(
'key',
'1',
'String',
'description',
'foo',
'bar')
self.assertEqual(expected, header)
# sample data parsing
def test_should_parse_valid_sample_header_fields(self):
lines = [
'##fileformat=VCFv4.2\n',
'##FORMAT=<ID=key,Number=1,Type=String,Description="description">\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
expected.set_sample_data('key', '1', 'String', 'description')
self.assertEqual(expected, header)
# filter parsing
def test_should_parse_valid_filter_header_fields(self):
lines = [
'##fileformat=VCFv4.2\n',
'##FILTER=<ID=key,Description="description">\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
expected.set_filter('key', 'description')
self.assertEqual(expected, header)
# contig parsing
def test_should_parse_valid_contig_header_fields(self):
lines = [
'##fileformat=VCFv4.2\n',
'##contig=<ID=key,length=666>\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
expected.set_contig('key', 666)
self.assertEqual(expected, header)
# column headers + sample names
def test_should_parse_required_column_headers(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
self.assertEqual(expected, header)
def test_should_fail_without_required_column_headers(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\n',
]
reader = VCFReader(iter(lines))
with self.assertRaisesRegex(
Exception,
re.escape("expected column header line: '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER'")
):
print(reader.read_header())
def test_should_parse_column_headers_with_format_but_no_samples(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
self.assertEqual(expected, header)
def test_should_parse_column_headers_with_complex_sample_names(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tOWEN_TOBY-RHYS.JONES\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
expected.samples = ['OWEN_TOBY-RHYS.JONES']
self.assertEqual(expected, header)
def test_should_not_parse_column_headers_with_sample_names_containing_white_space(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tOWEN JONES\n',
]
reader = VCFReader(iter(lines))
with self.assertRaisesRegex(
Exception,
re.escape(
'expected column header line: '
'\'#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tOWEN JONES\''
)
):
print(reader.read_header())
def test_should_fail_with_malformed_format_column_header(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFOO\n',
]
reader = VCFReader(iter(lines))
with self.assertRaisesRegex(
Exception,
re.escape('expected column header line: \'#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFOO\'')
):
print(reader.read_header())
def test_should_parse_column_headers_with_samples(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tFOO\tBAR\n',
]
reader = VCFReader(iter(lines))
header = reader.read_header()
expected = Schema()
expected.samples.append('FOO')
expected.samples.append('BAR')
self.assertEqual(expected, header)
def test_should_fail_if_column_header_line_is_missing(self):
lines = [
'##fileformat=VCFv4.2\n',
'the line after the header\n',
]
reader = VCFReader(iter(lines))
with self.assertRaisesRegex(Exception, 'expected column header line: \'the line after the header\''):
print(reader.read_header())
def test_should_fail_on_unexpected_EOF(self):
lines = [
'##fileformat=VCFv4.2\n',
]
reader = VCFReader(iter(lines))
with self.assertRaisesRegex(Exception, 'unexpected EOF'):
print(reader.read_header())
class TestRecordParsing(unittest.TestCase):
# version parsing
def test_should_parse_single_record(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
'chr0\t0\t.\tP\tQ\t0\tPASS\t\n',
]
reader = VCFReader(iter(lines))
record_count = len(list(reader.read_records()))
self.assertEqual(1, record_count)
def test_should_parse_header_when_parsing_records(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
'chr0\t0\t.\tP\tQ\t0\tPASS\t\n',
]
reader = VCFReader(iter(lines))
self.assertIsNone(reader.header)
list(reader.read_records())
self.assertIsNotNone(reader.header)
def test_should_parse_empty_file(self):
lines = [
'##fileformat=VCFv4.2\n',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n',
]
reader = VCFReader(iter(lines))
record_count = len(list(reader.read_records()))
self.assertEqual(0, record_count)
| test_should_decode_complex_VCF_string |
scatter_gather.py | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
import numbers
from coremltools.converters.mil.mil import Operation, types
from coremltools.converters.mil.mil.input_type import (
DefaultInputs,
InputSpec,
IntInputType,
IntTensorInputType,
TensorInputType,
StringInputType,
)
from coremltools.converters.mil.mil.operation import precondition
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.types.symbolic import is_compatible_symbolic_vector, is_symbolic
from coremltools.converters.mil.mil.operation import (
SYMBOL,
VALUE
)
@register_op(doc_str="")
class gather(Operation):
"""
Gather slices from input ``x`` along dimension ``axis`` according to ``indices``,
similar to `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
* If ``indices`` is scalar (0-D):
.. math::
output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
.. math::
x[p_0, ..., p_{axis-1}, ~~~~~~~~~ indices, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]
Where ``rank(x)`` is the rank of ``x``. The ``output`` has rank ``rank(x) - 1``.
* If ``indices`` is 1-D tensor:
.. math::
output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~ i, ~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}] =
.. math::
x[p_0, ..., p_{axis-1}, ~~~~~~~~ indices[i], ~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}]
The output has rank ``rank(x)``.
* In general:
.. math::
output[p_0, ..., p_{axis-1}, ~~~~~~~~ i_0, ..., i_{M-1}, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
.. math::
x[p_0, ..., p_{axis-1}, ~~~~~~~ indices[i_0, ..., i_{M-1}], ~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]
    Where ``M = rank(indices)``.
Parameters
----------
x: tensor<\*D,T> (Required)
indices: tensor<\*N,i32> (Required)
* Indices values may be negative. More precisely, ``-D[axis]<= v < D[axis]`` for ``v`` in ``indices``.
axis: const i32 (Optional. Default=``0``)
* Negative axis is supported.
Returns
-------
tensor<\*K,T>
* Where ``K = D[:axis] + N + D[axis+1:]``.
Attributes
----------
T: fp32
References
----------
See `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
"""
input_spec = InputSpec(
x=TensorInputType(),
indices=IntInputType(),
axis=IntInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
)
def __init__(self, **kwargs):
super(gather, self).__init__(**kwargs)
@precondition(allow=VALUE | SYMBOL)
def value_inference(self):
x = self.x.sym_val
indices = self.indices.val
if indices is None:
# only allow x to be symbolic. indices cannot.
return None
scalar_indices = isinstance(indices, numbers.Integral)
axis = self.axis.val
if scalar_indices:
res = np.take(x, [indices], axis)
res2 = np.squeeze(res, axis=axis)
if isinstance(res2, np.ndarray) and len(res2.shape) == 0:
# res2 is a scalar, but represented as np.array(symbol,
# dtype=np.object) which np.squeeze can't remove.
return res2.item()
return res2
return np.take(x, indices, axis)
def type_inference(self):
out_type = self.x.dtype
if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
output_rank = self.x.rank - 1 + self.indices.rank
if output_rank == 0:
# output scalar
return out_type
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.x.rank
out_shape = self.x.shape[:axis] + self.indices.shape + self.x.shape[axis + 1 :]
return types.tensor(out_type, out_shape)
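# Quick numpy check (an illustration, not part of the op definitions): since
# value_inference above delegates to np.take, the documented shape rule
# K = D[:axis] + N + D[axis+1:] can be demonstrated directly with numpy.
import numpy as np  # already imported at the top of this module; repeated for clarity

_x = np.arange(12).reshape(3, 4)           # D = (3, 4)
_idx = np.array([[2, 0], [1, 1]])          # N = (2, 2)
_out = np.take(_x, _idx, axis=0)           # K = D[:0] + N + D[1:] = (2, 2, 4)
assert _out.shape == (2, 2, 4)
assert (_out[0, 0] == _x[2]).all()         # output[i_0, i_1] = x[indices[i_0, i_1]]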
@register_op(doc_str="")
class scatter(Operation):
"""
Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``
by operation ``mode``.
Example: ``mode == update``.
* For ``i`` in ``[0, len(indices)]``:
.. math::
        output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
.. math::
updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
* For ``j! = i``:
.. math::
output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
.. math::
data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
Example: ``mode == add``.
* For ``i`` in ``[0, len(indices)]``:
.. math::
        output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
    .. math::
        updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
    .. math::
        x[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D]
* For ``j! = i``:
.. math::
output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
.. math::
data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
Parameters
----------
data: tensor<\*D, T> (Required)
    indices: tensor<[C], i32> (Required)
* 1-D tensor.
updates: tensor<\*K, T> (Required)
* ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.
axis: const i32 (Optional)
* Default to ``0``.
mode: const string (Optional)
* Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
``div``, ``max``, ``min``.
* Default value is ``update``. | Returns
-------
tensor<\*D, T>
* With the same type and shape as input ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
data=TensorInputType(),
indices=IntTensorInputType(),
updates=TensorInputType(),
axis=IntInputType(const=True, optional=True),
mode=StringInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
mode="add",
)
def __init__(self, **kwargs):
super(scatter, self).__init__(**kwargs)
def type_inference(self):
if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.data.rank
expected_updates_shape = (
self.data.shape[:axis] + self.indices.shape + self.data.shape[axis + 1 :]
)
err = "Updates shape {} is incorrect. It should be {}.".format(self.updates.shape, expected_updates_shape)
assert is_compatible_symbolic_vector(
self.updates.shape, tuple(expected_updates_shape)
), err
return self.data.sym_type
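# Rough numpy sketch (not from the source) of scatter's "add" mode along
# axis 0 with 1-D indices, mirroring the docstring's update rule.
import numpy as np

_data = np.zeros((4, 3))
_indices = np.array([1, 3])                # C = 2
_updates = np.ones((2, 3))                 # shape [len(indices)] + data.shape[1:]
_out = _data.copy()
np.add.at(_out, _indices, _updates)        # out[indices[i], :] += updates[i, :]
assert (_out[1] == 1).all() and (_out[0] == 0).all()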
@register_op(doc_str="")
class gather_along_axis(Operation):
"""
Take the values along ``axis`` at locations ``indices``.
.. math::
idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
.. math::
        output[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] = x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]
Parameters
----------
x: tensor<\*D, T> (Required)
    indices: tensor<\*K, i32> (Required)
* ``rank(indices) == rank(x)``.
axis: const i32 (Optional):
* Default to ``0``.
Returns
-------
tensor<\*D, T>:
* Output tensor has the same shape as ``indices``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
x=TensorInputType(),
indices=IntTensorInputType(),
axis=IntInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
)
def __init__(self, **kwargs):
super(gather_along_axis, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
x = self.x.val
indices = self.indices.val
axis = self.axis.val
return np.take_along_axis(x, indices, axis)
def type_inference(self):
if self.x.rank != self.indices.rank:
raise ValueError(
"Rank mismatch between input and indices. \
Input rank: {}, indices rank: {}".format(
self.x.rank, self.indices.rank
)
)
if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.x.rank
for i in range(self.x.rank):
if i != axis:
assert self.x.shape[i] == self.indices.shape[i]
return types.tensor(self.x.dtype, self.indices.shape)
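# Illustrative numpy check (a standalone sketch): value_inference above is
# np.take_along_axis, so the shape contract "output shape == indices shape"
# can be shown directly.
import numpy as np

_x = np.array([[10, 20], [30, 40]])
_idx = np.array([[1, 0], [0, 0]])          # same rank as x, matches x off-axis
_out = np.take_along_axis(_x, _idx, axis=1)
assert _out.shape == _idx.shape
assert _out[0, 0] == 20 and _out[1, 1] == 30   # i.e. x[0, 1] and x[1, 0]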
@register_op(doc_str="")
class scatter_along_axis(Operation):
"""
Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``
by operation ``mode``.
Example: ``mode == update``.
* For ``i`` in ``[0, len(indices)]``:
.. math::
idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
.. math::
output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
.. math::
updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
* For ``j! = i``:
.. math::
output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
.. math::
data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
Example: ``mode == add``.
* For ``i`` in ``[0, len(indices)]``:
.. math::
idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
.. math::
output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
.. math::
updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
.. math::
        x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]
* For ``j! = i``:
.. math::
output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
.. math::
data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
Parameters
----------
data: tensor<\*D, T> (Required)
    indices: tensor<\*K, i32> (Required)
* ``rank(indices) == rank(data)``.
updates: tensor<\*K, T> (Required)
* Must be the same shape as ``indices``.
axis: const i32 (Optional)
* Default to ``0``.
mode: const string (Optional)
* Default to ``add``.
* Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
``div``, ``max``, ``min``.
Returns
-------
tensor<\*D, T>
* With the same type and shape as input ``x``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
data=TensorInputType(),
indices=IntTensorInputType(),
updates=TensorInputType(),
axis=IntInputType(const=True, optional=True),
mode=StringInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
axis=0,
mode="add",
)
def __init__(self, **kwargs):
super(scatter_along_axis, self).__init__(**kwargs)
@precondition(allow=VALUE)
def value_inference(self):
data = np.copy(self.data.val)
indices = self.indices.val
updates = self.updates.val
axis = self.axis.val
np_output = data
np.put_along_axis(np_output, indices, updates, axis=axis)
return np_output
def type_inference(self):
if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
raise IndexError(
"Axis value {} is out of bounds for {} node {}".format(
self.axis.val, self.op_type, self.name
)
)
axis = self.axis.val
axis = axis if axis >= 0 else axis + self.data.rank
assert is_compatible_symbolic_vector(
self.indices.shape, self.updates.shape
)
assert self.data.rank == self.indices.rank
for i in range(self.data.rank):
if i != axis:
assert self.data.shape[i] == self.indices.shape[i]
return self.data.sym_type
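# Brief numpy illustration (a sketch, not part of the original file):
# scatter_along_axis with mode "update" is np.put_along_axis, as used by
# value_inference above.
import numpy as np

_data = np.zeros((2, 3))
_idx = np.array([[0], [2]])                # same rank as data
_upd = np.array([[7.0], [9.0]])
_out = _data.copy()
np.put_along_axis(_out, _idx, _upd, axis=1)
assert _out[0, 0] == 7.0 and _out[1, 2] == 9.0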
@register_op(doc_str="")
class gather_nd(Operation):
"""
Gather slices from ``x`` according to ``indices``, similar to `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a slice
of ``x``:
.. math::
output[i_0, ..., i_{K-2}]= x[indices[i_0, ..., i_{K-2}]]
Where ``K = rank(indices)`` and ``x[indices[i_0, ..., i_{K-2}]]`` has rank
``rank(x) - indices.shape[-1]``.
Parameters
----------
x: tensor<\*D,T> (Required)
indices: tensor<\*K,i32> (Required)
Returns
-------
tensor<\*V,T>
* ``V = K[:-1] + D[K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.
Attributes
----------
T: fp32
References
----------
See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
"""
input_spec = InputSpec(
x=TensorInputType(),
indices=IntTensorInputType(),
)
def __init__(self, **kwargs):
super(gather_nd, self).__init__(**kwargs)
def type_inference(self):
assert self.indices.shape[-1] <= self.x.rank
out_type = self.x.dtype
out_shape = self.indices.shape[:-1] + self.x.shape[self.indices.shape[-1] :]
return types.tensor(out_type, out_shape)
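# Small numpy sketch (illustration only) of the gather_nd shape rule
# V = K[:-1] + D[K[-1]:]: each row of indices picks one slice of x.
import numpy as np

_x = np.arange(24).reshape(2, 3, 4)        # D = (2, 3, 4)
_idx = np.array([[0, 1], [1, 2]])          # K = (2, 2); K[-1] = 2 indexes (d0, d1)
_out = np.stack([_x[tuple(i)] for i in _idx])
assert _out.shape == (2, 4)                # K[:-1] + D[2:] = (2,) + (4,)
assert (_out[0] == _x[0, 1]).all()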
@register_op(doc_str="")
class scatter_nd(Operation):
"""
Scatter ``updates`` to ``data`` at locations ``indices``.
The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a
slice of ``data``, ``K = rank(indices)``, and ``data[indices[i_0, ..., i_{K-2}]]``
has rank ``rank(data) - indices.shape[-1]``.
* Example: ``mode == update``: The ``output`` is set to ``data`` initially, and
the op updates ``output`` as follows:
.. math::
        output[indices[i_0, ..., i_{K-2}]] = updates[i_0, ..., i_{K-2}]
* Example: ``mode == add``. The update rule is:
.. math::
        output[indices[i_0, ..., i_{K-2}]] += updates[i_0, ..., i_{K-2}]
Parameters
----------
data: tensor<\*D,T> (Required)
indices: tensor<\*K,i32> (Required)
updates: tensor<\*K, T> (Required)
    * Must have shape ``K[:-1] + data.shape[K[-1]:]``.
mode: const string (Optional)
* Default to ``add``.
* Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
``div``, ``max``, ``min``.
Returns
-------
tensor<\*D,T>
* A tensor with the same shape and type as ``data``.
Attributes
----------
T: fp32
"""
input_spec = InputSpec(
data=TensorInputType(),
indices=IntTensorInputType(),
updates=TensorInputType(),
mode=StringInputType(const=True, optional=True),
)
def default_inputs(self):
return DefaultInputs(
mode="add",
)
def __init__(self, **kwargs):
super(scatter_nd, self).__init__(**kwargs)
def type_inference(self):
assert self.indices.shape[-1] <= self.data.rank
expected_updates_shape = (
self.indices.shape[:-1] + self.data.shape[self.indices.shape[-1] :]
)
assert is_compatible_symbolic_vector(
self.updates.shape, tuple(expected_updates_shape)
)
return self.data.sym_type | |
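# Hedged numpy sketch (not from the source) of scatter_nd's "add" mode with
# K[-1] == 1, where each index row selects a row of data; duplicate indices
# accumulate, as with np.add.at.
import numpy as np

_data = np.zeros((4, 2))
_idx = np.array([[0], [2], [0]])           # K = (3, 1)
_upd = np.ones((3, 2))                     # K[:-1] + data.shape[1:]
_out = _data.copy()
np.add.at(_out, _idx[:, 0], _upd)
assert (_out[0] == 2).all() and (_out[2] == 1).all() and (_out[1] == 0).all()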
assets.py | from django.conf import settings
from django.db import models
from django.views.generic import TemplateView
User = settings.AUTH_USER_MODEL
class | (models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(
User,
blank=True,
null=True,
related_name="%(class)s_created_by",
on_delete=models.SET_DEFAULT, default=1
)
updated_by = models.ForeignKey(
User,
blank=True,
null=True,
related_name="%(class)s_updated_by",
on_delete=models.SET_DEFAULT, default=1,
)
class Meta:
abstract = True
class IndexView(TemplateView):
template_name = "index.html"
| TimeStampMixin |
spi.rs | //! Implementation of SPI for NRF52 using EasyDMA.
//!
//! This file only implements support for the three SPI master (`SPIM`)
//! peripherals, and not SPI slave (`SPIS`).
//!
//! Although `kernel::hil::spi::SpiMaster` is implemented for `SPIM`,
//! only the functions marked with `x` are fully defined:
//!
//! * ✓ set_client
//! * ✓ init
//! * ✓ is_busy
//! * ✓ read_write_bytes
//! * write_byte
//! * read_byte
//! * read_write_byte
//! * ✓ specify_chip_select
//! * ✓ set_rate
//! * ✓ get_rate
//! * ✓ set_clock
//! * ✓ get_clock
//! * ✓ set_phase
//! * ✓ get_phase
//! * hold_low
//! * release_low
//!
//! Author
//! -------------------
//!
//! * Author: Jay Kickliter
//! * Date: Sep 10, 2017
use core::cell::Cell;
use core::{cmp, ptr};
use kernel::common::cells::{OptionalCell, TakeCell, VolatileCell};
use kernel::common::registers::{register_bitfields, ReadWrite, WriteOnly};
use kernel::common::StaticRef;
use kernel::hil;
use kernel::ReturnCode;
use nrf5x::pinmux::Pinmux;
const INSTANCES: [StaticRef<SpimRegisters>; 3] = unsafe {
[
StaticRef::new(0x40003000 as *const SpimRegisters),
StaticRef::new(0x40004000 as *const SpimRegisters),
StaticRef::new(0x40023000 as *const SpimRegisters),
]
};
#[repr(C)]
struct SpimRegisters {
_reserved0: [u8; 16], // reserved
tasks_start: WriteOnly<u32, TASK::Register>, // Start SPI transaction
tasks_stop: WriteOnly<u32, TASK::Register>, // Stop SPI transaction
_reserved1: [u8; 4], // reserved
tasks_suspend: WriteOnly<u32, TASK::Register>, // Suspend SPI transaction
tasks_resume: WriteOnly<u32, TASK::Register>, // Resume SPI transaction
_reserved2: [u8; 224], // reserved
events_stopped: ReadWrite<u32, EVENT::Register>, // SPI transaction has stopped
_reserved3: [u8; 8], // reserved
events_endrx: ReadWrite<u32, EVENT::Register>, // End of RXD buffer reached
_reserved4: [u8; 4], // reserved
events_end: ReadWrite<u32, EVENT::Register>, // End of RXD buffer and TXD buffer reached
_reserved5: [u8; 4], // reserved
events_endtx: ReadWrite<u32, EVENT::Register>, // End of TXD buffer reached
_reserved6: [u8; 40], // reserved
events_started: ReadWrite<u32, EVENT::Register>, // Transaction started
_reserved7: [u8; 176], // reserved
shorts: ReadWrite<u32>, // Shortcut register
_reserved8: [u8; 256], // reserved
intenset: ReadWrite<u32, INTE::Register>, // Enable interrupt
intenclr: ReadWrite<u32, INTE::Register>, // Disable interrupt
_reserved9: [u8; 500], // reserved
enable: ReadWrite<u32, ENABLE::Register>, // Enable SPIM
_reserved10: [u8; 4], // reserved
psel_sck: VolatileCell<Pinmux>, // Pin select for SCK
psel_mosi: VolatileCell<Pinmux>, // Pin select for MOSI signal
psel_miso: VolatileCell<Pinmux>, // Pin select for MISO signal
_reserved11: [u8; 16], // reserved
frequency: ReadWrite<u32>, // SPI frequency
_reserved12: [u8; 12], // reserved
rxd_ptr: VolatileCell<*mut u8>, // Data pointer
rxd_maxcnt: ReadWrite<u32, MAXCNT::Register>, // Maximum number of bytes in receive buffer
rxd_amount: ReadWrite<u32>, // Number of bytes transferred
rxd_list: ReadWrite<u32>, // EasyDMA list type
txd_ptr: VolatileCell<*const u8>, // Data pointer
txd_maxcnt: ReadWrite<u32, MAXCNT::Register>, // Maximum number of bytes in transmit buffer
txd_amount: ReadWrite<u32>, // Number of bytes transferred
txd_list: ReadWrite<u32>, // EasyDMA list type
config: ReadWrite<u32, CONFIG::Register>, // Configuration register
_reserved13: [u8; 104], // reserved
orc: ReadWrite<u32>, // Over-read character.
}
register_bitfields![u32,
INTE [
/// Write '1' to Enable interrupt on EVENTS_STOPPED event
STOPPED OFFSET(1) NUMBITS(1) [
/// Read: Disabled
ReadDisabled = 0,
/// Enable
Enable = 1
],
/// Write '1' to Enable interrupt on EVENTS_ENDRX event
ENDRX OFFSET(4) NUMBITS(1) [
/// Read: Disabled
ReadDisabled = 0,
/// Enable
Enable = 1
],
/// Write '1' to Enable interrupt on EVENTS_END event
END OFFSET(6) NUMBITS(1) [
/// Read: Disabled
ReadDisabled = 0,
/// Enable
Enable = 1
],
/// Write '1' to Enable interrupt on EVENTS_ENDTX event
ENDTX OFFSET(8) NUMBITS(1) [
/// Read: Disabled
ReadDisabled = 0,
/// Enable
Enable = 1
],
/// Write '1' to Enable interrupt on EVENTS_STARTED event
STARTED OFFSET(19) NUMBITS(1) [
/// Read: Disabled
ReadDisabled = 0,
/// Enable
Enable = 1
]
],
MAXCNT [
/// Maximum number of bytes in buffer
MAXCNT OFFSET(0) NUMBITS(16)
],
CONFIG [
/// Bit order
ORDER OFFSET(0) NUMBITS(1) [
/// Most significant bit shifted out first
MostSignificantBitShiftedOutFirst = 0,
/// Least significant bit shifted out first
LeastSignificantBitShiftedOutFirst = 1
],
/// Serial clock (SCK) phase
CPHA OFFSET(1) NUMBITS(1) [
/// Sample on leading edge of clock, shift serial data on trailing edge
SampleOnLeadingEdge = 0,
/// Sample on trailing edge of clock, shift serial data on leading edge
SampleOnTrailingEdge = 1
],
/// Serial clock (SCK) polarity
CPOL OFFSET(2) NUMBITS(1) [
/// Active high
ActiveHigh = 0,
/// Active low
ActiveLow = 1
]
],
ENABLE [
ENABLE OFFSET(0) NUMBITS(4) [
Disable = 0,
Enable = 7
]
],
EVENT [
EVENT 0
],
TASK [
TASK 0
]
];
/// An enum representing all allowable `frequency` register values.
#[repr(u32)]
#[derive(Copy, Clone)]
pub enum Frequency {
K125 = 0x02000000,
K250 = 0x04000000,
K500 = 0x08000000,
M1 = 0x10000000,
M2 = 0x20000000,
M4 = 0x40000000,
M8 = 0x80000000,
}
impl Frequency {
pub fn from_register(reg: u32) -> Option<Frequency> {
match reg {
0x02000000 => Some(Frequency::K125),
0x04000000 => Some(Frequency::K250),
0x08000000 => Some(Frequency::K500),
0x10000000 => Some(Frequency::M1),
0x20000000 => Some(Frequency::M2),
0x40000000 => Some(Frequency::M4),
0x80000000 => Some(Frequency::M8),
_ => None,
}
}
pub fn into_spi_rate(&self) -> u32 {
match *self {
Frequency::K125 => 125_000,
Frequency::K250 => 250_000,
Frequency::K500 => 500_000,
Frequency::M1 => 1_000_000,
Frequency::M2 => 2_000_000,
Frequency::M4 => 4_000_000,
Frequency::M8 => 8_000_000,
}
}
pub fn from_spi_rate(freq: u32) -> Frequency {
if freq < 250_000 {
Frequency::K125
} else if freq < 500_000 {
Frequency::K250
} else if freq < 1_000_000 {
Frequency::K500
} else if freq < 2_000_000 {
Frequency::M1
} else if freq < 4_000_000 {
Frequency::M2
} else if freq < 8_000_000 {
Frequency::M4
} else {
Frequency::M8
}
}
}
/// A SPI master device.
///
/// A `SPIM` instance wraps a `registers::spim::SPIM` together with
/// additional data necessary to implement an asynchronous interface.
pub struct SPIM {
registers: StaticRef<SpimRegisters>,
client: OptionalCell<&'static dyn hil::spi::SpiMasterClient>,
chip_select: OptionalCell<&'static dyn hil::gpio::Pin>,
initialized: Cell<bool>,
busy: Cell<bool>,
tx_buf: TakeCell<'static, [u8]>,
rx_buf: TakeCell<'static, [u8]>,
transfer_len: Cell<usize>,
}
impl SPIM {
pub const fn new(instance: usize) -> SPIM {
SPIM {
registers: INSTANCES[instance],
client: OptionalCell::empty(),
chip_select: OptionalCell::empty(),
initialized: Cell::new(false),
busy: Cell::new(false),
tx_buf: TakeCell::empty(),
rx_buf: TakeCell::empty(),
transfer_len: Cell::new(0),
}
}
#[inline(never)]
pub fn handle_interrupt(&self) {
if self.registers.events_end.is_set(EVENT::EVENT) {
// End of RXD buffer and TXD buffer reached
if self.chip_select.is_none() {
debug_assert!(false, "Invariant violated. Chip-select must be Some.");
return;
}
self.chip_select.map(|cs| cs.set());
self.registers.events_end.write(EVENT::EVENT::CLEAR);
self.client.map(|client| match self.tx_buf.take() {
None => (),
Some(tx_buf) => {
client.read_write_done(tx_buf, self.rx_buf.take(), self.transfer_len.take())
}
});
self.busy.set(false);
}
// Although we only configured the chip interrupt on the | // SPI transaction has stopped
self.registers.events_stopped.write(EVENT::EVENT::CLEAR);
}
if self.registers.events_endrx.is_set(EVENT::EVENT) {
// End of RXD buffer reached
self.registers.events_endrx.write(EVENT::EVENT::CLEAR);
}
if self.registers.events_endtx.is_set(EVENT::EVENT) {
// End of TXD buffer reached
self.registers.events_endtx.write(EVENT::EVENT::CLEAR);
}
if self.registers.events_started.is_set(EVENT::EVENT) {
// Transaction started
self.registers.events_started.write(EVENT::EVENT::CLEAR);
}
}
/// Configures an already constructed `SPIM`.
pub fn configure(&self, mosi: Pinmux, miso: Pinmux, sck: Pinmux) {
self.registers.psel_mosi.set(mosi);
self.registers.psel_miso.set(miso);
self.registers.psel_sck.set(sck);
self.enable();
}
/// Enables `SPIM` peripheral.
pub fn enable(&self) {
self.registers.enable.write(ENABLE::ENABLE::Enable);
}
/// Disables `SPIM` peripheral.
pub fn disable(&self) {
self.registers.enable.write(ENABLE::ENABLE::Disable);
}
pub fn is_enabled(&self) -> bool {
self.registers.enable.matches_all(ENABLE::ENABLE::Enable)
}
}
impl hil::spi::SpiMaster for SPIM {
type ChipSelect = &'static dyn hil::gpio::Pin;
fn set_client(&self, client: &'static dyn hil::spi::SpiMasterClient) {
self.client.set(client);
}
fn init(&self) {
self.registers.intenset.write(INTE::END::Enable);
self.initialized.set(true);
}
fn is_busy(&self) -> bool {
self.busy.get()
}
fn read_write_bytes(
&self,
tx_buf: &'static mut [u8],
rx_buf: Option<&'static mut [u8]>,
len: usize,
) -> ReturnCode {
debug_assert!(self.initialized.get());
debug_assert!(!self.busy.get());
debug_assert!(self.tx_buf.is_none());
debug_assert!(self.rx_buf.is_none());
// Clear (set to low) chip-select
if self.chip_select.is_none() {
return ReturnCode::ENODEVICE;
}
self.chip_select.map(|cs| cs.clear());
// Setup transmit data registers
let tx_len: u32 = cmp::min(len, tx_buf.len()) as u32;
self.registers.txd_ptr.set(tx_buf.as_ptr());
self.registers.txd_maxcnt.write(MAXCNT::MAXCNT.val(tx_len));
self.tx_buf.replace(tx_buf);
// Setup receive data registers
match rx_buf {
None => {
self.registers.rxd_ptr.set(ptr::null_mut());
self.registers.rxd_maxcnt.write(MAXCNT::MAXCNT.val(0));
self.transfer_len.set(tx_len as usize);
self.rx_buf.put(None);
}
Some(buf) => {
self.registers.rxd_ptr.set(buf.as_mut_ptr());
let rx_len: u32 = cmp::min(len, buf.len()) as u32;
self.registers.rxd_maxcnt.write(MAXCNT::MAXCNT.val(rx_len));
self.transfer_len.set(cmp::min(tx_len, rx_len) as usize);
self.rx_buf.put(Some(buf));
}
}
// Start the transfer
self.busy.set(true);
self.registers.tasks_start.write(TASK::TASK::SET);
ReturnCode::SUCCESS
}
fn write_byte(&self, _val: u8) {
debug_assert!(self.initialized.get());
unimplemented!("SPI: Use `read_write_bytes()` instead.");
}
fn read_byte(&self) -> u8 {
debug_assert!(self.initialized.get());
unimplemented!("SPI: Use `read_write_bytes()` instead.");
}
fn read_write_byte(&self, _val: u8) -> u8 {
debug_assert!(self.initialized.get());
unimplemented!("SPI: Use `read_write_bytes()` instead.");
}
// Tell the SPI peripheral what to use as a chip select pin.
// The type of the argument is based on what makes sense for the
// peripheral when this trait is implemented.
fn specify_chip_select(&self, cs: Self::ChipSelect) {
cs.make_output();
cs.set();
self.chip_select.set(cs);
}
// Returns the actual rate set
fn set_rate(&self, rate: u32) -> u32 {
debug_assert!(self.initialized.get());
let f = Frequency::from_spi_rate(rate);
self.registers.frequency.set(f as u32);
f.into_spi_rate()
}
fn get_rate(&self) -> u32 {
debug_assert!(self.initialized.get());
// Reset value is a valid frequency (250kbps), so .expect
// should be safe here
let f = Frequency::from_register(self.registers.frequency.get())
.expect("nrf52 unknown spi rate");
f.into_spi_rate()
}
fn set_clock(&self, polarity: hil::spi::ClockPolarity) {
        debug_assert!(self.initialized.get());
let new_polarity = match polarity {
hil::spi::ClockPolarity::IdleLow => CONFIG::CPOL::ActiveHigh,
hil::spi::ClockPolarity::IdleHigh => CONFIG::CPOL::ActiveLow,
};
self.registers.config.modify(new_polarity);
}
fn get_clock(&self) -> hil::spi::ClockPolarity {
debug_assert!(self.initialized.get());
match self.registers.config.read(CONFIG::CPOL) {
0 => hil::spi::ClockPolarity::IdleLow,
1 => hil::spi::ClockPolarity::IdleHigh,
_ => unreachable!(),
}
}
fn set_phase(&self, phase: hil::spi::ClockPhase) {
debug_assert!(self.initialized.get());
let new_phase = match phase {
hil::spi::ClockPhase::SampleLeading => CONFIG::CPHA::SampleOnLeadingEdge,
hil::spi::ClockPhase::SampleTrailing => CONFIG::CPHA::SampleOnTrailingEdge,
};
self.registers.config.modify(new_phase);
}
fn get_phase(&self) -> hil::spi::ClockPhase {
debug_assert!(self.initialized.get());
match self.registers.config.read(CONFIG::CPHA) {
0 => hil::spi::ClockPhase::SampleLeading,
1 => hil::spi::ClockPhase::SampleTrailing,
_ => unreachable!(),
}
}
// The following two trait functions are not implemented for
// SAM4L, and appear to not provide much functionality. Let's not
// bother implementing them unless needed.
fn hold_low(&self) {
unimplemented!("SPI: Use `read_write_bytes()` instead.");
}
fn release_low(&self) {
unimplemented!("SPI: Use `read_write_bytes()` instead.");
}
} | // above 'end' event, the other event fields also get set by
// the chip. Let's clear those flags.
if self.registers.events_stopped.is_set(EVENT::EVENT) { |
conftest.py | import pytest
import allure
from _pytest.nodes import Item
from _pytest.runner import CallInfo
from selene.core.exceptions import TimeoutException
from selene.support.shared import browser
@pytest.fixture(scope='function', autouse=True)
def browser_management():
"""
Here, before yield,
goes all "setup" code for each test case
aka "before test function" hook
"""
# def attach_snapshots_on_failure(error: TimeoutException) -> Exception:
# """
# An example of selene hook_wait_failure that attaches snapshots to failed test step.
# It is actually not needed and optional,
# because in the pytest_runtest_makereport hook below
# we attach screenshots to the test body itself,
# that is more handy during analysis of test report
#
# but if you need it, you can enable it by uncommenting
# together with the following ``browser.config.hook_wait_failure =`` line;)
#
# otherwise, you can remove it
# """
# last_screenshot = browser.config.last_screenshot
# if last_screenshot:
# allure.attach.file(source=last_screenshot,
# name='screenshot on failure',
# attachment_type=allure.attachment_type.PNG)
#
# last_page_source = browser.config.last_page_source
# if last_page_source:
# allure.attach.file(source=last_page_source,
# name='page source on failure',
# attachment_type=allure.attachment_type.HTML)
# return error
# browser.config.hook_wait_failure = attach_snapshots_on_failure
browser.config.timeout = 3
# todo: add your before setup here...
|
"""
Here, after yield,
goes all "tear down" code for each test case
aka "after test function" hook
"""
# todo: add your after setup here...
browser.quit()
prev_test_screenshot = None
prev_test_page_source = None
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_setup(item):
yield
global prev_test_screenshot
prev_test_screenshot = browser.config.last_screenshot
global prev_test_page_source
prev_test_page_source = browser.config.last_page_source
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo):
"""
Attach snapshots on test failure
"""
    # All code prior to the yield statement runs before
    # any other non-wrapped pytest_runtest_makereport hooks
outcome = yield # Run all other pytest_runtest_makereport non wrapped hooks
result = outcome.get_result()
if result.when == "call" and result.failed:
last_screenshot = browser.config.last_screenshot
if last_screenshot and not last_screenshot == prev_test_screenshot:
allure.attach.file(source=last_screenshot,
name='screenshot',
attachment_type=allure.attachment_type.PNG)
last_page_source = browser.config.last_page_source
if last_page_source and not last_page_source == prev_test_page_source:
allure.attach.file(source=last_page_source,
name='page source',
attachment_type=allure.attachment_type.HTML) | yield |
WidgetTrash.tsx | import styles from "./WidgetTrash.module.css"
interface WidgetTrashProps {
onMouseUp: (event: React.MouseEvent) => void
}
export function | ({onMouseUp}: WidgetTrashProps) {
return (
<div className={styles.widgetTrash} onMouseUp={onMouseUp}>
T
</div>
)
} | WidgetTrash |
testsys_test.go | /*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package simplebft
import (
"crypto/ecdsa"
"crypto/elliptic"
crand "crypto/rand"
"crypto/sha256"
"encoding/asn1"
"fmt"
"math/big"
"math/rand"
"reflect"
"runtime"
"time"
"github.com/golang/protobuf/proto"
)
type testSystemAdapter struct {
id uint64
sys *testSystem
receiver Receiver
batches []*Batch
arrivals map[uint64]time.Duration
persistence map[string][]byte
key *ecdsa.PrivateKey
}
func (t *testSystemAdapter) SetReceiver(recv Receiver) {
if t.receiver != nil {
// remove all events for us
t.sys.queue.filter(func(e testElem) bool {
switch e := e.ev.(type) {
case *testTimer:
if e.id == t.id {
return false
}
case *testMsgEvent:
if e.dst == t.id {
return false
}
}
return true
})
}
t.receiver = recv
}
func (t *testSystemAdapter) getArrival(dest uint64) time.Duration {
// XXX for now, define fixed variance per destination
arr, ok := t.arrivals[dest]
if !ok {
inflight := 20 * time.Millisecond
variance := 1 * time.Millisecond
if dest == t.id {
inflight = 0
}
variance = time.Duration(t.sys.rand.Int31n(int32(variance)))
arr = inflight + variance
t.arrivals[dest] = arr
}
return arr
}
func (t *testSystemAdapter) Send(msg *Msg, dest uint64) {
arr := t.getArrival(dest)
ev := &testMsgEvent{
inflight: arr,
src: t.id,
dst: dest,
msg: msg,
}
// simulate time for marshalling (and unmarshalling)
bytes, _ := proto.Marshal(msg)
m2 := &Msg{}
_ = proto.Unmarshal(bytes, m2)
t.sys.enqueue(arr, ev)
}
type testMsgEvent struct {
inflight time.Duration
src, dst uint64
msg *Msg
}
func (ev *testMsgEvent) Exec(t *testSystem) {
r := t.adapters[ev.dst]
if r == nil {
testLog.Errorf("message to non-existing %s", ev)
return
}
r.receiver.Receive(ev.msg, ev.src)
}
func (ev *testMsgEvent) String() string {
return fmt.Sprintf("Message<from %d, to %d, inflight %s, %v",
ev.src, ev.dst, ev.inflight, ev.msg)
}
type testTimer struct {
id uint64
tf func()
cancelled bool
}
func (t *testTimer) Cancel() {
t.cancelled = true
}
func (t *testTimer) Exec(_ *testSystem) {
if !t.cancelled {
t.tf()
}
}
func (t *testTimer) String() string {
fun := runtime.FuncForPC(reflect.ValueOf(t.tf).Pointer()).Name()
return fmt.Sprintf("Timer<on %d, cancelled %v, fun %s>", t.id, t.cancelled, fun)
}
func (t *testSystemAdapter) Timer(d time.Duration, tf func()) Canceller {
tt := &testTimer{id: t.id, tf: tf}
t.sys.enqueue(d, tt)
return tt
}
func (t *testSystemAdapter) Deliver(batch *Batch) {
t.batches = append(t.batches, batch)
}
func (t *testSystemAdapter) Persist(key string, data proto.Message) {
if data == nil {
delete(t.persistence, key)
} else {
bytes, err := proto.Marshal(data)
if err != nil {
panic(err)
}
t.persistence[key] = bytes
}
}
func (t *testSystemAdapter) Restore(key string, out proto.Message) bool {
val, ok := t.persistence[key]
if !ok {
return false
}
err := proto.Unmarshal(val, out)
return (err == nil)
}
func (t *testSystemAdapter) LastBatch() *Batch {
if len(t.batches) == 0 {
return t.receiver.(*SBFT).makeBatch(0, nil, nil)
} else {
return t.batches[len(t.batches)-1]
}
}
func (t *testSystemAdapter) Sign(data []byte) []byte {
hash := sha256.Sum256(data)
r, s, err := ecdsa.Sign(crand.Reader, t.key, hash[:])
if err != nil {
panic(err)
}
sig, err := asn1.Marshal(struct{ R, S *big.Int }{r, s})
if err != nil {
panic(err)
}
return sig
}
func (t *testSystemAdapter) CheckSig(data []byte, src uint64, sig []byte) error {
rs := struct{ R, S *big.Int }{}
rest, err := asn1.Unmarshal(sig, &rs)
if err != nil {
return err
}
if len(rest) != 0 {
return fmt.Errorf("invalid signature")
}
hash := sha256.Sum256(data)
ok := ecdsa.Verify(&t.sys.adapters[src].key.PublicKey, hash[:], rs.R, rs.S)
if !ok {
return fmt.Errorf("invalid signature")
}
return nil
}
func (t *testSystemAdapter) Reconnect(replica uint64) {
testLog.Infof("dropping connection from %d to %d", replica, t.id)
t.sys.queue.filter(func(e testElem) bool {
switch e := e.ev.(type) {
case *testMsgEvent:
if e.dst == t.id && e.src == replica {
return false
}
}
return true
})
arr := t.sys.adapters[replica].arrivals[t.id] * 10
t.sys.enqueue(arr, &testTimer{id: t.id, tf: func() {
testLog.Infof("reconnecting %d to %d", replica, t.id)
t.sys.adapters[replica].receiver.Connection(t.id)
}})
}
// ==============================================
type testEvent interface {
Exec(t *testSystem)
}
// ==============================================
type testSystem struct {
rand *rand.Rand
now time.Duration
queue *calendarQueue
adapters map[uint64]*testSystemAdapter
filterFn func(testElem) (testElem, bool)
}
type testElem struct {
at time.Duration
ev testEvent
}
func (t testElem) String() string {
return fmt.Sprintf("Event<%s: %s>", t.at, t.ev)
}
func | (n uint64) *testSystem {
return &testSystem{
rand: rand.New(rand.NewSource(0)),
adapters: make(map[uint64]*testSystemAdapter),
queue: newCalendarQueue(time.Millisecond/time.Duration(n*n), int(n*n)),
}
}
func (t *testSystem) NewAdapter(id uint64) *testSystemAdapter {
key, err := ecdsa.GenerateKey(elliptic.P256(), crand.Reader)
if err != nil {
panic(err)
}
a := &testSystemAdapter{
id: id,
sys: t,
arrivals: make(map[uint64]time.Duration),
persistence: make(map[string][]byte),
key: key,
}
t.adapters[id] = a
return a
}
func (t *testSystem) enqueue(d time.Duration, ev testEvent) {
e := testElem{at: t.now + d, ev: ev}
if t.filterFn != nil {
var keep bool
e, keep = t.filterFn(e)
if !keep {
return
}
}
testLog.Debugf("enqueuing %s\n", e)
t.queue.Add(e)
}
func (t *testSystem) Run() {
for {
e, ok := t.queue.Pop()
if !ok {
break
}
t.now = e.at
testLog.Debugf("executing %s\n", e)
e.ev.Exec(t)
}
testLog.Debugf("max len: %d", t.queue.maxLen)
t.queue.maxLen = 0
}
| newTestSystem |
compare.go | // Copyright 2016 OpenConfigd Project.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"os"
"os/exec"
)
func Compare() string {
configActive.WriteTo("/tmp/config.1")
configCandidate.WriteTo("/tmp/config.2")
var config string
out, err := exec.Command("diff", "-U", "-1", "/tmp/config.1", "/tmp/config.2").Output()
if err != nil {
lines := bytes.Split(out, []byte{'\n'})
if len(lines) > 3 {
lines = lines[3:]
}
config = ""
for _, s := range lines {
config = config + string(s) + "\n"
}
} else {
config = configCandidate.String()
}
os.Remove("/tmp/config.1")
os.Remove("/tmp/config.2")
return config
}
func JsonMarshal() string {
return configCandidate.JsonMarshal()
}
func CompareCommand() string {
configActive.WriteCommandTo("/tmp/config.1")
configCandidate.WriteCommandTo("/tmp/config.2")
var config string
out, err := exec.Command("diff", "-U", "-1", "/tmp/config.1", "/tmp/config.2").Output()
if err != nil {
lines := bytes.Split(out, []byte{'\n'})
if len(lines) > 3 {
lines = lines[3:]
}
config = ""
for _, s := range lines {
config = config + string(s) + "\n"
}
}
os.Remove("/tmp/config.1")
os.Remove("/tmp/config.2")
return config
}
func | () string {
return configActive.CommandString()
}
| Commands |
constantify.d.ts | export default function constantify(word: string): string; |
||
admin.py | from django.contrib import admin
from .models import Post
from .models import PostComment
# Register your models here. | admin.site.register(Post)
admin.site.register(PostComment) | |
ssh_test.go | /*
Copyright 2018 The Doctl Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"strconv"
"testing"
"github.com/digitalocean/doctl"
"github.com/digitalocean/doctl/pkg/runner"
"github.com/digitalocean/doctl/pkg/ssh"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
)
func TestSSHCommand(t *testing.T) {
parent := &Command{
Command: &cobra.Command{
Use: "compute",
Short: "compute commands",
Long: "compute commands are for controlling and managing infrastructure",
},
}
cmd := SSH(parent)
assert.NotNil(t, cmd)
assertCommandNames(t, cmd)
}
func TestSSH_ID(t *testing.T) {
withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.droplets.EXPECT().Get(testDroplet.ID).Return(&testDroplet, nil)
config.Args = append(config.Args, strconv.Itoa(testDroplet.ID))
err := RunSSH(config)
assert.NoError(t, err)
})
}
func TestSSH_InvalidID(t *testing.T) {
withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
err := RunSSH(config)
assert.Error(t, err)
})
}
func TestSSH_UnknownDroplet(t *testing.T) {
withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.droplets.EXPECT().List().Return(testDropletList, nil)
config.Args = append(config.Args, "missing")
err := RunSSH(config)
assert.EqualError(t, err, "Could not find Droplet")
})
}
func TestSSH_DropletWithNoPublic(t *testing.T) {
withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.droplets.EXPECT().List().Return(testPrivateDropletList, nil)
config.Args = append(config.Args, testPrivateDroplet.Name) | assert.EqualError(t, err, "Could not find Droplet address")
})
}
func TestSSH_CustomPort(t *testing.T) {
withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.sshRunner.EXPECT().Run().Return(nil)
tc := config.Doit.(*doctl.TestConfig)
tc.SSHFn = func(user, host, keyPath string, port int, opts ssh.Options) runner.Runner {
assert.Equal(t, 2222, port)
return tm.sshRunner
}
tm.droplets.EXPECT().List().Return(testDropletList, nil)
config.Doit.Set(config.NS, doctl.ArgsSSHPort, "2222")
config.Args = append(config.Args, testDroplet.Name)
err := RunSSH(config)
assert.NoError(t, err)
})
}
func TestSSH_CustomUser(t *testing.T) {
withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.sshRunner.EXPECT().Run().Return(nil)
tc := config.Doit.(*doctl.TestConfig)
tc.SSHFn = func(user, host, keyPath string, port int, opts ssh.Options) runner.Runner {
assert.Equal(t, "foobar", user)
return tm.sshRunner
}
tm.droplets.EXPECT().List().Return(testDropletList, nil)
config.Doit.Set(config.NS, doctl.ArgSSHUser, "foobar")
config.Args = append(config.Args, testDroplet.Name)
err := RunSSH(config)
assert.NoError(t, err)
})
}
func TestSSH_AgentForwarding(t *testing.T) {
withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.sshRunner.EXPECT().Run().Return(nil)
tc := config.Doit.(*doctl.TestConfig)
tc.SSHFn = func(user, host, keyPath string, port int, opts ssh.Options) runner.Runner {
assert.Equal(t, true, opts[doctl.ArgsSSHAgentForwarding])
return tm.sshRunner
}
tm.droplets.EXPECT().List().Return(testDropletList, nil)
config.Doit.Set(config.NS, doctl.ArgsSSHAgentForwarding, true)
config.Args = append(config.Args, testDroplet.Name)
err := RunSSH(config)
assert.NoError(t, err)
})
}
func TestSSH_CommandExecuting(t *testing.T) {
withTestClient(t, func(config *CmdConfig, tm *tcMocks) {
tm.sshRunner.EXPECT().Run().Return(nil)
tc := config.Doit.(*doctl.TestConfig)
tc.SSHFn = func(user, host, keyPath string, port int, opts ssh.Options) runner.Runner {
assert.Equal(t, "uptime", opts[doctl.ArgSSHCommand])
return tm.sshRunner
}
tm.droplets.EXPECT().List().Return(testDropletList, nil)
config.Doit.Set(config.NS, doctl.ArgSSHCommand, "uptime")
config.Args = append(config.Args, testDroplet.Name)
err := RunSSH(config)
assert.NoError(t, err)
})
}
func Test_extractHostInfo(t *testing.T) {
cases := []struct {
s string
e sshHostInfo
}{
{s: "host", e: sshHostInfo{host: "host"}},
{s: "root@host", e: sshHostInfo{user: "root", host: "host"}},
{s: "root@host:22", e: sshHostInfo{user: "root", host: "host", port: "22"}},
{s: "host:22", e: sshHostInfo{host: "host", port: "22"}},
{s: "dokku@simple-task-02efb9c544", e: sshHostInfo{host: "simple-task-02efb9c544", user: "dokku"}},
}
for _, c := range cases {
i := extractHostInfo(c.s)
assert.Equal(t, c.e, i)
}
} |
err := RunSSH(config) |
release_notes.go | /*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"os"
"os/exec"
"regexp"
"sort"
"strings"
"sync"
"text/template"
)
type (
label struct {
Name string `json:"name"`
}
author struct {
Login string `json:"login"`
}
prInfo struct {
Labels []label `json:"labels"`
Number int `json:"number"`
Title string `json:"title"`
Author author `json:"author"`
}
prsByComponent = map[string][]prInfo
prsByType = map[string]prsByComponent
sortedPRComponent struct {
Name string
PrInfos []prInfo
}
sortedPRType struct {
Name string
Components []sortedPRComponent
}
)
const (
markdownTemplate = `
{{- range $type := . }}
## {{ $type.Name }}
{{- range $component := $type.Components }}
### {{ $component.Name }}
{{- range $prInfo := $component.PrInfos }}
* {{ $prInfo.Title }} #{{ $prInfo.Number }}
{{- end }}
{{- end }}
{{- end }}
`
prefixType = "Type: "
prefixComponent = "Component: "
numberOfThreads = 10
lengthOfSingleSHA = 40
)
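// For reference, an illustrative rendering of markdownTemplate above; the type,
// component, titles and PR numbers are made up, not taken from any real release:
//
//	## Bug fixes
//	### Query Serving
//	* Fix planner panic on empty IN clause #1234
//
//	## Other
//	### Other
//	* Update CI image #5678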
func loadMergedPRs(from, to string) (prs []string, authors []string, commitCount int, err error) {
// load the git log with "author \t title \t parents \t abbreviated hash"
out, err := execCmd("git", "log", `--pretty=format:%ae%x09%s%x09%P%x09%h`, fmt.Sprintf("%s..%s", from, to))
if err != nil {
return
}
return parseGitLog(string(out))
}
func parseGitLog(s string) (prs []string, authorCommits []string, commitCount int, err error) {
rx := regexp.MustCompile(`(.+)\t(.+)\t(.+)\t(.+)`)
mergePR := regexp.MustCompile(`Merge pull request #(\d+)`)
squashPR := regexp.MustCompile(`\(#(\d+)\)`)
authMap := map[string]string{} // here we will store email <-> gh user mappings
lines := strings.Split(s, "\n")
for _, line := range lines {
lineInfo := rx.FindStringSubmatch(line)
if len(lineInfo) != 5 {
log.Fatalf("failed to parse the output from git log: %s", line)
}
authorEmail := lineInfo[1]
title := lineInfo[2]
parents := lineInfo[3]
sha := lineInfo[4] | if len(merged) == 2 {
// this is a merged PR. remember the PR #
prs = append(prs, merged[1])
continue
}
if len(parents) <= lengthOfSingleSHA {
// we have a single parent, and the commit counts
commitCount++
if _, exists := authMap[authorEmail]; !exists {
authMap[authorEmail] = sha
}
}
squashed := squashPR.FindStringSubmatch(title)
if len(squashed) == 2 {
// this is a squash-merged PR. remember the PR #
prs = append(prs, squashed[1])
continue
}
}
for _, author := range authMap {
authorCommits = append(authorCommits, author)
}
sort.Strings(prs)
sort.Strings(authorCommits) // not really needed, but makes testing easier
return
}
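// A rough sketch of the input parseGitLog expects; the addresses, titles and
// hashes below are made up. Each line follows the --pretty format used in
// loadMergedPRs: author email, subject, parent hashes and abbreviated hash,
// separated by tabs (\t):
//
//	jane@example.com \t Merge pull request #1234 from vitessio/fix-planner \t abc123 def456 \t 0a1b2c3
//	joe@example.com \t Fix typo in docs (#1235) \t abc123 \t 1b2c3d4
//
// The first line is a merge commit, so "1234" is collected into prs; the second
// has a single parent, so it increments commitCount and records its author, and
// its "(#1235)" suffix is collected as a squash-merged PR.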
func execCmd(name string, arg ...string) ([]byte, error) {
out, err := exec.Command(name, arg...).Output()
if err != nil {
execErr, ok := err.(*exec.ExitError)
if ok {
return nil, fmt.Errorf("%s:\nstderr: %s\nstdout: %s", err.Error(), execErr.Stderr, out)
}
if strings.Contains(err.Error(), " executable file not found in") {
return nil, fmt.Errorf("the command `gh` seems to be missing. Please install it from https://github.com/cli/cli")
}
return nil, err
}
return out, nil
}
func loadPRInfo(pr string) (prInfo, error) {
out, err := execCmd("gh", "pr", "view", pr, "--json", "title,number,labels,author")
if err != nil {
return prInfo{}, err
}
var prInfo prInfo
err = json.Unmarshal(out, &prInfo)
return prInfo, err
}
func loadAuthorInfo(sha string) (string, error) {
out, err := execCmd("gh", "api", "/repos/vitessio/vitess/commits/"+sha)
if err != nil {
return "", err
}
var prInfo prInfo
err = json.Unmarshal(out, &prInfo)
if err != nil {
return "", err
}
return prInfo.Author.Login, nil
}
type req struct {
isPR bool
key string
}
func loadAllPRs(prs, authorCommits []string) ([]prInfo, []string, error) {
errChan := make(chan error)
wgDone := make(chan bool)
prChan := make(chan req, len(prs)+len(authorCommits))
// fill the work queue
for _, s := range prs {
prChan <- req{isPR: true, key: s}
}
for _, s := range authorCommits {
prChan <- req{isPR: false, key: s}
}
close(prChan)
var prInfos []prInfo
var authors []string
fmt.Printf("Found %d merged PRs. Loading PR info", len(prs))
wg := sync.WaitGroup{}
mu := sync.Mutex{}
shouldLoad := func(in string) bool {
if in == "" {
return false
}
mu.Lock()
defer mu.Unlock()
for _, existing := range authors {
if existing == in {
return false
}
}
return true
}
addAuthor := func(in string) {
mu.Lock()
defer mu.Unlock()
authors = append(authors, in)
}
addPR := func(in prInfo) {
mu.Lock()
defer mu.Unlock()
prInfos = append(prInfos, in)
}
for i := 0; i < numberOfThreads; i++ {
wg.Add(1)
go func() {
// load meta data about PRs
defer wg.Done()
for b := range prChan {
fmt.Print(".")
if b.isPR {
prInfo, err := loadPRInfo(b.key)
if err != nil {
errChan <- err
break
}
addPR(prInfo)
continue
}
author, err := loadAuthorInfo(b.key)
if err != nil {
errChan <- err
break
}
if shouldLoad(author) {
addAuthor(author)
}
}
}()
}
go func() {
// wait for the loading to finish
wg.Wait()
close(wgDone)
}()
var err error
select {
case <-wgDone:
break
case err = <-errChan:
break
}
fmt.Println()
sort.Strings(authors)
return prInfos, authors, err
}
func groupPRs(prInfos []prInfo) prsByType {
prPerType := prsByType{}
for _, info := range prInfos {
var typ, component string
for _, lbl := range info.Labels {
switch {
case strings.HasPrefix(lbl.Name, prefixType):
typ = strings.TrimPrefix(lbl.Name, prefixType)
case strings.HasPrefix(lbl.Name, prefixComponent):
component = strings.TrimPrefix(lbl.Name, prefixComponent)
}
}
switch typ {
case "":
typ = "Other"
case "Bug":
typ = "Bug fixes"
}
if component == "" {
component = "Other"
}
components, exists := prPerType[typ]
if !exists {
components = prsByComponent{}
prPerType[typ] = components
}
prsPerComponentAndType := components[component]
components[component] = append(prsPerComponentAndType, info)
}
return prPerType
}
func createSortedPrTypeSlice(prPerType prsByType) []sortedPRType {
var data []sortedPRType
for typeKey, typeElem := range prPerType {
newPrType := sortedPRType{
Name: typeKey,
}
for componentKey, prInfos := range typeElem {
newComponent := sortedPRComponent{
Name: componentKey,
PrInfos: prInfos,
}
sort.Slice(newComponent.PrInfos, func(i, j int) bool {
return newComponent.PrInfos[i].Number < newComponent.PrInfos[j].Number
})
newPrType.Components = append(newPrType.Components, newComponent)
}
sort.Slice(newPrType.Components, func(i, j int) bool {
return newPrType.Components[i].Name < newPrType.Components[j].Name
})
data = append(data, newPrType)
}
sort.Slice(data, func(i, j int) bool {
return data[i].Name < data[j].Name
})
return data
}
func getOutput(fileout string) (*os.File, error) {
if fileout == "" {
return os.Stdout, nil
}
return os.OpenFile(fileout, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
}
func writePrInfos(writeTo *os.File, prPerType prsByType) (err error) {
data := createSortedPrTypeSlice(prPerType)
t := template.Must(template.New("markdownTemplate").Parse(markdownTemplate))
err = t.ExecuteTemplate(writeTo, "markdownTemplate", data)
if err != nil {
return err
}
return nil
}
func main() {
from := flag.String("from", "", "from sha/tag/branch")
to := flag.String("to", "HEAD", "to sha/tag/branch")
fileout := flag.String("file", "", "file on which to write release notes, stdout if empty")
flag.Parse()
prs, authorCommits, commits, err := loadMergedPRs(*from, *to)
if err != nil {
log.Fatal(err)
}
prInfos, authors, err := loadAllPRs(prs, authorCommits)
if err != nil {
log.Fatal(err)
}
prPerType := groupPRs(prInfos)
out, err := getOutput(*fileout)
if err != nil {
log.Fatal(err)
}
defer func() {
_ = out.Close()
}()
err = writePrInfos(out, prPerType)
if err != nil {
log.Fatal(err)
}
_, err = out.WriteString(fmt.Sprintf("\n\nThe release includes %d commits (excluding merges)\n", commits))
if err != nil {
log.Fatal(err)
}
_, err = out.WriteString(fmt.Sprintf("Thanks to all our contributors: @%s\n", strings.Join(authors, ", @")))
if err != nil {
log.Fatal(err)
}
} | merged := mergePR.FindStringSubmatch(title) |
trace.rs | use chrono::{DateTime, SecondsFormat, Utc};
use crate::context::TelemetryContext;
use crate::contracts::{SeverityLevel as ContractsSeverityLevel, *};
use crate::telemetry::{ContextTags, Measurements, Properties, Telemetry};
use crate::time;
/// Represents printf-like trace statements that can be text searched. A trace telemetry item has
/// a message and an associated [`SeverityLevel`](enum.SeverityLevel.html).
///
/// # Examples
/// ```rust, no_run
/// # use appinsights::TelemetryClient;
/// # let client = TelemetryClient::new("<instrumentation key>".to_string());
/// use appinsights::telemetry::{TraceTelemetry, SeverityLevel, Telemetry};
///
/// // create a telemetry item
/// let mut telemetry = TraceTelemetry::new("Starting data processing".to_string(), SeverityLevel::Information);
///
/// // attach custom properties, measurements and context tags
/// telemetry.properties_mut().insert("component".to_string(), "data_processor".to_string());
/// telemetry.tags_mut().insert("os_version".to_string(), "linux x86_64".to_string());
/// telemetry.measurements_mut().insert("records_count".to_string(), 115.0);
///
/// // submit telemetry item to server
/// client.track(telemetry);
/// ```
pub struct TraceTelemetry {
/// A trace message.
message: String,
/// Severity level.
severity: SeverityLevel,
/// The time stamp when this telemetry was measured.
timestamp: DateTime<Utc>,
/// Custom properties.
properties: Properties,
/// Telemetry context containing extra, optional tags.
tags: ContextTags,
/// Custom measurements.
measurements: Measurements,
}
impl TraceTelemetry {
/// Creates a trace telemetry item with the specified message and severity level.
pub fn new(message: String, severity: SeverityLevel) -> Self {
Self {
message,
severity,
timestamp: time::now(),
properties: Properties::default(),
tags: ContextTags::default(),
measurements: Measurements::default(),
}
}
/// Returns custom measurements to submit with the telemetry item.
pub fn measurements(&self) -> &Measurements {
&self.measurements
}
/// Returns mutable reference to custom measurements.
pub fn measurements_mut(&mut self) -> &mut Measurements {
&mut self.measurements
}
}
impl Telemetry for TraceTelemetry {
/// Returns the time when this telemetry was measured.
fn timestamp(&self) -> DateTime<Utc> {
self.timestamp
}
/// Returns custom properties to submit with the telemetry item.
fn properties(&self) -> &Properties {
&self.properties
} | /// Returns mutable reference to custom properties.
fn properties_mut(&mut self) -> &mut Properties {
&mut self.properties
}
/// Returns context data containing extra, optional tags. Overrides values found on client telemetry context.
fn tags(&self) -> &ContextTags {
&self.tags
}
/// Returns mutable reference to custom tags.
fn tags_mut(&mut self) -> &mut ContextTags {
&mut self.tags
}
}
impl From<(TelemetryContext, TraceTelemetry)> for Envelope {
fn from((context, telemetry): (TelemetryContext, TraceTelemetry)) -> Self {
Self {
name: "Microsoft.ApplicationInsights.Message".into(),
time: telemetry.timestamp.to_rfc3339_opts(SecondsFormat::Millis, true),
i_key: Some(context.i_key),
tags: Some(ContextTags::combine(context.tags, telemetry.tags).into()),
data: Some(Base::Data(Data::MessageData(MessageData {
message: telemetry.message,
severity_level: Some(telemetry.severity.into()),
properties: Some(Properties::combine(context.properties, telemetry.properties).into()),
measurements: Some(telemetry.measurements.into()),
..MessageData::default()
}))),
..Envelope::default()
}
}
}
/// Defines the level of severity for the event.
pub enum SeverityLevel {
/// Verbose severity level.
Verbose,
/// Information severity level.
Information,
/// Warning severity level.
Warning,
/// Error severity level.
Error,
/// Critical severity level.
Critical,
}
impl From<SeverityLevel> for ContractsSeverityLevel {
fn from(severity: SeverityLevel) -> Self {
match severity {
SeverityLevel::Verbose => ContractsSeverityLevel::Verbose,
SeverityLevel::Information => ContractsSeverityLevel::Information,
SeverityLevel::Warning => ContractsSeverityLevel::Warning,
SeverityLevel::Error => ContractsSeverityLevel::Error,
SeverityLevel::Critical => ContractsSeverityLevel::Critical,
}
}
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use chrono::TimeZone;
use super::*;
#[test]
fn it_overrides_properties_from_context() {
time::set(Utc.ymd(2019, 1, 2).and_hms_milli(3, 4, 5, 800));
let mut context =
TelemetryContext::new("instrumentation".into(), ContextTags::default(), Properties::default());
context.properties_mut().insert("test".into(), "ok".into());
context.properties_mut().insert("no-write".into(), "fail".into());
let mut telemetry = TraceTelemetry::new("message".into(), SeverityLevel::Information);
telemetry.properties_mut().insert("no-write".into(), "ok".into());
telemetry.measurements_mut().insert("value".into(), 5.0);
let envelop = Envelope::from((context, telemetry));
let expected = Envelope {
name: "Microsoft.ApplicationInsights.Message".into(),
time: "2019-01-02T03:04:05.800Z".into(),
i_key: Some("instrumentation".into()),
tags: Some(BTreeMap::default()),
data: Some(Base::Data(Data::MessageData(MessageData {
message: "message".into(),
severity_level: Some(crate::contracts::SeverityLevel::Information),
properties: Some({
let mut properties = BTreeMap::default();
properties.insert("test".into(), "ok".into());
properties.insert("no-write".into(), "ok".into());
properties
}),
measurements: Some({
let mut measurements = BTreeMap::default();
measurements.insert("value".into(), 5.0);
measurements
}),
..MessageData::default()
}))),
..Envelope::default()
};
assert_eq!(envelop, expected)
}
#[test]
fn it_overrides_tags_from_context() {
time::set(Utc.ymd(2019, 1, 2).and_hms_milli(3, 4, 5, 700));
let mut context =
TelemetryContext::new("instrumentation".into(), ContextTags::default(), Properties::default());
context.tags_mut().insert("test".into(), "ok".into());
context.tags_mut().insert("no-write".into(), "fail".into());
let mut telemetry = TraceTelemetry::new("message".into(), SeverityLevel::Information);
telemetry.tags_mut().insert("no-write".into(), "ok".into());
let envelop = Envelope::from((context, telemetry));
let expected = Envelope {
name: "Microsoft.ApplicationInsights.Message".into(),
time: "2019-01-02T03:04:05.700Z".into(),
i_key: Some("instrumentation".into()),
tags: Some({
let mut tags = BTreeMap::default();
tags.insert("test".into(), "ok".into());
tags.insert("no-write".into(), "ok".into());
tags
}),
data: Some(Base::Data(Data::MessageData(MessageData {
message: "message".into(),
severity_level: Some(crate::contracts::SeverityLevel::Information),
properties: Some(BTreeMap::default()),
measurements: Some(BTreeMap::default()),
..MessageData::default()
}))),
..Envelope::default()
};
assert_eq!(envelop, expected)
}
} | |
router.rs | use crate::api::*;
use crate::common::AppState;
use crate::middleware::auth::Auth;
use actix_web::{
http::{header, Method},
middleware::{self, cors::Cors},
App,
};
pub fn app_hato(app_state: AppState) -> App<AppState> |
pub fn app_common() -> App {
App::new()
.middleware(middleware::Logger::default())
.resource("/", |r| r.f(index))
.resource("/ping", |r| r.f(ping))
.resource("/webhook", |r| {
r.method(Method::POST).with(webhook::webhook)
})
}
| {
App::with_state(app_state)
.middleware(middleware::Logger::default())
.prefix("/api")
.configure(|app| {
Cors::for_app(app)
.allowed_methods(vec!["GET", "POST", "PUT", "DELETE"])
.allowed_headers(vec![
header::ORIGIN,
header::AUTHORIZATION,
header::ACCEPT,
header::CONTENT_TYPE,
])
.supports_credentials()
.max_age(3600)
.resource("/register", |r| r.method(Method::POST).with(user::register))
.resource("/login", |r| r.method(Method::POST).with(user::login))
.resource("/me", |r| {
r.middleware(Auth);
r.method(Method::GET).f(user::me)
})
.resource("/repo", |r| r.method(Method::POST).with(repo::create_repo))
.register()
})
} |
stake.rs | use serde::{Serialize, Deserialize};
use diesel::{Queryable, Insertable};
use crate::schema::stakes;
use chrono::{NaiveDateTime,Duration,Utc};
#[derive(Debug, Clone, Queryable, Serialize, Deserialize)]
pub struct Stake {
pub id: i32,
pub stake_from: String,
pub stake_signature: String,
pub public_key: String,
pub amount: String,
pub withdraw_day: Option<NaiveDateTime>,
pub isactive: bool,
pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime
}
#[derive(Debug, Clone, Insertable, Serialize, Deserialize)]
#[table_name="stakes"]
pub struct NewStake {
pub stake_from: String,
pub stake_signature: String,
pub public_key: String,
pub amount: String,
pub withdraw_day: Option<NaiveDateTime>,
pub isactive:bool
}
impl NewStake {
pub fn new(stake_from: String, stake_signature: String, public_key: String, amount: String, deadline: i64) -> NewStake {
let days;
if deadline < 15 {
days = Utc::now() + Duration::days(137);
} else {
days = Utc::now() + Duration::days(15);
}
NewStake {
stake_from,
stake_signature,
public_key,
amount,
withdraw_day: Some(days.naive_utc()),
isactive:true | #[derive(Debug, Serialize, Deserialize, )]
pub struct StakeInfo {
pub stake_from: String,
pub stake_signature: String,
pub public_key: String,
pub amount: String,
pub deadline: i64
}
#[derive(Debug, Serialize, Deserialize, )]
pub struct WithdrawStake {
pub stake_from: String,
pub stake_signature: String,
pub public_key: String,
pub deadline: i64
} | }
}
}
|
c.py | import sys
input = sys.stdin.readline
def | ():
N, K = map(int, input().split())
A = tuple(map(int, input().split()))
ans = [0] * (N - K)
for i in range(N - K):
if A[i] < A[K + i]:
ans[i] = "Yes"
else:
ans[i] = "No"
print("\n".join(ans))
if __name__ == "__main__":
main()
| main |
expression.rs | use super::{
align_of::AlignOf, arithmetic_operation::ArithmeticOperation, bit_cast::BitCast,
bitwise_not_operation::BitwiseNotOperation, bitwise_operation::BitwiseOperation,
comparison_operation::ComparisonOperation, pointer_address::PointerAddress,
primitive::Primitive, record::Record, record_address::RecordAddress, size_of::SizeOf,
undefined::Undefined, union::Union, union_address::UnionAddress, variable::Variable,
};
#[derive(Clone, Debug, PartialEq)]
pub enum Expression {
AlignOf(AlignOf),
ArithmeticOperation(ArithmeticOperation),
BitCast(BitCast),
BitwiseNotOperation(BitwiseNotOperation),
BitwiseOperation(BitwiseOperation),
ComparisonOperation(ComparisonOperation),
PointerAddress(PointerAddress),
Primitive(Primitive),
Record(Record),
RecordAddress(RecordAddress),
SizeOf(SizeOf),
Undefined(Undefined),
Union(Union),
UnionAddress(UnionAddress),
Variable(Variable),
}
impl From<AlignOf> for Expression {
fn from(align_of: AlignOf) -> Self {
Self::AlignOf(align_of)
}
}
impl From<ArithmeticOperation> for Expression {
fn from(operation: ArithmeticOperation) -> Self {
Self::ArithmeticOperation(operation)
}
}
impl From<BitCast> for Expression {
fn from(bit_cast: BitCast) -> Self |
}
impl From<BitwiseNotOperation> for Expression {
fn from(operation: BitwiseNotOperation) -> Self {
Self::BitwiseNotOperation(operation)
}
}
impl From<BitwiseOperation> for Expression {
fn from(operation: BitwiseOperation) -> Self {
Self::BitwiseOperation(operation)
}
}
impl From<ComparisonOperation> for Expression {
fn from(operation: ComparisonOperation) -> Self {
Self::ComparisonOperation(operation)
}
}
impl From<PointerAddress> for Expression {
fn from(address: PointerAddress) -> Self {
Self::PointerAddress(address)
}
}
impl From<Primitive> for Expression {
fn from(primitive: Primitive) -> Self {
Self::Primitive(primitive)
}
}
impl From<Record> for Expression {
fn from(record: Record) -> Self {
Self::Record(record)
}
}
impl From<RecordAddress> for Expression {
fn from(address: RecordAddress) -> Self {
Self::RecordAddress(address)
}
}
impl From<SizeOf> for Expression {
fn from(size_of: SizeOf) -> Self {
Self::SizeOf(size_of)
}
}
impl From<Undefined> for Expression {
fn from(undefined: Undefined) -> Self {
Self::Undefined(undefined)
}
}
impl From<Union> for Expression {
fn from(union: Union) -> Self {
Self::Union(union)
}
}
impl From<UnionAddress> for Expression {
fn from(address: UnionAddress) -> Self {
Self::UnionAddress(address)
}
}
impl From<Variable> for Expression {
fn from(variable: Variable) -> Self {
Self::Variable(variable)
}
}
| {
Self::BitCast(bit_cast)
} |
content_type.go | package x
import (
"mime"
"net/http"
"strings"
)
// Determine whether the request `content-type` includes a
// server-acceptable mime-type
//
// Failure should yield an HTTP 415 (`http.StatusUnsupportedMediaType`)
func HasContentType(r *http.Request, mimetype string) bool {
contentType := r.Header.Get("Content-type")
if contentType == "" {
return mimetype == "application/octet-stream"
}
for _, v := range strings.Split(contentType, ",") {
t, _, err := mime.ParseMediaType(strings.TrimSpace(v))
if err != nil {
break
}
if t == mimetype |
}
return false
}
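// A minimal usage sketch, not part of the original package: requireJSON and the
// "application/json" mimetype are illustrative assumptions showing how a handler
// could gate requests on HasContentType and answer with HTTP 415 otherwise.
func requireJSON(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if !HasContentType(r, "application/json") {
			http.Error(w, "unsupported media type", http.StatusUnsupportedMediaType)
			return
		}
		next(w, r)
	}
}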
| {
return true
} |
test_retry_subprocess.py | # -*- coding: utf-8 -*-
from bamboo_engine.builder import * # noqa
from bamboo_engine.engine import Engine
from pipeline.eri.runtime import BambooDjangoRuntime
from ..utils import * # noqa
def | ():
subproc_start = EmptyStartEvent()
subproc_act = ServiceActivity(component_code="debug_node")
subproc_end = EmptyEndEvent()
subproc_start.extend(subproc_act).extend(subproc_end)
params = Params({"${raise_var}": Var(type=Var.LAZY, custom_type="raise_variable", value="")})
start = EmptyStartEvent()
subproc = SubProcess(start=subproc_start, params=params)
end = EmptyEndEvent()
start.extend(subproc).extend(end)
pipeline = build_tree(start)
engine = Engine(BambooDjangoRuntime())
engine.run_pipeline(pipeline=pipeline, root_pipeline_data={})
sleep(1)
old_state = runtime.get_state(subproc.id)
assert old_state.name == states.FAILED
engine.retry_subprocess(subproc.id)
sleep(1)
state = runtime.get_state(subproc.id)
assert state.name == states.FAILED
assert state.version != old_state.version
histories = runtime.get_histories(subproc.id)
assert len(histories) == 1
assert histories[0].node_id == subproc.id
assert histories[0].loop == 1
assert histories[0].retry == 0
assert histories[0].skip is False
assert histories[0].started_time is not None
assert histories[0].archived_time is not None
assert histories[0].inputs == {}
assert len(histories[0].outputs) == 1
assert "ex_data" in histories[0].outputs
assert histories[0].version == old_state.version
| test_retry_subprocess |
startCirq2117.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=37
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
|
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2117.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=34
c.append(cirq.Z.on(input_qubit[3])) # number=35
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=36
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=31
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=32
c.append(cirq.H.on(input_qubit[3])) # number=33
c.append(cirq.X.on(input_qubit[3])) # number=27
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=28
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=10
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[1])) # number=30
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=15
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=20
c.append(cirq.X.on(input_qubit[2])) # number=21
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=22
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=17
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=23
c.append(cirq.X.on(input_qubit[2])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=25
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=19
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c |
auth.dev.js | "use strict";
var loginForm = document.querySelector('#login-form');
var email, password, type; //Login function
loginForm.addEventListener('submit', function (e) {
e.preventDefault(); // get user info | password = loginForm['login-password'].value;
type = loginForm['UserType'].value; // log the user in
auth.signInWithEmailAndPassword(email, password).then(function (cred) {
console.log("User logged in successfully using email and password");
db.collection('User').where(firebase.firestore.FieldPath.documentId(), '==', email).get().then(function (snapshot) {
setupGuides(snapshot.docs, type);
});
})["catch"](function (e) {
var failMessage = document.querySelector("#failMessage");
failMessage.innerHTML = "<p style=\"color:red\">*Invalid Login Credentials</p>";
loginForm.reset();
});
});
var setupGuides = function setupGuides(data, inputType) {
data.forEach(function (doc) {
var userType = doc.data().Type;
if (userType === 1 && inputType === 'Student') {
console.log("User Logged In Successfully");
window.location.href = "Routine.html";
} else if (userType === 2 && inputType === 'CR') {
console.log("User Logged In Successfully");
window.location.href = "Routine.html";
} else if (userType === 3 && inputType === 'Faculty') {
console.log("User Logged In Successfully");
window.location.href = "UserProfile.html";
} else if (userType === 4 && inputType === 'Admin') {
console.log("User Logged In Successfully");
window.location.href = "UserProfile.html";
} else {
var failMessage = document.querySelector("#failMessage");
failMessage.innerHTML = "<p style=\"color:red\"> *User is not a ".concat(inputType, "</p>");
auth.signOut();
}
});
}; |
email = loginForm['login-email'].value; |
database.py | '''
Created on 08-10-2012
@author: Jacek Przemieniecki
'''
from . import errors
class Database(object):
def get_atom_valency(self, symbol):
return valency[symbol]
def get_q_r(self, symbol):
grp_id = str_to_id[symbol][0]
return q_r_data[grp_id]
def get_parameter(self, symbol1, symbol2):
if symbol1 == symbol2:
return 0.0
grp1 = str_to_id[symbol1][1] - 1 # Adjust for list indexing starting at 0
grp2 = str_to_id[symbol2][1] - 1
param = params[grp1][grp2]
if param is None:
raise errors.ValueNotFound()
else:
return param
def | (self):
for key in str_to_id:
yield key
def __init__(self):
pass
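# Usage sketch (illustrative only; the returned values follow from the tables
# defined below):
#
#   db = Database()
#   db.get_atom_valency("C")        # -> 4
#   db.get_q_r("CH3")               # -> (0.848, 0.9011), the (Q, R) group parameters
#   db.get_parameter("CH3", "OH")   # -> 986.5, the CH2/OH main-group interaction
#
# get_parameter raises errors.ValueNotFound() when the interaction parameter for
# the two main groups is missing (a None entry in the params matrix).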
valency = {"C" : 4,
"N" : 3,
"O" : 2,
"S" : 2,
"Si" : 4,
"Cl" : 1,
"Br" : 1,
"I" : 1,
"F" : 1}
### Data from http://www.aim.env.uea.ac.uk/aim/info/UNIFACgroups.html
params = [[0.0, 86.02, 61.13, 76.5, 986.5, 697.2, 1318.0, 1333.0, 476.4, 677.0, 232.1, 507.0, 251.5, 391.5, 255.7, 206.6, 920.7, 287.77, 597.0, 663.5, 35.93, 53.76, 24.9, 104.3, 11.44, 661.5, 543.0, 153.6, 184.4, 354.55, 3025.0, 335.8, 479.5, 298.9, 526.5, 689.0, -4.189, 125.8, 485.3, -2.859, 387.1, -450.4, 252.7, 220.3, -5.869, 390.9, 553.3, 187.0, 216.1, 92.99, None, 808.59, 408.3, 718.01, None, 153.72, ], #1
[-35.36, 0.0, 38.81, 74.15, 524.1, 787.6, 270.6, 526.1, 182.6, 448.8, 37.85, 333.5, 214.5, 240.9, 163.9, 61.11, 749.3, 280.5, 336.9, 318.9, -36.87, 58.55, -13.99, -109.7, 100.1, 357.5, None, 76.302, None, 262.9, None, None, 183.8, 31.14, 179.0, -52.87, -66.46, 359.3, -70.45, 449.4, 48.33, None, None, 86.46, None, 200.2, 268.1, -617.0, 62.56, None, None, 200.94, 219.9, -677.25, None, None, ], #2
[-11.12, 3.446, 0.0, 167.0, 636.1, 637.35, 903.8, 1329.0, 25.77, 347.3, 5.994, 287.1, 32.14, 161.7, 122.8, 90.49, 648.2, -4.449, 212.5, 537.4, -18.81, -144.4, -231.9, 3.0, 187.0, 168.0, 194.9, 52.07, -10.43, -64.69, 210.4, 113.3, 261.3, 154.26, 169.9, 383.9, -259.1, 389.3, 245.6, 22.67, 103.5, -432.3, 238.9, 30.04, -88.11, None, 333.3, None, -59.58, -39.16, None, 360.82, 171.49, 272.33, 22.06, 174.35, ], #3
[-69.7, -113.6, -146.8, 0.0, 803.2, 603.25, 5695.0, 884.9, -52.1, 586.6, 5688.0, 197.8, 213.1, 19.02, -49.29, 23.5, 664.2, 52.8, 6096.0, 872.3, -114.1, -111.0, -80.25, -141.3, -211.0, 3629.0, 4448.0, -9.451, 393.6, 48.49, 4975.0, 259.0, 210.0, -152.55, 4284.0, -119.2, -282.5, 101.4, 5629.0, -245.39, 69.26, 683.3, 355.5, 46.38, None, None, 421.9, None, -203.6, 184.9, None, 233.51, -184.68, 9.63, 795.38, -280.9, ], #4
[156.4, 457.0, 89.6, 25.82, 0.0, -137.1, 353.5, -259.7, 84.0, -203.6, 101.1, 267.8, 28.06, 83.02, 42.7, -323.0, -52.39, 170.0, 6.712, 199.0, 75.62, 65.28, -98.12, 143.1, 123.5, 256.5, 157.1, 488.9, 147.5, -120.5, -318.9, 313.5, 202.1, 727.8, -202.1, 74.27, 225.8, 44.78, -143.9, None, 190.3, -817.7, 202.7, -504.2, 72.96, -382.7, -248.3, None, 104.7, 57.65, None, 215.81, 6.39, None, None, 147.97, ], #5
[16.51, -12.52, -50.0, -44.5, 249.1, 0.0, -181.0, -101.7, 23.39, 306.4, -10.72, 179.7, -128.6, 359.3, -20.98, 53.9, 489.7, 580.5, 53.28, -202.0, -38.32, -102.5, -139.4, -44.76, -28.25, 75.14, 457.88, -31.09, 17.5, -61.76, -119.2, 212.1, 106.3, -119.1, -399.3, -5.224, 33.47, -48.25, -172.4, None, 165.7, None, None, None, -52.1, None, None, 37.63, -59.4, -46.01, None, 150.02, 98.2, None, None, None, ], #6
[300.0, 496.1, 362.3, 377.6, -229.1, 289.6, 0.0, 324.5, -195.4, -116.0, 72.87, 233.87, 540.5, 48.89, 168.0, 304.0, 459.0, 459.0, 112.6, -14.09, 325.4, 370.4, 353.7, 497.5, 133.9, 220.6, 399.5, 887.1, None, 188.0, 12.72, None, 777.1, None, -139.0, 160.8, None, None, 319.0, None, -197.5, -363.8, None, -452.2, None, 835.6, 139.6, None, 407.9, None, None, -255.63, -144.77, None, None, 580.28, ], #7
[275.8, 217.5, 25.34, 244.2, -451.6, -265.2, -601.8, 0.0, -356.1, -271.1, -449.4, -32.52, -162.9, -832.97, None, None, -305.5, -305.5, None, 408.9, None, 517.27, None, 1827.0, 6915.0, None, -413.48, 8484.0, None, None, -687.1, None, None, None, None, None, None, None, None, None, -494.2, None, None, -659.0, None, None, None, None, None, 1005.0, None, None, None, None, None, None, ], #8
[26.76, 42.92, 140.1, 365.8, 164.5, 108.7, 472.5, -133.1, 0.0, -37.36, -213.7, -190.4, -103.6, None, -174.2, -169.0, 6201.0, 7.341, 481.7, 669.4, -191.7, -130.3, -354.6, -39.2, -119.8, 137.5, 548.5, 216.1, -46.28, -163.7, 71.46, 53.59, 245.2, -246.6, -44.58, -63.5, -34.57, None, -61.7, None, -18.8, -588.9, None, None, None, None, 37.54, None, None, -162.6, None, None, -288.94, 91.01, None, 179.74, ], #9
[505.7, 56.3, 23.39, 106.0, 529.0, -340.2, 480.8, -155.6, 128.0, 0.0, -110.3, 766.0, 304.1, None, None, None, None, None, -106.4, 497.5, 751.9, 67.52, -483.7, None, None, None, None, None, None, None, None, 117.0, None, 2.21, None, -339.2, 172.4, None, -268.8, None, -275.5, None, None, None, None, None, None, None, None, None, None, None, 79.71, None, None, None, ], #10
[114.8, 132.1, 85.84, -170.0, 245.4, 249.63, 200.8, -36.72, 372.2, 185.1, 0.0, -241.8, -235.7, None, -73.5, -196.7, 475.5, -0.13, 494.6, 660.2, -34.74, 108.9, -209.7, 54.57, 442.4, -81.13, None, 183.0, None, 202.3, -101.7, 148.3, 18.88, 71.48, 52.08, -28.61, -275.2, None, 85.33, None, 560.2, None, None, None, None, None, 151.8, None, None, None, None, None, 36.34, 446.9, None, None, ], #11
[329.3, 110.4, 18.12, 428.0, 139.4, 227.8, 124.63, -234.25, 385.4, -236.5, 1167.0, 0.0, -234.0, None, None, None, None, -233.4, -47.25, -268.1, None, 31.0, -126.2, 179.7, 24.28, None, None, None, 103.9, None, None, None, 298.13, None, None, None, -11.4, None, 308.9, None, -70.24, None, None, None, None, None, None, None, None, None, None, None, -77.96, None, None, None, ], #12
[83.36, 26.51, 52.13, 65.69, 237.7, 238.4, -314.7, -178.5, 191.1, -7.838, 461.3, 457.3, 0.0, -78.36, 251.5, 5422.3, -46.39, 213.2, -18.51, 664.6, 301.1, 137.8, -154.3, 47.67, 134.8, 95.18, 155.11, 140.9, -8.538, 170.1, -20.11, -149.5, -202.3, -156.57, 128.8, None, 240.2, -273.9, 254.8, -172.51, 417.0, 1338.0, None, None, None, None, None, None, None, None, None, None, 567.0, 102.21, None, None, ], #13
[-30.48, 1.163, -44.85, 296.4, -242.8, -481.7, -330.48, -870.8, None, None, None, None, 222.1, 0.0, -107.2, -41.11, -200.7, None, 358.9, None, -82.92, None, None, -99.81, 30.05, None, None, None, -70.14, None, None, None, None, None, 874.19, None, None, None, -164.0, None, None, -664.4, 275.9, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #14
[65.33, -28.7, -22.31, 223.0, -150.0, -370.3, -448.2, None, 394.6, None, 136.0, None, -56.08, 127.4, 0.0, -189.2, 138.54, 431.49, 147.1, None, None, None, None, 71.23, -18.93, None, None, None, None, None, 939.07, None, None, None, None, None, None, 570.9, -255.22, None, -38.77, 448.1, -1327.0, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #15
[-83.98, -25.38, -223.9, 109.9, 28.6, -406.8, -598.8, None, 225.3, None, 2889.0, None, -194.1, 38.89, 865.9, 0.0, 287.43, None, 1255.1, None, -182.91, -73.85, -352.9, -262.0, -181.9, None, None, None, None, None, None, None, None, None, 243.1, None, None, -196.3, 22.05, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #16
[1139.0, 2000.0, 247.5, 762.8, -17.4, -118.1, -341.6, -253.1, -450.3, None, -294.8, None, 285.36, -15.07, 64.3, -24.46, 0.0, 89.7, -281.6, -396.0, 287.0, -111.0, None, 882.0, 617.5, None, -139.3, None, None, None, 0.1004, None, None, None, None, None, None, None, -334.4, None, -89.42, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #17
[-101.6, -47.63, 31.87, 49.8, -132.3, -378.2, -332.9, -341.6, 29.1, None, 8.87, 554.4, -156.1, None, -207.66, None, 117.4, 0.0, -169.7, -153.7, None, -351.6, -114.7, -205.3, -2.17, None, 2845.0, None, None, None, None, None, -60.78, None, None, None, 160.7, -158.8, None, None, None, None, None, None, None, None, None, None, None, -136.6, None, None, None, 98.82, None, None, ], #18
[24.82, -40.62, -22.97, -138.4, 185.4, 162.6, 242.8, None, -287.5, 224.66, -266.6, 99.37, 38.81, -157.3, -108.5, -446.86, 777.4, 134.3, 0.0, 205.27, 4.933, -152.7, -15.62, -54.86, -4.624, -0.515, None, 230.9, 0.4604, None, 177.5, None, -62.17, -203.0, None, 81.57, -55.77, None, -151.5, None, 120.3, None, None, None, None, None, 16.23, None, None, None, None, None, None, None, None, None, ], #19
[315.3, 1264.0, 62.32, 89.86, -151.0, 339.8, -66.17, -11.0, -297.8, -165.5, -256.3, 193.9, -338.5, None, None, None, 493.8, -313.5, 92.07, 0.0, 13.41, -44.7, 39.63, 183.4, -79.08, None, None, None, None, -208.9, None, 228.4, -95.0, None, -463.6, None, -11.16, None, -228.0, None, -337.0, 169.3, 127.2, None, None, -322.3, None, None, None, None, None, None, 12.55, -60.07, 88.09, None, ], #20
[91.46, 40.25, 4.68, 122.9, 562.2, 529.0, 698.2, None, 286.3, -47.51, 35.38, None, 225.4, 131.2, None, 151.38, 429.7, None, 54.32, 519.1, 0.0, 108.3, 249.2, 62.42, 153.0, 32.73, 86.2, 450.1, 59.02, 65.56, None, 2.22, 344.4, None, None, None, -168.2, None, 6.57, None, 63.67, None, None, None, None, None, None, None, None, None, None, None, -127.9, None, None, None, ], #21
[34.01, -23.5, 121.3, 140.8, 527.6, 669.9, 708.7, 1633.5, 82.86, 190.6, -132.9, 80.99, -197.7, None, None, -141.4, 140.8, 587.3, 258.6, 543.3, -84.53, 0.0, 0.0, 56.33, 223.1, 108.9, None, None, None, 149.56, None, 177.6, 315.9, None, 215.0, None, -91.8, None, -160.28, None, -96.87, None, None, None, None, None, 361.1, None, None, None, None, None, None, None, None, None, ], #22
[36.7, 51.06, 288.5, 69.9, 742.1, 649.1, 826.76, None, 552.1, 242.8, 176.5, 235.6, -20.93, None, None, -293.7, None, 18.98, 74.04, 504.2, -157.1, 0.0, 0.0, -30.1, 192.1, None, None, 116.6, None, -64.38, None, 86.4, 168.8, None, 363.7, None, 111.2, None, None, None, 255.8, None, None, -35.68, None, None, None, 565.9, None, None, None, None, 165.67, None, None, None, ], #23
[-78.45, 160.9, -4.7, 134.7, 856.3, 709.6, 1201.0, 10000.0, 372.0, None, 129.5, 351.9, 113.9, 261.1, 91.13, 316.9, 898.2, 368.5, 492.0, 631.0, 11.8, 17.97, 51.9, 0.0, -75.97, 490.9, 534.7, 132.2, None, 546.7, None, 247.8, 146.6, None, 337.7, 369.5, 187.1, 215.2, 498.6, None, 256.5, None, 233.1, None, None, None, 423.1, 63.95, None, 108.5, None, 585.19, 291.87, 532.73, None, 127.16, ], #24
[106.8, 70.32, -97.27, 402.5, 325.7, 612.8, -274.5, 622.3, 518.4, None, -171.1, 383.3, -25.15, 108.5, 102.2, 2951.0, 334.9, 20.18, 363.5, 993.4, -129.7, -8.309, -0.2266, -248.4, 0.0, 132.7, 2213.0, None, None, None, None, None, 593.4, None, 1337.37, None, None, None, 5143.14, 309.58, -145.1, None, None, -209.7, None, None, 434.1, None, None, None, None, None, None, None, None, 8.48, ], #25
[-32.69, -1.996, 10.38, -97.05, 261.6, 252.6, 417.9, None, -142.6, None, 129.3, None, -94.49, None, None, None, None, None, 0.2827, None, 113.0, -9.639, None, -34.68, 132.9, 0.0, 533.2, 320.2, None, None, 139.8, 304.3, 10.17, -27.7, None, None, 10.76, None, -223.1, None, 248.4, None, None, None, -218.9, None, None, None, None, -4.565, None, None, None, None, None, None, ], #26
[5541.0, None, 1824.0, -127.8, 561.6, 511.29, 360.7, 815.12, -101.5, None, None, None, 220.66, None, None, None, 134.9, 2475.0, None, None, 1971.0, None, None, 514.6, -123.1, -85.12, 0.0, None, None, None, None, 2990.0, -124.0, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 1742.53, ], #27
[-52.65, 16.62, 21.5, 40.68, 609.8, 914.2, 1081.0, 1421.0, 303.7, None, 243.8, None, 112.4, None, None, None, None, None, 335.7, None, -73.09, None, -26.06, -60.71, None, 277.8, None, 0.0, None, None, None, 292.7, None, None, None, None, -47.37, None, None, None, 469.8, None, None, None, None, None, None, None, None, None, None, None, None, 684.78, None, None, ], #28
[-7.481, None, 28.41, 19.56, 461.6, 448.6, None, None, 160.6, None, None, 201.5, 63.71, 106.7, None, None, None, None, 161.0, None, -27.94, None, None, None, None, None, None, None, 0.0, None, None, None, None, None, 31.66, None, None, None, 78.92, None, None, None, None, 1004.0, None, None, None, -18.27, None, None, None, None, None, None, None, None, ], #29
[-25.31, 82.64, 157.3, 128.8, 521.6, 287.0, 23.48, None, 317.5, None, -146.3, None, -87.31, None, None, None, None, None, None, 570.6, -39.46, -116.21, 48.48, -133.16, None, None, None, None, None, 0.0, None, None, None, None, None, None, 262.9, None, None, None, 43.37, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #30
[140.0, None, 221.4, 150.6, 267.6, 240.8, -137.4, 838.4, 135.4, None, 152.0, None, 9.207, None, -213.74, None, 192.3, None, 169.6, None, None, None, None, None, None, 481.3, None, None, None, None, 0.0, None, None, None, -417.2, None, None, None, 302.2, None, 347.8, None, None, -262.0, None, None, -353.5, None, None, None, None, None, None, None, None, None, ], #31
[128.0, None, 58.68, 26.41, 501.3, 431.3, None, None, 138.0, 245.9, 21.92, None, 476.6, None, None, None, None, None, None, 616.6, 179.25, -40.82, 21.76, 48.49, None, 64.28, 2448.0, -27.45, None, None, None, 0.0, 6.37, None, None, None, None, None, None, None, 68.55, None, None, None, None, None, None, None, None, None, None, None, None, 190.81, None, None, ], #32
[-31.52, 174.6, -154.2, 1112.0, 524.9, 494.7, 79.18, None, -142.6, None, 24.37, -92.26, 736.4, None, None, None, None, -42.71, 136.9, 5256.0, -262.3, -174.5, -46.8, 77.55, -185.3, 125.3, 4288.0, None, None, None, None, 37.1, 0.0, None, 32.9, None, -48.33, None, 336.25, None, -195.1, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #33
[-72.88, 41.38, -101.12, 614.52, 68.95, 967.71, None, None, 443.6, -55.87, -111.45, None, 173.77, None, None, None, None, None, 329.1, None, None, None, None, None, None, 174.4, None, None, None, None, None, None, None, 0.0, None, None, 2073.0, None, -119.8, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #34
[50.49, 64.07, -2.504, -143.2, -25.87, 695.0, -240.0, None, 110.4, None, 41.57, None, -93.51, -366.51, None, -257.2, None, None, None, -180.2, None, -215.0, -343.6, -58.43, -334.12, None, None, None, 85.7, None, 535.8, None, -111.2, None, 0.0, None, None, None, -97.71, None, 153.7, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #35
[-165.9, 573.0, -123.6, 397.4, 389.3, 218.8, 386.6, None, 114.55, 354.0, 175.5, None, None, None, None, None, None, None, -42.31, None, None, None, None, -85.15, None, None, None, None, None, None, None, None, None, None, None, 0.0, -208.8, None, -8.804, None, 423.4, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #36
[47.41, 124.2, 395.8, 419.1, 738.9, 528.0, None, None, -40.9, 183.8, 611.3, 134.5, -217.9, None, None, None, None, 281.6, 335.2, 898.2, 383.2, 301.9, -149.8, -134.2, None, 379.4, None, 167.9, None, 82.64, None, None, 322.42, 631.5, None, 837.2, 0.0, None, 255.0, None, 730.8, None, None, None, None, None, None, 2429.0, None, None, None, None, -127.06, None, None, None, ], #37
[-5.132, -131.7, -237.2, -157.3, 649.7, 645.9, None, None, None, None, None, None, 167.1, None, -198.8, 116.5, None, 159.8, None, None, None, None, None, -124.6, None, None, None, None, None, None, None, None, None, None, None, None, None, 0.0, -110.65, -117.2, None, None, None, 26.35, None, None, None, None, None, None, None, None, None, None, None, 117.59, ], #38
[-31.95, 249.0, -133.9, -240.2, 64.16, 172.2, -287.1, None, 97.04, 13.89, -82.12, -116.7, -158.2, 49.7, 10.03, -185.2, 343.7, None, 150.6, -97.77, -55.21, 397.24, None, -186.7, -374.16, 223.6, None, None, -71.0, None, -191.7, None, -176.26, 6.699, 136.6, 5.15, -137.7, 50.06, 0.0, -5.579, 72.31, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 39.84, ], #39
[147.3, 62.4, 140.6, 839.83, None, None, None, None, None, None, None, None, 278.15, None, None, None, None, None, None, None, None, None, None, None, 33.95, None, None, None, None, None, None, None, None, None, None, None, None, 185.6, 55.8, 0.0, None, None, None, None, 111.8, None, None, None, None, None, None, None, None, None, None, None, ], #40
[529.0, 1397.0, 317.6, 615.8, 88.63, 171.0, 284.4, -167.3, 123.4, 577.5, -234.9, 65.37, -247.8, None, 284.5, None, -22.1, None, -61.6, 1179.0, 182.2, 305.4, -193.0, 335.7, 1107.0, -124.7, None, 885.5, None, -64.28, -264.3, 288.1, 627.7, None, -29.34, -53.91, -198.0, None, -28.65, None, 0.0, None, None, None, None, None, None, None, None, None, None, None, None, -100.53, None, None, ], #41
[-34.36, None, 787.9, 191.6, 1913.0, None, 180.2, None, 992.4, None, None, None, 448.5, 961.8, 1464.0, None, None, None, None, 2450.0, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 0.0, -2166.0, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #42
[110.2, None, 234.4, 221.8, 84.85, None, None, None, None, None, None, None, None, -125.2, 1604.0, None, None, None, None, 2496.0, None, None, None, 70.81, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 745.3, 0.0, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #43
[13.89, -16.11, -23.88, 6.214, 796.9, None, 832.2, -234.7, None, None, None, None, None, None, None, None, None, None, None, None, None, None, -196.2, None, 161.5, None, None, None, -274.1, None, 262.0, None, None, None, None, None, -66.31, None, None, None, None, None, None, 0.0, None, None, None, None, None, None, None, None, None, None, None, None, ], #44
[30.74, None, 167.9, None, 794.4, 762.7, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 844.0, None, None, None, None, None, None, None, None, None, None, None, None, None, -32.17, None, None, None, None, 0.0, None, None, None, None, None, None, None, None, None, None, None, ], #45
[27.97, 9.755, None, None, 394.8, None, -509.3, None, None, None, None, None, None, None, None, None, None, None, None, -70.25, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 0.0, None, None, None, None, None, None, None, None, None, None, ], #46
[-11.92, 132.4, -86.88, -19.45, 517.5, None, -205.7, None, 156.4, None, -3.444, None, None, None, None, None, None, None, 119.2, None, None, -194.7, None, 3.163, 7.082, None, None, None, None, None, 515.8, None, None, None, None, None, None, None, None, None, 101.2, None, None, None, None, None, 0.0, None, None, None, None, None, None, None, None, None, ], #47
[39.93, 543.6, None, None, None, 420.0, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, -363.1, -11.3, None, None, None, None, 6.971, None, None, None, None, None, None, None, 148.9, None, None, None, None, None, None, None, None, None, None, 0.0, None, None, None, None, None, None, None, None, ], #48
[-23.61, 161.1, 142.9, 274.1, -61.2, -89.24, -384.3, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 0.0, None, None, None, None, None, None, None, ], #49
[-8.479, None, 23.93, 2.845, 682.5, 597.8, None, 810.5, 278.8, None, None, None, None, None, None, None, None, 221.4, None, None, None, None, None, -79.34, None, 176.3, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #50
[None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #51
[245.21, 384.45, 47.05, 347.13, 72.19, 265.75, 627.39, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 75.04, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #52
[21.49, -2.8, 344.42, 510.32, 244.67, 163.76, 833.21, None, 569.18, -1.25, -38.4, 69.7, -375.6, None, None, None, None, None, None, 600.78, 291.1, None, -286.26, -52.93, None, None, None, None, None, None, None, None, None, None, None, None, 177.12, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #53
[272.82, 569.71, 165.18, 369.89, None, None, None, None, -62.02, None, -229.01, None, -196.59, None, None, None, None, 100.25, None, 472.04, None, None, None, 196.73, None, None, None, 434.32, None, None, None, 313.14, None, None, None, None, None, None, None, None, -244.59, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #54
[None, None, 920.49, 305.77, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 171.94, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ], #55
[-20.31, None, -106.7, 568.47, 284.28, None, 401.2, None, 106.21, None, None, None, None, None, None, None, None, None, None, None, None, None, None, -108.37, 5.76, None, -272.01, None, None, None, None, None, None, None, None, None, None, 107.84, -33.93, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ]] #56
# {symbol : (group_unique_id, main_group_id), ...}
# main_group_id stands for ids as listed at:
# http://www.aim.env.uea.ac.uk/aim/info/UNIFACgroups.html
# in "Group" column
str_to_id = { 'AC': (11, 3),
'ACBr': (119, 56),
'ACC#N': (118, 55),
'ACCH': (14, 4),
'ACCH2': (13, 4),
'ACCH3': (12, 4),
'ACCl': (54, 25),
'ACF': (71, 38),
'ACH': (10, 3),
'ACN(=O)=O': (58, 27),
'ACNH2': (37, 17),
'ACOH': (18, 8),
'Br': (65, 33),
'C#C': (67, 34),
'C(=O)N(CH2)CH2': (99, 46),
'C(=O)N(CH3)CH2': (98, 46),
'C(=O)N(CH3)CH3': (97, 46),
'C(=O)NH2': (94, 46),
'C(=O)NHCH2': (96, 46),
'C(=O)NHCH3': (95, 46),
'C(=O)OH': (43, 20),
# 'C2H4O2': (101, 47),
# 'C2H5O2': (100, 47),
# 'C4H2S': (108, 50),
# 'C4H3S': (107, 50),
# 'C4H4S': (106, 50),
# 'C5H3N': (40, 18),
# 'C5H4N': (39, 18),
# 'C5H5N': (38, 18),
'C=CCl': (70, 37),
'CCl': (47, 21),
'CCl2': (50, 22),
'CCl2F': (87, 45),
'CCl2F2': (93, 45),
'CCl3': (52, 23),
'CCl3F': (86, 45),
'CCl4': (53, 24),
'CClF2': (90, 45),
'CClF3': (92, 45),
'CF': (76, 40),
'CF2': (75, 40),
'CF3': (74, 40),
'CH': (3, 1),
'CH#C': (66, 34),
'CH(=O)O': (24, 12),
'CH(=O)OH': (44, 20),
'CH0': (4, 1),
'CH0=CH0': (9, 2),
'CH0OCH0': (116, 53),
'CH2': (2, 1),
'CH2=CH': (5, 2),
'CH2=CH0': (7, 2),
'CH2=CHC#N': (69, 36),
'CH2OH0': (26, 13),
'CH2C#N': (42, 19),
'CH2C(=O)O': (23, 11),
'CH2C=O': (20, 9),
'CH2Cl': (45, 21),
'CH2Cl2': (48, 22),
'CH2N(=O)=O': (56, 26),
'CH2NH': (33, 15),
'CH2NH0': (36, 16),
'CH2NH2': (30, 14),
# 'CH2OCH': (112, 53), # these are oxides, not ethers
# 'CH2OCH0': (113, 53),
# 'CH2OCH2': (111, 53),
'CH2S': (103, 48),
'CH2SH': (61, 29),
# 'CH2SuCH': (110, 52),
# 'CH2SuCH2': (109, 52),
'CH3': (1, 1),
'CH3OH0': (25, 13),
'CH3C#N': (41, 19),
'CH3C(=O)O': (22, 11),
'CH3C=O': (19, 9),
'CH3N(=O)=O': (55, 26),
'CH3NH': (32, 15),
'CH3NH0': (35, 16),
'CH3NH2': (29, 14),
'CH3OH': (16, 6),
'CH3S': (102, 48),
'CH3SH': (60, 29),
'CH=CH': (6, 2),
'CH=CH0': (8, 2),
'CHOH0': (27, 13),
'CHCl': (46, 21),
'CHCl2': (49, 22),
'CHCl2F': (88, 45),
'CHCl3': (51, 23),
'CHClF': (89, 45),
'CHClF2': (91, 45),
'CHN(=O)=O': (57, 26),
'CHNH': (34, 15),
'CHNH2': (31, 14),
# 'CHOCH': (114, 53), #these are oxides, not ethers
# 'CHOCH0': (115, 53),
'CHS': (104, 48),
# 'COO': (77, 41),
# 'DMF': (72, 39),
# 'DMSO': (68, 35),
# 'DOH': (63, 31),
# 'HCON(CH2)2': (73, 39),
'I': (64, 32),
# 'MORPH': (105, 49),
# 'NMP': (85, 44),
'O=COC=O': (117, 54),
'OH': (15, 5),
'OH2': (17, 7),
'SCS': (59, 28),
'Si': (81, 42),
'SiH': (80, 42),
'SiH2': (79, 42),
'SiH2O': (82, 43),
'SiH3': (78, 42),
'SiHO': (83, 43),
'SiO': (84, 43),
# 'THF': (28, 13),
# 'furfural': (62, 30)
}
q_r_data = {
1: (0.848, 0.9011),
2: (0.54, 0.6744),
3: (0.228, 0.4469),
4: (0.0, 0.2195),
5: (1.176, 1.3454),
6: (0.867, 1.1167),
7: (0.988, 1.1173),
8: (0.676, 0.8886),
9: (0.485, 0.6605),
10: (0.4, 0.5313),
11: (0.12, 0.3652),
12: (0.968, 1.2663),
13: (0.66, 1.0396),
14: (0.348, 0.8121),
15: (1.2, 1.0),
16: (1.432, 1.4311),
17: (1.4, 0.92),
18: (0.68, 0.8952),
19: (1.448, 1.6724),
20: (1.18, 1.4457),
21: (0.948, 0.998),
22: (1.728, 1.9031),
23: (1.42, 1.6764),
24: (1.188, 1.242),
25: (1.088, 1.145),
26: (0.78, 0.9183),
27: (0.468, 0.6908),
28: (1.1, 0.9183),
29: (1.544, 1.5959),
30: (1.236, 1.3692),
31: (0.924, 1.1417),
32: (1.244, 1.4337),
33: (0.936, 1.207),
34: (0.624, 0.9795),
35: (0.94, 1.1865),
36: (0.632, 0.9597),
37: (0.816, 1.06),
38: (2.113, 2.9993),
39: (1.833, 2.8332),
40: (1.553, 2.667),
41: (1.724, 1.8701),
42: (1.416, 1.6434),
43: (1.224, 1.3013),
44: (1.532, 1.528),
45: (1.264, 1.4654),
46: (0.952, 1.238),
47: (0.724, 1.0106),
48: (1.998, 2.2564),
49: (1.684, 2.0606),
50: (1.448, 1.8016),
51: (2.41, 2.87),
52: (2.184, 2.6401),
53: (2.91, 3.39),
54: (0.844, 1.1562),
55: (1.868, 2.0086),
56: (1.56, 1.7818),
57: (1.248, 1.5544),
58: (1.104, 1.4199),
59: (1.65, 2.057),
60: (1.676, 1.877),
61: (1.368, 1.651),
62: (2.484, 3.168),
63: (2.248, 2.4088),
64: (0.992, 1.264),
65: (0.832, 0.9492),
66: (1.088, 1.292),
67: (0.784, 1.0613),
68: (2.472, 2.8266),
69: (2.052, 2.3144),
70: (0.724, 0.791),
71: (0.524, 0.6948),
72: (2.736, 3.0856),
73: (2.12, 2.6322),
74: (1.38, 1.406),
75: (0.92, 1.0105),
76: (0.46, 0.615),
77: (1.2, 1.38),
78: (1.263, 1.6035),
79: (1.006, 1.4443),
80: (0.749, 1.2853),
81: (0.41, 1.047),
82: (1.062, 1.4838),
83: (0.764, 1.303),
84: (0.466, 1.1044),
85: (3.2, 3.981),
86: (2.644, 3.0356),
87: (1.916, 2.2287),
88: (2.116, 2.406),
89: (1.416, 1.6493),
90: (1.648, 1.8174),
91: (1.828, 1.967),
92: (2.1, 2.1721),
93: (2.376, 2.6243),
94: (1.248, 1.4515),
95: (1.796, 2.1905),
96: (1.488, 1.9637),
97: (2.428, 2.8589),
98: (2.12, 2.6322),
99: (1.812, 2.4054),
100: (1.904, 2.1226),
101: (1.592, 1.8952),
102: (1.368, 1.613),
103: (1.06, 1.3863),
104: (0.748, 1.1589),
105: (2.796, 3.474),
106: (2.14, 2.8569),
107: (1.86, 2.6908),
108: (1.58, 2.5247),
109: (2.12, 2.6869),
110: (1.808, 2.4595),
111: (1.32, 1.5926),
112: (1.008, 1.3652),
113: (0.78, 1.1378),
114: (0.696, 1.1378),
115: (0.468, 0.9103),
116: (0.24, 0.6829),
117: (1.52, 1.7732),
118: (0.996, 1.3342),
119: (0.972, 1.3629)}
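# Illustrative lookup sketch (editorial addition, not part of the original data):
# combining the two tables above, a subgroup symbol maps first to its
# (group_unique_id, main_group_id) pair via str_to_id, and the unique id then
# maps to its (Q, R) surface-area/volume parameters via q_r_data.
# The helper name below is hypothetical.
#
#     def lookup_unifac_parameters(symbol):
#         group_id, main_group_id = str_to_id[symbol]
#         q, r = q_r_data[group_id]
#         return group_id, main_group_id, q, r
#
#     # e.g. lookup_unifac_parameters('CH3') -> (1, 1, 0.848, 0.9011)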
| iterate_strings |
builder.ts | /*@internal*/
namespace ts {
export interface ReusableDiagnostic extends ReusableDiagnosticRelatedInformation {
/** May store more in future. For now, this will simply be `true` to indicate when a diagnostic is an unused-identifier diagnostic. */
reportsUnnecessary?: {};
source?: string;
relatedInformation?: ReusableDiagnosticRelatedInformation[];
}
export interface ReusableDiagnosticRelatedInformation {
category: DiagnosticCategory;
code: number;
file: Path | undefined;
start: number | undefined;
length: number | undefined;
messageText: string | ReusableDiagnosticMessageChain;
}
export interface ReusableDiagnosticMessageChain {
messageText: string;
category: DiagnosticCategory;
code: number;
next?: ReusableDiagnosticMessageChain;
}
export interface ReusableBuilderProgramState extends ReusableBuilderState {
/**
* Cache of semantic diagnostics for files with their Path being the key
*/
semanticDiagnosticsPerFile?: ReadonlyMap<ReadonlyArray<ReusableDiagnostic> | ReadonlyArray<Diagnostic>> | undefined;
/**
* The map has key by source file's path that has been changed
*/
changedFilesSet?: ReadonlyMap<true>;
/**
* Set of affected files being iterated
*/
affectedFiles?: ReadonlyArray<SourceFile> | undefined;
/**
* Current changed file for iterating over affected files
*/
currentChangedFilePath?: Path | undefined;
/**
* Map of file signatures, with key being file path, calculated while getting current changed file's affected files
* These will be committed whenever the iteration through affected files of current changed file is complete
*/
currentAffectedFilesSignatures?: ReadonlyMap<string> | undefined;
/**
* Newly computed visible to outside referencedSet
*/
currentAffectedFilesExportedModulesMap?: Readonly<BuilderState.ComputingExportedModulesMap> | undefined;
/**
* True if the semantic diagnostics were copied from the old state
*/
semanticDiagnosticsFromOldState?: Map<true>;
/**
* program corresponding to this state
*/
program?: Program | undefined;
/**
* compilerOptions for the program
*/
compilerOptions: CompilerOptions;
/**
* Files pending to be emitted
*/
affectedFilesPendingEmit?: ReadonlyArray<Path> | undefined;
/**
* Current index to retrieve pending affected file
*/
affectedFilesPendingEmitIndex?: number | undefined;
/*
* true if semantic diagnostics are ReusableDiagnostic instead of Diagnostic
*/
hasReusableDiagnostic?: true;
}
/**
* State to store the changed files, affected files and cache semantic diagnostics
*/
// TODO: GH#18217 Properties of this interface are frequently asserted to be defined.
export interface BuilderProgramState extends BuilderState {
/**
* Cache of semantic diagnostics for files with their Path being the key
*/
semanticDiagnosticsPerFile: Map<ReadonlyArray<Diagnostic>> | undefined;
/**
* The map has key by source file's path that has been changed
*/
changedFilesSet: Map<true>;
/**
* Set of affected files being iterated
*/
affectedFiles: ReadonlyArray<SourceFile> | undefined;
/**
* Current index to retrieve affected file from
*/
affectedFilesIndex: number | undefined;
/**
* Current changed file for iterating over affected files
*/
currentChangedFilePath: Path | undefined;
/**
* Map of file signatures, with key being file path, calculated while getting current changed file's affected files
* These will be committed whenever the iteration through affected files of current changed file is complete
*/
currentAffectedFilesSignatures: Map<string> | undefined;
/**
* Newly computed visible to outside referencedSet
*/
currentAffectedFilesExportedModulesMap: BuilderState.ComputingExportedModulesMap | undefined;
/**
* Already seen affected files
*/
seenAffectedFiles: Map<true> | undefined;
/**
* whether this program has cleaned semantic diagnostics cache for lib files
*/
cleanedDiagnosticsOfLibFiles?: boolean;
/**
* True if the semantic diagnostics were copied from the old state
*/
semanticDiagnosticsFromOldState?: Map<true>;
/**
* program corresponding to this state
*/
program: Program | undefined;
/**
* compilerOptions for the program
*/
compilerOptions: CompilerOptions;
/**
* Files pending to be emitted
*/
affectedFilesPendingEmit: ReadonlyArray<Path> | undefined;
/**
* Current index to retrieve pending affected file
*/
affectedFilesPendingEmitIndex: number | undefined;
/**
* true if build info is emitted
*/
emittedBuildInfo?: boolean;
/**
* Already seen affected files
*/
seenEmittedFiles: Map<true> | undefined;
/**
* true if program has been emitted
*/
programEmitComplete?: true;
}
function hasSameKeys<T, U>(map1: ReadonlyMap<T> | undefined, map2: ReadonlyMap<U> | undefined): boolean {
// Has same size and every key is present in both maps
return map1 as ReadonlyMap<T | U> === map2 || map1 !== undefined && map2 !== undefined && map1.size === map2.size && !forEachKey(map1, key => !map2.has(key));
}
/**
* Create the state so that we can iterate on changedFiles/affected files
*/
function createBuilderProgramState(newProgram: Program, getCanonicalFileName: GetCanonicalFileName, oldState?: Readonly<ReusableBuilderProgramState>): BuilderProgramState {
const state = BuilderState.create(newProgram, getCanonicalFileName, oldState) as BuilderProgramState;
state.program = newProgram;
const compilerOptions = newProgram.getCompilerOptions();
state.compilerOptions = compilerOptions;
// With --out or --outFile, any change affects all semantic diagnostics so no need to cache them
// With --isolatedModules, emitting a changed file doesn't emit its dependent files, so we can't know which dependent files to retrieve errors for; don't cache the errors
if (!compilerOptions.outFile && !compilerOptions.out && !compilerOptions.isolatedModules) {
state.semanticDiagnosticsPerFile = createMap<ReadonlyArray<Diagnostic>>();
}
state.changedFilesSet = createMap<true>();
const useOldState = BuilderState.canReuseOldState(state.referencedMap, oldState);
const oldCompilerOptions = useOldState ? oldState!.compilerOptions : undefined;
const canCopySemanticDiagnostics = useOldState && oldState!.semanticDiagnosticsPerFile && !!state.semanticDiagnosticsPerFile &&
!compilerOptionsAffectSemanticDiagnostics(compilerOptions, oldCompilerOptions!);
if (useOldState) {
// Verify the sanity of old state
if (!oldState!.currentChangedFilePath) {
const affectedSignatures = oldState!.currentAffectedFilesSignatures;
Debug.assert(!oldState!.affectedFiles && (!affectedSignatures || !affectedSignatures.size), "Cannot reuse if only few affected files of currentChangedFile were iterated");
}
const changedFilesSet = oldState!.changedFilesSet;
if (canCopySemanticDiagnostics) {
Debug.assert(!changedFilesSet || !forEachKey(changedFilesSet, path => oldState!.semanticDiagnosticsPerFile!.has(path)), "Semantic diagnostics shouldn't be available for changed files");
}
// Copy old state's changed files set
if (changedFilesSet) {
copyEntries(changedFilesSet, state.changedFilesSet);
}
if (!compilerOptions.outFile && !compilerOptions.out && oldState!.affectedFilesPendingEmit) {
state.affectedFilesPendingEmit = oldState!.affectedFilesPendingEmit;
state.affectedFilesPendingEmitIndex = oldState!.affectedFilesPendingEmitIndex;
}
}
// Update changed files and copy semantic diagnostics if we can
const referencedMap = state.referencedMap;
const oldReferencedMap = useOldState ? oldState!.referencedMap : undefined;
const copyDeclarationFileDiagnostics = canCopySemanticDiagnostics && !compilerOptions.skipLibCheck === !oldCompilerOptions!.skipLibCheck;
const copyLibFileDiagnostics = copyDeclarationFileDiagnostics && !compilerOptions.skipDefaultLibCheck === !oldCompilerOptions!.skipDefaultLibCheck;
state.fileInfos.forEach((info, sourceFilePath) => {
let oldInfo: Readonly<BuilderState.FileInfo> | undefined;
let newReferences: BuilderState.ReferencedSet | undefined;
// if not using old state, every file is changed
if (!useOldState ||
// File wasn't present in old state
!(oldInfo = oldState!.fileInfos.get(sourceFilePath)) ||
// versions don't match
oldInfo.version !== info.version ||
// Referenced files changed
!hasSameKeys(newReferences = referencedMap && referencedMap.get(sourceFilePath), oldReferencedMap && oldReferencedMap.get(sourceFilePath)) ||
// Referenced file was deleted in the new program
newReferences && forEachKey(newReferences, path => !state.fileInfos.has(path) && oldState!.fileInfos.has(path))) {
// Register file as changed file and do not copy semantic diagnostics, since all changed files need to be re-evaluated
state.changedFilesSet.set(sourceFilePath, true);
}
else if (canCopySemanticDiagnostics) {
const sourceFile = newProgram.getSourceFileByPath(sourceFilePath as Path)!;
if (sourceFile.isDeclarationFile && !copyDeclarationFileDiagnostics) { return; }
if (sourceFile.hasNoDefaultLib && !copyLibFileDiagnostics) { return; }
// Unchanged file copy diagnostics
const diagnostics = oldState!.semanticDiagnosticsPerFile!.get(sourceFilePath);
if (diagnostics) {
state.semanticDiagnosticsPerFile!.set(sourceFilePath, oldState!.hasReusableDiagnostic ? convertToDiagnostics(diagnostics as ReadonlyArray<ReusableDiagnostic>, newProgram) : diagnostics as ReadonlyArray<Diagnostic>);
if (!state.semanticDiagnosticsFromOldState) {
state.semanticDiagnosticsFromOldState = createMap<true>();
}
state.semanticDiagnosticsFromOldState.set(sourceFilePath, true);
}
}
});
if (oldCompilerOptions &&
(oldCompilerOptions.outDir !== compilerOptions.outDir ||
oldCompilerOptions.declarationDir !== compilerOptions.declarationDir ||
(oldCompilerOptions.outFile || oldCompilerOptions.out) !== (compilerOptions.outFile || compilerOptions.out))) {
// Add all files to affectedFilesPendingEmit since emit changed
state.affectedFilesPendingEmit = concatenate(state.affectedFilesPendingEmit, newProgram.getSourceFiles().map(f => f.path));
if (state.affectedFilesPendingEmitIndex === undefined) {
state.affectedFilesPendingEmitIndex = 0;
}
Debug.assert(state.seenAffectedFiles === undefined);
state.seenAffectedFiles = createMap<true>();
}
return state;
}
function convertToDiagnostics(diagnostics: ReadonlyArray<ReusableDiagnostic>, newProgram: Program): ReadonlyArray<Diagnostic> {
if (!diagnostics.length) return emptyArray;
return diagnostics.map(diagnostic => {
const result: Diagnostic = convertToDiagnosticRelatedInformation(diagnostic, newProgram);
result.reportsUnnecessary = diagnostic.reportsUnnecessary;
result.source = diagnostic.source;
const { relatedInformation } = diagnostic;
result.relatedInformation = relatedInformation ?
relatedInformation.length ?
relatedInformation.map(r => convertToDiagnosticRelatedInformation(r, newProgram)) :
emptyArray :
undefined;
return result;
});
}
function convertToDiagnosticRelatedInformation(diagnostic: ReusableDiagnosticRelatedInformation, newProgram: Program): DiagnosticRelatedInformation {
const { file, messageText } = diagnostic;
return {
...diagnostic,
file: file && newProgram.getSourceFileByPath(file),
messageText: messageText === undefined || isString(messageText) ?
messageText :
convertToDiagnosticMessageChain(messageText, newProgram)
};
}
function convertToDiagnosticMessageChain(diagnostic: ReusableDiagnosticMessageChain, newProgram: Program): DiagnosticMessageChain {
return {
...diagnostic,
next: diagnostic.next && convertToDiagnosticMessageChain(diagnostic.next, newProgram)
};
}
/**
* Releases program and other related not needed properties
*/
function releaseCache(state: BuilderProgramState) {
BuilderState.releaseCache(state);
state.program = undefined;
}
/**
* Creates a clone of the state
*/
function cloneBuilderProgramState(state: Readonly<BuilderProgramState>): BuilderProgramState {
const newState = BuilderState.clone(state) as BuilderProgramState;
newState.semanticDiagnosticsPerFile = cloneMapOrUndefined(state.semanticDiagnosticsPerFile);
newState.changedFilesSet = cloneMap(state.changedFilesSet);
newState.affectedFiles = state.affectedFiles;
newState.affectedFilesIndex = state.affectedFilesIndex;
newState.currentChangedFilePath = state.currentChangedFilePath;
newState.currentAffectedFilesSignatures = cloneMapOrUndefined(state.currentAffectedFilesSignatures);
newState.currentAffectedFilesExportedModulesMap = cloneMapOrUndefined(state.currentAffectedFilesExportedModulesMap);
newState.seenAffectedFiles = cloneMapOrUndefined(state.seenAffectedFiles);
newState.cleanedDiagnosticsOfLibFiles = state.cleanedDiagnosticsOfLibFiles;
newState.semanticDiagnosticsFromOldState = cloneMapOrUndefined(state.semanticDiagnosticsFromOldState);
newState.program = state.program;
newState.compilerOptions = state.compilerOptions;
newState.affectedFilesPendingEmit = state.affectedFilesPendingEmit;
newState.affectedFilesPendingEmitIndex = state.affectedFilesPendingEmitIndex;
newState.seenEmittedFiles = cloneMapOrUndefined(state.seenEmittedFiles);
newState.programEmitComplete = state.programEmitComplete;
return newState;
}
/**
* Verifies that source file is ok to be used in calls that aren't handled by next
*/
function assertSourceFileOkWithoutNextAffectedCall(state: BuilderProgramState, sourceFile: SourceFile | undefined) {
Debug.assert(!sourceFile || !state.affectedFiles || state.affectedFiles[state.affectedFilesIndex! - 1] !== sourceFile || !state.semanticDiagnosticsPerFile!.has(sourceFile.path));
}
/**
* This function returns the next affected file to be processed.
* Note that until doneWithAffectedFile is called it would keep reporting the same result
* This is to allow the callers to actually remove the affected file only when the operation is complete
* e.g. if during a diagnostics check the cancellation token ends up cancelling the request, the affected file should be retained
*/
function getNextAffectedFile(state: BuilderProgramState, cancellationToken: CancellationToken | undefined, computeHash: BuilderState.ComputeHash): SourceFile | Program | undefined {
while (true) {
const { affectedFiles } = state;
if (affectedFiles) {
const seenAffectedFiles = state.seenAffectedFiles!;
let affectedFilesIndex = state.affectedFilesIndex!; // TODO: GH#18217
while (affectedFilesIndex < affectedFiles.length) {
const affectedFile = affectedFiles[affectedFilesIndex];
if (!seenAffectedFiles.has(affectedFile.path)) {
// Set the next affected file as seen and remove the cached semantic diagnostics
state.affectedFilesIndex = affectedFilesIndex;
cleanSemanticDiagnosticsOfAffectedFile(state, affectedFile);
return affectedFile;
}
seenAffectedFiles.set(affectedFile.path, true);
affectedFilesIndex++;
}
// Remove the changed file from the change set
state.changedFilesSet.delete(state.currentChangedFilePath!);
state.currentChangedFilePath = undefined;
// Commit the changes in file signature
BuilderState.updateSignaturesFromCache(state, state.currentAffectedFilesSignatures!);
state.currentAffectedFilesSignatures!.clear();
BuilderState.updateExportedFilesMapFromCache(state, state.currentAffectedFilesExportedModulesMap);
state.affectedFiles = undefined;
}
// Get next changed file
const nextKey = state.changedFilesSet.keys().next();
if (nextKey.done) {
// Done
return undefined;
}
// With --out or --outFile all outputs go into single file
// so operations are performed directly on program, return program
const program = Debug.assertDefined(state.program);
const compilerOptions = program.getCompilerOptions();
if (compilerOptions.outFile || compilerOptions.out) {
Debug.assert(!state.semanticDiagnosticsPerFile);
return program;
}
// Get next batch of affected files
state.currentAffectedFilesSignatures = state.currentAffectedFilesSignatures || createMap();
if (state.exportedModulesMap) {
state.currentAffectedFilesExportedModulesMap = state.currentAffectedFilesExportedModulesMap || createMap<BuilderState.ReferencedSet | false>();
}
state.affectedFiles = BuilderState.getFilesAffectedBy(state, program, nextKey.value as Path, cancellationToken, computeHash, state.currentAffectedFilesSignatures, state.currentAffectedFilesExportedModulesMap);
state.currentChangedFilePath = nextKey.value as Path;
state.affectedFilesIndex = 0;
state.seenAffectedFiles = state.seenAffectedFiles || createMap<true>();
}
}
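// Illustrative caller pattern (a sketch, not part of the original source): getNextAffectedFile
// keeps returning the same file until doneWithAffectedFile is called, so callers perform their
// operation first and only then mark the file as done, e.g.:
//
//     let affected: SourceFile | Program | undefined;
//     while (affected = getNextAffectedFile(state, cancellationToken, computeHash)) {
//         // ... get diagnostics or emit for `affected` here ...
//         doneWithAffectedFile(state, affected);
//     }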
/**
* Returns the next file to be emitted from the files that retrieved semantic diagnostics but have not been emitted yet
*/
function getNextAffectedFilePendingEmit(state: BuilderProgramState): SourceFile | undefined {
const { affectedFilesPendingEmit } = state;
if (affectedFilesPendingEmit) {
const seenEmittedFiles = state.seenEmittedFiles || (state.seenEmittedFiles = createMap());
for (let i = state.affectedFilesPendingEmitIndex!; i < affectedFilesPendingEmit.length; i++) {
const affectedFile = Debug.assertDefined(state.program).getSourceFileByPath(affectedFilesPendingEmit[i]);
if (affectedFile && !seenEmittedFiles.has(affectedFile.path)) {
// emit this file
state.affectedFilesPendingEmitIndex = i;
return affectedFile;
}
}
state.affectedFilesPendingEmit = undefined;
state.affectedFilesPendingEmitIndex = undefined;
}
return undefined;
}
/**
* Remove the semantic diagnostics cached from the old state for the affected file and for the files that reference modules that export entities from the affected file
*/
function cleanSemanticDiagnosticsOfAffectedFile(state: BuilderProgramState, affectedFile: SourceFile) {
if (removeSemanticDiagnosticsOf(state, affectedFile.path)) {
// If there are no more diagnostics from old cache, done
return;
}
// Clean lib file diagnostics if the affected files are all files excluding the default library files
if (state.allFilesExcludingDefaultLibraryFile === state.affectedFiles && !state.cleanedDiagnosticsOfLibFiles) {
state.cleanedDiagnosticsOfLibFiles = true;
const program = Debug.assertDefined(state.program);
const options = program.getCompilerOptions();
if (forEach(program.getSourceFiles(), f =>
program.isSourceFileDefaultLibrary(f) &&
!skipTypeChecking(f, options) &&
removeSemanticDiagnosticsOf(state, f.path)
)) {
return;
}
}
// If there was change in signature for the changed file,
// then delete the semantic diagnostics for files that are affected by using exports of this module
if (!state.exportedModulesMap || state.affectedFiles!.length === 1 || !state.changedFilesSet.has(affectedFile.path)) {
return;
}
Debug.assert(!!state.currentAffectedFilesExportedModulesMap);
const seenFileAndExportsOfFile = createMap<true>();
// Go through exported modules from cache first
// If the exported modules contain this path, all files referencing the exporting file are affected
if (forEachEntry(state.currentAffectedFilesExportedModulesMap!, (exportedModules, exportedFromPath) =>
exportedModules &&
exportedModules.has(affectedFile.path) &&
removeSemanticDiagnosticsOfFilesReferencingPath(state, exportedFromPath as Path, seenFileAndExportsOfFile)
)) {
return;
}
// If the exported-from path is not in the cache and its exported modules contain this path, all files referencing the exporting file are affected
forEachEntry(state.exportedModulesMap, (exportedModules, exportedFromPath) =>
!state.currentAffectedFilesExportedModulesMap!.has(exportedFromPath) && // If we already iterated this through cache, ignore it
exportedModules.has(affectedFile.path) &&
removeSemanticDiagnosticsOfFilesReferencingPath(state, exportedFromPath as Path, seenFileAndExportsOfFile)
);
}
/**
* removes the semantic diagnostics of files referencing referencedPath and
* returns true if there are no more semantic diagnostics from old state
*/
function removeSemanticDiagnosticsOfFilesReferencingPath(state: BuilderProgramState, referencedPath: Path, seenFileAndExportsOfFile: Map<true>) {
return forEachEntry(state.referencedMap!, (referencesInFile, filePath) =>
referencesInFile.has(referencedPath) && removeSemanticDiagnosticsOfFileAndExportsOfFile(state, filePath as Path, seenFileAndExportsOfFile)
);
}
/**
* Removes semantic diagnostics of file and anything that exports this file
*/
function removeSemanticDiagnosticsOfFileAndExportsOfFile(state: BuilderProgramState, filePath: Path, seenFileAndExportsOfFile: Map<true>): boolean {
if (!addToSeen(seenFileAndExportsOfFile, filePath)) {
return false;
}
if (removeSemanticDiagnosticsOf(state, filePath)) {
// If there are no more diagnostics from old cache, done
return true;
}
Debug.assert(!!state.currentAffectedFilesExportedModulesMap);
// Go through exported modules from cache first
// If the exported modules contain this path, all files referencing the exporting file are affected
if (forEachEntry(state.currentAffectedFilesExportedModulesMap!, (exportedModules, exportedFromPath) =>
exportedModules &&
exportedModules.has(filePath) &&
removeSemanticDiagnosticsOfFileAndExportsOfFile(state, exportedFromPath as Path, seenFileAndExportsOfFile)
)) {
return true;
}
// If the exported-from path is not in the cache and its exported modules contain this path, all files referencing the exporting file are affected
if (forEachEntry(state.exportedModulesMap!, (exportedModules, exportedFromPath) =>
!state.currentAffectedFilesExportedModulesMap!.has(exportedFromPath) && // If we already iterated this through cache, ignore it
exportedModules.has(filePath) &&
removeSemanticDiagnosticsOfFileAndExportsOfFile(state, exportedFromPath as Path, seenFileAndExportsOfFile)
)) {
return true;
}
// Remove diagnostics of files that import this file (without going to exports of referencing files)
return !!forEachEntry(state.referencedMap!, (referencesInFile, referencingFilePath) =>
referencesInFile.has(filePath) &&
!seenFileAndExportsOfFile.has(referencingFilePath) && // Not already removed diagnostic file
removeSemanticDiagnosticsOf(state, referencingFilePath as Path) // Don't add to seen since we are not yet done with the export removal
);
}
/**
* Removes semantic diagnostics for path and
* returns true if there are no more semantic diagnostics from the old state
*/
function removeSemanticDiagnosticsOf(state: BuilderProgramState, path: Path) {
if (!state.semanticDiagnosticsFromOldState) {
return true;
}
state.semanticDiagnosticsFromOldState.delete(path);
state.semanticDiagnosticsPerFile!.delete(path);
return !state.semanticDiagnosticsFromOldState.size;
}
/**
* This is called after completing the operation on the next affected file.
* The operations here are postponed to ensure that cancellation during the iteration is handled correctly
| */
function doneWithAffectedFile(state: BuilderProgramState, affected: SourceFile | Program, isPendingEmit?: boolean, isBuildInfoEmit?: boolean) {
if (isBuildInfoEmit) {
state.emittedBuildInfo = true;
}
else if (affected === state.program) {
state.changedFilesSet.clear();
state.programEmitComplete = true;
}
else {
state.seenAffectedFiles!.set((affected as SourceFile).path, true);
if (isPendingEmit) {
state.affectedFilesPendingEmitIndex!++;
}
else {
state.affectedFilesIndex!++;
}
}
}
/**
* Returns the result with affected file
*/
function toAffectedFileResult<T>(state: BuilderProgramState, result: T, affected: SourceFile | Program, isPendingEmit?: boolean, isBuildInfoEmit?: boolean): AffectedFileResult<T> {
doneWithAffectedFile(state, affected, isPendingEmit, isBuildInfoEmit);
return { result, affected };
}
/**
* Gets the semantic diagnostics either from cache if present, or otherwise from program and caches it
* Note that it is assumed that when asked about semantic diagnostics, the file has been taken out of the affected files/changed files set
*/
function getSemanticDiagnosticsOfFile(state: BuilderProgramState, sourceFile: SourceFile, cancellationToken?: CancellationToken): ReadonlyArray<Diagnostic> {
const path = sourceFile.path;
if (state.semanticDiagnosticsPerFile) {
const cachedDiagnostics = state.semanticDiagnosticsPerFile.get(path);
// Report the semantic diagnostics from the cache if we already have those diagnostics present
if (cachedDiagnostics) {
return cachedDiagnostics;
}
}
// Diagnostics weren't cached, get them from the program, and cache the result
const diagnostics = Debug.assertDefined(state.program).getSemanticDiagnostics(sourceFile, cancellationToken);
if (state.semanticDiagnosticsPerFile) {
state.semanticDiagnosticsPerFile.set(path, diagnostics);
}
return diagnostics;
}
export type ProgramBuildInfoDiagnostic = string | [string, ReadonlyArray<ReusableDiagnostic>];
export interface ProgramBuildInfo {
fileInfos: MapLike<BuilderState.FileInfo>;
options: CompilerOptions;
referencedMap?: MapLike<string[]>;
exportedModulesMap?: MapLike<string[]>;
semanticDiagnosticsPerFile?: ProgramBuildInfoDiagnostic[];
}
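// Illustrative serialized shape (a sketch; the paths and values below are hypothetical):
//
//     {
//         "fileInfos": { "/src/a.ts": { "version": "abc", "signature": "def" } },
//         "options": { "composite": true },
//         "referencedMap": { "/src/b.ts": ["/src/a.ts"] },
//         "exportedModulesMap": { "/src/a.ts": ["/src/b.ts"] },
//         "semanticDiagnosticsPerFile": ["/src/a.ts"]
//     }
//
// A plain string entry in semanticDiagnosticsPerFile means the file's cached diagnostics were empty;
// a [path, diagnostics] tuple carries the reusable diagnostics for that file.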
/**
* Gets the program information to be emitted in buildInfo so that we can use it to create new program
*/
function getProgramBuildInfo(state: Readonly<ReusableBuilderProgramState>): ProgramBuildInfo | undefined {
if (state.compilerOptions.outFile || state.compilerOptions.out) return undefined;
const fileInfos: MapLike<BuilderState.FileInfo> = {};
state.fileInfos.forEach((value, key) => {
const signature = state.currentAffectedFilesSignatures && state.currentAffectedFilesSignatures.get(key);
fileInfos[key] = signature === undefined ? value : { version: value.version, signature };
});
const result: ProgramBuildInfo = { fileInfos, options: state.compilerOptions };
if (state.referencedMap) {
const referencedMap: MapLike<string[]> = {};
state.referencedMap.forEach((value, key) => {
referencedMap[key] = arrayFrom(value.keys());
});
result.referencedMap = referencedMap;
}
if (state.exportedModulesMap) {
const exportedModulesMap: MapLike<string[]> = {};
state.exportedModulesMap.forEach((value, key) => {
const newValue = state.currentAffectedFilesExportedModulesMap && state.currentAffectedFilesExportedModulesMap.get(key);
// Not in temporary cache, use existing value
if (newValue === undefined) exportedModulesMap[key] = arrayFrom(value.keys());
// Value in cache and has updated value map, use that
else if (newValue) exportedModulesMap[key] = arrayFrom(newValue.keys());
});
result.exportedModulesMap = exportedModulesMap;
}
if (state.semanticDiagnosticsPerFile) {
const semanticDiagnosticsPerFile: ProgramBuildInfoDiagnostic[] = [];
// Currently not recording actual errors since those mean no emit for tsc --build
state.semanticDiagnosticsPerFile.forEach((value, key) => semanticDiagnosticsPerFile.push(
value.length ?
[
key,
state.hasReusableDiagnostic ?
value as ReadonlyArray<ReusableDiagnostic> :
convertToReusableDiagnostics(value as ReadonlyArray<Diagnostic>)
] :
key
));
result.semanticDiagnosticsPerFile = semanticDiagnosticsPerFile;
}
return result;
}
function convertToReusableDiagnostics(diagnostics: ReadonlyArray<Diagnostic>): ReadonlyArray<ReusableDiagnostic> {
Debug.assert(!!diagnostics.length);
return diagnostics.map(diagnostic => {
const result: ReusableDiagnostic = convertToReusableDiagnosticRelatedInformation(diagnostic);
result.reportsUnnecessary = diagnostic.reportsUnnecessary;
result.source = diagnostic.source;
const { relatedInformation } = diagnostic;
result.relatedInformation = relatedInformation ?
relatedInformation.length ?
relatedInformation.map(r => convertToReusableDiagnosticRelatedInformation(r)) :
emptyArray :
undefined;
return result;
});
}
function convertToReusableDiagnosticRelatedInformation(diagnostic: DiagnosticRelatedInformation): ReusableDiagnosticRelatedInformation {
const { file, messageText } = diagnostic;
return {
...diagnostic,
file: file && file.path,
messageText: messageText === undefined || isString(messageText) ?
messageText :
convertToReusableDiagnosticMessageChain(messageText)
};
}
function convertToReusableDiagnosticMessageChain(diagnostic: DiagnosticMessageChain): ReusableDiagnosticMessageChain {
return {
...diagnostic,
next: diagnostic.next && convertToReusableDiagnosticMessageChain(diagnostic.next)
};
}
export enum BuilderProgramKind {
SemanticDiagnosticsBuilderProgram,
EmitAndSemanticDiagnosticsBuilderProgram
}
export interface BuilderCreationParameters {
newProgram: Program;
host: BuilderProgramHost;
oldProgram: BuilderProgram | undefined;
configFileParsingDiagnostics: ReadonlyArray<Diagnostic>;
}
export function getBuilderCreationParameters(newProgramOrRootNames: Program | ReadonlyArray<string> | undefined, hostOrOptions: BuilderProgramHost | CompilerOptions | undefined, oldProgramOrHost?: BuilderProgram | CompilerHost, configFileParsingDiagnosticsOrOldProgram?: ReadonlyArray<Diagnostic> | BuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>, projectReferences?: ReadonlyArray<ProjectReference>): BuilderCreationParameters {
let host: BuilderProgramHost;
let newProgram: Program;
let oldProgram: BuilderProgram;
if (newProgramOrRootNames === undefined) {
Debug.assert(hostOrOptions === undefined);
host = oldProgramOrHost as CompilerHost;
oldProgram = configFileParsingDiagnosticsOrOldProgram as BuilderProgram;
Debug.assert(!!oldProgram);
newProgram = oldProgram.getProgram();
}
else if (isArray(newProgramOrRootNames)) {
oldProgram = configFileParsingDiagnosticsOrOldProgram as BuilderProgram;
newProgram = createProgram({
rootNames: newProgramOrRootNames,
options: hostOrOptions as CompilerOptions,
host: oldProgramOrHost as CompilerHost,
oldProgram: oldProgram && oldProgram.getProgramOrUndefined(),
configFileParsingDiagnostics,
projectReferences
});
host = oldProgramOrHost as CompilerHost;
}
else {
newProgram = newProgramOrRootNames;
host = hostOrOptions as BuilderProgramHost;
oldProgram = oldProgramOrHost as BuilderProgram;
configFileParsingDiagnostics = configFileParsingDiagnosticsOrOldProgram as ReadonlyArray<Diagnostic>;
}
return { host, newProgram, oldProgram, configFileParsingDiagnostics: configFileParsingDiagnostics || emptyArray };
}
export function createBuilderProgram(kind: BuilderProgramKind.SemanticDiagnosticsBuilderProgram, builderCreationParameters: BuilderCreationParameters): SemanticDiagnosticsBuilderProgram;
export function createBuilderProgram(kind: BuilderProgramKind.EmitAndSemanticDiagnosticsBuilderProgram, builderCreationParameters: BuilderCreationParameters): EmitAndSemanticDiagnosticsBuilderProgram;
export function createBuilderProgram(kind: BuilderProgramKind, { newProgram, host, oldProgram, configFileParsingDiagnostics }: BuilderCreationParameters) {
// Return the same program if the underlying program doesn't change
let oldState = oldProgram && oldProgram.getState();
if (oldState && newProgram === oldState.program && configFileParsingDiagnostics === newProgram.getConfigFileParsingDiagnostics()) {
newProgram = undefined!; // TODO: GH#18217
oldState = undefined;
return oldProgram;
}
/**
* Create the canonical file name for identity
*/
const getCanonicalFileName = createGetCanonicalFileName(host.useCaseSensitiveFileNames());
/**
* Computing the hash for signature verification
*/
const computeHash = host.createHash || generateDjb2Hash;
let state = createBuilderProgramState(newProgram, getCanonicalFileName, oldState);
let backupState: BuilderProgramState | undefined;
newProgram.getProgramBuildInfo = () => getProgramBuildInfo(state);
// To ensure that we aren't storing any references to the old program or the new program without state
newProgram = undefined!; // TODO: GH#18217
oldProgram = undefined;
oldState = undefined;
const result = createRedirectedBuilderProgram(state, configFileParsingDiagnostics);
result.getState = () => state;
result.backupState = () => {
Debug.assert(backupState === undefined);
backupState = cloneBuilderProgramState(state);
};
result.restoreState = () => {
state = Debug.assertDefined(backupState);
backupState = undefined;
};
result.getAllDependencies = sourceFile => BuilderState.getAllDependencies(state, Debug.assertDefined(state.program), sourceFile);
result.getSemanticDiagnostics = getSemanticDiagnostics;
result.emit = emit;
result.releaseProgram = () => {
releaseCache(state);
backupState = undefined;
};
if (kind === BuilderProgramKind.SemanticDiagnosticsBuilderProgram) {
(result as SemanticDiagnosticsBuilderProgram).getSemanticDiagnosticsOfNextAffectedFile = getSemanticDiagnosticsOfNextAffectedFile;
}
else if (kind === BuilderProgramKind.EmitAndSemanticDiagnosticsBuilderProgram) {
(result as EmitAndSemanticDiagnosticsBuilderProgram).emitNextAffectedFile = emitNextAffectedFile;
}
else {
notImplemented();
}
return result;
/**
* Emits the next affected file's emit result (EmitResult and sourceFiles emitted) or returns undefined if iteration is complete
* The first available of: the writeFile argument if provided, writeFile of BuilderProgramHost if provided, or writeFile of the compiler host,
* in that order, would be used to write the files
*/
function emitNextAffectedFile(writeFile?: WriteFileCallback, cancellationToken?: CancellationToken, emitOnlyDtsFiles?: boolean, customTransformers?: CustomTransformers): AffectedFileResult<EmitResult> {
let affected = getNextAffectedFile(state, cancellationToken, computeHash);
let isPendingEmitFile = false;
if (!affected) {
if (!state.compilerOptions.out && !state.compilerOptions.outFile) {
affected = getNextAffectedFilePendingEmit(state);
if (!affected) {
if (state.emittedBuildInfo) {
return undefined;
}
const affected = Debug.assertDefined(state.program);
return toAffectedFileResult(
state,
// When whole program is affected, do emit only once (eg when --out or --outFile is specified)
// Otherwise just affected file
affected.emitBuildInfo(writeFile || maybeBind(host, host.writeFile), cancellationToken),
affected,
/*isPendingEmitFile*/ false,
/*isBuildInfoEmit*/ true
);
}
isPendingEmitFile = true;
}
else {
const program = Debug.assertDefined(state.program);
// Check if the program uses any prepend project references; if that's the case we can't keep track of the js files of those, so emit even though there are no changes
if (state.programEmitComplete || !some(program.getProjectReferences(), ref => !!ref.prepend)) {
state.programEmitComplete = true;
return undefined;
}
affected = program;
}
}
// Mark seen emitted files if there are pending files to be emitted
if (state.affectedFilesPendingEmit && state.program !== affected) {
(state.seenEmittedFiles || (state.seenEmittedFiles = createMap())).set((affected as SourceFile).path, true);
}
return toAffectedFileResult(
state,
// When whole program is affected, do emit only once (eg when --out or --outFile is specified)
// Otherwise just affected file
Debug.assertDefined(state.program).emit(affected === state.program ? undefined : affected as SourceFile, writeFile || maybeBind(host, host.writeFile), cancellationToken, emitOnlyDtsFiles, customTransformers),
affected,
isPendingEmitFile
);
}
/**
* Emits the JavaScript and declaration files.
* When targetSourceFile is specified, emits the files corresponding to that source file,
* otherwise for the whole program.
* In case of EmitAndSemanticDiagnosticsBuilderProgram, when targetSourceFile is specified,
* it is assumed that that file is handled from the affected file list. If targetSourceFile is not specified,
* it will emit only the affected files instead of the whole program
*
* The first available of: the writeFile argument if provided, writeFile of BuilderProgramHost if provided, or writeFile of the compiler host,
* in that order, would be used to write the files
*/
function emit(targetSourceFile?: SourceFile, writeFile?: WriteFileCallback, cancellationToken?: CancellationToken, emitOnlyDtsFiles?: boolean, customTransformers?: CustomTransformers): EmitResult {
if (kind === BuilderProgramKind.EmitAndSemanticDiagnosticsBuilderProgram) {
assertSourceFileOkWithoutNextAffectedCall(state, targetSourceFile);
if (!targetSourceFile) {
// Emit and report any errors we ran into.
let sourceMaps: SourceMapEmitResult[] = [];
let emitSkipped = false;
let diagnostics: Diagnostic[] | undefined;
let emittedFiles: string[] = [];
let affectedEmitResult: AffectedFileResult<EmitResult>;
while (affectedEmitResult = emitNextAffectedFile(writeFile, cancellationToken, emitOnlyDtsFiles, customTransformers)) {
emitSkipped = emitSkipped || affectedEmitResult.result.emitSkipped;
diagnostics = addRange(diagnostics, affectedEmitResult.result.diagnostics);
emittedFiles = addRange(emittedFiles, affectedEmitResult.result.emittedFiles);
sourceMaps = addRange(sourceMaps, affectedEmitResult.result.sourceMaps);
}
return {
emitSkipped,
diagnostics: diagnostics || emptyArray,
emittedFiles,
sourceMaps
};
}
}
return Debug.assertDefined(state.program).emit(targetSourceFile, writeFile || maybeBind(host, host.writeFile), cancellationToken, emitOnlyDtsFiles, customTransformers);
}
/**
* Return the semantic diagnostics for the next affected file or undefined if iteration is complete
* If provided, ignoreSourceFile would be called before getting the diagnostics and would ignore the sourceFile if the returned value was true
*/
function getSemanticDiagnosticsOfNextAffectedFile(cancellationToken?: CancellationToken, ignoreSourceFile?: (sourceFile: SourceFile) => boolean): AffectedFileResult<ReadonlyArray<Diagnostic>> {
while (true) {
const affected = getNextAffectedFile(state, cancellationToken, computeHash);
if (!affected) {
// Done
return undefined;
}
else if (affected === state.program) {
// When whole program is affected, get all semantic diagnostics (eg when --out or --outFile is specified)
return toAffectedFileResult(
state,
state.program.getSemanticDiagnostics(/*targetSourceFile*/ undefined, cancellationToken),
affected
);
}
// Get diagnostics for the affected file if it's not ignored
if (ignoreSourceFile && ignoreSourceFile(affected as SourceFile)) {
// Get next affected file
doneWithAffectedFile(state, affected);
continue;
}
return toAffectedFileResult(
state,
getSemanticDiagnosticsOfFile(state, affected as SourceFile, cancellationToken),
affected
);
}
}
/**
* Gets the semantic diagnostics from the program corresponding to this state of file (if provided) or whole program
* The semantic diagnostics are cached and managed here
* Note that it is assumed that when asked about semantic diagnostics through this API,
* the file has been taken out of affected files so it is safe to use cache or get from program and cache the diagnostics
* In case of SemanticDiagnosticsBuilderProgram if the source file is not provided,
* it will iterate through all the affected files, to ensure that cache stays valid and yet provide a way to get all semantic diagnostics
*/
function getSemanticDiagnostics(sourceFile?: SourceFile, cancellationToken?: CancellationToken): ReadonlyArray<Diagnostic> {
assertSourceFileOkWithoutNextAffectedCall(state, sourceFile);
const compilerOptions = Debug.assertDefined(state.program).getCompilerOptions();
if (compilerOptions.outFile || compilerOptions.out) {
Debug.assert(!state.semanticDiagnosticsPerFile);
// We don't need to cache the diagnostics, just return them from the program
return Debug.assertDefined(state.program).getSemanticDiagnostics(sourceFile, cancellationToken);
}
if (sourceFile) {
return getSemanticDiagnosticsOfFile(state, sourceFile, cancellationToken);
}
// When semantic builder asks for diagnostics of the whole program,
// ensure that all the affected files are handled
let affected: SourceFile | Program | undefined;
let affectedFilesPendingEmit: Path[] | undefined;
while (affected = getNextAffectedFile(state, cancellationToken, computeHash)) {
if (affected !== state.program && kind === BuilderProgramKind.EmitAndSemanticDiagnosticsBuilderProgram) {
(affectedFilesPendingEmit || (affectedFilesPendingEmit = [])).push((affected as SourceFile).path);
}
doneWithAffectedFile(state, affected);
}
// In case of emit builder, cache the files to be emitted
if (affectedFilesPendingEmit) {
state.affectedFilesPendingEmit = concatenate(state.affectedFilesPendingEmit, affectedFilesPendingEmit);
// affectedFilesPendingEmitIndex === undefined
// - means state.affectedFilesPendingEmit was undefined before adding the current affected files,
//       so start from 0 since the array consists only of the files just added
// else, continue to iterate from the existing index; the current set is appended to the existing files
if (state.affectedFilesPendingEmitIndex === undefined) {
state.affectedFilesPendingEmitIndex = 0;
}
}
let diagnostics: Diagnostic[] | undefined;
for (const sourceFile of Debug.assertDefined(state.program).getSourceFiles()) {
diagnostics = addRange(diagnostics, getSemanticDiagnosticsOfFile(state, sourceFile, cancellationToken));
}
return diagnostics || emptyArray;
}
}
function getMapOfReferencedSet(mapLike: MapLike<ReadonlyArray<string>> | undefined): ReadonlyMap<BuilderState.ReferencedSet> | undefined {
if (!mapLike) return undefined;
const map = createMap<BuilderState.ReferencedSet>();
// Copies keys/values from template. Note that for..in will not throw if
// template is undefined, and instead will just exit the loop.
for (const key in mapLike) {
if (hasProperty(mapLike, key)) {
map.set(key, arrayToSet(mapLike[key]));
}
}
return map;
}
export function createBuildProgramUsingProgramBuildInfo(program: ProgramBuildInfo): EmitAndSemanticDiagnosticsBuilderProgram & SemanticDiagnosticsBuilderProgram {
const fileInfos = createMapFromTemplate(program.fileInfos);
const state: ReusableBuilderProgramState = {
fileInfos,
compilerOptions: program.options,
referencedMap: getMapOfReferencedSet(program.referencedMap),
exportedModulesMap: getMapOfReferencedSet(program.exportedModulesMap),
semanticDiagnosticsPerFile: program.semanticDiagnosticsPerFile && arrayToMap(program.semanticDiagnosticsPerFile, value => isString(value) ? value : value[0], value => isString(value) ? emptyArray : value[1]),
hasReusableDiagnostic: true
};
return {
getState: () => state,
backupState: noop,
restoreState: noop,
getProgram: notImplemented,
getProgramOrUndefined: returnUndefined,
releaseProgram: noop,
getCompilerOptions: () => state.compilerOptions,
getSourceFile: notImplemented,
getSourceFiles: notImplemented,
getOptionsDiagnostics: notImplemented,
getGlobalDiagnostics: notImplemented,
getConfigFileParsingDiagnostics: notImplemented,
getSyntacticDiagnostics: notImplemented,
getDeclarationDiagnostics: notImplemented,
getSemanticDiagnostics: notImplemented,
emit: notImplemented,
getAllDependencies: notImplemented,
getCurrentDirectory: notImplemented,
emitNextAffectedFile: notImplemented,
getSemanticDiagnosticsOfNextAffectedFile: notImplemented,
};
}
export function createRedirectedBuilderProgram(state: { program: Program | undefined; compilerOptions: CompilerOptions; }, configFileParsingDiagnostics: ReadonlyArray<Diagnostic>): BuilderProgram {
return {
getState: notImplemented,
backupState: noop,
restoreState: noop,
getProgram,
getProgramOrUndefined: () => state.program,
releaseProgram: () => state.program = undefined,
getCompilerOptions: () => state.compilerOptions,
getSourceFile: fileName => getProgram().getSourceFile(fileName),
getSourceFiles: () => getProgram().getSourceFiles(),
getOptionsDiagnostics: cancellationToken => getProgram().getOptionsDiagnostics(cancellationToken),
getGlobalDiagnostics: cancellationToken => getProgram().getGlobalDiagnostics(cancellationToken),
getConfigFileParsingDiagnostics: () => configFileParsingDiagnostics,
getSyntacticDiagnostics: (sourceFile, cancellationToken) => getProgram().getSyntacticDiagnostics(sourceFile, cancellationToken),
getDeclarationDiagnostics: (sourceFile, cancellationToken) => getProgram().getDeclarationDiagnostics(sourceFile, cancellationToken),
getSemanticDiagnostics: (sourceFile, cancellationToken) => getProgram().getSemanticDiagnostics(sourceFile, cancellationToken),
emit: (sourceFile, writeFile, cancellationToken, emitOnlyDts, customTransformers) => getProgram().emit(sourceFile, writeFile, cancellationToken, emitOnlyDts, customTransformers),
getAllDependencies: notImplemented,
getCurrentDirectory: () => getProgram().getCurrentDirectory(),
};
function getProgram() {
return Debug.assertDefined(state.program);
}
}
}
namespace ts {
export type AffectedFileResult<T> = { result: T; affected: SourceFile | Program; } | undefined;
export interface BuilderProgramHost {
/**
* return true if file names are treated with case sensitivity
*/
useCaseSensitiveFileNames(): boolean;
/**
* If provided, this hash would be used instead of the actual file shape text for detecting changes
*/
createHash?: (data: string) => string;
/**
* When emit or emitNextAffectedFile are called without writeFile,
* this callback if present would be used to write files
*/
writeFile?: WriteFileCallback;
}
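// Minimal host sketch (an editorial example; assumes ts.sys is available in the host environment):
//
//     const builderHost: BuilderProgramHost = {
//         useCaseSensitiveFileNames: () => ts.sys.useCaseSensitiveFileNames,
//         createHash: ts.sys.createHash && (data => ts.sys.createHash!(data)),
//         writeFile: (fileName, text) => ts.sys.writeFile(fileName, text),
//     };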
/**
* Builder to manage the program state changes
*/
export interface BuilderProgram {
/*@internal*/
getState(): ReusableBuilderProgramState;
/*@internal*/
backupState(): void;
/*@internal*/
restoreState(): void;
/**
* Returns current program
*/
getProgram(): Program;
/**
* Returns current program that could be undefined if the program was released
*/
/*@internal*/
getProgramOrUndefined(): Program | undefined;
/**
* Releases reference to the program, making all the other operations that need program to fail.
*/
/*@internal*/
releaseProgram(): void;
/**
* Get compiler options of the program
*/
getCompilerOptions(): CompilerOptions;
/**
* Get the source file in the program with file name
*/
getSourceFile(fileName: string): SourceFile | undefined;
/**
* Get a list of files in the program
*/
getSourceFiles(): ReadonlyArray<SourceFile>;
/**
* Get the diagnostics for compiler options
*/
getOptionsDiagnostics(cancellationToken?: CancellationToken): ReadonlyArray<Diagnostic>;
/**
* Get the diagnostics that don't belong to any file
*/
getGlobalDiagnostics(cancellationToken?: CancellationToken): ReadonlyArray<Diagnostic>;
/**
* Get the diagnostics from config file parsing
*/
getConfigFileParsingDiagnostics(): ReadonlyArray<Diagnostic>;
/**
* Get the syntax diagnostics, for all source files if source file is not supplied
*/
getSyntacticDiagnostics(sourceFile?: SourceFile, cancellationToken?: CancellationToken): ReadonlyArray<Diagnostic>;
/**
* Get the declaration diagnostics, for all source files if source file is not supplied
*/
getDeclarationDiagnostics(sourceFile?: SourceFile, cancellationToken?: CancellationToken): ReadonlyArray<DiagnosticWithLocation>;
/**
* Get all the dependencies of the file
*/
getAllDependencies(sourceFile: SourceFile): ReadonlyArray<string>;
/**
* Gets the semantic diagnostics from the program corresponding to this state of file (if provided) or whole program
* The semantic diagnostics are cached and managed here
* Note that it is assumed that when asked about semantic diagnostics through this API,
* the file has been taken out of affected files so it is safe to use cache or get from program and cache the diagnostics
* In case of SemanticDiagnosticsBuilderProgram if the source file is not provided,
* it will iterate through all the affected files, to ensure that cache stays valid and yet provide a way to get all semantic diagnostics
*/
getSemanticDiagnostics(sourceFile?: SourceFile, cancellationToken?: CancellationToken): ReadonlyArray<Diagnostic>;
/**
* Emits the JavaScript and declaration files.
* When targetSourceFile is specified, emits the files corresponding to that source file,
* otherwise for the whole program.
* In case of EmitAndSemanticDiagnosticsBuilderProgram, when targetSourceFile is specified,
* it is assumed that that file is handled from the affected file list. If targetSourceFile is not specified,
* it will emit only the affected files instead of the whole program
*
* The first available of: the writeFile argument if provided, writeFile of BuilderProgramHost if provided, or writeFile of the compiler host,
* in that order, would be used to write the files
*/
emit(targetSourceFile?: SourceFile, writeFile?: WriteFileCallback, cancellationToken?: CancellationToken, emitOnlyDtsFiles?: boolean, customTransformers?: CustomTransformers): EmitResult;
/**
* Get the current directory of the program
*/
getCurrentDirectory(): string;
}
/**
* The builder that caches the semantic diagnostics for the program and handles the changed files and affected files
*/
export interface SemanticDiagnosticsBuilderProgram extends BuilderProgram {
/**
* Gets the semantic diagnostics from the program for the next affected file and caches it
* Returns undefined if the iteration is complete
*/
getSemanticDiagnosticsOfNextAffectedFile(cancellationToken?: CancellationToken, ignoreSourceFile?: (sourceFile: SourceFile) => boolean): AffectedFileResult<ReadonlyArray<Diagnostic>>;
}
/**
* The builder that can handle the changes in program and iterate through changed file to emit the files
* The semantic diagnostics are cached per file and managed by clearing for the changed/affected files
*/
export interface EmitAndSemanticDiagnosticsBuilderProgram extends BuilderProgram {
/**
* Emits the next affected file's emit result (EmitResult and sourceFiles emitted) or returns undefined if iteration is complete
* The first available of: the writeFile argument if provided, writeFile of BuilderProgramHost if provided, or writeFile of the compiler host,
* in that order, would be used to write the files
*/
emitNextAffectedFile(writeFile?: WriteFileCallback, cancellationToken?: CancellationToken, emitOnlyDtsFiles?: boolean, customTransformers?: CustomTransformers): AffectedFileResult<EmitResult>;
}
/**
* Create the builder to manage semantic diagnostics and cache them
*/
export function createSemanticDiagnosticsBuilderProgram(newProgram: Program, host: BuilderProgramHost, oldProgram?: SemanticDiagnosticsBuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>): SemanticDiagnosticsBuilderProgram;
export function createSemanticDiagnosticsBuilderProgram(rootNames: ReadonlyArray<string> | undefined, options: CompilerOptions | undefined, host?: CompilerHost, oldProgram?: SemanticDiagnosticsBuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>, projectReferences?: ReadonlyArray<ProjectReference>): SemanticDiagnosticsBuilderProgram;
export function createSemanticDiagnosticsBuilderProgram(newProgramOrRootNames: Program | ReadonlyArray<string> | undefined, hostOrOptions: BuilderProgramHost | CompilerOptions | undefined, oldProgramOrHost?: CompilerHost | SemanticDiagnosticsBuilderProgram, configFileParsingDiagnosticsOrOldProgram?: ReadonlyArray<Diagnostic> | SemanticDiagnosticsBuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>, projectReferences?: ReadonlyArray<ProjectReference>) {
return createBuilderProgram(BuilderProgramKind.SemanticDiagnosticsBuilderProgram, getBuilderCreationParameters(newProgramOrRootNames, hostOrOptions, oldProgramOrHost, configFileParsingDiagnosticsOrOldProgram, configFileParsingDiagnostics, projectReferences));
}
/**
* Create the builder that can handle the changes in program and iterate through changed files
* to emit those files and manage the semantic diagnostics cache as well
*/
export function createEmitAndSemanticDiagnosticsBuilderProgram(newProgram: Program, host: BuilderProgramHost, oldProgram?: EmitAndSemanticDiagnosticsBuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>): EmitAndSemanticDiagnosticsBuilderProgram;
export function createEmitAndSemanticDiagnosticsBuilderProgram(rootNames: ReadonlyArray<string> | undefined, options: CompilerOptions | undefined, host?: CompilerHost, oldProgram?: EmitAndSemanticDiagnosticsBuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>, projectReferences?: ReadonlyArray<ProjectReference>): EmitAndSemanticDiagnosticsBuilderProgram;
export function createEmitAndSemanticDiagnosticsBuilderProgram(newProgramOrRootNames: Program | ReadonlyArray<string> | undefined, hostOrOptions: BuilderProgramHost | CompilerOptions | undefined, oldProgramOrHost?: CompilerHost | EmitAndSemanticDiagnosticsBuilderProgram, configFileParsingDiagnosticsOrOldProgram?: ReadonlyArray<Diagnostic> | EmitAndSemanticDiagnosticsBuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>, projectReferences?: ReadonlyArray<ProjectReference>) {
return createBuilderProgram(BuilderProgramKind.EmitAndSemanticDiagnosticsBuilderProgram, getBuilderCreationParameters(newProgramOrRootNames, hostOrOptions, oldProgramOrHost, configFileParsingDiagnosticsOrOldProgram, configFileParsingDiagnostics, projectReferences));
}
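// Illustrative driver loop (a sketch, not part of the original source; program and host creation elided):
// an emit-and-semantic-diagnostics builder is typically drained one affected file at a time,
// which lets a watch host stop early on cancellation without losing pending work.
//
//     const builder = createEmitAndSemanticDiagnosticsBuilderProgram(program, builderHost, previousBuilder);
//     let result;
//     while (result = builder.emitNextAffectedFile()) {
//         // result.affected is the SourceFile (or the whole Program with --out/--outFile)
//         // result.result is the EmitResult for that emit
//     }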
/**
* Creates a builder that is just an abstraction over the program and can be used with watch
*/
export function createAbstractBuilder(newProgram: Program, host: BuilderProgramHost, oldProgram?: BuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>): BuilderProgram;
export function createAbstractBuilder(rootNames: ReadonlyArray<string> | undefined, options: CompilerOptions | undefined, host?: CompilerHost, oldProgram?: BuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>, projectReferences?: ReadonlyArray<ProjectReference>): BuilderProgram;
export function createAbstractBuilder(newProgramOrRootNames: Program | ReadonlyArray<string> | undefined, hostOrOptions: BuilderProgramHost | CompilerOptions | undefined, oldProgramOrHost?: CompilerHost | BuilderProgram, configFileParsingDiagnosticsOrOldProgram?: ReadonlyArray<Diagnostic> | BuilderProgram, configFileParsingDiagnostics?: ReadonlyArray<Diagnostic>, projectReferences?: ReadonlyArray<ProjectReference>): BuilderProgram {
const { newProgram, configFileParsingDiagnostics: newConfigFileParsingDiagnostics } = getBuilderCreationParameters(newProgramOrRootNames, hostOrOptions, oldProgramOrHost, configFileParsingDiagnosticsOrOldProgram, configFileParsingDiagnostics, projectReferences);
return createRedirectedBuilderProgram({ program: newProgram, compilerOptions: newProgram.getCompilerOptions() }, newConfigFileParsingDiagnostics);
}
} | |
main.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import calendar
from datetime import timedelta
from itertools import islice
from bson import ObjectId
from ming.orm import session
from tg import tmpl_context as c, app_globals as g
from tg import request, response
from tg import expose, validate, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from paste.deploy.converters import asbool, asint
from webob import exc
import feedgenerator as FG
from activitystream.storage.mingstorage import Activity
from allura.app import Application
from allura import version
from allura import model as M
from allura.controllers import BaseController
from allura.controllers.rest import AppRestControllerMixin
from allura.lib.security import require_authenticated, require_access
from allura.model.timeline import perm_check, get_activity_object
from allura.lib import helpers as h
from allura.lib.decorators import require_post
from allura.lib.widgets.form_fields import PageList
from allura.ext.user_profile import ProfileSectionBase
from .widgets.follow import FollowToggle
from six.moves import filter
import re
log = logging.getLogger(__name__)
class ForgeActivityApp(Application):
"""Project Activity page for projects."""
__version__ = version.__version__
default_mount_point = 'activity'
max_instances = 0
searchable = False
has_notifications = False
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ForgeActivityController(self)
self.api_root = ForgeActivityRestController(self)
def admin_menu(self): # pragma no cover
return []
def install(self, project):
role_anon = M.ProjectRole.by_name('*anonymous')._id
self.config.acl = [
M.ACE.allow(role_anon, 'read'),
]
def uninstall(self, project):
pass # pragma no cover
class W:
follow_toggle = FollowToggle()
page_list = PageList()
class ForgeActivityController(BaseController):
def __init__(self, app, *args, **kw):
super(ForgeActivityController, self).__init__(*args, **kw)
self.app = app
setattr(self, 'feed.atom', self.feed)
setattr(self, 'feed.rss', self.feed)
def _check_security(self):
require_access(c.app, 'read')
def _before(self, *args, **kw):
"""Runs before each request to this controller.
"""
# register the custom css for our tool
g.register_app_css('css/activity.css', app=self.app)
def _get_activities_data(self, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
c.follow_toggle = W.follow_toggle
c.page_list = W.page_list
if c.project.is_user_project:
followee = c.project.user_project_of
actor_only = followee != c.user
else:
followee = c.project
actor_only = False
following = g.director.is_connected(c.user, followee)
limit, page = h.paging_sanitizer(kw.get('limit', 100), kw.get('page', 0))
extra_limit = limit
# get more in case perm check filters some out
if page == 0 and limit <= 10:
extra_limit = limit * 20
timeline = g.director.get_timeline(followee, page,
limit=extra_limit,
actor_only=actor_only)
filtered_timeline = list(islice(filter(perm_check(c.user), timeline),
0, limit))
if config.get("default_avatar_image"):
for t in filtered_timeline:
if not t.actor.activity_extras.get('icon_url'):
t.actor.activity_extras.icon_url = config['default_avatar_image']
else:
t.actor.activity_extras.icon_url = re.sub(r'([&?])d=[^&]*',
r'\1d={}'.format(config["default_avatar_image"]),
t.actor.activity_extras.icon_url)
session(t).expunge(t) # don't save back this change
if extra_limit == limit:
# if we didn't ask for extra, then we expect there's more if we got all we asked for
has_more = len(timeline) == limit
else:
# if we did ask for extra, check filtered result
has_more = len(filtered_timeline) == limit
return dict(
followee=followee,
following=following,
timeline=filtered_timeline,
noindex=False if filtered_timeline else True,
page=page,
limit=limit,
has_more=has_more,
actor_only=actor_only)
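# Editorial note (illustrative, not in the original source): with the defaults above, a
# first-page request with limit=10 over-fetches up to 200 raw events (extra_limit = limit * 20)
# so that permission filtering can still fill the page; has_more is then judged against the
# filtered list, while a plain request (extra_limit == limit) judges it against the raw timeline.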
@expose('jinja:forgeactivity:templates/index.html')
@with_trailing_slash
def index(self, **kw):
return self._get_activities_data(**kw)
@expose('jinja:forgeactivity:templates/timeline.html')
def pjax(self, **kw):
return self._get_activities_data(**kw)
@without_trailing_slash
@expose()
def feed(self, **kw):
data = self._get_activities_data(**kw)
response.headers['Content-Type'] = str('')
response.content_type = str('application/xml')
d = {
'title': 'Activity for %s' % data['followee'].activity_name,
'link': h.absurl(self.app.url),
'description': 'Recent activity for %s' % (
data['followee'].activity_name),
'language': 'en',
}
if request.environ['PATH_INFO'].endswith(str('.atom')):
feed = FG.Atom1Feed(**d)
else:
feed = FG.Rss201rev2Feed(**d)
for t in data['timeline']:
url_id = h.absurl(t.obj.activity_url) # try to keep this consistent over time (not url-quoted)
url = h.absurl(h.urlquote_path_only(t.obj.activity_url))
feed.add_item(title='%s %s %s%s' % (
t.actor.activity_name,
t.verb,
t.obj.activity_name,
' on %s' % t.target.activity_name if t.target.activity_name else '',
),
link=url,
pubdate=t.published,
description=h.strip_bad_unicode(t.obj.activity_extras.get('summary', '')),
unique_id=url_id,
author_name=t.actor.activity_name,
author_link=h.absurl(t.actor.activity_url))
return feed.writeString('utf-8')
@require_post()
@expose('json:')
@validate(W.follow_toggle)
def follow(self, follow, **kw):
activity_enabled = asbool(config.get('activitystream.enabled', False))
if not activity_enabled:
raise exc.HTTPNotFound()
require_authenticated()
followee = c.project
if c.project.is_user_project:
followee = c.project.user_project_of
if c.user == followee:
return dict(
success=False,
message='Cannot follow yourself')
try:
if follow:
g.director.connect(c.user, followee)
else:
g.director.disconnect(c.user, followee)
except Exception as e:
log.exception('Unexpected error following user')
return dict(
success=False,
message='Unexpected error: %s' % e)
return dict(
success=True,
message=W.follow_toggle.success_message(follow),
following=follow)
@require_post()
@expose('json:')
def delete_item(self, activity_id, **kwargs):
require_access(c.project.neighborhood, 'admin')
activity = Activity.query.get(_id=ObjectId(activity_id))
if not activity:
raise exc.HTTPGone
# find other copies of this activity on other user/projects timelines
# but only within a small time window, so we can do efficient searching
activity_ts = activity._id.generation_time
time_window = timedelta(hours=1)
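# Editorial note (not in the original source): ObjectIds embed their creation timestamp in
# their leading bytes, so the ObjectId.from_datetime() bounds below turn the "same activity,
# nearby time" search into an indexed range scan on _id instead of a full collection scan.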
all_copies = Activity.query.find({
'_id': {
'$gt': ObjectId.from_datetime(activity_ts - time_window),
'$lt': ObjectId.from_datetime(activity_ts + time_window),
},
'obj': activity.obj,
'target': activity.target,
'actor': activity.actor,
'verb': activity.verb,
'tags': activity.tags,
}).all()
log.info('Deleting %s copies of activity record: %s %s %s', len(all_copies),
activity.actor.activity_url, activity.verb, activity.obj.activity_url)
for activity in all_copies:
activity.query.delete()
return {'success': True}
class ForgeActivityRestController(BaseController, AppRestControllerMixin):
def __init__(self, app, *args, **kw):
super(ForgeActivityRestController, self).__init__(*args, **kw)
self.app = app
def _check_security(self):
require_access(c.app, 'read')
@expose('json:')
def index(self, **kw):
data = self.app.root._get_activities_data(**kw)
return {
'following': data['following'],
'followee': {
'activity_name': data['followee'].activity_name,
'activity_url': data['followee'].url(),
'activity_extras': {},
},
'timeline': [{
'published': calendar.timegm(a.published.timetuple()) * 1000,
'actor': a.actor._deinstrument(),
'verb': a.verb,
'obj': a.obj._deinstrument(),
'target': a.target._deinstrument(),
'tags': a.tags._deinstrument(),
} for a in data['timeline']],
}
class ForgeActivityProfileSection(ProfileSectionBase):
template = 'forgeactivity:templates/widgets/profile_section.html'
def __init__(self, *a, **kw):
super(ForgeActivityProfileSection, self).__init__(*a, **kw)
self.activity_app = self.project.app_instance('activity')
def check_display(self):
app_installed = self.activity_app is not None
activity_enabled = asbool(config.get('activitystream.enabled', False))
return app_installed and activity_enabled
def prepare_context(self, context):
| full_timeline = g.director.get_timeline(
self.user, page=0, limit=100,
actor_only=True,
)
filtered_timeline = list(islice(filter(perm_check(c.user), full_timeline),
0, 8))
for activity in filtered_timeline:
# Get the project for the activity.obj so we can use it in the
# template. Expunge first so Ming doesn't try to flush the attr
# we create to temporarily store the project.
#
# The get_activity_object() calls are cheap, pulling from
# the session identity map instead of mongo since identical
# calls are made by perm_check() above.
session(activity).expunge(activity)
activity_obj = get_activity_object(activity.obj)
activity.obj.project = getattr(activity_obj, 'project', None)
context.update({
'follow_toggle': W.follow_toggle,
'following': g.director.is_connected(c.user, self.user),
'timeline': filtered_timeline,
'activity_app': self.activity_app,
})
g.register_js('activity_js/follow.js')
return context |
|
listIotHubResourceKeys.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20200831preview
import (
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// The list of shared access policies with a next link.
func ListIotHubResourceKeys(ctx *pulumi.Context, args *ListIotHubResourceKeysArgs, opts ...pulumi.InvokeOption) (*ListIotHubResourceKeysResult, error) |
type ListIotHubResourceKeysArgs struct {
// The name of the resource group that contains the IoT hub.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The name of the IoT hub.
ResourceName string `pulumi:"resourceName"`
}
// The list of shared access policies with a next link.
type ListIotHubResourceKeysResult struct {
// The next link.
NextLink string `pulumi:"nextLink"`
// The list of shared access policies.
Value []SharedAccessSignatureAuthorizationRuleResponse `pulumi:"value"`
}
| {
var rv ListIotHubResourceKeysResult
err := ctx.Invoke("azure-nextgen:devices/v20200831preview:listIotHubResourceKeys", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
} |
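// Editorial usage sketch (hypothetical resource names; not part of the generated file):
// invoking this data source from a Pulumi program and exporting the number of policies.
//
//	pulumi.Run(func(ctx *pulumi.Context) error {
//		keys, err := ListIotHubResourceKeys(ctx, &ListIotHubResourceKeysArgs{
//			ResourceGroupName: "example-rg",
//			ResourceName:      "example-hub",
//		})
//		if err != nil {
//			return err
//		}
//		ctx.Export("policyCount", pulumi.Int(len(keys.Value)))
//		return nil
//	})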
0006_image_image.py | # Generated by Django 2.2 on 2021-07-03 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
| dependencies = [
('gallery', '0005_remove_image_image'),
]
operations = [
migrations.AddField(
model_name='image',
name='image',
field=models.CharField(default=1, max_length=255),
preserve_default=False,
),
] |
|
index.js | /**
* @flow
*/
import React from 'react';
import {
Text,
Modal,
View,
} from 'react-native';
import Button from '../button';
import Styles from './styles';
interface Props {
modalVisible: boolean;
onPress: Function;
}
const CustomModal = (props: Props) => {
const {modalVisible, onPress} = props;
return (
<Modal
animationType="slide" | transparent={true}
visible={modalVisible}
onRequestClose={()=> onPress(!modalVisible)}>
<View style={Styles.container}>
<View style={Styles.message}>
<Text style={{fontSize: 24, fontWeight: 'bold'}}>
Bull's eyes
</Text>
<Text style={{fontSize: 16}}>
This is a game that uses a slider to guess a number.
</Text>
<Button
styles={{backgroundColor: 'red'}}
titleStyle={{fontSize: 14}}
onPress={()=> onPress(!modalVisible)}
title="Hide Modal"
/>
</View>
</View>
</Modal>
);
}
export default CustomModal; | |
combine_logs.py | #!/usr/bin/env python3
"""Combine logs from multiple swyft nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
|
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
| """Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files]) |
hash_utils.eg.go | // Code generated by execgen; DO NOT EDIT.
// Copyright 2020 The Cockroach Authors.
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"math"
"reflect"
"unsafe"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coldataext"
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexecbase/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/types"
)
// rehash takes an element of a key (tuple representing a row of equality
// column values) at a given column and computes a new hash by applying a
// transformation to the existing hash.
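// For example (editorial illustration, not part of the generated source): a bool column value v
// folds into the running hash as p = p*31 + x with x being 0 or 1, while variable-width values
// such as bytes columns are folded in with memhash over their raw bytes.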
func rehash(
ctx context.Context,
buckets []uint64,
col coldata.Vec,
nKeys int,
sel []int,
cancelChecker CancelChecker,
overloadHelper overloadHelper,
datumAlloc *sqlbase.DatumAlloc,
) {
// In order to inline the templated code of overloads, we need to have a
// "_overloadHelper" local variable of type "overloadHelper".
_overloadHelper := overloadHelper
switch col.CanonicalTypeFamily() {
case types.BoolFamily:
switch col.Type().Width() {
case -1:
default:
keys, nulls := col.Bool(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
x := 0
if v {
x = 1
}
p = p*31 + uintptr(x)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
x := 0
if v {
x = 1
}
p = p*31 + uintptr(x)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
x := 0
if v {
x = 1
}
p = p*31 + uintptr(x)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
x := 0
if v {
x = 1
}
p = p*31 + uintptr(x)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
}
case types.BytesFamily:
switch col.Type().Width() {
case -1:
default:
keys, nulls := col.Bytes(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
sh := (*reflect.SliceHeader)(unsafe.Pointer(&v))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(v)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
sh := (*reflect.SliceHeader)(unsafe.Pointer(&v))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(v)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
sh := (*reflect.SliceHeader)(unsafe.Pointer(&v))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(v)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
sh := (*reflect.SliceHeader)(unsafe.Pointer(&v))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(v)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
}
case types.DecimalFamily:
switch col.Type().Width() {
case -1:
default:
keys, nulls := col.Decimal(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for equal decimals to hash to the same value we need to
// remove the trailing zeroes if there are any.
tmpDec := &_overloadHelper.tmpDec1
tmpDec.Reduce(&v)
b := []byte(tmpDec.String())
sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(b)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for equal decimals to hash to the same value we need to
// remove the trailing zeroes if there are any.
tmpDec := &_overloadHelper.tmpDec1
tmpDec.Reduce(&v)
b := []byte(tmpDec.String())
sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(b)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for equal decimals to hash to the same value we need to
// remove the trailing zeroes if there are any.
tmpDec := &_overloadHelper.tmpDec1
tmpDec.Reduce(&v)
b := []byte(tmpDec.String())
sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(b)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for equal decimals to hash to the same value we need to
// remove the trailing zeroes if there are any.
tmpDec := &_overloadHelper.tmpDec1
tmpDec.Reduce(&v)
b := []byte(tmpDec.String())
sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(b)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
}
case types.IntFamily:
switch col.Type().Width() {
case 16:
keys, nulls := col.Int16(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
case 32:
keys, nulls := col.Int32(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
case -1:
default:
keys, nulls := col.Int64(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
// In order for integers with different widths but of the same value to
// to hash to the same value, we upcast all of them to int64.
asInt64 := int64(v)
p = memhash64(noescape(unsafe.Pointer(&asInt64)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
}
case types.FloatFamily:
switch col.Type().Width() {
case -1:
default:
keys, nulls := col.Float64(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
f := v
if math.IsNaN(float64(f)) {
f = 0
}
p = f64hash(noescape(unsafe.Pointer(&f)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
f := v
if math.IsNaN(float64(f)) {
f = 0
}
p = f64hash(noescape(unsafe.Pointer(&f)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
f := v
if math.IsNaN(float64(f)) {
f = 0
}
p = f64hash(noescape(unsafe.Pointer(&f)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ { |
f := v
if math.IsNaN(float64(f)) {
f = 0
}
p = f64hash(noescape(unsafe.Pointer(&f)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
}
case types.TimestampTZFamily:
switch col.Type().Width() {
case -1:
default:
keys, nulls := col.Timestamp(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
s := v.UnixNano()
p = memhash64(noescape(unsafe.Pointer(&s)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
s := v.UnixNano()
p = memhash64(noescape(unsafe.Pointer(&s)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
s := v.UnixNano()
p = memhash64(noescape(unsafe.Pointer(&s)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
s := v.UnixNano()
p = memhash64(noescape(unsafe.Pointer(&s)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
}
case types.IntervalFamily:
switch col.Type().Width() {
case -1:
default:
keys, nulls := col.Interval(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
months, days, nanos := v.Months, v.Days, v.Nanos()
p = memhash64(noescape(unsafe.Pointer(&months)), p)
p = memhash64(noescape(unsafe.Pointer(&days)), p)
p = memhash64(noescape(unsafe.Pointer(&nanos)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
months, days, nanos := v.Months, v.Days, v.Nanos()
p = memhash64(noescape(unsafe.Pointer(&months)), p)
p = memhash64(noescape(unsafe.Pointer(&days)), p)
p = memhash64(noescape(unsafe.Pointer(&nanos)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
months, days, nanos := v.Months, v.Days, v.Nanos()
p = memhash64(noescape(unsafe.Pointer(&months)), p)
p = memhash64(noescape(unsafe.Pointer(&days)), p)
p = memhash64(noescape(unsafe.Pointer(&nanos)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
months, days, nanos := v.Months, v.Days, v.Nanos()
p = memhash64(noescape(unsafe.Pointer(&months)), p)
p = memhash64(noescape(unsafe.Pointer(&days)), p)
p = memhash64(noescape(unsafe.Pointer(&nanos)), p)
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
}
case typeconv.DatumVecCanonicalTypeFamily:
switch col.Type().Width() {
case -1:
default:
keys, nulls := col.Datum(), col.Nulls()
if col.MaybeHasNulls() {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
b := v.(*coldataext.Datum).Hash(datumAlloc)
sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(b)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
if nulls.NullAt(selIdx) {
continue
}
v := keys.Get(selIdx)
p := uintptr(buckets[i])
b := v.(*coldataext.Datum).Hash(datumAlloc)
sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(b)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
} else {
if sel != nil {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = sel[nKeys-1]
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = sel[i]
v := keys.Get(selIdx)
p := uintptr(buckets[i])
b := v.(*coldataext.Datum).Hash(datumAlloc)
sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(b)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
} else {
// Early bounds checks.
_ = buckets[nKeys-1]
_ = keys.Get(nKeys - 1)
var selIdx int
for i := 0; i < nKeys; i++ {
selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i])
b := v.(*coldataext.Datum).Hash(datumAlloc)
sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(b)))
buckets[i] = uint64(p)
}
cancelChecker.checkEveryCall(ctx)
}
}
}
default:
colexecerror.InternalError(fmt.Sprintf("unhandled type %s", col.Type()))
}
} | selIdx = i
v := keys.Get(selIdx)
p := uintptr(buckets[i]) |
styles.js | import { makeStyles } from "@material-ui/styles";
export default makeStyles(theme => ({
button:{
marginTop:theme.spacing(4),
marginLeft:theme.spacing(2),
},
buttonModal:{
marginTop:theme.spacing(2),
marginBottom:theme.spacing(4),
marginRight:theme.spacing(2),
},
saveButtonContainer: {
display:"flex",
justifyContent: "center",
alignItems:"center",
width: 68,
},
textFieldUnderline: {
"&:before": {
borderBottomColor: theme.palette.primary.light,
},
"&:after": {
borderBottomColor: theme.palette.primary.main,
},
"&:hover:before": {
borderBottomColor: `${theme.palette.primary.light} !important`,
}, |
InputToGetMember: {
marginTop:theme.spacing(4),
width:320,
},
})); | },
textField: {
borderBottomColor: theme.palette.background.light,
}, |
cras_shm_stream.rs | // Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::time::Duration;
use std::{error, fmt};
use audio_streams::{
shm_streams::{BufferSet, ServerRequest, ShmStream},
BoxError, SampleFormat, StreamDirection,
};
use cras_sys::gen::CRAS_AUDIO_MESSAGE_ID;
use sys_util::error;
use crate::audio_socket::{AudioMessage, AudioSocket};
use crate::cras_server_socket::CrasServerSocket;
use crate::cras_shm::{self, CrasAudioHeader, CrasAudioShmHeaderFd};
#[derive(Debug)]
pub enum Error {
MessageTypeError,
CaptureBufferTooSmall,
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::MessageTypeError => write!(f, "Message type error"),
Error::CaptureBufferTooSmall => write!(
f,
"Capture buffer too small, must have size at least 'used_size'."
),
}
}
}
/// An object that handles interactions with CRAS for a shm stream.
/// The object implements `ShmStream` and so can be used to wait for
/// `ServerRequest` and `BufferComplete` messages.
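/// Typical flow (editorial note, not in the original source): the stream owner calls
/// `wait_for_next_action_with_timeout` in a loop; each `ServerRequest` it returns is backed by
/// this stream as a `BufferSet`, so fulfilling the request ends up in `callback` below, which
/// publishes the buffer offset to shared memory and notifies CRAS over the audio socket.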
pub struct CrasShmStream<'a> {
stream_id: u32,
server_socket: CrasServerSocket,
audio_socket: AudioSocket,
direction: StreamDirection,
header: CrasAudioHeader<'a>,
frame_size: usize,
num_channels: usize,
frame_rate: u32,
// The index of the next buffer within SHM to set the buffer offset for.
next_buffer_idx: usize,
}
impl<'a> CrasShmStream<'a> {
    /// Attempts to create a CrasShmStream with the given arguments.
///
/// # Arguments
///
/// * `stream_id` - The server's ID for the stream.
/// * `server_socket` - The socket that is connected to the server.
/// * `audio_socket` - The socket for audio request and audio available messages.
/// * `direction` - The direction of the stream, `Playback` or `Capture`.
/// * `num_channels` - The number of audio channels for the stream.
/// * `format` - The format to use for the stream's samples.
/// * `header_fd` - The file descriptor for the audio header shm area.
/// * `samples_len` - The size of the audio samples shm area.
///
/// # Returns
///
/// `CrasShmStream` - CRAS client stream.
///
/// # Errors
///
/// * If `header_fd` could not be successfully mmapped.
#[allow(clippy::too_many_arguments)]
pub fn try_new(
stream_id: u32,
server_socket: CrasServerSocket,
audio_socket: AudioSocket,
direction: StreamDirection,
num_channels: usize,
frame_rate: u32,
format: SampleFormat,
header_fd: CrasAudioShmHeaderFd,
samples_len: usize,
) -> Result<Self, BoxError> {
let header = cras_shm::create_header(header_fd, samples_len)?;
Ok(Self {
stream_id,
server_socket,
audio_socket,
direction,
header,
frame_size: format.sample_bytes() * num_channels,
num_channels,
frame_rate,
// We have either sent zero or two offsets to the server, so we will
// need to update index 0 next.
next_buffer_idx: 0,
})
}
}
impl<'a> Drop for CrasShmStream<'a> {
/// Send the disconnect stream message and log an error if sending fails.
fn drop(&mut self) {
if let Err(e) = self.server_socket.disconnect_stream(self.stream_id) {
error!("CrasShmStream::drop error: {}", e);
}
}
}
impl<'a> ShmStream for CrasShmStream<'a> {
fn frame_size(&self) -> usize {
self.frame_size
}
fn num_channels(&self) -> usize {
self.num_channels
}
fn frame_rate(&self) -> u32 {
self.frame_rate
}
fn wait_for_next_action_with_timeout(
&mut self,
timeout: Duration,
) -> Result<Option<ServerRequest>, BoxError> |
}
impl BufferSet for CrasShmStream<'_> {
fn callback(&mut self, offset: usize, frames: usize) -> Result<(), BoxError> {
self.header
.set_buffer_offset(self.next_buffer_idx, offset)?;
self.next_buffer_idx ^= 1;
let frames = frames as u32;
match self.direction {
StreamDirection::Playback => {
self.header.commit_written_frames(frames)?;
// Notify CRAS that we've made playback data available.
self.audio_socket.data_ready(frames)?
}
StreamDirection::Capture => {
let used_size = self.header.get_used_size();
// Because CRAS doesn't know how long our buffer in shm is, we
// must make sure that there are always at least buffer_size
// frames available so that it doesn't write outside the buffer.
if frames < (used_size / self.frame_size) as u32 {
return Err(Box::new(Error::CaptureBufferTooSmall));
}
self.header.commit_read_frames(frames)?;
self.audio_socket.capture_ready(frames)?;
}
}
Ok(())
}
fn ignore(&mut self) -> Result<(), BoxError> {
// We send an empty buffer for an ignored playback request since the
// server will not read from a 0-length buffer. We don't do anything for
// an ignored capture request, since we don't have a way to communicate
// buffer length to the server, and we don't want the server writing
// data to offsets within the SHM area that aren't audio buffers.
if self.direction == StreamDirection::Playback {
self.callback(0, 0)?;
}
Ok(())
}
}
| {
let expected_id = match self.direction {
StreamDirection::Playback => CRAS_AUDIO_MESSAGE_ID::AUDIO_MESSAGE_REQUEST_DATA,
StreamDirection::Capture => CRAS_AUDIO_MESSAGE_ID::AUDIO_MESSAGE_DATA_READY,
};
match self
.audio_socket
.read_audio_message_with_timeout(Some(timeout))?
{
Some(AudioMessage::Success { id, frames }) if id == expected_id => {
Ok(Some(ServerRequest::new(frames as usize, self)))
}
None => Ok(None),
_ => Err(Box::new(Error::MessageTypeError)),
}
} |
crc8.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package crc8 implements the 8-bit cyclic redundancy check, or CRC-8,
// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check and
// http://www.ross.net/crc/download/crc_v3.txt for information.
package crc8
import "github.com/mewkiz/flac/internal/hashutil"
// Size of a CRC-8 checksum in bytes.
const Size = 1
// Predefined polynomials.
const (
ATM = 0x07 // x^8 + x^2 + x + 1
)
// Table is a 256-entry table representing the polynomial for efficient
// processing.
type Table [256]uint8
// ATMTable is the table for the ATM polynomial.
var ATMTable = makeTable(ATM)
// MakeTable returns the Table constructed from the specified polynomial.
func | (poly uint8) (table *Table) {
switch poly {
case ATM:
return ATMTable
}
return makeTable(poly)
}
// makeTable returns the Table constructed from the specified polynomial.
func makeTable(poly uint8) (table *Table) {
table = new(Table)
for i := range table {
crc := uint8(i)
for j := 0; j < 8; j++ {
if crc&0x80 != 0 {
crc = crc<<1 ^ poly
} else {
crc <<= 1
}
}
table[i] = crc
}
return table
}
// digest represents the partial evaluation of a checksum.
type digest struct {
crc uint8
table *Table
}
// New creates a new hashutil.Hash8 computing the CRC-8 checksum using the
// polynomial represented by the Table.
func New(table *Table) hashutil.Hash8 {
return &digest{0, table}
}
// NewATM creates a new hashutil.Hash8 computing the CRC-8 checksum using the
// ATM polynomial.
func NewATM() hashutil.Hash8 {
return New(ATMTable)
}
func (d *digest) Size() int {
return Size
}
func (d *digest) BlockSize() int {
return 1
}
func (d *digest) Reset() {
d.crc = 0
}
// Update returns the result of adding the bytes in p to the crc.
func Update(crc uint8, table *Table, p []byte) uint8 {
for _, v := range p {
crc = table[crc^v]
}
return crc
}
func (d *digest) Write(p []byte) (n int, err error) {
d.crc = Update(d.crc, d.table, p)
return len(p), nil
}
// Sum8 returns the 8-bit checksum of the hash.
func (d *digest) Sum8() uint8 {
return d.crc
}
func (d *digest) Sum(in []byte) []byte {
return append(in, d.crc)
}
// Checksum returns the CRC-8 checksum of data, using the polynomial represented
// by the Table.
func Checksum(data []byte, table *Table) uint8 {
return Update(0, table, data)
}
// ChecksumATM returns the CRC-8 checksum of data using the ATM polynomial.
func ChecksumATM(data []byte) uint8 {
return Update(0, ATMTable, data)
}
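// Editorial usage sketch (not part of the original file): the incremental hashutil.Hash8
// interface and the one-shot helper produce the same CRC-8/ATM checksum.
func exampleChecksumATM() {
	data := []byte{0x12, 0x34, 0x56}
	h := NewATM()
	h.Write(data)
	if h.Sum8() != ChecksumATM(data) {
		panic("incremental and one-shot CRC-8 results should match")
	}
}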
| MakeTable |
Node.ts | class | <T> {
next: Node<T> | null;
private _value: T;
constructor(value: T) {
this._value = value;
this.next = null;
}
get value(): T {
return this._value;
}
}
export default Node;
| Node |
api.rs | use std::collections::{HashSet, HashMap, VecDeque};
use uuid::Uuid;
pub use grin_util::secp::{Message};
use common::crypto::{Hex, SecretKey};
use grin_core::core::hash::{Hash};
use grin_core::ser;
use grin_util::secp::pedersen;
use grin_util::secp::{ContextFlag, Secp256k1, Signature};
use grin_p2p::types::PeerInfoDisplay;
use crate::contacts::GrinboxAddress;
//use super::keys;
use super::types::TxProof;
use grin_wallet_libwallet::{AcctPathMapping, BlockFees, CbData, NodeClient, Slate, TxLogEntry, TxWrapper,
WalletInfo, WalletBackend, OutputCommitMapping, WalletInst, WalletLCProvider,
StatusMessage, TxLogEntryType, OutputData};
use grin_core::core::Transaction;
use grin_keychain::{Identifier, Keychain};
use grin_util::secp::key::{ PublicKey };
use crate::common::{Arc, Mutex, Error, ErrorKind};
use grin_keychain::{SwitchCommitmentType, ExtKeychainPath};
use grin_wallet_libwallet::internal::{updater,keys};
use std::sync::mpsc;
use crate::common::hasher;
use std::sync::mpsc::Sender;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread::JoinHandle;
use std::fs::File;
use std::io::{Write, BufReader, BufRead};
// struct for sending back node information
pub struct NodeInfo
{
pub height: u64,
pub total_difficulty: u64,
pub peers: Vec<PeerInfoDisplay>,
}
pub fn invoice_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
active_account: Option<String>,
slate: &Slate,
address: Option<String>,
minimum_confirmations: u64,
max_outputs: u32,
num_change_outputs: u32,
selection_strategy_is_use_all: bool,
message: Option<String>,
) -> Result< Slate, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let params = grin_wallet_libwallet::InitTxArgs {
src_acct_name: active_account,
amount: slate.amount,
minimum_confirmations,
max_outputs,
num_change_outputs,
/// If `true`, attempt to use up as many outputs as
/// possible to create the transaction, up the 'soft limit' of `max_outputs`. This helps
/// to reduce the size of the UTXO set and the amount of data stored in the wallet, and
/// minimizes fees. This will generally result in many inputs and a large change output(s),
/// usually much larger than the amount being sent. If `false`, the transaction will include
/// as many outputs as are needed to meet the amount, (and no more) starting with the smallest
/// value outputs.
selection_strategy_is_use_all,
message,
/// Optionally set the output target slate version (acceptable
/// down to the minimum slate version compatible with the current. If `None` the slate
/// is generated with the latest version.
target_slate_version: None,
/// Number of blocks from current after which TX should be ignored
ttl_blocks: None,
/// If set, require a payment proof for the particular recipient
payment_proof_recipient_address: None,
address,
/// If true, just return an estimate of the resulting slate, containing fees and amounts
/// locked without actually locking outputs or creating the transaction. Note if this is set to
/// 'true', the amount field in the slate will contain the total amount locked, not the provided
/// transaction amount
estimate_only: None,
/// Sender arguments. If present, the underlying function will also attempt to send the
/// transaction to a destination and optionally finalize the result
send_args: None,
};
let slate = grin_wallet_libwallet::owner::process_invoice_tx(
&mut **w,
None,
slate,
params,
false,
)?;
Ok(slate)
}
pub fn show_rootpublickey<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
message: Option<&str>
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let keychain = w.keychain(None)?;
let root_pub_key = keychain.public_root_key().to_hex();
cli_message!("Root public key: {}", root_pub_key);
match message {
Some(msg) => {
// that path and type will give us the root private key
let id = ExtKeychainPath::new(0,0,0,0,0).to_identifier();
// Note, first 32 bytes of the message will be used...
// Hash size is equal to the message size (32 bytes).
// Actually we could sign the message itself, not just its hash.
let msg_hash = Hash::from_vec(msg.as_bytes());
let msg_message = Message::from_slice(msg_hash.as_bytes())?;
// id points to the root key. Will check
let signature = keychain.sign(&msg_message,0, &id, &SwitchCommitmentType::None)?;
println!("Signature: {}", signature.to_hex());
},
None => {}
}
Ok(())
}
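// Editorial note (not in the original source): `verifysignature` below hashes the message the
// same way as the signing path above (Hash::from_vec over the raw message bytes), then parses
// the DER-encoded signature and the public key from hex and checks them with a verify-only
// secp256k1 context.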
pub fn verifysignature(
message: &str,
signature: &str,
pubkey: &str
) -> Result<(), Error> {
let msg = Hash::from_vec(message.as_bytes());
let msg = Message::from_slice(msg.as_bytes())?;
let secp = Secp256k1::with_caps(ContextFlag::VerifyOnly);
let pk = grin_util::from_hex(pubkey.to_string())?;
let pk = PublicKey::from_slice(&secp, &pk)?;
let signature = grin_util::from_hex(signature.to_string())?;
let signature = Signature::from_der(&secp, &signature)?;
match secp.verify(&msg, &signature, &pk) {
Ok(_) => println!("Message, signature and public key are valid!"),
Err(_) => println!("WARNING: Message, signature and public key are INVALID!"),
}
Ok(())
}
pub fn getnextkey<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
amount: u64) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
|
pub fn accounts<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>
) -> Result<Vec<AcctPathMapping>, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
Ok(keys::accounts(&mut **w)?)
}
pub fn create_account_path<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
label: &str
) -> Result<Identifier, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
Ok(keys::new_acct_path(&mut **w, None, label)?)
}
pub fn rename_account_path<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
old_label: &str, new_label: &str
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let accounts = accounts(wallet_inst.clone())?;
wallet_lock!(wallet_inst, w);
keys::rename_acct_path(&mut **w, None, accounts, old_label, new_label)?;
Ok(())
}
pub fn retrieve_tx_id_by_slate_id<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
slate_id: Uuid
) -> Result<u32, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let tx = updater::retrieve_txs(&mut **w, None,
None, Some(slate_id),
None,
false, None, None)?;
let mut ret = 1000000000;
for t in &tx {
ret = t.id;
}
Ok(ret)
}
pub fn retrieve_outputs<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
include_spent: bool,
refresh_from_node: bool,
tx_id: Option<u32>,
pagination_start: Option<u32>,
pagination_len: Option<u32>,
) -> Result<(bool, Vec<OutputCommitMapping>), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let parent_key_id = w.parent_key_id();
let mut validated = false;
if refresh_from_node {
validated = update_outputs(&mut **w, false, None, None);
}
let res = Ok((
validated,
updater::retrieve_outputs(&mut **w,
None,
include_spent,
tx_id,
Some(&parent_key_id),
pagination_start,
pagination_len)?,
));
//w.close()?;
res
}
pub fn retrieve_txs<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
refresh_from_node: bool,
tx_id: Option<u32>,
tx_slate_id: Option<Uuid>,
) -> Result<(bool, Vec<TxLogEntry>), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let parent_key_id = w.parent_key_id();
let mut validated = false;
if refresh_from_node {
validated = update_outputs(&mut **w, false, None, None);
}
let res = Ok((
validated,
updater::retrieve_txs(&mut **w, None, tx_id, tx_slate_id, Some(&parent_key_id), false, None, None)?,
));
//w.close()?;
res
}
pub fn retrieve_txs_with_proof_flag<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
refresh_from_node: bool,
tx_id: Option<u32>,
tx_slate_id: Option<Uuid>,
pagination_start: Option<u32>,
pagination_length: Option<u32>,
) -> Result<(bool, Vec<(TxLogEntry, bool)>), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let parent_key_id = w.parent_key_id();
let mut validated = false;
let mut output_list = None;
if refresh_from_node {
validated = update_outputs(&mut **w, false, None, None);
// we need to check outputs for confirmations of ALL transactions
output_list = Some((
validated,
// OutputCommitMap Array to array of (OutputData, pedersen::Commitment)
updater::retrieve_outputs(&mut **w,
None,
false,
None,
Some(&parent_key_id),
None, None)?
.iter().map(|ocm| (ocm.output.clone(), ocm.commit.clone())).collect()
));
}
let txs: Vec<TxLogEntry> =
updater::retrieve_txs_with_outputs(&mut **w,
None,
tx_id,
tx_slate_id,
Some(&parent_key_id),
false,
pagination_start,
pagination_length,
output_list)?;
let txs = txs
.into_iter()
.map(|t| {
let tx_slate_id = t.tx_slate_id.clone();
(
t,
tx_slate_id
.map(|i| TxProof::has_stored_tx_proof( w.get_data_file_dir(), &i.to_string()).unwrap_or(false))
.unwrap_or(false),
)
})
.collect();
let res = Ok((validated, txs));
//w.close()?;
res
}
struct TransactionInfo {
tx_log: TxLogEntry,
tx_kernels: Vec<String>, // pedersen::Commitment as strings. Tx kernels that can be used to validate send transactions
tx_outputs: Vec<OutputData>, // Output data from transaction.
validated: bool,
validation_flags: String,
warnings: Vec<String>,
}
impl TransactionInfo {
fn new(tx_log: TxLogEntry) -> Self {
TransactionInfo {
tx_log,
tx_kernels: Vec::new(),
tx_outputs: Vec::new(),
validated: false,
validation_flags: String::new(),
warnings: Vec::new(),
}
}
}
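// Editorial note (not in the original source): calc_best_merge greedily walks transactions in
// wallet order and tries to attribute a consecutive run of `num_outputs` wallet outputs whose
// values sum to `amount_credited`; the bool in each returned tuple records whether that match
// succeeded, and any outputs left unmatched are returned as outstanding.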
fn calc_best_merge(
outputs : &mut VecDeque<OutputData>,
transactions: &mut VecDeque<TxLogEntry>,
) -> (Vec<(TxLogEntry, Vec<OutputData>, bool)>, // Tx to output mapping
Vec<OutputData>) // Outstanding outputs
{
let mut res : Vec<(TxLogEntry,Vec<OutputData>, bool)> = Vec::new();
let mut next_cancelled = true;
while let Some(tx) = transactions.pop_front() {
if outputs.is_empty() { // failed to find the outputs
res.push( (tx.clone(), vec![], false) );
continue;
}
if tx.num_outputs==0 {
res.push( (tx.clone(), vec![], true) );
continue;
}
if tx.is_cancelled() {
if res.is_empty() { // first tx is cancelled. Edge case. Let's match the transaction if possible
next_cancelled = tx.amount_credited != outputs.front().unwrap().value;
}
if next_cancelled {
// normally the output is deleted from the DB. But there might be exceptions.
res.push((tx.clone(), vec![], true));
continue;
}
}
assert!(tx.num_outputs>0);
// Don't do much. Just check the current ones.
if tx.num_outputs <= outputs.len() {
let mut found = false;
for i in 0..(outputs.len()-(tx.num_outputs-1)) {
let mut amount: u64 = 0;
for k in 0..tx.num_outputs {
amount += outputs[k+i].value;
}
if amount == tx.amount_credited {
let mut res_outs: Vec<OutputData> = Vec::new();
for _ in 0..tx.num_outputs {
res_outs.push( outputs.remove(i).unwrap() );
}
found = true;
if let Some(o2) = outputs.get(i) {
next_cancelled = o2.n_child - res_outs.last().unwrap().n_child > 1; // normally it is 1
}
else {
next_cancelled = true;
}
res.push((tx.clone(), res_outs, true));
break;
}
}
if !found {
res.push( (tx.clone(), vec![], false) );
}
}
}
( res, outputs.iter().map(|o| o.clone()).collect::<Vec<OutputData>>() )
}
/// Validate transactions as bulk against full node kernels dump
pub fn txs_bulk_validate<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
kernels_fn: &str, // file with kernels dump. One line per kernel
outputs_fn: &str, // file with outputs dump. One line per output
result_fn: &str, // Resulting file
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let parent_key_id = w.parent_key_id();
// Validation will be processed for all transactions...
// The wallet's natural order should be good for us. Otherwise we would need to sort by tx_log.id
let mut txs : Vec<TransactionInfo> = Vec::new();
// Key: commit. Value: index at txs
let mut kernel_to_tx: HashMap< String, usize > = HashMap::new();
let mut output_to_tx: HashMap< String, usize > = HashMap::new();
let mut tx_id_to_tx: HashMap< u32, usize > = HashMap::new();
// Scanning both transactions and outputs. Doing that for all accounts. Filtering will be later
// Outputs don't have to start from the first n_child and they don't have to go in order because of a possible race condition at the recovery steps
let mut wallet_outputs : VecDeque<OutputData> = w.iter()
.filter(|o| o.root_key_id == parent_key_id && o.commit.is_some() )
.collect();
let mut wallet_transactions: VecDeque<TxLogEntry> = w.tx_log_iter()
.filter(|t| t.parent_key_id == parent_key_id )
.collect();
let wallet_outputs_len = wallet_outputs.len();
let (
tx_to_output,
outstanding_outputs,
) = calc_best_merge( &mut wallet_outputs, &mut wallet_transactions );
for ( tx, outputs, success ) in tx_to_output {
let mut tx_info = TransactionInfo::new(tx.clone());
tx_info.tx_outputs = outputs;
if !success {
tx_info.warnings.push("Failed to descover outputs".to_string());
}
if tx.tx_type == TxLogEntryType::ConfirmedCoinbase || tx.tx_type == TxLogEntryType::TxReceived {
if tx_info.tx_log.num_outputs == 0 {
tx_info.warnings.push("Tx Has no outputs".to_string());
println!("WARNING: Recieve transaction id {} doesn't have any outputs. Please check why it is happaning. {:?}", tx.id, tx);
}
}
///////////////////////////////////////////////////////////
// Taking care of the Send type of transactions. Sends are expected to have a slate with a kernel
// Note, output with change is a secondary source of verification because of cut through.
if tx.tx_type == TxLogEntryType::TxSent {
if tx.tx_slate_id.is_none() && tx.kernel_excess.is_none() {
tx_info.warnings.push("Transaction doesn't have UUID".to_string());
println!("WARNING: Sent trasaction id {} doesn't have uuid or kernel data", tx.id );
}
}
if tx.tx_type != TxLogEntryType::TxReceived && tx.tx_type != TxLogEntryType::TxReceivedCancelled {
if let Some(uuid_str) = tx.tx_slate_id {
if let Ok(transaction) = w.get_stored_tx_by_uuid(&uuid_str.to_string()) {
tx_info.tx_kernels = transaction.body.kernels.iter().map(|k| grin_util::to_hex(k.excess.0.to_vec())).collect();
} else {
if tx.tx_type == TxLogEntryType::TxSent {
tx_info.warnings.push("Transaction slate not found".to_string());
println!("INFO: Slate data not found for id {} and uuid {}. This might be a recoverable issue", tx.id, uuid_str);
}
}
}
if let Some(kernel) = tx.kernel_excess {
tx_info.tx_kernels.push(grin_util::to_hex(kernel.0.to_vec()));
}
}
if tx.tx_type == TxLogEntryType::TxSent {
if tx_info.tx_kernels.is_empty() {
tx_info.warnings.push("No Kernels found".to_string());
if tx_info.tx_outputs.is_empty() {
println!("WARNING: For send transaction id {} no kernels were found and no change outputs exist. We will not be able to validate it.", tx.id );
}
else {
println!("WARNING: For send transaction id {} no kernels were found, but {} outputs exist. Outputs might not exist because of cut through.", tx.id, tx_info.tx_outputs.len() );
}
}
}
// Data is ready, let's collect it
let tx_idx = txs.len();
for kernel in &tx_info.tx_kernels {
kernel_to_tx.insert(kernel.clone(), tx_idx);
}
for out in &tx_info.tx_outputs {
if let Some(commit) = &out.commit {
output_to_tx.insert(commit.clone(), tx_idx);
}
else {
tx_info.warnings.push("Has Output without commit record".to_string());
println!("WARNING: Transaction id {} has a broken Output without a commit record. It can't be used for validation. This transaction has {} outputs. Output data: {:?}", tx.id, tx_info.tx_outputs.len(), out);
}
}
tx_id_to_tx.insert( tx.id, tx_idx );
txs.push(tx_info);
}
// Transactions are prepared. Now we need to validate them.
// Scan the node dump line by line and update the validated flag.
// ------------ Send processing first because sends are the end points for Received Outputs. ---------------------
// If a receive output is not in the chain but the end-point send was delivered, it means the output was cut through and the transaction is valid.
// Normally there is a single kernel per tx. If any of its kernels is found, the whole transaction is considered valid.
{
let file = File::open(kernels_fn).map_err(|_| ErrorKind::FileNotFound(String::from(kernels_fn)))?;
let reader = BufReader::new(file);
// Read the file line by line using the lines() iterator from std::io::BufRead.
for line in reader.lines() {
let line = line.unwrap();
if let Some(tx_idx) = kernel_to_tx.get(&line) {
txs[*tx_idx].validated = true;
txs[*tx_idx].validation_flags += "K";
}
}
}
// ---------- Processing Outputs. Targeting 'receive' and partly 'send' -----------------
{
{
let file = File::open(outputs_fn).map_err(|_| ErrorKind::FileNotFound(String::from(outputs_fn)))?;
let reader = BufReader::new(file);
// Read the file line by line using the lines() iterator from std::io::BufRead.
for output in reader.lines() {
let output = output.unwrap();
if let Some(tx_idx) = output_to_tx.get(&output) {
txs[*tx_idx].validated = true;
txs[*tx_idx].validation_flags += "O";
}
}
}
}
// Processing outputs by Send target - it is a cut-through case.
// Do that for Receive transactions without confirmations
{
for i in 0..txs.len() {
let t = &txs[i];
if t.validated {
continue;
}
let mut validated = false;
for out in &t.tx_outputs {
if let Some(tx_log_entry) = out.tx_log_entry {
if let Some(tx_idx) = tx_id_to_tx.get(&tx_log_entry) {
let tx_info = &txs[*tx_idx];
if (tx_info.tx_log.tx_type == TxLogEntryType::TxSent || tx_info.tx_log.tx_type == TxLogEntryType::TxSentCancelled)
&& tx_info.validated {
// We can validate this transaction because the output was spent successfully
validated = true;
}
}
}
}
drop(t);
if validated {
txs[i].validated = true;
txs[i].validation_flags += "S";
}
}
}
// Done, now let's do the reporting
let mut res_file = File::create(result_fn).map_err(|_| ErrorKind::FileUnableToCreate(String::from(result_fn)))?;
write!(res_file, "id,uuid,type,address,create time,height,amount,fee,messages,node validation,validation flags,validation warnings\n" )?;
for t in &txs {
let amount = if t.tx_log.amount_credited >= t.tx_log.amount_debited {
grin_core::core::amount_to_hr_string(t.tx_log.amount_credited - t.tx_log.amount_debited, true)
} else {
format!("-{}", grin_core::core::amount_to_hr_string(t.tx_log.amount_debited - t.tx_log.amount_credited, true))
};
let report_str = format!("{},{},{},\"{}\",{},{},{},{},\"{}\",{},{},\"{}\"\n",
t.tx_log.id,
t.tx_log.tx_slate_id.map(|uuid| uuid.to_string()).unwrap_or("None".to_string()),
match t.tx_log.tx_type { // TxLogEntryType print doesn't work for us
TxLogEntryType::ConfirmedCoinbase => "Coinbase",
TxLogEntryType::TxReceived => "Received",
TxLogEntryType::TxSent => "Sent",
TxLogEntryType::TxReceivedCancelled => "ReceivedCancelled",
TxLogEntryType::TxSentCancelled => "SentCancelled",
},
t.tx_log.address.clone().unwrap_or("None".to_string()),
t.tx_log.creation_ts.format("%Y-%m-%d %H:%M:%S"),
t.tx_log.output_height,
amount,
t.tx_log.fee.map(|fee| grin_core::core::amount_to_hr_string(fee, true) ).unwrap_or("Unknown".to_string()),
t.tx_log.messages.clone().map(|msg| {
let msgs: Vec<String> = msg.messages.iter().filter_map(|m| m.message.clone()).collect();
msgs.join(",").replace('"', "\"\"")
}).unwrap_or(String::new()),
if t.validated {
"true"
} else {
"false"
},
t.validation_flags,
t.warnings.join("; "),
);
write!(res_file, "{}", report_str )?;
}
if !outstanding_outputs.is_empty() {
println!("WARNING: There are {} of {} outstanding outputs that weren't used. That affects the accuracy of the results!", outstanding_outputs.len(), wallet_outputs_len );
}
Ok(())
}
pub fn retrieve_summary_info<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
refresh_from_node: bool,
minimum_confirmations: u64,
) -> Result<(bool, WalletInfo), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let (tx, rx) = mpsc::channel();
// Start the thread that prints status messages to the console.
let running = Arc::new( AtomicBool::new(true) );
let updater = grin_wallet_libwallet::api_impl::owner_updater::start_updater_console_thread(rx, running.clone())?;
let tx = Some(tx);
let res = grin_wallet_libwallet::owner::retrieve_summary_info(wallet_inst,
None,
&tx,
refresh_from_node,
minimum_confirmations,
)?;
running.store(false, Ordering::Relaxed);
let _ = updater.join();
Ok(res)
}
pub fn initiate_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
active_account: Option<String>,
address: Option<String>,
amount: u64,
minimum_confirmations: u64,
max_outputs: u32,
num_change_outputs: u32,
selection_strategy_is_use_all: bool,
message: Option<String>,
outputs: Option<Vec<&str>>, // outputs to include in the transaction
version: Option<u16>, // Slate version
routputs: usize, // Number of resulting outputs. Normally it is 1
) -> Result<Slate, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let params = grin_wallet_libwallet::InitTxArgs {
src_acct_name: active_account,
amount,
minimum_confirmations,
max_outputs,
num_change_outputs,
/// If `true`, attempt to use up as many outputs as
/// possible to create the transaction, up to the 'soft limit' of `max_outputs`. This helps
/// to reduce the size of the UTXO set and the amount of data stored in the wallet, and
/// minimizes fees. This will generally result in many inputs and a large change output(s),
/// usually much larger than the amount being sent. If `false`, the transaction will include
/// as many outputs as are needed to meet the amount, (and no more) starting with the smallest
/// value outputs.
selection_strategy_is_use_all: selection_strategy_is_use_all,
message,
/// Optionally set the output target slate version (acceptable
/// down to the minimum slate version compatible with the current. If `None` the slate
/// is generated with the latest version.
target_slate_version: version,
/// Number of blocks from current after which TX should be ignored
ttl_blocks: None,
/// If set, require a payment proof for the particular recipient
payment_proof_recipient_address: None,
/// If true, just return an estimate of the resulting slate, containing fees and amounts
/// locked without actually locking outputs or creating the transaction. Note if this is set to
/// 'true', the amount field in the slate will contain the total amount locked, not the provided
/// transaction amount
address,
estimate_only: None,
/// Sender arguments. If present, the underlying function will also attempt to send the
/// transaction to a destination and optionally finalize the result
send_args: None,
};
let s = grin_wallet_libwallet::owner::init_send_tx( &mut **w,
None, params , false,
outputs, routputs)?;
Ok(s)
}
// Lock outputs and put the tx into the DB. The caller is supposed to call this if the slate was created and sent successfully.
pub fn tx_lock_outputs<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
slate: &Slate,
address: Option<String>,
participant_id: usize,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
grin_wallet_libwallet::owner::tx_lock_outputs( &mut **w, None, slate, address, participant_id )?;
Ok(())
}
pub fn finalize_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
slate: &mut Slate,
tx_proof: Option<&mut TxProof>,
) -> Result<bool, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let (slate_res, context) = grin_wallet_libwallet::owner::finalize_tx( &mut **w, None, slate )?;
*slate = slate_res;
if tx_proof.is_some() {
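// Fill the proof with the amount, fee and commitments from the signing context, then persist it under the slate id.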
let mut proof = tx_proof.unwrap();
proof.amount = context.amount;
proof.fee = context.fee;
for input in context.input_commits {
proof.inputs.push(input.clone());
}
for output in context.output_commits {
proof.outputs.push(output.clone());
}
proof.store_tx_proof(w.get_data_file_dir(), &slate.id.to_string() )?;
};
Ok(true)
}
pub fn finalize_invoice_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
slate: &mut Slate,
) -> Result<bool, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
*slate = grin_wallet_libwallet::foreign::finalize_invoice_tx( &mut **w, None, slate )?;
Ok(true)
}
pub fn cancel_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
tx_id: Option<u32>,
tx_slate_id: Option<Uuid>,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let (tx, rx) = mpsc::channel();
// Start the thread that prints status messages to the console.
let running = Arc::new( AtomicBool::new(true) );
let updater = grin_wallet_libwallet::api_impl::owner_updater::start_updater_console_thread(rx, running.clone())?;
let tx = Some(tx);
grin_wallet_libwallet::owner::cancel_tx( wallet_inst.clone(), None, &tx, tx_id, tx_slate_id )?;
running.store(false, Ordering::Relaxed);
let _ = updater.join();
Ok(())
}
pub fn get_stored_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
uuid: &str
) -> Result<Transaction, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
Ok(w.get_stored_tx_by_uuid(uuid)?)
}
pub fn node_info<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
) -> Result<NodeInfo, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
// first get height
let mut height = 0;
let mut total_difficulty = 0;
match w.w2n_client().get_chain_tip() {
Ok( (hght, _, total_diff) ) => {
height=hght;
total_difficulty=total_diff;
},
_ => (),
}
// peer info
let mut peers : Vec<PeerInfoDisplay> = Vec::new();
match w.w2n_client().get_connected_peer_info() {
Ok(p) => peers = p,
_ => (),
};
Ok(NodeInfo{height,total_difficulty,peers})
}
pub fn post_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
tx: &Transaction,
fluff: bool
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let tx_hex = grin_util::to_hex(ser::ser_vec(tx,ser::ProtocolVersion(1) ).unwrap());
let client = {
wallet_lock!(wallet_inst, w);
w.w2n_client().clone()
};
client.post_tx(&TxWrapper { tx_hex: tx_hex }, fluff)?;
Ok(())
}
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()?;
Ok(())
}
// restore is a repair. Since nothing exists yet, it will do whatever is needed.
pub fn restore<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
check_repair(wallet_inst, 1, true)
}
pub fn check_repair<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
start_height: u64,
delete_unconfirmed: bool
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let (tx, rx) = mpsc::channel();
// Start the thread that prints status messages to the console.
let running = Arc::new( AtomicBool::new(true) );
let updater = grin_wallet_libwallet::api_impl::owner_updater::start_updater_console_thread(rx, running.clone())?;
let tx = Some(tx);
grin_wallet_libwallet::owner::scan( wallet_inst.clone(),
None,
Some(start_height),
delete_unconfirmed,
&tx,
None,
)?;
running.store(false, Ordering::Relaxed);
let _ = updater.join();
Ok(())
}
pub fn dump_wallet_data<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
file_name: Option<String>,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
// Start the thread that prints status messages to the console.
let running = Arc::new( AtomicBool::new(true) );
let (tx, rx) = mpsc::channel();
let updater = grin_wallet_libwallet::api_impl::owner_updater::start_updater_console_thread(rx, running.clone())?;
grin_wallet_libwallet::owner::dump_wallet_data(
wallet_inst,
&tx,
file_name,
)?;
running.store(false, Ordering::Relaxed);
let _ = updater.join();
Ok(())
}
pub fn sync<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
update_all: bool,
print_progress: bool,
) -> Result<bool, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut status_send_channel: Option<Sender<StatusMessage>> = None;
let running = Arc::new( AtomicBool::new(true) );
let mut updater : Option<JoinHandle<()>> = None;
if print_progress {
let (tx, rx) = mpsc::channel();
// Start the thread that prints status messages to the console.
updater = Some(grin_wallet_libwallet::api_impl::owner_updater::start_updater_console_thread(rx, running.clone())?);
status_send_channel = Some(tx);
}
let res = grin_wallet_libwallet::owner::update_wallet_state(
wallet_inst,
None,
&status_send_channel,
update_all,
None, // Need Update for all accounts
)?;
running.store(false, Ordering::Relaxed);
if updater.is_some() {
let _ = updater.unwrap().join();
}
Ok(res)
}
pub fn scan_outputs<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
pub_keys: Vec<PublicKey>,
output_fn : String
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
update_outputs(&mut **w, true, None, None);
crate::wallet::api::restore::scan_outputs(&mut **w, pub_keys, output_fn)?;
Ok(())
}
pub fn node_height<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
) -> Result<(u64, bool), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let res = {
wallet_lock!(wallet_inst, w);
w.w2n_client().get_chain_tip()
};
match res {
Ok(height) => Ok((height.0, true)),
Err(_) => {
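// Node is unreachable: fall back to the highest height recorded among the wallet's own outputs and report that the value did not come from the node.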
let outputs = retrieve_outputs(wallet_inst.clone(), true, false, None, None, None)?;
let height = match outputs.1.iter().map(|ocm| ocm.output.height).max() {
Some(height) => height,
None => 0,
};
Ok((height, false))
}
}
}
fn update_outputs<'a, T: ?Sized, C, K>(wallet: &mut T, update_all: bool, height: Option<u64>, accumulator: Option<Vec<grin_api::Output>>) -> bool
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
// Update outputs for all accounts
match updater::refresh_outputs(wallet, None, None, update_all, height, accumulator) {
Ok(_) => true,
Err(_) => false,
}
}
pub fn get_stored_tx_proof<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
id: u32) -> Result<TxProof, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let parent_key_id = w.parent_key_id();
let txs: Vec<TxLogEntry> =
updater::retrieve_txs(&mut **w, None,Some(id), None, Some(&parent_key_id), false, None, None)?;
if txs.len() != 1 {
return Err(ErrorKind::TransactionHasNoProof)?;
}
let uuid = txs[0]
.tx_slate_id
.ok_or_else(|| ErrorKind::TransactionHasNoProof)?;
TxProof::get_stored_tx_proof( w.get_data_file_dir(), &uuid.to_string())
}
pub fn verify_tx_proof(
tx_proof: &TxProof,
) -> Result<
(
Option<GrinboxAddress>,
GrinboxAddress,
u64,
Vec<pedersen::Commitment>,
pedersen::Commitment,
),
Error,
> {
let secp = &Secp256k1::with_caps(ContextFlag::Commit);
let (destination, slate) = tx_proof
.verify_extract(None)
.map_err(|_| ErrorKind::VerifyProof)?;
let inputs_ex = tx_proof.inputs.iter().collect::<HashSet<_>>();
let mut inputs: Vec<pedersen::Commitment> = slate
.tx
.inputs()
.iter()
.map(|i| i.commitment())
.filter(|c| !inputs_ex.contains(c))
.collect();
let outputs_ex = tx_proof.outputs.iter().collect::<HashSet<_>>();
let outputs: Vec<pedersen::Commitment> = slate
.tx
.outputs()
.iter()
.map(|o| o.commitment())
.filter(|c| !outputs_ex.contains(c))
.collect();
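// First check: the receiver's public blind excess must match the commitment sum of outputs minus (inputs + committed amount), using only commitments not already listed in the proof.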
let excess = &slate.participant_data[1].public_blind_excess;
let excess_parts: Vec<&PublicKey> = slate
.participant_data
.iter()
.map(|p| &p.public_blind_excess)
.collect();
let excess_sum =
PublicKey::from_combination(secp, excess_parts).map_err(|_| ErrorKind::VerifyProof)?;
let commit_amount = secp.commit_value(tx_proof.amount)?;
inputs.push(commit_amount);
let commit_excess = secp.commit_sum(outputs.clone(), inputs)?;
let pubkey_excess = commit_excess.to_pubkey(secp)?;
if excess != &pubkey_excess {
return Err(ErrorKind::VerifyProof.into());
}
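// Second check: the kernel excess computed from the full transaction (outputs + fee minus inputs + offset) must equal the sum of both participants' public blind excesses.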
let mut input_com: Vec<pedersen::Commitment> =
slate.tx.inputs().iter().map(|i| i.commitment()).collect();
let mut output_com: Vec<pedersen::Commitment> =
slate.tx.outputs().iter().map(|o| o.commitment()).collect();
input_com.push(secp.commit(0, slate.tx.offset.secret_key(secp)?)?);
output_com.push(secp.commit_value(slate.fee)?);
let excess_sum_com = secp.commit_sum(output_com, input_com)?;
if excess_sum_com.to_pubkey(secp)? != excess_sum {
return Err(ErrorKind::VerifyProof.into());
}
return Ok((
destination,
tx_proof.address.clone(),
tx_proof.amount,
outputs,
excess_sum_com,
));
}
pub fn derive_address_key<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
index: u32
) -> Result<SecretKey, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let keychain = w.keychain(None)?;
hasher::derive_address_key(&keychain, index).map_err(|e| e.into())
}
pub fn initiate_receive_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
address: Option<String>,
active_account: Option<String>,
amount: u64,
num_outputs: usize,
message: Option<String>,
) -> Result< Slate, Error >
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let params = grin_wallet_libwallet::IssueInvoiceTxArgs {
dest_acct_name: active_account,
amount,
message,
/// Optionally set the output target slate version (acceptable
/// down to the minimum slate version compatible with the current. If `None` the slate
/// is generated with the latest version.
target_slate_version: None,
address,
};
let s = grin_wallet_libwallet::owner::issue_invoice_tx(&mut **w,
None, params , false, num_outputs)?;
Ok(s)
}
pub fn build_coinbase<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
block_fees: &BlockFees
) -> Result<CbData, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let res = updater::build_coinbase(&mut **w, None, block_fees, false )?;
Ok(res)
}
pub fn receive_tx<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
address: Option<String>,
slate: &mut Slate,
message: Option<String>,
key_id: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
) -> Result<Slate, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
wallet_lock!(wallet_inst, w);
let s = grin_wallet_libwallet::foreign::receive_tx(
&mut **w,
None,
slate,
address,
key_id,
output_amounts,
dest_acct_name,
message,
false,
)?;
Ok(s)
}
| {
wallet_lock!(wallet_inst, w);
let id = keys::next_available_key(&mut **w, None)?;
let keychain = w.keychain(None)?;
let sec_key = keychain.derive_key(amount, &id, &SwitchCommitmentType::Regular)?;
let pubkey = PublicKey::from_secret_key(keychain.secp(), &sec_key)?;
let ret = format!("{:?}, {:?}", id, pubkey);
Ok(ret)
} |
client.py | """
Module that implements the EppClient class
"""
try:
# use gevent if available
import gevent.socket as socket
import gevent.ssl as ssl
except ImportError:
import socket
import ssl
import struct
from collections import deque
import logging
from six import PY2, PY3
from past.builtins import xrange # Python 2 backwards compatibility
from .exceptions import EppLoginError, EppConnectionError
from .doc import (EppResponse, EppHello, EppLoginCommand, EppLogoutCommand,
EppCreateCommand, EppUpdateCommand, EppRenewCommand,
EppTransferCommand, EppDeleteCommand)
from .utils import gen_trid
try:
from ssl import match_hostname, CertificateError
except ImportError:
from backports.ssl_match_hostname import match_hostname, CertificateError
class EppClient(object):
"""
EPP client class
"""
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self, host=None, port=700,
ssl_enable=True, ssl_keyfile=None, ssl_certfile=None, ssl_cacerts=None,
ssl_version=None, ssl_ciphers=None,
ssl_validate_hostname=True, socket_timeout=60, socket_connect_timeout=15,
ssl_validate_cert=True):
self.host = host
self.port = port
self.ssl_enable = ssl_enable
# PROTOCOL_SSLv23 gives the best proto version available (including TLSv1 and above)
# SSLv2 should be disabled by most OpenSSL build
self.ssl_version = ssl_version or ssl.PROTOCOL_SSLv23
# `ssl_ciphers`, if given, should be a string
# (https://www.openssl.org/docs/apps/ciphers.html)
# if not given, use the default in Python version (`ssl._DEFAULT_CIPHERS`)
self.ssl_ciphers = ssl_ciphers
self.keyfile = ssl_keyfile
self.certfile = ssl_certfile
self.cacerts = ssl_cacerts
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout
self.validate_hostname = ssl_validate_hostname
self.log = logging.getLogger(__name__)
self.sock = None
self.greeting = None
if ssl_validate_cert:
self.cert_required = ssl.CERT_REQUIRED
else:
self.cert_required = ssl.CERT_NONE
def connect(self, host=None, port=None, address_family=None):
"""
Method that initiates a connection to an EPP host
"""
host = host or self.host
self.sock = socket.socket(address_family or socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.socket_connect_timeout) # connect timeout
self.sock.connect((host, port or self.port))
local_sock_addr = self.sock.getsockname()
local_addr, local_port = local_sock_addr[:2]
self.log.debug('connected local=%s:%s remote=%s:%s',
local_addr, local_port, self.sock.getpeername()[0], port)
if self.ssl_enable:
self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,
ssl_version=self.ssl_version,
ciphers=self.ssl_ciphers,
server_side=False,
cert_reqs=self.cert_required,
ca_certs=self.cacerts)
self.log.debug('%s negotiated with local=%s:%s remote=%s:%s', self.sock.version(),
local_addr, local_port, self.sock.getpeername()[0], port)
if self.validate_hostname:
try:
match_hostname(self.sock.getpeercert(), host)
except CertificateError as exp:
self.log.exception("SSL hostname mismatch")
raise EppConnectionError(str(exp))
self.greeting = EppResponse.from_xml(self.read().decode('utf-8'))
self.sock.settimeout(self.socket_timeout) # regular timeout
def remote_info(self):
"""
Method that returns the remote peer name
"""
return '{}:{}'.format(*self.sock.getpeername())
def hello(self, log_send_recv=False):
"""
Method to send EppHello()
"""
return self.send(EppHello(), log_send_recv=log_send_recv)
# pylint: disable=c0103
def login(self, clID, pw, newPW=None, raise_on_fail=True,
obj_uris=None, extra_obj_uris=None, extra_ext_uris=None, clTRID=None):
|
def logout(self, clTRID=None):
cmd = EppLogoutCommand()
if clTRID:
cmd['epp']['command']['clTRID'] = clTRID
return self.send(cmd)
# pylint: enable=c0103
def read(self):
recvmeth = self.sock.read if self.ssl_enable else self.sock.recv
siz = b''
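# EPP data units are prefixed with a 4-byte big-endian length that includes the 4 header bytes themselves.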
while len(siz) < 4:
chunk = recvmeth(4 - len(siz))
if not chunk:
# empty read means EOF
self.close()
raise IOError("No size header read")
siz += chunk
size_remaining = siz = struct.unpack(">I", siz)[0] - 4
data = b''
while size_remaining:
buf = recvmeth(size_remaining)
if not buf:
self.close()
raise IOError(
"Short / no data read (expected %d bytes, got %d)" %
(siz, len(data)))
size_remaining -= len(buf)
data += buf
return data
#self.log.debug("read total %d bytes:\n%s\n" % (siz+4, data))
def write(self, data):
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
if PY3:
datad = str.encode(data) if type(data) is str else data
writemeth(siz + datad)
else:
writemeth(siz + data)
def write_many(self, docs):
"""
For testing only.
Writes multiple documents at once
"""
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
buf = []
for doc in docs:
buf.append(struct.pack(">I", 4 + len(doc)))
buf.append(doc)
writemeth(b''.join(buf))
def send(self, doc, log_send_recv=True, extra_nsmap=None, strip_hints=True):
self._gen_cltrid(doc)
buf = doc.to_xml(force_prefix=True)
if log_send_recv:
self.log.debug("SEND %s: %s", self.remote_info(), buf.decode('utf-8'))
self.write(buf)
r_buf = self.read().decode('utf-8')
if log_send_recv:
self.log.debug("RECV %s: %s", self.remote_info(), r_buf)
resp = EppResponse.from_xml(r_buf, extra_nsmap=extra_nsmap)
if strip_hints:
self.strip_hints(resp)
doc.normalize_response(resp)
return resp
@staticmethod
def strip_hints(data):
"""
Remove various cruft from the given EppDoc
(useful for responses where we don't care about _order etc.)
"""
stack = deque([data])
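# Iterative traversal with an explicit stack so deeply nested responses don't hit the recursion limit.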
while len(stack):
current = stack.pop()
for key in list(current.keys()):
if key in ('@xsi:schemaLocation', '_order'):
del current[key]
else:
val = current[key]
if isinstance(val, dict):
# visit later
stack.append(val)
elif isinstance(val, list):
# visit each dict in the list
for elem in val:
if isinstance(elem, dict):
stack.append(elem)
return data
def batchsend(self, docs, readresponse=True, failfast=True, pipeline=False):
""" Send multiple documents. If ``pipeline`` is True, they are
sent in a single ``write`` call (which may have the effect
of packing more than one doc into a single TCP packet
if they fit) """
sent = 0
recved = 0
ndocs = len(docs)
try:
if pipeline:
self.write_many(docs)
sent = ndocs
else:
for doc in docs:
self.write(str(doc))
sent += 1
# pylint: disable=w0702
except:
self.log.error(
"Failed to send all commands (sent %d/%d)", sent, ndocs)
if failfast:
raise
if not readresponse:
return sent
try:
out = []
for _ in xrange(sent):
r_buf = self.read()
out.append(EppResponse.from_xml(r_buf))
recved += 1
# pylint: disable=w0702
except Exception as exp:
self.log.error(
"Failed to receive all responses (recv'ed %d/%d)", recved, sent)
# pad the rest with None
for _ in xrange(sent - len(out)):
out.append(None)
# pylint: enable=w0702
return out
def write_split(self, data):
"""
For testing only.
Writes the size header and first 4 bytes of the payload in one call,
then the rest of the payload in another call.
"""
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz + data[:4])
writemeth(data[4:])
def write_splitsize(self, data):
"""
For testing only.
Writes 2 bytes of the header, then another two bytes,
then the payload in another call.
"""
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz[:2])
writemeth(siz[2:])
writemeth(data)
def write_splitall(self, data):
"""
For testing only.
Writes 2 bytes of the header, then another two bytes,
then 4 bytes of the payload, then the rest of the payload.
"""
writemeth = self.sock.write if self.ssl_enable else self.sock.sendall
siz = struct.pack(">I", 4 + len(data))
self.log.debug("siz=%d", (4 + len(data)))
writemeth(siz[:2])
writemeth(siz[2:])
writemeth(data[:4])
writemeth(data[4:])
def close(self):
self.sock.close()
self.sock = None
@staticmethod
def _gen_cltrid(doc):
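# Add a client transaction ID (clTRID) to write-type commands that don't already carry one.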
if isinstance(doc, (EppLoginCommand, EppCreateCommand, EppUpdateCommand,
EppDeleteCommand, EppTransferCommand, EppRenewCommand)):
cmd_node = doc['epp']['command']
if not cmd_node.get('clTRID'):
cmd_node['clTRID'] = gen_trid()
def _get_ssl_protocol_version(self):
"""
This is a hack to get the negotiated protocol version of an SSL connection.
WARNING: Do not use this on anything other than Python 2.7
WARNING: Do not use on non-CPython.
WARNING: only use it for debugging.
WARNING: this will probably crash because we may be loading the wrong version of libssl
From https://github.com/python-git/python/blob/master/Modules/_ssl.c
the PySSLObject struct looks like this:
typedef struct {
PyObject_HEAD
PySocketSockObject *Socket; /* Socket on which we're layered */
SSL_CTX* ctx;
SSL* ssl;
X509* peer_cert;
char server[X509_NAME_MAXLEN];
char issuer[X509_NAME_MAXLEN];
} PySSLObject;
and this is stored as self.sock._sslobj so we pry open the mem location
and call OpenSSL's SSL_get_version C API
This technique is inspired by http://pyevolve.sourceforge.net/wordpress/?p=2171
"""
assert self.ssl_enable, "don't use it on non-SSL sockets"
assert self.sock._sslobj, "don't use it on non-SSL sockets"
import ctypes
import ctypes.util
size_pyobject_head = ctypes.sizeof(
ctypes.c_long) + ctypes.sizeof(ctypes.c_voidp)
# skip PySocketSockObject* and SSL_CTX*
real_ssl_offset = size_pyobject_head + ctypes.sizeof(ctypes.c_voidp) * 2
ssl_p = ctypes.c_voidp.from_address(id(self.sock._sslobj) + real_ssl_offset)
# libssl = ctypes.cdll.LoadLibrary('/usr/local/opt/openssl/lib/libssl.1.0.0.dylib')
libssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl'))
if not libssl:
return None
libssl.SSL_get_version.restype = ctypes.c_char_p
libssl.SSL_get_version.argtypes = [ctypes.c_void_p]
ver = libssl.SSL_get_version(ssl_p)
return ver
| if not self.sock:
self.connect(self.host, self.port)
cmd = EppLoginCommand(
obj_uris=obj_uris,
extra_obj_uris=extra_obj_uris,
extra_ext_uris=extra_ext_uris)
cmd.clID = clID
cmd.pw = pw
if clTRID:
cmd['epp']['command']['clTRID'] = clTRID
if newPW:
cmd.newPW = newPW
r = self.send(cmd)
if not r.success and raise_on_fail:
raise EppLoginError(r)
return r |
js_catalog.py | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.utils import timezone
from django.utils.translation.trans_real import DjangoTranslation | from django.views.decorators.http import etag
from django.views.i18n import JavaScriptCatalog
# Yes, we want to regenerate this every time the module has been imported to
# refresh the cache at least at every code deployment
import_date = timezone.now().strftime("%Y%m%d%H%M")
# This is not a valid Django URL configuration, as the final
# configuration is done by the pretix.multidomain package.
js_info_dict = {
'packages': ('pretix',),
}
@etag(lambda *s, **k: import_date)
@cache_page(3600, key_prefix='js18n-%s' % import_date)
def js_catalog(request, lang):
c = JavaScriptCatalog()
c.translation = DjangoTranslation(lang, domain='djangojs')
context = c.get_context_data()
return c.render_to_response(context) | from django.views.decorators.cache import cache_page |
new.rs | use crate::instructions::base::bytecode_reader::BytecodeReader;
use crate::instructions::base::class_init_logic::init_class;
use crate::instructions::base::instruction::{ConstantPoolInstruction, Instruction};
use crate::runtime::frame::Frame;
use crate::oops::class::Class;
use crate::oops::constant_pool::Constant::ClassReference;
use crate::utils::boxed;
use std::cell::RefCell;
use std::rc::Rc;
use crate::instructions::references::ResolveClassRef;
pub struct New(ConstantPoolInstruction);
impl New {
#[inline]
pub fn | () -> New {
return New(ConstantPoolInstruction::new());
}
}
impl Instruction for New {
fn fetch_operands(&mut self, reader: &mut BytecodeReader) {
self.0.fetch_operands(reader);
}
fn execute(&mut self, frame: &mut Frame) {
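// Resolve the target class; if it is not initialized yet, rewind the PC, trigger class initialization and let the instruction re-execute afterwards.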
let class = frame.method().class();
let class = self.resolve_class_ref(class);
if !(*class).borrow().initialized() {
frame.revert_next_pc();
init_class(frame.thread(), class.clone());
return;
}
let ref_class = (*class).borrow();
if ref_class.is_interface() || ref_class.is_abstract() {
panic!("java.lang.InstantiationError")
}
let object = match ref_class.is_class_loader() {
true => Class::new_class_loader_object(&class),
false => Class::new_object(&class),
};
frame
.operand_stack()
.expect("")
.push_ref(Some(boxed(object)));
}
}
impl ResolveClassRef for New {
fn get_index(&self) -> usize {
return self.0.index();
}
}
| new |
data_source_google_compute_backend_bucket.go | package google
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func dataSourceGoogleComputeBackendBucket() *schema.Resource {
dsSchema := datasourceSchemaFromResourceSchema(resourceComputeBackendBucket().Schema)
// Set 'Required' schema elements
addRequiredFieldsToSchema(dsSchema, "name")
// Set 'Optional' schema elements
addOptionalFieldsToSchema(dsSchema, "project")
return &schema.Resource{
Read: dataSourceComputeBackendBucketRead,
Schema: dsSchema,
}
}
func dataSourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error | {
config := meta.(*Config)
backendBucketName := d.Get("name").(string)
project, err := getProject(d, config)
if err != nil {
return err
}
d.SetId(fmt.Sprintf("projects/%s/global/backendBuckets/%s", project, backendBucketName))
return resourceComputeBackendBucketRead(d, meta)
} |
|
scoped_handler_factory_test.go | package buildserver_test
import (
"context"
"net/http"
"net/http/httptest" |
"code.cloudfoundry.org/lager/lagertest"
. "github.com/concourse/atc/api/buildserver"
"github.com/concourse/atc/auth"
"github.com/concourse/atc/db"
"github.com/concourse/atc/db/dbfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("ScopedHandlerFactory", func() {
var (
response *http.Response
server *httptest.Server
delegate *delegateHandler
handler http.Handler
)
BeforeEach(func() {
delegate = &delegateHandler{}
logger := lagertest.NewTestLogger("test")
handlerFactory := NewScopedHandlerFactory(logger)
handler = handlerFactory.HandlerFor(delegate.GetHandler)
})
JustBeforeEach(func() {
server = httptest.NewServer(handler)
request, err := http.NewRequest("POST", server.URL, nil)
Expect(err).NotTo(HaveOccurred())
response, err = new(http.Client).Do(request)
Expect(err).NotTo(HaveOccurred())
})
var _ = AfterEach(func() {
server.Close()
})
Context("build is in the context", func() {
var contextBuild *dbfakes.FakeBuild
BeforeEach(func() {
contextBuild = new(dbfakes.FakeBuild)
handler = &wrapHandler{handler, contextBuild}
})
It("calls scoped handler with build from context", func() {
Expect(delegate.IsCalled).To(BeTrue())
Expect(delegate.Build).To(BeIdenticalTo(contextBuild))
})
})
Context("build not found in the context", func() {
It("returns 500", func() {
Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
})
It("does not call the scoped handler", func() {
Expect(delegate.IsCalled).To(BeFalse())
})
})
})
type delegateHandler struct {
IsCalled bool
Build db.Build
}
func (handler *delegateHandler) GetHandler(build db.Build) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
handler.IsCalled = true
handler.Build = build
})
}
type wrapHandler struct {
delegate http.Handler
contextBuild db.Build
}
func (h *wrapHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), auth.BuildKey, h.contextBuild)
h.delegate.ServeHTTP(w, r.WithContext(ctx))
} | |
ng_network.go | package server
import (
"fmt"
"net"
)
const ReadBufferSize int = 1024
func Listen(network, address string) |
func ListenU(network, address string) {
var udpAddr *net.UDPAddr
udpAddr, _ = net.ResolveUDPAddr(network, address)
udpSocket, err := net.ListenUDP(network, udpAddr)
if err != nil {
fmt.Println("Listen UDP failed")
return
}
defer udpSocket.Close()
}
| {
var tcpAddr *net.TCPAddr
tcpAddr, _ = net.ResolveTCPAddr(network, address)
tcpListener, err := net.ListenTCP(network, tcpAddr)
if err != nil {
fmt.Println("Listen TCP failed")
return
}
defer tcpListener.Close()
fmt.Println("Server ready to read ...")
for {
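// Accept connections in a loop; each accepted peer is served on its own goroutine.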
tcpConn, err := tcpListener.AcceptTCP()
if err != nil {
fmt.Println(err)
continue
}
// Assumption: Peer.Conn can hold the accepted *net.TCPConn; previously the connection was never attached to the peer.
peer := Peer{Conn: tcpConn}
fmt.Println("A client connected :" + tcpConn.RemoteAddr().String())
go peer.HandRequest()
}
} |
StudioGalleriesPanel.tsx | import React from "react";
import * as GQL from "src/core/generated-graphql";
import { GalleryList } from "src/components/Galleries/GalleryList";
import { studioFilterHook } from "src/core/studios";
interface IStudioGalleriesPanel {
studio: GQL.StudioDataFragment;
}
export const StudioGalleriesPanel: React.FC<IStudioGalleriesPanel> = ({
studio,
}) => { | return <GalleryList filterHook={studioFilterHook(studio)} />;
}; |
|
checkbox-group.component.ts | import {
Component,
ContentChild,
ContentChildren,
forwardRef,
Host,
HostBinding,
Input,
Optional,
QueryList,
Self,
SkipSelf,
ViewEncapsulation,
ElementRef,
Renderer2,
} from '@angular/core';
import { ControlValueAccessor, FormGroupDirective, NgControl } from '@angular/forms';
import { LgDomService } from '../../utils/dom.service';
import { LgHintComponent } from '../hint/hint.component';
import { LgErrorStateMatcher } from '../validation/error-state-matcher';
import { LgValidationComponent } from '../validation/validation.component';
import { LgToggleComponent } from '../toggle';
import { CheckboxGroupVariant } from './checkbox-group.interface';
let uniqueId = 0;
@Component({
selector: 'lg-checkbox-group, lg-filter-multiple-group',
templateUrl: './checkbox-group.component.html', | nextUniqueId = ++uniqueId;
private _name = `lg-checkbox-group-${this.nextUniqueId}`;
@Input() id = `lg-checkbox-group-id-${this.nextUniqueId}`;
@Input() inline = false;
@Input() disabled = false;
@Input() ariaDescribedBy: string;
_variant: CheckboxGroupVariant;
set variant(variant: CheckboxGroupVariant) {
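// Swap the variant modifier class on the host element whenever the variant changes.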
if (this._variant) {
this.renderer.removeClass(
this.hostElement.nativeElement,
`lg-checkbox-group--${this.variant}`,
);
}
this.renderer.addClass(
this.hostElement.nativeElement,
`lg-checkbox-group--${variant}`,
);
this._variant = variant;
}
get variant() {
return this._variant;
}
@HostBinding('class.lg-checkbox-group--inline') get inlineClass() {
return this.inline;
}
@HostBinding('class.lg-checkbox-group--error') get errorClass() {
return this.errorState.isControlInvalid(this.control, this.controlContainer);
}
_checkboxes: QueryList<LgToggleComponent>;
@ContentChildren(forwardRef(() => LgToggleComponent), {
descendants: true,
})
set checkboxes(checkboxes: QueryList<LgToggleComponent>) {
checkboxes.toArray().forEach((checkbox: LgToggleComponent) => {
checkbox.control = this.control;
});
this._checkboxes = checkboxes;
}
get checkboxes(): QueryList<LgToggleComponent> {
return this._checkboxes;
}
_hintElement: LgHintComponent;
@ContentChild(LgHintComponent)
set hintElement(element: LgHintComponent) {
this.ariaDescribedBy = this.domService.toggleIdInStringProperty(
this.ariaDescribedBy,
this._hintElement,
element,
);
this._hintElement = element;
}
_validationElement: LgValidationComponent;
@ContentChild(LgValidationComponent)
set errorElement(element: LgValidationComponent) {
this.ariaDescribedBy = this.domService.toggleIdInStringProperty(
this.ariaDescribedBy,
this._validationElement,
element,
);
this._validationElement = element;
}
_value: Array<string> = null;
@Input()
get value() {
return this._value;
}
set value(value) {
this._value = value;
this.onChange(value);
if (this.checkboxes) {
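// Reflect the new model value onto the child checkboxes by checking every box whose value is included.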
this.checkboxes.forEach((checkbox) => {
if (value.includes(checkbox.value.toString())) {
checkbox.checked = true;
}
});
}
}
@Input()
get name(): string {
return this._name;
}
set name(value: string) {
this._name = value;
this._updateRadioButtonNames();
}
constructor(
@Self() @Optional() private control: NgControl,
private errorState: LgErrorStateMatcher,
@Optional()
@Host()
@SkipSelf()
private controlContainer: FormGroupDirective,
private domService: LgDomService,
private renderer: Renderer2,
private hostElement: ElementRef,
) {
this.variant = this.hostElement.nativeElement.tagName
.split('-')[1]
.toLowerCase() as CheckboxGroupVariant;
if (this.control != null) {
this.control.valueAccessor = this;
}
}
public onChange(value: Array<string>) {
this._value = value;
}
public onTouched(_?: any) {}
public writeValue(obj: Array<string>): void {
this.value = obj;
}
public registerOnChange(fn: any): void {
this.onChange = fn;
}
public registerOnTouched(fn: any): void {
this.onTouched = fn;
}
private _updateRadioButtonNames(): void {
if (this.checkboxes) {
this.checkboxes.forEach((checkbox) => {
checkbox.name = this.name;
});
}
}
public setDisabledState(isDisabled: boolean) {
this.disabled = isDisabled;
}
} | styleUrls: ['./checkbox-group.component.scss'],
encapsulation: ViewEncapsulation.None,
})
export class LgCheckboxGroupComponent implements ControlValueAccessor { |
evaluator_test.go | package conditions
import (
"testing"
"github.com/grafana/grafana/pkg/components/null"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/stretchr/testify/require"
)
func evaluatorScenario(t *testing.T, json string, reducedValue float64, datapoints ...float64) bool {
jsonModel, err := simplejson.NewJson([]byte(json))
require.NoError(t, err)
evaluator, err := NewAlertEvaluator(jsonModel)
require.NoError(t, err)
return evaluator.Eval(null.FloatFrom(reducedValue))
}
func TestEvaluators(t *testing.T) {
t.Run("greater than", func(t *testing.T) {
require.True(t, evaluatorScenario(t, `{"type": "gt", "params": [1] }`, 3))
require.False(t, evaluatorScenario(t, `{"type": "gt", "params": [3] }`, 1))
})
t.Run("less than", func(t *testing.T) {
require.False(t, evaluatorScenario(t, `{"type": "lt", "params": [1] }`, 3))
require.True(t, evaluatorScenario(t, `{"type": "lt", "params": [3] }`, 1))
})
t.Run("within_range", func(t *testing.T) {
require.True(t, evaluatorScenario(t, `{"type": "within_range", "params": [1, 100] }`, 3))
require.False(t, evaluatorScenario(t, `{"type": "within_range", "params": [1, 100] }`, 300))
require.True(t, evaluatorScenario(t, `{"type": "within_range", "params": [100, 1] }`, 3))
require.False(t, evaluatorScenario(t, `{"type": "within_range", "params": [100, 1] }`, 300))
})
t.Run("outside_range", func(t *testing.T) {
require.True(t, evaluatorScenario(t, `{"type": "outside_range", "params": [1, 100] }`, 1000))
require.False(t, evaluatorScenario(t, `{"type": "outside_range", "params": [1, 100] }`, 50))
require.True(t, evaluatorScenario(t, `{"type": "outside_range", "params": [100, 1] }`, 1000))
require.False(t, evaluatorScenario(t, `{"type": "outside_range", "params": [100, 1] }`, 50))
})
t.Run("no_value", func(t *testing.T) {
t.Run("should be false if series have values", func(t *testing.T) {
require.False(t, evaluatorScenario(t, `{"type": "no_value", "params": [] }`, 50))
})
t.Run("should be true when the series have no value", func(t *testing.T) {
jsonModel, err := simplejson.NewJson([]byte(`{"type": "no_value", "params": [] }`))
require.NoError(t, err)
| require.NoError(t, err)
require.True(t, evaluator.Eval(null.FloatFromPtr(nil)))
})
})
} | evaluator, err := NewAlertEvaluator(jsonModel) |
adminx.py | from __future__ import absolute_import
from django.forms import ModelMultipleChoiceField
from django.utils.translation import ugettext as _
import xadmin
from .xadmin_action import RunloopAction
from .models import RunLoopGroup, Orders
ACTION_NAME = {
'add': _('Can add %s'),
'change': _('Can change %s'),
'edit': _('Can edit %s'),
'delete': _('Can delete %s'),
'view': _('Can view %s'),
} | if action in ACTION_NAME:
return ACTION_NAME[action] % str(p.content_type)
else:
return p.co_name
class StockModelMultipleChoiceField(ModelMultipleChoiceField):
def label_from_instance(self, p):
return get_stock_name(p)
@xadmin.sites.register(RunLoopGroup)
class RunLoopGroupAdmin(object):
list_display = ("name", "start", "end", "status", "description", 'link',)
list_display_links = ("name",)
# readonly_fields = ("status", )
exclude = ['status']
list_quick_filter = [{"field": "name", "limit": 10}]
search_fields = ["name"]
reversion_enable = True
style_fields = {"factor_buys": "checkbox-inline", "factor_sells": "checkbox-inline", "positions": "radio-inline",
"stocks": "m2m_transfer"}
# def get_field_attrs(self, db_field, **kwargs):
# print("db_field", db_field)
# attrs = super(RunLoopGroupAdmin, self).get_field_attrs(db_field, **kwargs)
# if db_field.name == 'stocks':
# attrs['form_class'] = StockModelMultipleChoiceField
# return attrs
actions = [RunloopAction]
def link(self, instance):
if instance.status == 'done':
return "<a href='%s/k' target='_blank'>%s</a>" % (
instance.id, '买卖点') + " <a href='%s/returns' target='_blank'>%s</a>" % (instance.id, '收益')
else:
return ""
link.short_description = '<div style="width: 100px;">报表</div>'
link.allow_tags = True
link.is_column = False
@xadmin.sites.register(Orders)
class OrdersAdmin(object):
list_display = (
"run_loop_group", "stock", "profit", "profit_cg_hunder", "buy_date", "buy_price", "buy_cnt", "buy_factor",
"sell_date", "sell_price", "sell_type_extra", "sell_type")
list_display_links = ("stock",)
# readonly_fields = ("status", )
# exclude = ['status']
list_quick_filter = [{"field": "stock", "limit": 10}]
search_fields = ["stock"]
reversion_enable = True
# xadmin.sites.site.register(HostGroup, HostGroupAdmin)
# xadmin.sites.site.register(MaintainLog, MaintainLogAdmin)
# xadmin.sites.site.register(IDC, IDCAdmin)
# xadmin.sites.site.register(AccessRecord, AccessRecordAdmin) |
def get_stock_name(p):
action = p.codename.split('_')[0] |
settings.py | """
Django settings for helloworld project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import dj_database_url
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "tf)!7lw(kb4q0o1(xjedy_l8t+8+s1uzc-35fs21j71vw+56n5"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'helloworld.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'helloworld.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/ | STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Activate Django-Heroku.
django_heroku.settings(locals()) |
|
EKV_Adjustment.py | #This file is only available to the lab members in the internal Wiki |
||
signal_action.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use crate::ShortcutAction;
use glib::object::Cast;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::fmt;
glib::wrapper! {
pub struct SignalAction(Object<ffi::GtkSignalAction, ffi::GtkSignalActionClass>) @extends ShortcutAction;
match fn {
type_ => || ffi::gtk_signal_action_get_type(),
}
}
impl SignalAction {
#[doc(alias = "gtk_signal_action_new")]
pub fn new(signal_name: &str) -> SignalAction {
assert_initialized_main_thread!();
unsafe { from_glib_full(ffi::gtk_signal_action_new(signal_name.to_glib_none().0)) }
}
#[doc(alias = "gtk_signal_action_get_signal_name")]
#[doc(alias = "get_signal_name")]
pub fn signal_name(&self) -> glib::GString {
unsafe {
from_glib_none(ffi::gtk_signal_action_get_signal_name(
self.to_glib_none().0,
))
}
}
}
#[derive(Clone, Default)]
pub struct SignalActionBuilder {
signal_name: Option<String>,
}
impl SignalActionBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn | (self) -> SignalAction {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref signal_name) = self.signal_name {
properties.push(("signal-name", signal_name));
}
glib::Object::new::<SignalAction>(&properties)
.expect("Failed to create an instance of SignalAction")
}
pub fn signal_name(mut self, signal_name: &str) -> Self {
self.signal_name = Some(signal_name.to_string());
self
}
}
impl fmt::Display for SignalAction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("SignalAction")
}
}
| build |
random_field_access.rs | use crate::FixValue;
use std::iter::FusedIterator;
use std::ops::Range;
/// Provides random (i.e. non-sequential) access to FIX fields and groups within
/// messages.
///
/// # Methods
///
/// [`RandomFieldAccess`] provides two kinds of methods:
///
/// 1. Group getters: [`RandomFieldAccess::group`] and
/// [`RandomFieldAccess::group_opt`].
///
/// 2. Field getters: [`RandomFieldAccess::fv_raw`], [`RandomFieldAccess::fv`],
/// etc..
///
/// The most basic form of field access is done via
/// [`RandomFieldAccess::fv_raw`], which performs no deserialization at all: it
/// simply returns the bytes contents associated with a FIX field, if found.
///
/// Building upon [`RandomFieldAccess::fv_raw`] and [`FixValue`], the other
/// field access methods all provide some utility deserialization logic. These
/// methods all have the `fv` prefix, with the following considerations:
///
/// - `fvl` methods perform "lossy" deserialization via
/// [`FixValue::deserialize_lossy`]. Unlike lossless deserialization, these
/// methods may skip some error checking logic and thus prove to be faster.
/// Memory-safety is still guaranteed, but malformed FIX fields won't be
/// detected 100% of the time.
/// - `_opt` methods work exactly like their non-`_opt` counterparts, but they
/// have a different return type: instead of returning [`Err(None)`] for missing
/// fields, these methods return [`None`] for missing fields and
/// [`Some(Ok(field))`] for existing fields.
///
/// # Type parameters
///
/// This trait is generic over a type `F`, which must unambiguously identify FIX
/// fields (besides FIX repeating groups, which allow repetitions).
pub trait RandomFieldAccess<F> {
/// The type returned by [`RandomFieldAccess::group`] and
/// [`RandomFieldAccess::group_opt`].
type Group: RepeatingGroup<Entry = Self>;
/// Looks for a `field` within `self` and then returns its raw byte
/// contents, if it exists.
fn fv_raw(&self, field: F) -> Option<&[u8]>;
/// Like [`RandomFieldAccess::group`], but doesn't return an [`Err`] if the
/// group is missing.
fn group_opt(&self, field: F) -> Option<Result<Self::Group, <usize as FixValue>::Error>>;
/// Looks for a group that starts with `field` within `self`.
#[inline]
fn group(&self, field: F) -> Result<Self::Group, Option<<usize as FixValue>::Error>> {
match self.group_opt(field) {
Some(Ok(group)) => Ok(group),
Some(Err(e)) => Err(Some(e)),
None => Err(None),
}
}
/// Looks for a `field` within `self` and then decodes its raw byte contents
/// via [`FixValue::deserialize`], if found.
#[inline]
fn fv<'a, V>(&'a self, field: F) -> Result<V, Option<V::Error>>
where
V: FixValue<'a>,
{
match self.fv_opt(field) {
Some(Ok(x)) => Ok(x),
Some(Err(err)) => Err(Some(err)),
None => Err(None),
}
}
/// Like [`RandomFieldAccess::fv`], but with lossy deserialization.
#[inline]
fn fvl<'a, V>(&'a self, field: F) -> Result<V, Option<V::Error>>
where
V: FixValue<'a>,
{
match self.fvl_opt(field) { | }
}
/// Like [`RandomFieldAccess::fv`], but doesn't return an [`Err`] if `field`
/// is missing.
#[inline]
fn fv_opt<'a, V>(&'a self, field: F) -> Option<Result<V, V::Error>>
where
V: FixValue<'a>,
{
self.fv_raw(field).map(|raw| match V::deserialize(raw) {
Ok(value) => Ok(value),
Err(err) => Err(err.into()),
})
}
/// Like [`RandomFieldAccess::fv_opt`], but with lossy deserialization.
#[inline]
fn fvl_opt<'a, V>(&'a self, field: F) -> Option<Result<V, V::Error>>
where
V: FixValue<'a>,
{
self.fv_raw(field)
.map(|raw| match V::deserialize_lossy(raw) {
Ok(value) => Ok(value),
Err(err) => Err(err.into()),
})
}
}
/// Provides access to entries within a FIX repeating group.
pub trait RepeatingGroup: Sized {
/// The type of entries in this FIX repeating group. Must implement
/// [`RandomFieldAccess`].
type Entry;
/// Returns the number of FIX group entries in `self`.
fn len(&self) -> usize;
/// Returns the `i` -th entry in `self`, if present.
fn entry_opt(&self, i: usize) -> Option<Self::Entry>;
/// Returns the `i` -th entry in `self`.
///
/// # Panics
///
/// Panics if `i` is outside the legal range of `self`.
fn entry(&self, i: usize) -> Self::Entry {
self.entry_opt(i)
.expect("Index outside bounds of FIX repeating group.")
}
/// Creates and returns an [`Iterator`] over the entries in `self`.
    /// Iteration MUST be done in sequential order, i.e. the order in which the
    /// entries appear in the original FIX message.
fn entries(&self) -> GroupEntries<Self> {
GroupEntries {
group: self,
range: 0..self.len(),
}
}
}
/// An [`Iterator`] over the entries of a FIX repeating group.
///
/// This `struct` is created by the method [`RepeatingGroup::entries`]. It
/// also implements [`FusedIterator`], [`DoubleEndedIterator`], and
/// [`ExactSizeIterator`].
#[derive(Debug, Clone)]
pub struct GroupEntries<'a, G> {
group: &'a G,
range: Range<usize>,
}
impl<'a, G> Iterator for GroupEntries<'a, G>
where
G: RepeatingGroup,
{
type Item = G::Entry;
fn next(&mut self) -> Option<Self::Item> {
let i = self.range.next()?;
Some(self.group.entry(i))
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.range.size_hint()
}
}
impl<'a, G> FusedIterator for GroupEntries<'a, G> where G: RepeatingGroup {}
impl<'a, G> ExactSizeIterator for GroupEntries<'a, G> where G: RepeatingGroup {}
impl<'a, G> DoubleEndedIterator for GroupEntries<'a, G>
where
G: RepeatingGroup,
{
fn next_back(&mut self) -> Option<Self::Item> {
let i = self.range.next_back()?;
Some(self.group.entry(i))
}
}
service_instance_summary_test.go
package v2action_test
import (
"errors"
. "code.cloudfoundry.org/cli/actor/v2action"
"code.cloudfoundry.org/cli/actor/v2action/v2actionfakes"
"code.cloudfoundry.org/cli/api/cloudcontroller/ccerror"
"code.cloudfoundry.org/cli/api/cloudcontroller/ccv2"
"code.cloudfoundry.org/cli/api/cloudcontroller/ccv2/constant"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
)
var _ = Describe("Service Instance Summary Actions", func() {
var (
actor *Actor
fakeCloudControllerClient *v2actionfakes.FakeCloudControllerClient
)
BeforeEach(func() {
fakeCloudControllerClient = new(v2actionfakes.FakeCloudControllerClient)
actor = NewActor(fakeCloudControllerClient, nil, nil)
})
Describe("ServiceInstanceSummary", func() {
var summary ServiceInstanceSummary
Describe("IsShareable", func() {
When("the 'service_instance_sharing' feature flag is enabled", func() {
BeforeEach(func() {
summary.ServiceInstanceSharingFeatureFlag = true
})
When("the service broker has enabled sharing", func() {
BeforeEach(func() {
summary.Service.Extra.Shareable = true
})
It("returns true", func() {
Expect(summary.IsShareable()).To(BeTrue())
})
})
When("the service broker has not enabled sharing", func() {
BeforeEach(func() {
summary.Service.Extra.Shareable = false
})
It("returns true", func() {
Expect(summary.IsShareable()).To(BeFalse())
})
})
})
When("the 'service_instance_sharing' feature flag is not enabled", func() {
BeforeEach(func() {
summary.ServiceInstanceSharingFeatureFlag = false
})
When("the service broker has enabled sharing", func() {
BeforeEach(func() {
summary.Service.Extra.Shareable = true
})
It("returns true", func() {
Expect(summary.IsShareable()).To(BeFalse())
})
})
When("the service broker has not enabled sharing", func() {
BeforeEach(func() {
summary.Service.Extra.Shareable = false
})
It("returns true", func() {
Expect(summary.IsShareable()).To(BeFalse())
})
})
})
})
DescribeTable("UpgradeAvailable",
func(versionFromPlan, versionFromServiceInstance, expectedResult string) {
summary.MaintenanceInfo.Version = versionFromServiceInstance
summary.ServicePlan.MaintenanceInfo.Version = versionFromPlan
Expect(summary.UpgradeAvailable()).To(Equal(expectedResult))
},
Entry("values are the same", "2.0.0", "2.0.0", "no"),
Entry("values are different", "3.0.0", "2.0.0", "yes"),
Entry("values are both empty", "", "", ""),
Entry("plan has value but instance does not", "1.0.0", "", "yes"),
Entry("instance has value but plan does not", "", "1.0.0", ""),
)
})
Describe("GetServiceInstanceSummaryByNameAndSpace", func() {
var (
summary ServiceInstanceSummary
summaryWarnings Warnings
summaryErr error
)
JustBeforeEach(func() {
summary, summaryWarnings, summaryErr = actor.GetServiceInstanceSummaryByNameAndSpace("some-service-instance", "some-space-guid")
})
When("an error is encountered getting the service instance", func() {
var expectedErr error
BeforeEach(func() {
expectedErr = errors.New("get space service instance error")
fakeCloudControllerClient.GetSpaceServiceInstancesReturns(
[]ccv2.ServiceInstance{},
ccv2.Warnings{"get-space-service-instance-warning"},
expectedErr)
})
It("returns the error and all warnings", func() {
Expect(summaryErr).To(MatchError(expectedErr))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning"))
Expect(fakeCloudControllerClient.GetSpaceServiceInstancesCallCount()).To(Equal(1))
spaceGUIDArg, getUserProvidedServicesArg, queriesArg := fakeCloudControllerClient.GetSpaceServiceInstancesArgsForCall(0)
Expect(spaceGUIDArg).To(Equal("some-space-guid"))
Expect(getUserProvidedServicesArg).To(BeTrue())
Expect(queriesArg).To(HaveLen(1))
Expect(queriesArg[0]).To(Equal(ccv2.Filter{
Type: constant.NameFilter,
Operator: constant.EqualOperator,
Values: []string{"some-service-instance"},
}))
})
})
When("no errors are encountered getting the service instance", func() {
var (
returnedServiceInstance ccv2.ServiceInstance
returnedFeatureFlag ccv2.FeatureFlag
)
When("the service instance is a managed service instance", func() {
BeforeEach(func() {
returnedServiceInstance = ccv2.ServiceInstance{
DashboardURL: "some-dashboard",
GUID: "some-service-instance-guid",
Name: "some-service-instance",
ServiceGUID: "some-service-guid",
ServicePlanGUID: "some-service-plan-guid",
Tags: []string{"tag-1", "tag-2"},
Type: constant.ManagedService,
}
fakeCloudControllerClient.GetSpaceServiceInstancesReturns(
[]ccv2.ServiceInstance{returnedServiceInstance},
ccv2.Warnings{"get-space-service-instance-warning"},
nil)
returnedFeatureFlag = ccv2.FeatureFlag{
Name: "service_instance_sharing",
Enabled: true,
}
fakeCloudControllerClient.GetConfigFeatureFlagsReturns(
[]ccv2.FeatureFlag{returnedFeatureFlag},
ccv2.Warnings{"get-feature-flags-warning"},
nil)
})
It("returns the service instance info and all warnings", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstance).To(Equal(ServiceInstance(returnedServiceInstance)))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning"))
Expect(fakeCloudControllerClient.GetSpaceServiceInstancesCallCount()).To(Equal(1))
spaceGUIDArg, getUserProvidedServicesArg, queriesArg := fakeCloudControllerClient.GetSpaceServiceInstancesArgsForCall(0)
Expect(spaceGUIDArg).To(Equal("some-space-guid"))
Expect(getUserProvidedServicesArg).To(BeTrue())
Expect(queriesArg).To(HaveLen(1))
Expect(queriesArg[0]).To(Equal(ccv2.Filter{
Type: constant.NameFilter,
Operator: constant.EqualOperator,
Values: []string{"some-service-instance"},
}))
})
When("the service instance is shared from another space (not created in the currently targeted space)", func() {
When("the source space of the service instance is different from the currently targeted space", func() {
BeforeEach(func() {
returnedServiceInstance.SpaceGUID = "not-currently-targeted-space-guid"
fakeCloudControllerClient.GetSpaceServiceInstancesReturns(
[]ccv2.ServiceInstance{returnedServiceInstance},
ccv2.Warnings{"get-space-service-instance-warning"},
nil)
})
When("an error is encountered getting the shared_from information", func() {
var expectedErr error
When("the error is generic", func() {
BeforeEach(func() {
expectedErr = errors.New("get-service-instance-shared-from-error")
fakeCloudControllerClient.GetServiceInstanceSharedFromReturns(
ccv2.ServiceInstanceSharedFrom{},
ccv2.Warnings{"get-service-instance-shared-from-warning"},
expectedErr,
)
})
It("returns the error and all warnings", func() {
Expect(summaryErr).To(MatchError(expectedErr))
Expect(summaryWarnings).To(ConsistOf("get-service-instance-shared-from-warning", "get-feature-flags-warning", "get-space-service-instance-warning"))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosCallCount()).To(Equal(0))
})
})
When("the API version does not support service instance sharing", func() {
BeforeEach(func() {
expectedErr = ccerror.ResourceNotFoundError{}
fakeCloudControllerClient.GetServiceInstanceSharedFromReturns(
ccv2.ServiceInstanceSharedFrom{},
ccv2.Warnings{"get-service-instance-shared-from-warning"},
expectedErr,
)
})
It("ignores the 404 error and continues without shared_from information", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summaryWarnings).To(ConsistOf("get-service-instance-shared-from-warning", "get-feature-flags-warning", "get-space-service-instance-warning"))
Expect(summary.ServiceInstanceSharedFrom).To(Equal(ServiceInstanceSharedFrom{}))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosCallCount()).To(Equal(0))
})
})
})
When("no errors are encountered getting the shared_from information", func() {
When("the shared_from info is NOT empty", func() {
var returnedServiceSharedFrom ccv2.ServiceInstanceSharedFrom
BeforeEach(func() {
returnedServiceSharedFrom = ccv2.ServiceInstanceSharedFrom{
SpaceGUID: "some-space-guid",
SpaceName: "some-space-name",
OrganizationName: "some-org-name",
}
fakeCloudControllerClient.GetServiceInstanceSharedFromReturns(
returnedServiceSharedFrom,
ccv2.Warnings{"get-service-instance-shared-from-warning"},
nil)
})
It("returns the service instance share type, shared_from info, and all warnings", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstance).To(Equal(ServiceInstance(returnedServiceInstance)))
Expect(summary.ServiceInstanceSharingFeatureFlag).To(BeTrue())
Expect(summary.ServiceInstanceShareType).To(Equal(ServiceInstanceIsSharedFrom))
Expect(summary.ServiceInstanceSharedFrom).To(Equal(ServiceInstanceSharedFrom(returnedServiceSharedFrom)))
Expect(summaryWarnings).To(ConsistOf("get-service-instance-shared-from-warning", "get-feature-flags-warning", "get-space-service-instance-warning"))
Expect(fakeCloudControllerClient.GetConfigFeatureFlagsCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosCallCount()).To(Equal(0))
})
})
When("the shared_from info is empty", func() {
It("sets the share type to not shared", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstanceShareType).To(Equal(ServiceInstanceIsNotShared))
})
})
})
})
When("the source space of the service instance is 'null'", func() {
BeforeEach(func() {
// API returns a json null value that is unmarshalled into the empty string
returnedServiceInstance.SpaceGUID = ""
fakeCloudControllerClient.GetSpaceServiceInstancesReturns(
[]ccv2.ServiceInstance{returnedServiceInstance},
ccv2.Warnings{"get-space-service-instance-warning"},
nil)
})
When("an error is encountered getting the shared_from information", func() {
var expectedErr error
When("the error is generic", func() {
BeforeEach(func() {
expectedErr = errors.New("get-service-instance-shared-from-error")
fakeCloudControllerClient.GetServiceInstanceSharedFromReturns(
ccv2.ServiceInstanceSharedFrom{},
ccv2.Warnings{"get-service-instance-shared-from-warning"},
expectedErr,
)
})
It("returns the error and all warnings", func() {
Expect(summaryErr).To(MatchError(expectedErr))
Expect(summaryWarnings).To(ConsistOf("get-service-instance-shared-from-warning", "get-feature-flags-warning", "get-space-service-instance-warning"))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
})
})
When("the API version does not support service instance sharing", func() {
BeforeEach(func() {
expectedErr = ccerror.ResourceNotFoundError{}
fakeCloudControllerClient.GetServiceInstanceSharedFromReturns(
ccv2.ServiceInstanceSharedFrom{},
ccv2.Warnings{"get-service-instance-shared-from-warning"},
expectedErr,
)
})
It("ignores the 404 error and continues without shared_from information", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summaryWarnings).To(ConsistOf("get-service-instance-shared-from-warning", "get-feature-flags-warning", "get-space-service-instance-warning"))
Expect(summary.ServiceInstanceSharedFrom).To(Equal(ServiceInstanceSharedFrom{}))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
})
})
})
When("no errors are encountered getting the shared_from information", func() {
When("the shared_from info is NOT empty", func() {
var returnedServiceSharedFrom ccv2.ServiceInstanceSharedFrom
BeforeEach(func() {
returnedServiceSharedFrom = ccv2.ServiceInstanceSharedFrom{
SpaceGUID: "some-space-guid",
SpaceName: "some-space-name",
OrganizationName: "some-org-name",
}
fakeCloudControllerClient.GetServiceInstanceSharedFromReturns(
returnedServiceSharedFrom,
ccv2.Warnings{"get-service-instance-shared-from-warning"},
nil)
})
It("returns the service instance share type, shared_from info, and all warnings", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstance).To(Equal(ServiceInstance(returnedServiceInstance)))
Expect(summary.ServiceInstanceSharingFeatureFlag).To(BeTrue())
Expect(summary.ServiceInstanceShareType).To(Equal(ServiceInstanceIsSharedFrom))
Expect(summary.ServiceInstanceSharedFrom).To(Equal(ServiceInstanceSharedFrom(returnedServiceSharedFrom)))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-instance-shared-from-warning"))
Expect(fakeCloudControllerClient.GetConfigFeatureFlagsCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosCallCount()).To(Equal(0))
})
})
When("the shared_from info is empty", func() {
It("sets the share type to not shared", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstanceShareType).To(Equal(ServiceInstanceIsNotShared))
})
})
})
})
})
When("the service instance is shared to other spaces", func() {
When("the source space of the service instance is the same as the currently targeted space", func() {
BeforeEach(func() {
returnedServiceInstance.SpaceGUID = "some-space-guid"
fakeCloudControllerClient.GetSpaceServiceInstancesReturns(
[]ccv2.ServiceInstance{returnedServiceInstance},
ccv2.Warnings{"get-space-service-instance-warning"},
nil)
})
When("an error is encountered getting the shared_to information", func() {
var expectedErr error
When("the error is generic", func() {
BeforeEach(func() {
expectedErr = errors.New("get-service-instance-shared-tos-error")
fakeCloudControllerClient.GetServiceInstanceSharedTosReturns(
[]ccv2.ServiceInstanceSharedTo{},
ccv2.Warnings{"get-service-instance-shared-tos-warning"},
expectedErr,
)
})
It("returns the error and all warnings", func() {
Expect(summaryErr).To(MatchError(expectedErr))
Expect(summaryWarnings).To(ConsistOf("get-service-instance-shared-tos-warning", "get-feature-flags-warning", "get-space-service-instance-warning"))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(0))
})
})
When("the API version does not support service instance sharing", func() {
BeforeEach(func() {
expectedErr = ccerror.ResourceNotFoundError{}
fakeCloudControllerClient.GetServiceInstanceSharedTosReturns(
[]ccv2.ServiceInstanceSharedTo{},
ccv2.Warnings{"get-service-instance-shared-tos-warning"},
expectedErr,
)
})
It("ignores the 404 error and continues without shared_to information", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summaryWarnings).To(ConsistOf("get-service-instance-shared-tos-warning", "get-feature-flags-warning", "get-space-service-instance-warning"))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(0))
})
})
})
When("no errors are encountered getting the shared_to information", func() {
When("the shared_to info is NOT an empty list", func() {
var returnedServiceSharedTos []ccv2.ServiceInstanceSharedTo
BeforeEach(func() {
returnedServiceSharedTos = []ccv2.ServiceInstanceSharedTo{
{
SpaceGUID: "some-space-guid",
SpaceName: "some-space-name",
OrganizationName: "some-org-name",
},
{
SpaceGUID: "some-space-guid2",
SpaceName: "some-space-name2",
OrganizationName: "some-org-name2",
},
}
fakeCloudControllerClient.GetServiceInstanceSharedTosReturns(
returnedServiceSharedTos,
ccv2.Warnings{"get-service-instance-shared-tos-warning"},
nil)
})
It("returns the service instance share type, shared_to info, and all warnings", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstance).To(Equal(ServiceInstance(returnedServiceInstance)))
Expect(summary.ServiceInstanceSharingFeatureFlag).To(BeTrue())
Expect(summary.ServiceInstanceShareType).To(Equal(ServiceInstanceIsSharedTo))
Expect(summary.ServiceInstanceSharedTos).To(ConsistOf(ServiceInstanceSharedTo(returnedServiceSharedTos[0]), ServiceInstanceSharedTo(returnedServiceSharedTos[1])))
Expect(summaryWarnings).To(ConsistOf("get-service-instance-shared-tos-warning", "get-feature-flags-warning", "get-space-service-instance-warning"))
Expect(fakeCloudControllerClient.GetConfigFeatureFlagsCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedTosArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetServiceInstanceSharedFromCallCount()).To(Equal(0))
})
})
When("the shared_to info is an empty list", func() {
It("sets the share type to not shared", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstanceShareType).To(Equal(ServiceInstanceIsNotShared))
})
})
})
})
})
When("an error is encountered getting the service plan", func() {
Describe("a generic error", func() {
var expectedErr error
BeforeEach(func() {
expectedErr = errors.New("get-service-plan-error")
fakeCloudControllerClient.GetServicePlanReturns(
ccv2.ServicePlan{},
ccv2.Warnings{"get-service-plan-warning"},
expectedErr)
})
It("returns the error and all warnings", func() {
Expect(summaryErr).To(MatchError(expectedErr))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning"))
})
})
Describe("a Forbidden error", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetServicePlanReturns(
ccv2.ServicePlan{},
ccv2.Warnings{"get-service-plan-warning"},
ccerror.ForbiddenError{})
})
It("returns warnings and continues on", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning", "This org is not authorized to view necessary data about this service plan. Contact your administrator regarding service GUID some-service-plan-guid."))
Expect(fakeCloudControllerClient.GetServiceCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceArgsForCall(0)).To(Equal("some-service-guid"))
})
})
})
When("no errors are encountered getting the service plan", func() {
var returnedServicePlan ccv2.ServicePlan
BeforeEach(func() {
returnedServicePlan = ccv2.ServicePlan{
GUID: "some-service-plan-guid",
Name: "some-service-plan",
ServiceGUID: "some-service-guid",
}
fakeCloudControllerClient.GetServicePlanReturns(
returnedServicePlan,
ccv2.Warnings{"get-service-plan-warning"},
nil)
})
It("returns the service plan info and all warnings", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstance).To(Equal(ServiceInstance(returnedServiceInstance)))
Expect(summary.ServicePlan).To(Equal(ServicePlan(returnedServicePlan)))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning"))
Expect(fakeCloudControllerClient.GetServicePlanCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServicePlanArgsForCall(0)).To(Equal(returnedServiceInstance.ServicePlanGUID))
})
When("an error is encountered getting the service", func() {
Describe("a generic error", func() {
var expectedErr error
BeforeEach(func() {
expectedErr = errors.New("get service error")
fakeCloudControllerClient.GetServiceReturns(
ccv2.Service{},
ccv2.Warnings{"get-service-warning"},
expectedErr)
})
It("returns the error and all warnings", func() {
Expect(summaryErr).To(MatchError(expectedErr))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning", "get-service-warning"))
Expect(fakeCloudControllerClient.GetServiceCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceArgsForCall(0)).To(Equal(returnedServicePlan.ServiceGUID))
})
})
Describe("a Forbidden error", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetServiceReturns(
ccv2.Service{},
ccv2.Warnings{"get-service-warning"},
ccerror.ForbiddenError{})
})
It("returns warnings and continues on", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning", "get-service-warning", "This org is not authorized to view necessary data about this service. Contact your administrator regarding service GUID some-service-guid."))
Expect(fakeCloudControllerClient.GetServiceInstanceServiceBindingsCallCount()).To(BeNumerically(">=", 1))
})
})
})
When("no errors are encountered getting the service", func() {
var returnedService ccv2.Service
BeforeEach(func() {
returnedService = ccv2.Service{
GUID: "some-service-guid",
Label: "some-service",
Description: "some-description",
DocumentationURL: "some-url",
Extra: ccv2.ServiceExtra{
Shareable: true,
},
}
fakeCloudControllerClient.GetServiceReturns(
returnedService,
ccv2.Warnings{"get-service-warning"},
nil)
})
It("returns the service info and all warnings", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstance).To(Equal(ServiceInstance(returnedServiceInstance)))
Expect(summary.ServicePlan).To(Equal(ServicePlan(returnedServicePlan)))
Expect(summary.Service).To(Equal(Service(returnedService)))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning", "get-service-warning"))
Expect(fakeCloudControllerClient.GetServiceCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceArgsForCall(0)).To(Equal(returnedServicePlan.ServiceGUID))
})
When("an error is encountered getting the service bindings", func() {
var expectedErr error
BeforeEach(func() {
expectedErr = errors.New("get service bindings error")
fakeCloudControllerClient.GetServiceInstanceServiceBindingsReturns(
[]ccv2.ServiceBinding{},
ccv2.Warnings{"get-service-bindings-warning"},
expectedErr)
})
It("returns the error and all warnings", func() {
Expect(summaryErr).To(MatchError(expectedErr))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning", "get-service-warning", "get-service-bindings-warning"))
Expect(fakeCloudControllerClient.GetServiceInstanceServiceBindingsCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceServiceBindingsArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
})
})
When("no errors are encountered getting the service bindings", func() {
var returnedServiceBindings []ccv2.ServiceBinding
BeforeEach(func() {
returnedServiceBindings = []ccv2.ServiceBinding{
{
GUID: "some-service-binding-1-guid",
Name: "some-service-binding-1",
AppGUID: "some-app-1-guid",
LastOperation: ccv2.LastOperation{Type: "create", State: constant.LastOperationInProgress, Description: "10% complete"},
},
{
GUID: "some-service-binding-2-guid",
Name: "some-service-binding-2",
AppGUID: "some-app-2-guid",
LastOperation: ccv2.LastOperation{Type: "delete", State: constant.LastOperationSucceeded, Description: "100% complete"},
},
}
fakeCloudControllerClient.GetServiceInstanceServiceBindingsReturns(
returnedServiceBindings,
ccv2.Warnings{"get-service-bindings-warning"},
nil)
})
When("an error is encountered getting bound application info", func() {
var expectedErr error
BeforeEach(func() {
expectedErr = errors.New("get application error")
fakeCloudControllerClient.GetApplicationReturns(
ccv2.Application{},
ccv2.Warnings{"get-application-warning"},
expectedErr)
})
It("returns the error", func() {
						Expect(summaryErr).To(MatchError(expectedErr))
						Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning", "get-service-warning", "get-service-bindings-warning", "get-application-warning"))
Expect(fakeCloudControllerClient.GetApplicationCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetApplicationArgsForCall(0)).To(Equal(returnedServiceBindings[0].AppGUID))
})
})
When("no errors are encountered getting bound application info", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetApplicationReturnsOnCall(
0,
ccv2.Application{
GUID: "some-app-1-guid",
Name: "some-app-1",
},
ccv2.Warnings{"get-application-warning-1"},
nil)
fakeCloudControllerClient.GetApplicationReturnsOnCall(
1,
ccv2.Application{
GUID: "some-app-2-guid",
Name: "some-app-2",
},
ccv2.Warnings{"get-application-warning-2"},
nil)
})
It("returns a list of applications bound to the service instance and all warnings", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary.ServiceInstance).To(Equal(ServiceInstance(returnedServiceInstance)))
Expect(summary.ServicePlan).To(Equal(ServicePlan(returnedServicePlan)))
Expect(summary.Service).To(Equal(Service(returnedService)))
Expect(summary.BoundApplications).To(Equal([]BoundApplication{
{
AppName: "some-app-1",
ServiceBindingName: "some-service-binding-1",
LastOperation: LastOperation{
Type: "create",
State: constant.LastOperationInProgress,
Description: "10% complete",
},
},
{
AppName: "some-app-2",
ServiceBindingName: "some-service-binding-2",
LastOperation: LastOperation{
Type: "delete",
State: constant.LastOperationSucceeded,
Description: "100% complete",
},
},
}))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-feature-flags-warning", "get-service-plan-warning", "get-service-warning", "get-service-bindings-warning", "get-application-warning-1", "get-application-warning-2"))
Expect(fakeCloudControllerClient.GetServiceInstanceServiceBindingsCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetServiceInstanceServiceBindingsArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetApplicationCallCount()).To(Equal(2))
Expect(fakeCloudControllerClient.GetApplicationArgsForCall(0)).To(Equal(returnedServiceBindings[0].AppGUID))
Expect(fakeCloudControllerClient.GetApplicationArgsForCall(1)).To(Equal(returnedServiceBindings[1].AppGUID))
Expect(fakeCloudControllerClient.GetUserProvidedServiceInstanceServiceBindingsCallCount()).To(Equal(0))
})
})
})
})
})
})
When("the service instance is a user provided service instance", func() {
BeforeEach(func() {
returnedServiceInstance = ccv2.ServiceInstance{
GUID: "some-user-provided-service-instance-guid",
Name: "some-user-provided-service-instance",
Type: constant.UserProvidedService,
}
fakeCloudControllerClient.GetSpaceServiceInstancesReturns(
[]ccv2.ServiceInstance{returnedServiceInstance},
ccv2.Warnings{"get-space-service-instance-warning"},
nil)
})
Context("getting the service bindings errors", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetUserProvidedServiceInstanceServiceBindingsReturns(
nil,
ccv2.Warnings{"some-get-user-provided-si-bindings-warnings"},
errors.New("some-get-user-provided-si-bindings-error"))
})
It("should return the error and return all warnings", func() {
Expect(summaryErr).To(MatchError("some-get-user-provided-si-bindings-error"))
Expect(summaryWarnings).To(ConsistOf("some-get-user-provided-si-bindings-warnings",
"get-space-service-instance-warning"))
})
})
When("no errors are encountered getting the service bindings", func() {
var returnedServiceBindings []ccv2.ServiceBinding
BeforeEach(func() {
returnedServiceBindings = []ccv2.ServiceBinding{
{
GUID: "some-service-binding-1-guid",
Name: "some-service-binding-1",
AppGUID: "some-app-1-guid",
},
{
GUID: "some-service-binding-2-guid",
Name: "some-service-binding-2",
AppGUID: "some-app-2-guid",
},
}
fakeCloudControllerClient.GetUserProvidedServiceInstanceServiceBindingsReturns(
returnedServiceBindings,
ccv2.Warnings{"get-service-bindings-warning"},
nil)
})
When("no errors are encountered getting bound application info", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetApplicationReturnsOnCall(
0,
ccv2.Application{
GUID: "some-app-1-guid",
Name: "some-app-1",
},
ccv2.Warnings{"get-application-warning-1"},
nil)
fakeCloudControllerClient.GetApplicationReturnsOnCall(
1,
ccv2.Application{
GUID: "some-app-2-guid",
Name: "some-app-2",
},
ccv2.Warnings{"get-application-warning-2"},
nil)
})
It("returns a list of applications bound to the service instance and all warnings", func() {
Expect(summaryErr).ToNot(HaveOccurred())
Expect(summary).To(Equal(ServiceInstanceSummary{
ServiceInstance: ServiceInstance(returnedServiceInstance),
BoundApplications: []BoundApplication{
{
AppName: "some-app-1",
ServiceBindingName: "some-service-binding-1",
},
{
AppName: "some-app-2",
ServiceBindingName: "some-service-binding-2",
},
},
}))
Expect(summaryWarnings).To(ConsistOf("get-space-service-instance-warning", "get-service-bindings-warning", "get-application-warning-1", "get-application-warning-2"))
Expect(fakeCloudControllerClient.GetSpaceServiceInstancesCallCount()).To(Equal(1))
spaceGUIDArg, getUserProvidedServicesArg, queriesArg := fakeCloudControllerClient.GetSpaceServiceInstancesArgsForCall(0)
Expect(spaceGUIDArg).To(Equal("some-space-guid"))
Expect(getUserProvidedServicesArg).To(BeTrue())
Expect(queriesArg).To(HaveLen(1))
Expect(queriesArg[0]).To(Equal(ccv2.Filter{
Type: constant.NameFilter,
Operator: constant.EqualOperator,
Values: []string{"some-service-instance"},
}))
Expect(fakeCloudControllerClient.GetUserProvidedServiceInstanceServiceBindingsCallCount()).To(Equal(1))
Expect(fakeCloudControllerClient.GetUserProvidedServiceInstanceServiceBindingsArgsForCall(0)).To(Equal(returnedServiceInstance.GUID))
Expect(fakeCloudControllerClient.GetApplicationCallCount()).To(Equal(2))
Expect(fakeCloudControllerClient.GetApplicationArgsForCall(0)).To(Equal(returnedServiceBindings[0].AppGUID))
Expect(fakeCloudControllerClient.GetApplicationArgsForCall(1)).To(Equal(returnedServiceBindings[1].AppGUID))
Expect(fakeCloudControllerClient.GetServicePlanCallCount()).To(Equal(0))
Expect(fakeCloudControllerClient.GetServiceCallCount()).To(Equal(0))
Expect(fakeCloudControllerClient.GetServiceInstanceServiceBindingsCallCount()).To(Equal(0))
})
})
})
})
})
})
Describe("GetServiceInstancesSummaryBySpace", func() {
var (
serviceInstancesSummary []ServiceInstanceSummary
warnings Warnings
executeErr error
)
JustBeforeEach(func() {
serviceInstancesSummary, warnings, executeErr = actor.GetServiceInstancesSummaryBySpace("some-space-GUID")
})
When("an error is encountered getting a space's summary", func() {
var expectedErr error
BeforeEach(func() {
expectedErr = errors.New("summary error")
fakeCloudControllerClient.GetSpaceSummaryReturns(
ccv2.SpaceSummary{},
ccv2.Warnings{"get-by-space-service-instances-warning"},
expectedErr,
)
})
It("returns the error and all warnings", func() {
Expect(executeErr).To(MatchError(expectedErr))
Expect(warnings).To(ConsistOf("get-by-space-service-instances-warning"))
})
})
When("no errors are encountered getting a space's summary", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetServicesReturns(
[]ccv2.Service{
{
GUID: "service-guid-1",
Label: "service-label",
ServiceBrokerName: "some-broker",
},
{
GUID: "service-guid-2",
Label: "service-label",
ServiceBrokerName: "other-broker",
},
},
ccv2.Warnings{"get-space-services-warning"},
nil,
)
fakeCloudControllerClient.GetSpaceSummaryReturns(
ccv2.SpaceSummary{
Name: "space-name",
Applications: []ccv2.SpaceSummaryApplication{
{
Name: "1-app-name",
ServiceNames: []string{"managed-service-instance", "user-provided-service-instance"},
},
{
Name: "2-app-name",
ServiceNames: []string{"managed-service-instance"},
},
},
ServiceInstances: []ccv2.SpaceSummaryServiceInstance{
{
Name: "managed-service-instance",
MaintenanceInfo: ccv2.MaintenanceInfo{
Version: "2.0.0",
},
ServicePlan: ccv2.SpaceSummaryServicePlan{
GUID: "plan-guid",
Name: "simple-plan",
MaintenanceInfo: ccv2.MaintenanceInfo{
Version: "3.0.0",
},
Service: ccv2.SpaceSummaryService{
GUID: "service-guid-1",
Label: "service-label",
ServiceBrokerName: "some-broker",
},
},
LastOperation: ccv2.LastOperation{
Type: "create",
State: "succeeded",
Description: "a description",
},
},
{
Name: "user-provided-service-instance",
},
},
},
ccv2.Warnings{"get-space-summary-warning"},
nil,
)
})
It("returns the service instances summary with bound apps and all warnings", func() {
Expect(executeErr).NotTo(HaveOccurred())
Expect(warnings).To(ConsistOf("get-space-summary-warning", "get-space-services-warning"))
Expect(serviceInstancesSummary).To(Equal([]ServiceInstanceSummary{
{
ServiceInstance: ServiceInstance{
Name: "managed-service-instance",
Type: constant.ManagedService,
LastOperation: ccv2.LastOperation{
Type: "create",
State: "succeeded",
Description: "a description",
},
MaintenanceInfo: ccv2.MaintenanceInfo{
Version: "2.0.0",
},
},
ServicePlan: ServicePlan{
Name: "simple-plan",
MaintenanceInfo: ccv2.MaintenanceInfo{
Version: "3.0.0",
},
},
Service: Service{
Label: "service-label",
ServiceBrokerName: "some-broker",
},
BoundApplications: []BoundApplication{
{AppName: "1-app-name"},
{AppName: "2-app-name"},
},
},
{
ServiceInstance: ServiceInstance{
Name: "user-provided-service-instance",
Type: constant.UserProvidedService,
},
BoundApplications: []BoundApplication{
{AppName: "1-app-name"},
},
},
},
))
})
When("an error is encountered getting all services", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetServicesReturns(
[]ccv2.Service{},
ccv2.Warnings{"warning-1", "warning-2"},
errors.New("oops"),
)
})
It("returns the error and all warnings", func() {
Expect(executeErr).To(MatchError(errors.New("oops")))
Expect(warnings).To(ConsistOf("get-space-summary-warning", "warning-1", "warning-2"))
})
})
})
})
}) | |
w_inline_code.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'w_inline_code.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_InlineCodeEditor(object):
def setupUi(self, InlineCodeEditor):
InlineCodeEditor.setObjectName("InlineCodeEditor")
InlineCodeEditor.resize(400, 300)
self.verticalLayout = QtWidgets.QVBoxLayout(InlineCodeEditor)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.txtExpr = QtWidgets.QLineEdit(InlineCodeEditor)
self.txtExpr.setObjectName("txtExpr")
self.horizontalLayout.addWidget(self.txtExpr)
self.btnClear = QtWidgets.QPushButton(InlineCodeEditor)
self.btnClear.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/action/media/clear.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnClear.setIcon(icon)
self.btnClear.setObjectName("btnClear")
self.horizontalLayout.addWidget(self.btnClear)
self.btnSubmit = QtWidgets.QPushButton(InlineCodeEditor)
self.btnSubmit.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/action/media/accept.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnSubmit.setIcon(icon1)
self.btnSubmit.setDefault(True)
self.btnSubmit.setObjectName("btnSubmit")
self.horizontalLayout.addWidget(self.btnSubmit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.cbxFuncs = QtWidgets.QComboBox(InlineCodeEditor)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cbxFuncs.sizePolicy().hasHeightForWidth())
self.cbxFuncs.setSizePolicy(sizePolicy)
self.cbxFuncs.setObjectName("cbxFuncs")
self.horizontalLayout_2.addWidget(self.cbxFuncs)
self.txtSearch = QtWidgets.QLineEdit(InlineCodeEditor)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.txtSearch.sizePolicy().hasHeightForWidth())
self.txtSearch.setSizePolicy(sizePolicy)
self.txtSearch.setObjectName("txtSearch")
self.horizontalLayout_2.addWidget(self.txtSearch)
self.horizontalLayout_2.setStretch(0, 1)
self.horizontalLayout_2.setStretch(1, 2)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.lstFuncs = QtWidgets.QListWidget(InlineCodeEditor)
self.lstFuncs.setObjectName("lstFuncs")
self.verticalLayout_2.addWidget(self.lstFuncs)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.retranslateUi(InlineCodeEditor)
QtCore.QMetaObject.connectSlotsByName(InlineCodeEditor)
InlineCodeEditor.setTabOrder(self.txtExpr, self.btnClear)
InlineCodeEditor.setTabOrder(self.btnClear, self.btnSubmit)
InlineCodeEditor.setTabOrder(self.btnSubmit, self.lstFuncs)
def retranslateUi(self, InlineCodeEditor):
pass
import turing_rc
poseestimation.py
import cv2
import numpy as np
import argparse
import csv
import os
import glob
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--csv", help="Path to the CSV file holding the 2D data for the video.")
ap.add_argument("-v", "--video", help="Path to the video file.")
args = vars(ap.parse_args())
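# Illustrative invocation (the directory names below are assumptions, not part
# of this script); both arguments are directories that are globbed for
# *.csv / *.avi files respectively:
#   python poseestimation.py -c ./annotations/ -v ./videos/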
dir_name = args["csv"]
csv_list = [os.path.basename(x) for x in glob.glob(dir_name+"*.csv")]
csv_list.sort()
output_path = os.getcwd() + "/data/Knot_Tying/"
try:
os.makedirs(output_path)
except FileExistsError as e:
pass
dir_name = args["video"]
video_list = [os.path.basename(x) for x in glob.glob(dir_name+"*.avi")]
video_list.sort()
for i, csvs_file in enumerate(csv_list):
video_path = args["video"] + video_list[i]
cap = cv2.VideoCapture(video_path)
frame = cap.read()[1]
frameSize = frame.shape
cap.release()
rows = []
result_file = output_path + csvs_file
csv_file_path = args["csv"] + csvs_file
with open(csv_file_path, "r") as f:
csvReader = csv.reader(f)
for i, row in enumerate(csvReader):
rows.append(list(row))
modelPoints = np.array([
(0.0, 0.0, 0.0), # Origo
(2.0, 0.0, 2.8), # Left from Origo
(10.83, 0.5, 0.5), # RightAbove from Origo
(10.83, -0.5, 0.5), # RightBelow from Origo
(0.0, -3.16, 0.5), # Below Origo
(0.0, 3.16, 0.5) # Above Orgio
])
focalLength = frameSize[1]
center = (frameSize[1]/2, frameSize[0]/2)
cameraMatrix = np.array([
[focalLength, 0, center[0]],
[0, focalLength, center[1]],
        [0,0,1]
        ], dtype="double")

    distCoeffs = np.zeros((4,1))

    with open(result_file, 'w') as r:
rwriter = csv.writer(r)
for row in rows:
imagePoints = np.array([
(float(row[0]), float(row[1])), # Origo
(float(row[2]), float(row[3])), # Left from Origo
(float(row[4]), float(row[5])), # RightAbove from Origo
(float(row[6]), float(row[7])), # RightBelow from Origo
(float(row[8]), float(row[9])), # Below Origo
(float(row[10]), float(row[11])) # Above Origo
])
(success, rotationVector, translationVector) = cv2.solvePnP(
modelPoints,
imagePoints,
cameraMatrix,
distCoeffs,
flags=cv2.SOLVEPNP_ITERATIVE)
data = [translationVector[0][0], translationVector[1][0], translationVector[2][0]]
            rwriter.writerow(data)
lbph.go
// Package lbph provides texture classification using local binary patterns.
package lbph
import (
"errors"
"image"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
"github.com/kelvins/lbph/histogram"
"github.com/kelvins/lbph/lbp"
"github.com/kelvins/lbph/metric"
)
// TrainingData struct is used to store the input data (images and labels)
// and each calculated histogram.
type TrainingData struct {
Images []image.Image
Labels []string
Histograms [][]float64
}
// Params struct is used to pass the LBPH parameters.
type Params struct {
Radius uint8
Neighbors uint8
GridX uint8
GridY uint8
}
// trainData struct stores the TrainingData loaded by the user.
// It needs to be a pointer because the first state will be nil.
// This field should not be exported because it is "read only".
var trainingData = &TrainingData{}
// lbphParams struct stores the LBPH parameters.
// It is not a pointer, so it will never be nil.
// This field should not be exported because the user cannot change
// the LBPH parameters after training the algorithm. To change the
// parameters we need to call Init that will "reset" the training data.
var lbphParams = Params{}
// The metric used to compare the histograms in the Predict step.
var Metric string
// init defines the default state of some variables.
// It sets the default LBPH parameters,
// sets trainingData to nil, and defines the default
// metric (in this case EuclideanDistance).
func init() {
	// Define the default LBPH parameters.
	lbphParams = Params{
		Radius:    1,
		Neighbors: 8,
		GridX:     8,
		GridY:     8,
	}
	// As the trainData is a pointer, the initial state can be nil.
	trainingData = nil
	// Use the EuclideanDistance as the default metric.
	Metric = metric.EuclideanDistance
}
// Init function is used to set the LBPH parameters based on the Params structure.
// It is needed to set the default parameters if something is wrong and
// to reset the trainingData when new parameters are defined.
func Init(params Params) {
// If some parameter is wrong (== 0) set the default one.
// As the data type is uint8 we don't need to check if it is lower than 0.
if params.Radius == 0 {
params.Radius = 1
}
if params.Neighbors == 0 {
params.Neighbors = 8
}
if params.GridX == 0 {
params.GridX = 8
}
if params.GridY == 0 {
params.GridY = 8
}
// Set the LBPH Params
lbphParams = params
// Every time the Init function is called the training data will be
// reset, so the user needs to train the algorithm again.
trainingData = nil
}
// GetTrainingData is used to get the trainingData struct.
// The user can use it to access the images, labels and histograms.
func GetTrainingData() TrainingData {
// Returns the data structure pointed by trainData.
return *trainingData
}
// checkImagesSizes function is used to check if all images have the same size.
func checkImagesSizes(images []image.Image) error {
// Check if the slice is empty
if len(images) == 0 {
return errors.New("The images slice is empty")
}
// Check if the first image is nil
if images[0] == nil {
return errors.New("At least one image in the slice is nil")
}
// Get the image size from the first image
defaultWidth, defaultHeight := lbp.GetImageSize(images[0])
// Check if the size is valid
// This condition should never happen because
// we already tested if the image was nil
if defaultWidth <= 0 || defaultHeight <= 0 {
return errors.New("At least one image have an invalid size")
}
// Check each image in the slice
for index := 0; index < len(images); index++ {
// Check if the current image is nil
if images[index] == nil {
return errors.New("At least one image in the slice is nil")
}
// Get the size from the current image
width, height := lbp.GetImageSize(images[index])
// Check if all images have the same size
if width != defaultWidth || height != defaultHeight {
return errors.New("One or more images have different sizes")
}
}
// No error has occurred, return nil
return nil
}
// Train function is used for training the LBPH algorithm based on the
// images and labels passed by parameter. It basically checks the input
// data, calculates the LBP operation and gets the histogram of each image.
func Train(images []image.Image, labels []string) error {
// Clear the data structure
trainingData = nil
// Check if the slices are not empty.
if len(images) == 0 || len(labels) == 0 {
return errors.New("At least one of the slices is empty")
}
// Check if the images and labels slices have the same size.
if len(images) != len(labels) {
return errors.New("The slices have different sizes")
}
// Call the CheckImagesSizes from the common package.
// It will check if all images have the same size.
err := checkImagesSizes(images)
if err != nil {
return err
}
// Calculates the LBP operation and gets the histograms for each image.
var histograms [][]float64
for index := 0; index < len(images); index++ {
// Calculate the LBP operation for the current image.
pixels, err := lbp.Calculate(images[index], lbphParams.Radius, lbphParams.Neighbors)
if err != nil {
return err
}
// Get the histogram from the current image.
hist, err := histogram.Calculate(pixels, lbphParams.GridX, lbphParams.GridY)
if err != nil {
return err
}
// Store the histogram in the 'matrix' (slice of slice).
histograms = append(histograms, hist)
}
// Store the current data that we are working on.
trainingData = &TrainingData{
Images: images,
Labels: labels,
Histograms: histograms,
}
// Everything is ok, return nil.
return nil
}
// Predict function is used to find the closest image based on the images used in the training step.
func Predict(img image.Image) (string, float64, error) {
// Check if we have data in the trainingData struct.
if trainingData == nil {
return "", 0.0, errors.New("The algorithm was not trained yet")
}
// Check if the image passed by parameter is nil.
if img == nil {
return "", 0.0, errors.New("The image passed by parameter is nil")
}
	// If we don't have histograms to compare, probably the Train function was
	// not called, or an error occurred and was not handled correctly.
if len(trainingData.Histograms) == 0 {
return "", 0.0, errors.New("There are no histograms in the trainData")
}
// Calculate the LBP operation.
pixels, err := lbp.Calculate(img, lbphParams.Radius, lbphParams.Neighbors)
if err != nil {
return "", 0.0, err
}
// Calculate the histogram for the image.
hist, err := histogram.Calculate(pixels, lbphParams.GridX, lbphParams.GridY)
if err != nil {
return "", 0.0, err
}
// Search for the closest histogram based on the histograms calculated in the training step.
minDistance, err := histogram.Compare(hist, trainingData.Histograms[0], Metric)
if err != nil {
return "", 0.0, err
}
minIndex := 0
for index := 1; index < len(trainingData.Histograms); index++ {
// Calculate the distance from the current histogram.
distance, err := histogram.Compare(hist, trainingData.Histograms[index], Metric)
if err != nil {
return "", 0.0, err
}
// If it is closer, save the minDistance and the index.
if distance < minDistance {
minDistance = distance
minIndex = index
}
}
// Return the label corresponding to the closest histogram,
// the distance (minDistance) and the error (nil).
return trainingData.Labels[minIndex], minDistance, nil
}
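// Illustrative usage sketch (not part of the original package; the image
// variables img1, img2, sample and the labels are assumptions):
//
//	lbph.Init(lbph.Params{Radius: 1, Neighbors: 8, GridX: 8, GridY: 8})
//	if err := lbph.Train([]image.Image{img1, img2}, []string{"wood", "metal"}); err != nil {
//		log.Fatal(err)
//	}
//	label, distance, err := lbph.Predict(sample)
//	// label is the closest training label; distance is measured with the configured Metric.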
__init__.py
import json
import sys
# Assumed import: JSONRPCException (raised in load_config below) is expected to
# live in the sibling client module of this package.
from .client import JSONRPCException
from . import app
from . import bdev
from . import iscsi
from . import log
from . import lvol
from . import nbd
from . import net
from . import nvmf
from . import pmem
from . import subsystem
from . import vhost
def start_subsystem_init(client):
return client.call('start_subsystem_init')
def get_rpc_methods(client, args):
params = {}
if args.current:
params['current'] = args.current
return client.call('get_rpc_methods', params)
def save_config(client, args):
config = {
'subsystems': []
}
for elem in client.call('get_subsystems'):
cfg = {
'subsystem': elem['subsystem'],
'config': client.call('get_subsystem_config', {"name": elem['subsystem']})
}
config['subsystems'].append(cfg)
indent = args.indent
if args.filename is None:
if indent is None:
indent = 2
elif indent < 0:
indent = None
json.dump(config, sys.stdout, indent=indent)
sys.stdout.write('\n')
else:
if indent is None or indent < 0:
indent = None
with open(args.filename, 'w') as file:
json.dump(config, file, indent=indent)
file.write('\n')
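

# Illustrative sketch (not part of SPDK): save_config/load_config expect a
# JSON-RPC client object exposing .call(method, params) plus an argparse-style
# namespace; the attribute values below are assumptions.
#   import argparse
#   args = argparse.Namespace(filename='spdk_config.json', indent=2)
#   save_config(client, args)
#   load_config(client, argparse.Namespace(filename='spdk_config.json'))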
def load_config(client, args):
if not args.filename or args.filename == '-':
json_config = json.load(sys.stdin)
else:
with open(args.filename, 'r') as file:
json_config = json.load(file)
subsystems = json_config['subsystems']
while subsystems:
allowed_methods = client.call('get_rpc_methods', {'current': True})
allowed_found = False
for subsystem in list(subsystems):
if not subsystem['config']:
subsystems.remove(subsystem)
continue
config = subsystem['config']
for elem in list(config):
if not elem or 'method' not in elem or elem['method'] not in allowed_methods:
continue
client.call(elem['method'], elem['params'])
config.remove(elem)
allowed_found = True
if not config:
subsystems.remove(subsystem)
if 'start_subsystem_init' in allowed_methods:
client.call('start_subsystem_init')
allowed_found = True
if subsystems and not allowed_found:
            raise JSONRPCException("Some config left but could not find any allowed method to execute")
client.go
package main
import (
"context"
"flag"
"fmt"
"github.com/HuJingwei/go-zero/pkg/log"
"github.com/HuJingwei/go-zero/pkg/net/rpc/warden"
pb "github.com/HuJingwei/go-zero/pkg/net/rpc/warden/internal/proto/testproto"
)

func main() {
log.Init(&log.Config{Stdout: true})
flag.Parse()
conn, err := warden.NewClient(nil).Dial(context.Background(), "direct://default/127.0.0.1:9000")
if err != nil {
panic(err)
}
cli := pb.NewGreeterClient(conn)
normalCall(cli)
}
func normalCall(cli pb.GreeterClient) {
reply, err := cli.SayHello(context.Background(), &pb.HelloRequest{Name: "tom", Age: 23})
if err != nil {
panic(err)
}
fmt.Println("get reply:", *reply)
} |
// usage: ./client -grpc.target=test.service=127.0.0.1:9000 |
electricity-chart.component.ts
import {delay, takeWhile} from 'rxjs/operators';
import {AfterViewInit, Component, Input, OnDestroy} from '@angular/core';
import {NbThemeService} from '@nebular/theme';
import {LayoutService} from '../../../../@core/utils';
import {ElectricityChart} from '../../../../@core/data/electricity';

// Assumed: the global ECharts object is provided by the application's echarts bundle.
declare const echarts: any;
@Component({
selector: 'ngx-electricity-chart',
styleUrls: ['./electricity-chart.component.scss'],
template: `
<div echarts
[options]="option"
[merge]="option"
class="echart"
(chartInit)="onChartInit($event)">
</div>
`,
})
export class ElectricityChartComponent implements AfterViewInit, OnDestroy {
private alive = true;
@Input() data: ElectricityChart[];
option: any;
echartsIntance: any;
constructor(private theme: NbThemeService,
private layoutService: LayoutService) {
this.layoutService.onSafeChangeLayoutSize()
.pipe(
takeWhile(() => this.alive),
)
.subscribe(() => this.resizeChart());
}
ngAfterViewInit(): void {
this.theme.getJsTheme()
.pipe(
takeWhile(() => this.alive),
delay(1),
)
.subscribe(config => {
const eTheme: any = config.variables.electricity;
this.option = {
grid: {
left: 0,
top: 0,
right: 0,
bottom: 80,
},
tooltip: {
trigger: 'axis',
axisPointer: {
type: 'line',
lineStyle: {
color: eTheme.tooltipLineColor,
width: eTheme.tooltipLineWidth,
},
},
textStyle: {
color: eTheme.tooltipTextColor,
fontSize: 20,
fontWeight: eTheme.tooltipFontWeight,
},
position: 'top',
backgroundColor: eTheme.tooltipBg,
borderColor: eTheme.tooltipBorderColor,
borderWidth: 1,
formatter: '{c0} kWh',
extraCssText: eTheme.tooltipExtraCss,
},
xAxis: {
type: 'category',
boundaryGap: false,
offset: 25,
data: this.data.map(i => i.label),
axisTick: {
show: false,
},
axisLabel: {
color: eTheme.xAxisTextColor,
fontSize: 18,
},
axisLine: {
lineStyle: {
color: eTheme.axisLineColor,
width: '2',
},
},
},
yAxis: {
boundaryGap: [0, '5%'],
axisLine: {
show: false,
},
axisLabel: {
show: false,
},
axisTick: {
show: false,
},
splitLine: {
show: true,
lineStyle: {
color: eTheme.yAxisSplitLine,
width: '1',
},
},
},
series: [
{
type: 'line',
smooth: true,
symbolSize: 20,
itemStyle: {
normal: {
opacity: 0,
},
emphasis: {
color: '#ffffff',
borderColor: eTheme.itemBorderColor,
borderWidth: 2,
opacity: 1,
},
},
lineStyle: {
normal: {
width: eTheme.lineWidth,
type: eTheme.lineStyle,
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: eTheme.lineGradFrom,
}, {
offset: 1,
color: eTheme.lineGradTo,
}]),
shadowColor: eTheme.lineShadow,
shadowBlur: 6,
shadowOffsetY: 12,
},
},
areaStyle: {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: eTheme.areaGradFrom,
}, {
offset: 1,
color: eTheme.areaGradTo,
}]),
},
},
data: this.data.map(i => i.value),
},
{
type: 'line',
smooth: true,
symbol: 'none',
lineStyle: {
normal: {
width: eTheme.lineWidth,
type: eTheme.lineStyle,
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: eTheme.lineGradFrom,
}, {
offset: 1,
color: eTheme.lineGradTo,
}]),
shadowColor: eTheme.shadowLineDarkBg,
shadowBlur: 14,
opacity: 1,
},
},
data: this.data.map(i => i.value),
},
],
};
});
}
onChartInit(echarts) {
this.echartsIntance = echarts;
}
resizeChart() {
if (this.echartsIntance) {
this.echartsIntance.resize();
}
}
ngOnDestroy() {
this.alive = false;
}
}
asset_service.pb.go
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.13.0
// source: google/ads/googleads/v3/services/asset_service.proto
package services
import (
context "context"
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
resources "google.golang.org/genproto/googleapis/ads/googleads/v3/resources"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Request message for [AssetService.GetAsset][google.ads.googleads.v3.services.AssetService.GetAsset]
type GetAssetRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. The resource name of the asset to fetch.
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
}
func (x *GetAssetRequest) Reset() {
*x = GetAssetRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GetAssetRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetAssetRequest) ProtoMessage() {}
func (x *GetAssetRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetAssetRequest.ProtoReflect.Descriptor instead.
func (*GetAssetRequest) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v3_services_asset_service_proto_rawDescGZIP(), []int{0}
}
func (x *GetAssetRequest) GetResourceName() string {
if x != nil {
return x.ResourceName
}
return ""
}
// Request message for [AssetService.MutateAssets][google.ads.googleads.v3.services.AssetService.MutateAssets]
type MutateAssetsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. The ID of the customer whose assets are being modified.
CustomerId string `protobuf:"bytes,1,opt,name=customer_id,json=customerId,proto3" json:"customer_id,omitempty"`
// Required. The list of operations to perform on individual assets.
Operations []*AssetOperation `protobuf:"bytes,2,rep,name=operations,proto3" json:"operations,omitempty"`
}
func (x *MutateAssetsRequest) Reset() {
*x = MutateAssetsRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MutateAssetsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MutateAssetsRequest) ProtoMessage() {}
func (x *MutateAssetsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MutateAssetsRequest.ProtoReflect.Descriptor instead.
func (*MutateAssetsRequest) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v3_services_asset_service_proto_rawDescGZIP(), []int{1}
}
func (x *MutateAssetsRequest) GetCustomerId() string {
if x != nil {
return x.CustomerId
}
return ""
}
func (x *MutateAssetsRequest) GetOperations() []*AssetOperation {
if x != nil {
return x.Operations
}
return nil
}
// A single operation to create an asset. Supported asset types are
// YoutubeVideoAsset, MediaBundleAsset, ImageAsset, and LeadFormAsset. TextAsset
// should be created with Ad inline.
type AssetOperation struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The mutate operation.
//
// Types that are assignable to Operation:
// *AssetOperation_Create
Operation isAssetOperation_Operation `protobuf_oneof:"operation"`
}
func (x *AssetOperation) Reset() {
*x = AssetOperation{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AssetOperation) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AssetOperation) ProtoMessage() {}
func (x *AssetOperation) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AssetOperation.ProtoReflect.Descriptor instead.
func (*AssetOperation) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v3_services_asset_service_proto_rawDescGZIP(), []int{2}
}
func (m *AssetOperation) GetOperation() isAssetOperation_Operation {
if m != nil {
return m.Operation
}
return nil
}
func (x *AssetOperation) GetCreate() *resources.Asset {
if x, ok := x.GetOperation().(*AssetOperation_Create); ok {
return x.Create
}
return nil
}
type isAssetOperation_Operation interface {
isAssetOperation_Operation()
}
type AssetOperation_Create struct {
// Create operation: No resource name is expected for the new asset.
Create *resources.Asset `protobuf:"bytes,1,opt,name=create,proto3,oneof"`
}
func (*AssetOperation_Create) isAssetOperation_Operation() {}
// Response message for an asset mutate.
type MutateAssetsResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// All results for the mutate.
Results []*MutateAssetResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
}
func (x *MutateAssetsResponse) Reset() {
*x = MutateAssetsResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MutateAssetsResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MutateAssetsResponse) ProtoMessage() {}
func (x *MutateAssetsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MutateAssetsResponse.ProtoReflect.Descriptor instead.
func (*MutateAssetsResponse) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v3_services_asset_service_proto_rawDescGZIP(), []int{3}
}
func (x *MutateAssetsResponse) GetResults() []*MutateAssetResult {
if x != nil {
return x.Results
}
return nil
}
// The result for the asset mutate.
type MutateAssetResult struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The resource name returned for successful operations.
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
}
func (x *MutateAssetResult) Reset() {
*x = MutateAssetResult{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MutateAssetResult) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MutateAssetResult) ProtoMessage() {}
func (x *MutateAssetResult) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MutateAssetResult.ProtoReflect.Descriptor instead.
func (*MutateAssetResult) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v3_services_asset_service_proto_rawDescGZIP(), []int{4}
}
func (x *MutateAssetResult) GetResourceName() string {
if x != nil {
return x.ResourceName
}
return ""
}
var File_google_ads_googleads_v3_services_asset_service_proto protoreflect.FileDescriptor
var file_google_ads_googleads_v3_services_asset_service_proto_rawDesc = []byte{
0x0a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x33, 0x2e,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76,
0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x61, 0x73, 0x73, 0x65,
0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64,
0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5e, 0x0a, 0x0f, 0x47, 0x65,
0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a,
0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x0c, 0x72, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x4d,
0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x75,
0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x12, 0x55, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x64, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
0x41, 0x73, 0x73, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03,
0xe0, 0x41, 0x02, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
0x61, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x42, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x72, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x48, 0x00, 0x52, 0x06, 0x63,
0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x22, 0x65, 0x0a, 0x14, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65,
0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x07, 0x72, 0x65,
0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x64, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x4d,
0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74,
0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x38, 0x0a, 0x11, 0x4d, 0x75, 0x74,
0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x23,
0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e,
0x61, 0x6d, 0x65, 0x32, 0xa8, 0x03, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x12, 0xa9, 0x01, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x41, 0x73, 0x73, 0x65,
0x74, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64,
0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x72,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x22, 0x40,
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x63, 0x75, 0x73, 0x74, 0x6f,
0x6d, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
0xda, 0x41, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0xce, 0x01, 0x0a, 0x0c, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74,
0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74,
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e,
0x76, 0x33, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x75, 0x74, 0x61,
0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x4f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x22, 0x2b, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x75,
0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65,
0x72, 0x5f, 0x69, 0x64, 0x3d, 0x2a, 0x7d, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73, 0x3a, 0x6d,
0x75, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x16, 0x63, 0x75, 0x73, 0x74, 0x6f,
0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x1a, 0x1b, 0xca, 0x41, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0xf8,
0x01, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64,
0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64,
0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x3b, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa, 0x02, 0x20, 0x47,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x41, 0x64, 0x73, 0x2e, 0x56, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xca,
0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x33, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0xea, 0x02, 0x24, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x73,
0x3a, 0x3a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56, 0x33, 0x3a,
0x3a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_google_ads_googleads_v3_services_asset_service_proto_rawDescOnce sync.Once
file_google_ads_googleads_v3_services_asset_service_proto_rawDescData = file_google_ads_googleads_v3_services_asset_service_proto_rawDesc
)
func file_google_ads_googleads_v3_services_asset_service_proto_rawDescGZIP() []byte {
file_google_ads_googleads_v3_services_asset_service_proto_rawDescOnce.Do(func() {
file_google_ads_googleads_v3_services_asset_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v3_services_asset_service_proto_rawDescData)
})
return file_google_ads_googleads_v3_services_asset_service_proto_rawDescData
}
var file_google_ads_googleads_v3_services_asset_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_google_ads_googleads_v3_services_asset_service_proto_goTypes = []interface{}{
(*GetAssetRequest)(nil), // 0: google.ads.googleads.v3.services.GetAssetRequest
(*MutateAssetsRequest)(nil), // 1: google.ads.googleads.v3.services.MutateAssetsRequest
(*AssetOperation)(nil), // 2: google.ads.googleads.v3.services.AssetOperation
(*MutateAssetsResponse)(nil), // 3: google.ads.googleads.v3.services.MutateAssetsResponse
(*MutateAssetResult)(nil), // 4: google.ads.googleads.v3.services.MutateAssetResult
(*resources.Asset)(nil), // 5: google.ads.googleads.v3.resources.Asset
}
var file_google_ads_googleads_v3_services_asset_service_proto_depIdxs = []int32{
2, // 0: google.ads.googleads.v3.services.MutateAssetsRequest.operations:type_name -> google.ads.googleads.v3.services.AssetOperation
5, // 1: google.ads.googleads.v3.services.AssetOperation.create:type_name -> google.ads.googleads.v3.resources.Asset
4, // 2: google.ads.googleads.v3.services.MutateAssetsResponse.results:type_name -> google.ads.googleads.v3.services.MutateAssetResult
0, // 3: google.ads.googleads.v3.services.AssetService.GetAsset:input_type -> google.ads.googleads.v3.services.GetAssetRequest
1, // 4: google.ads.googleads.v3.services.AssetService.MutateAssets:input_type -> google.ads.googleads.v3.services.MutateAssetsRequest
5, // 5: google.ads.googleads.v3.services.AssetService.GetAsset:output_type -> google.ads.googleads.v3.resources.Asset
3, // 6: google.ads.googleads.v3.services.AssetService.MutateAssets:output_type -> google.ads.googleads.v3.services.MutateAssetsResponse
5, // [5:7] is the sub-list for method output_type
3, // [3:5] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_google_ads_googleads_v3_services_asset_service_proto_init() }
func file_google_ads_googleads_v3_services_asset_service_proto_init() {
if File_google_ads_googleads_v3_services_asset_service_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetAssetRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MutateAssetsRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AssetOperation); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MutateAssetsResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MutateAssetResult); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_google_ads_googleads_v3_services_asset_service_proto_msgTypes[2].OneofWrappers = []interface{}{
(*AssetOperation_Create)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_ads_googleads_v3_services_asset_service_proto_rawDesc,
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_google_ads_googleads_v3_services_asset_service_proto_goTypes,
DependencyIndexes: file_google_ads_googleads_v3_services_asset_service_proto_depIdxs,
MessageInfos: file_google_ads_googleads_v3_services_asset_service_proto_msgTypes,
}.Build()
File_google_ads_googleads_v3_services_asset_service_proto = out.File
file_google_ads_googleads_v3_services_asset_service_proto_rawDesc = nil
file_google_ads_googleads_v3_services_asset_service_proto_goTypes = nil
file_google_ads_googleads_v3_services_asset_service_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// AssetServiceClient is the client API for AssetService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AssetServiceClient interface {
// Returns the requested asset in full detail.
GetAsset(ctx context.Context, in *GetAssetRequest, opts ...grpc.CallOption) (*resources.Asset, error)
// Creates assets. Operation statuses are returned.
MutateAssets(ctx context.Context, in *MutateAssetsRequest, opts ...grpc.CallOption) (*MutateAssetsResponse, error)
}
type assetServiceClient struct {
cc grpc.ClientConnInterface
}
func NewAssetServiceClient(cc grpc.ClientConnInterface) AssetServiceClient {
return &assetServiceClient{cc}
}
func (c *assetServiceClient) GetAsset(ctx context.Context, in *GetAssetRequest, opts ...grpc.CallOption) (*resources.Asset, error) {
out := new(resources.Asset)
err := c.cc.Invoke(ctx, "/google.ads.googleads.v3.services.AssetService/GetAsset", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *assetServiceClient) MutateAssets(ctx context.Context, in *MutateAssetsRequest, opts ...grpc.CallOption) (*MutateAssetsResponse, error) {
out := new(MutateAssetsResponse)
err := c.cc.Invoke(ctx, "/google.ads.googleads.v3.services.AssetService/MutateAssets", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// AssetServiceServer is the server API for AssetService service.
type AssetServiceServer interface {
// Returns the requested asset in full detail.
GetAsset(context.Context, *GetAssetRequest) (*resources.Asset, error)
// Creates assets. Operation statuses are returned.
MutateAssets(context.Context, *MutateAssetsRequest) (*MutateAssetsResponse, error)
}
// UnimplementedAssetServiceServer can be embedded to have forward compatible implementations.
type UnimplementedAssetServiceServer struct {
}
func (*UnimplementedAssetServiceServer) GetAsset(context.Context, *GetAssetRequest) (*resources.Asset, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetAsset not implemented")
}
func (*UnimplementedAssetServiceServer) MutateAssets(context.Context, *MutateAssetsRequest) (*MutateAssetsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method MutateAssets not implemented")
}
func RegisterAssetServiceServer(s *grpc.Server, srv AssetServiceServer) {
s.RegisterService(&_AssetService_serviceDesc, srv)
}
func _AssetService_GetAsset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) |
func _AssetService_MutateAssets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MutateAssetsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AssetServiceServer).MutateAssets(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.ads.googleads.v3.services.AssetService/MutateAssets",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AssetServiceServer).MutateAssets(ctx, req.(*MutateAssetsRequest))
}
return interceptor(ctx, in, info, handler)
}
var _AssetService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.ads.googleads.v3.services.AssetService",
HandlerType: (*AssetServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetAsset",
Handler: _AssetService_GetAsset_Handler,
},
{
MethodName: "MutateAssets",
Handler: _AssetService_MutateAssets_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/ads/googleads/v3/services/asset_service.proto",
}
| {
in := new(GetAssetRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AssetServiceServer).GetAsset(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.ads.googleads.v3.services.AssetService/GetAsset",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AssetServiceServer).GetAsset(ctx, req.(*GetAssetRequest))
}
return interceptor(ctx, in, info, handler)
} |
package.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dftfe(CMakePackage):
"""Real-space DFT calculations using Finite Elements""" |
homepage = "https://sites.google.com/umich.edu/dftfe/"
url = "https://github.com/dftfeDevelopers/dftfe/archive/0.5.1.tar.gz"
maintainers = ['rmsds']
version('0.6.0', sha256='66b633a3aae2f557f241ee45b2faa41aa179e4a0bdf39c4ae2e679a2970845a1')
version('0.5.2', sha256='9dc4fa9f16b00be6fb1890d8af4a1cd3e4a2f06a2539df999671a09f3d26ec64')
version('0.5.1', sha256='e47272d3783cf675dcd8bc31da07765695164110bfebbbab29f5815531f148c1')
version('0.5.0', sha256='9aadb9a9b059f98f88c7756b417423dc67d02f1cdd2ed7472ba395fcfafc6dcb')
variant('scalapack', default=True, description='Use ScaLAPACK, strongly recommended for problem sizes >5000 electrons')
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
depends_on('mpi')
depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi')
depends_on('[email protected]:', when='@0.5.1:')
depends_on('scalapack', when='+scalapack')
depends_on('alglib')
depends_on('libxc')
depends_on('spglib')
depends_on('libxml2')
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_C_COMPILER={0}'.format(spec['mpi'].mpicc),
'-DCMAKE_CXX_COMPILER={0}'.format(spec['mpi'].mpicxx),
'-DALGLIB_DIR={0}'.format(spec['alglib'].prefix),
'-DLIBXC_DIR={0}'.format(spec['libxc'].prefix),
'-DXML_LIB_DIR={0}/lib'.format(spec['libxml2'].prefix),
'-DXML_INCLUDE_DIR={0}/include'.format(spec['libxml2'].prefix),
'-DSPGLIB_DIR={0}'.format(spec['spglib'].prefix),
]
if spec.satisfies('^intel-mkl'):
args.append('-DWITH_INTEL_MKL=ON')
else:
args.append('-DWITH_INTEL_MKL=OFF')
if spec.satisfies('%gcc'):
args.append('-DCMAKE_C_FLAGS=-fpermissive')
args.append('-DCMAKE_CXX_FLAGS=-fpermissive')
return args
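    # For releases up to 0.5.2, stage the built binary and shared library manually.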
@when('@:0.5.2')
def install(self, spec, prefix):
mkdirp(prefix.bin)
mkdirp(prefix.lib64)
install(join_path(self.build_directory, 'main'),
join_path(prefix.bin, 'dftfe'))
install(join_path(self.build_directory, 'libdftfe.so'),
prefix.lib64) | |
templates.go | package utils
import (
"bytes"
"strings"
"text/template"
)
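// Default set of helper functions made available to every template.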
func | () template.FuncMap {
return template.FuncMap{
"replace": strings.ReplaceAll,
}
}
// Evaluate a template
func ApplyTemplate(name, templateText string, ctx interface{}, functions template.FuncMap) (string, error) {
if functions == nil {
functions = GetDefaultFunctionMap()
}
tmpl, err := template.New(name).Funcs(functions).Parse(templateText)
if err != nil {
return "", err
}
b := &bytes.Buffer{}
err = tmpl.Execute(b, ctx) // Evaluate the template
if err != nil {
return "", err
}
return b.String(), nil
}
| GetDefaultFunctionMap |
mod.rs | mod blake2b;
mod blake2s;
#[macro_export]
macro_rules! impl_blake2_short_msg_kat_with_key {
($module:ident, $T:ident, $test_cases:ident, $init:ident) => {
#[allow(non_snake_case)]
mod $T { | use super::$test_cases;
use utils::Hash;
use $module::$T;
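            // Convert a hexadecimal string into bytes by recombining the high and low nibbles parsed below.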
fn hex_to_bytes(s: &str) -> Vec<u8> {
                // Upper 4 bits of each byte
let s1: Vec<u8> = s
.chars()
.by_ref()
.enumerate()
.filter(|(i, _)| i % 2 == 0)
.map(|(_, c)| (c.to_digit(16).unwrap() as u8) << 4)
.collect();
                // Lower 4 bits of each byte
let s2: Vec<u8> = s
.chars()
.by_ref()
.enumerate()
.filter(|(i, _)| i % 2 == 1)
.map(|(_, c)| c.to_digit(16).unwrap() as u8)
.collect();
if s1.len() != s2.len() {
unreachable!();
}
let bytes = {
let mut bytes: Vec<u8> = Vec::new();
for i in 0..s1.len() {
bytes.push((s1[i] & 0b1111_0000) | (s2[i] & 0b0000_1111));
}
bytes
};
bytes
}
#[test]
fn short_msg_kat() {
for (m, p, e) in $test_cases.iter() {
let mut hasher = $T::$init(p.0, p.1, p.2, p.3);
assert_eq!(hasher.hash_to_lowerhex(&hex_to_bytes(m)), *e);
}
}
}
};
} | |
races.js | var $races,
$pager;
var $form;
var $search,
$results;
var cache = null,
cacheSize = -1,
cacheReady = false;
var currentPage = -1;
function gotPage(err, data) {
if (err) { return; }
var races = data.docs,
race,
html;
for (var n = 0, len = races.length; n < len; n++) {
race = races[n];
html = '<a href="/races/'+race._id+'" class="list-group-item">';
html += '<h4 class="list-group-item-heading">'+race.name +'</h4>';
html += '<p class="list-group-item-text">'+race.description+'</p>';
html += '</a>';
$races.append(html);
}
currentPage = parseInt(data.page);
html = '';
for (var n = 1; n <= data.pages; n++) {
        var cls = (n === currentPage ? ' class="active"' : '');
        html += '<li'+cls+'><a href="#" data-page="'+n+'">'+n+'</a></li>';
}
$pager.html(html);
}
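// Fetch the given page of races from the API and render the list and pager (no-op if the page is already shown).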
function getPage(page) {
if (page == currentPage) { return; }
$races.empty();
$pager.empty();
Request.get('/api/v1/users/{user}/races')
.body({page: (page || 1), s: 'created=desc'})
.call(gotPage);
}
function navigate(evt) {
getPage($(this).attr('data-page'));
evt.preventDefault();
}
function submitForm(evt) {
evt.preventDefault();
Request.post('/api/v1/users/{user}/races')
.body(Util.serialize(this))
.call(function(err, data) {
if (err) {
Util.tErrorize($form, err.errors);
return;
}
getCache();
getPage();
Util.tErrorize($form);
$form.trigger('reset');
});
}
function getCache() {
Request.get('/api/v1/users/{user}/races/autocomplete')
.call(function(err, data) {
if (err) {
return;
}
cache = data;
cacheSize = data.length;
cacheReady = true;
});
}
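// Escape regex metacharacters so user input can be embedded safely in a RegExp.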
function | (str) {
return str.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g, '\\$&');
}
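// Filter the cached race names against the current input and highlight the matching fragment.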
function searchInput() {
if (!cacheReady) { return; }
var value = escapeInput(this.value);
if (value.length === 0) {
$results.empty();
return;
}
  var html = '', expr, name, result;
for (var i = cacheSize - 1; i >= 0; i--) {
result = cache[i];
expr = new RegExp('(' + value + ')', 'i');
if (expr.test(result.name)) {
name = result.name.replace(expr, function(match, p1) {
return '<strong>'+p1+'</strong>';
});
html += '<a href="/races/'+result._id+'" class="list-group-item">'+name+'</a>';
}
}
$results.html(html);
}
function searchFocus() {
$results.show();
}
function searchBlur() {
setTimeout(function() {
$results.hide();
}, 200);
}
function appLoad() {
$pager = $('#pager');
$races = $('#races');
$form = $('#form').submit(submitForm);
$search = $('#search')
.on('input', searchInput)
.on('focus', searchFocus)
.on('blur', searchBlur);
$results = $('#results').hide();
$('body').on('click', 'a[data-page]', navigate);
getCache();
getPage();
}
window.onload = function() {
App.load(appLoad);
};
| escapeInput |
TFLite_detection_video.py | ######## Webcam Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 10/2/19
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a
# video. It draws boxes and scores around the objects of interest in each frame
# from the video.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
#
# I added my own method of drawing boxes and labels using OpenCV.
# Import packages
import os
import argparse
import cv2
import numpy as np
import sys
import importlib.util
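# Brighten an image by adding `value` to the V channel in HSV space, clamping at 255.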
def increase_brightness(img, value=30):
|
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
required=True)
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
default='detect.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.5)
parser.add_argument('--video', help='Name of the video file',
default='test.mp4')
parser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',
action='store_true')
parser.add_argument('--subsample', type=int, default=1, help='Subsample the input image')
parser.add_argument('--offset', type=int, default=0, help='Offset into file')
args = parser.parse_args()
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
VIDEO_NAME = args.video
min_conf_threshold = float(args.threshold)
use_TPU = args.edgetpu
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
if use_TPU:
from tflite_runtime.interpreter import load_delegate
else:
from tensorflow.lite.python.interpreter import Interpreter
if use_TPU:
from tensorflow.lite.python.interpreter import load_delegate
# If using Edge TPU, assign filename for Edge TPU model
if use_TPU:
# If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'
if (GRAPH_NAME == 'detect.tflite'):
GRAPH_NAME = 'edgetpu.tflite'
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to video file
VIDEO_PATH = os.path.join(CWD_PATH,VIDEO_NAME)
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
del(labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Open video file
video = cv2.VideoCapture(VIDEO_PATH)
imW = video.get(cv2.CAP_PROP_FRAME_WIDTH)
imH = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
out = cv2.VideoWriter('output.mp4', -1, 20.0, (int(imW),int(imH)))
fidx = 0
while(video.isOpened()):
# Acquire frame and resize to expected shape [1xHxWx3]
ret, frame = video.read()
if not ret:
print('Reached the end of the video!')
break
print(fidx)
fidx += 1
if fidx < args.offset:
continue
if args.subsample > 1:
imH, imW, _ = frame.shape
frame = cv2.resize(frame, (imW // args.subsample, imH // args.subsample))
# frame = increase_brightness(frame, value=70)
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
# Retrieve detection results
boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(int(num)):
# for i in range(len(scores)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 4)
# Draw label
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# All the results have been drawn on the frame, so it's time to display it.
out.write(frame)
cv2.imshow('Object detector', frame)
# Press 'q' to quit
if cv2.waitKey(1) == ord('q'):
break
# Clean up
out.release()
video.release()
cv2.destroyAllWindows()
| hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
lim = 255 - value
v[v > lim] = 255
v[v <= lim] += value
final_hsv = cv2.merge((h, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img |
fresnel_blend.rs | use super::{BxDF, same_hemisphere, BxDFType, abs_cos_theta, cosine_sample_hemisphere};
use crate::core::pbrt::{Float, Spectrum, consts::{PI, INV_PI}};
use crate::core::geometry::{Vector3f, Point2f};
use crate::core::microfacet::MicrofacetDistribution;
use crate::core::rng::ONE_MINUS_EPSILON;
use crate::core::reflection::reflect;
use std::fmt;
pub struct FresnelBlend {
rd: Spectrum,
rs: Spectrum,
distribution: Box<dyn MicrofacetDistribution>
}
impl FresnelBlend {
pub fn new(rd: Spectrum, rs: Spectrum, distribution: Box<dyn MicrofacetDistribution>) -> FresnelBlend {
FresnelBlend{ rd, rs, distribution }
}
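    /// Schlick approximation of the Fresnel reflectance: Rs + (1 - Rs) * (1 - cos_theta)^5.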
pub fn schlick_fresnel(&self, cos_theta: Float) -> Spectrum {
self.rs + pow5(1.0 - cos_theta) * (Spectrum::new(1.0) - self.rs)
}
}
impl BxDF for FresnelBlend {
fn get_type(&self) -> u8 {
BxDFType::BSDF_REFLECTION | BxDFType::BSDF_GLOSSY
}
fn f(
&self,
wo: &Vector3f,
wi: &Vector3f
) -> Spectrum {
let diffuse = (28.0 / (23.0 * PI)) * self.rd * (Spectrum::new(1.0) - self.rs) *
(1.0 - pow5(1.0 - 0.5 * abs_cos_theta(wi))) *
(1.0 - pow5(1.0 - 0.5 * abs_cos_theta(wo)));
let mut wh = *wi + *wo;
        if wh.x == 0.0 && wh.y == 0.0 && wh.z == 0.0 {
return Spectrum::new(0.0);
}
wh = wh.normalize();
let specular = self.distribution.d(&wh) /
(4.0 * wi.dot(&wh).abs() * abs_cos_theta(wi).max(abs_cos_theta(wo))) *
self.schlick_fresnel(wi.dot(&wh));
diffuse + specular
}
fn sample_f(
&self,
wo: &Vector3f,
wi: &mut Vector3f,
sample: &Point2f,
pdf: &mut Float,
sampled_type: &mut u8
) -> Spectrum {
        // Copy the sample point so its first dimension can be remapped when selecting a lobe.
        let mut u = *sample;
if u[0] < 0.5 {
u[0] = (2.0 * u[0]).min(ONE_MINUS_EPSILON);
// Cosine-sample the hemisphere, flipping the direction if necessary
*wi = cosine_sample_hemisphere(&u);
if wo.z < 0.0 {
wi.z *= -1.0;
}
} else {
u[0] = (2.0 * (u[0] - 0.5)).min(ONE_MINUS_EPSILON);
// Sample microfacet orientation $\wh$ and reflected direction $\wi$
            let wh = self.distribution.sample_wh(wo, &u);
*wi = reflect(wo, &wh);
if !same_hemisphere(wo, wi) {
return Spectrum::new(0.0);
}
}
*pdf = self.pdf(wo, wi);
self.f(wo, wi)
}
fn pdf(&self, wo: &Vector3f, wi: &Vector3f) -> Float |
}
impl fmt::Display for FresnelBlend {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "[ FresnelBlend rd: {} rs: {} distribution: {} ]",
self.rd, self.rs, self.distribution)
}
}
#[inline]
fn pow5(v: Float) -> Float {
v * v * v * v * v
} | {
if !same_hemisphere(wo, wi) {
return 0.0;
}
let wh = (*wo + *wi).normalize();
let pdf_wh = self.distribution.pdf(wo, &wh);
0.5 * (abs_cos_theta(wi) * INV_PI + pdf_wh / (4.0 * wo.dot(&wh)))
} |
SignalWifiStatusbar2Bar26X24Px.js | "use strict";
function _typeof(obj) { "@babel/helpers - typeof"; if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof(obj); }
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = require("react");
var React = _interopRequireWildcard(_react);
var _styledComponents = require("styled-components");
var _styledComponents2 = _interopRequireDefault(_styledComponents);
var _styledSystem = require("styled-system");
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
function _getRequireWildcardCache() { if (typeof WeakMap !== "function") return null; var cache = new WeakMap(); _getRequireWildcardCache = function _getRequireWildcardCache() { return cache; }; return cache; }
function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } if (obj === null || _typeof(obj) !== "object" && typeof obj !== "function") { return { "default": obj }; } var cache = _getRequireWildcardCache(); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj["default"] = obj; if (cache) { cache.set(obj, newObj); } return newObj; }
function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }
var Svg = (0, _styledComponents2["default"])("svg")({
flex: "none"
}, _styledSystem.space, _styledSystem.color);
var SvgSignalWifiStatusbar2Bar26X24Px = React.forwardRef(function (props, ref) { | fill: "currentcolor",
ref: ref
}), React.createElement("path", {
fillOpacity: 0.3,
d: "M13.01 21.99L25.58 6.32C25.1 5.96 20.26 2 13 2S.9 5.96.42 6.32l12.57 15.66.01.02.01-.01z"
}), React.createElement("path", {
d: "M13.01 21.99l7.54-9.4C20.26 12.38 17.36 10 13 10c-4.36 0-7.26 2.38-7.55 2.59l7.54 9.4h.02z"
}), React.createElement("path", {
d: "M0 0h26v24H0z",
fill: "none"
}));
});
SvgSignalWifiStatusbar2Bar26X24Px.displayName = "SvgSignalWifiStatusbar2Bar26X24Px";
SvgSignalWifiStatusbar2Bar26X24Px.defaultProps = {
size: 24,
color: "inherit"
};
exports["default"] = SvgSignalWifiStatusbar2Bar26X24Px; | return React.createElement(Svg, _extends({}, props, {
viewBox: "0 0 24 24",
height: props.size,
width: props.size, |
aws_unenforced_https_elasticsearch_domain_endpoint.go | package checks
import (
"fmt"
"github.com/hemanthgk10/tfsec/pkg/app/tfsec/scanner"
"github.com/zclconf/go-cty/cty"
"github.com/hemanthgk10/tfsec/pkg/app/tfsec/parser"
)
// AWSUnenforcedHTTPSElasticsearchDomainEndpoint See
// https://github.com/tfsec/tfsec#included-checks for check info
const AWSUnenforcedHTTPSElasticsearchDomainEndpoint scanner.RuleID = "AWS033"
const AWSUnenforcedHTTPSElasticsearchDomainEndpointDescription scanner.RuleDescription = "Elasticsearch doesn't enforce HTTPS traffic."
func init() {
scanner.RegisterCheck(scanner.Check{
Code: AWSUnenforcedHTTPSElasticsearchDomainEndpoint,
Description: AWSUnenforcedHTTPSElasticsearchDomainEndpointDescription,
Provider: scanner.AWSProvider,
RequiredTypes: []string{"resource"},
RequiredLabels: []string{"aws_elasticsearch_domain"},
CheckFunc: func(check *scanner.Check, block *parser.Block, context *scanner.Context) []scanner.Result {
endpointBlock := block.GetBlock("domain_endpoint_options")
if endpointBlock == nil {
return []scanner.Result{
check.NewResult(
fmt.Sprintf("Resource '%s' defines an Elasticsearch domain with plaintext traffic (missing domain_endpoint_options block).", block.Name()),
block.Range(),
scanner.SeverityError,
),
}
}
enforceHTTPSAttr := endpointBlock.GetAttribute("enforce_https")
if enforceHTTPSAttr == nil {
return []scanner.Result{
check.NewResult(
fmt.Sprintf("Resource '%s' defines an Elasticsearch domain with plaintext traffic (missing enforce_https attribute).", block.Name()),
endpointBlock.Range(),
scanner.SeverityError,
),
}
}
isTrueBool := enforceHTTPSAttr.Type() == cty.Bool && enforceHTTPSAttr.Value().True()
isTrueString := enforceHTTPSAttr.Type() == cty.String &&
enforceHTTPSAttr.Value().Equals(cty.StringVal("true")).True()
enforcedHTTPS := isTrueBool || isTrueString | endpointBlock.Range(),
scanner.SeverityError,
),
}
}
return nil
},
})
} | if !enforcedHTTPS {
return []scanner.Result{
check.NewResult(
fmt.Sprintf("Resource '%s' defines an Elasticsearch domain with plaintext traffic (enabled attribute set to false).", block.Name()), |
nginx.go | package nginx
import (
"bufio"
"fmt"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/MadDogTechnology/telegraf"
"github.com/MadDogTechnology/telegraf/internal"
"github.com/MadDogTechnology/telegraf/internal/tls"
"github.com/MadDogTechnology/telegraf/plugins/inputs"
)
type Nginx struct {
Urls []string
ResponseTimeout internal.Duration
tls.ClientConfig
// HTTP client
client *http.Client
}
var sampleConfig = `
# An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/server_status"]
## Optional TLS Config
tls_ca = "/etc/telegraf/ca.pem"
tls_cert = "/etc/telegraf/cert.cer"
tls_key = "/etc/telegraf/key.key"
## Use TLS but skip chain & host verification
insecure_skip_verify = false
# HTTP response timeout (default: 5s)
response_timeout = "5s"
`
func (n *Nginx) SampleConfig() string {
return sampleConfig
}
func (n *Nginx) Description() string {
return "Read Nginx's basic status information (ngx_http_stub_status_module)"
}
func (n *Nginx) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
// Create an HTTP client that is re-used for each
// collection interval
if n.client == nil {
client, err := n.createHttpClient()
if err != nil {
return err
}
n.client = client
}
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
}
wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
}(addr)
}
wg.Wait()
return nil
}
func (n *Nginx) createHttpClient() (*http.Client, error) {
tlsCfg, err := n.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
if n.ResponseTimeout.Duration < time.Second {
n.ResponseTimeout.Duration = time.Second * 5
}
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
},
Timeout: n.ResponseTimeout.Duration,
}
return client, nil
}
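// gatherUrl scrapes a single stub_status endpoint and parses the connection and request counters into fields.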
func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
resp, err := n.client.Get(addr.String())
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
}
r := bufio.NewReader(resp.Body)
// Active connections
_, err = r.ReadString(':')
if err != nil {
return err
}
line, err := r.ReadString('\n')
if err != nil {
return err
}
active, err := strconv.ParseUint(strings.TrimSpace(line), 10, 64)
if err != nil {
return err
}
// Server accepts handled requests
_, err = r.ReadString('\n')
if err != nil {
return err
}
line, err = r.ReadString('\n')
if err != nil {
return err
}
data := strings.Fields(line)
accepts, err := strconv.ParseUint(data[0], 10, 64)
if err != nil {
return err
}
handled, err := strconv.ParseUint(data[1], 10, 64)
if err != nil {
return err
}
requests, err := strconv.ParseUint(data[2], 10, 64)
if err != nil {
return err
}
// Reading/Writing/Waiting
line, err = r.ReadString('\n')
if err != nil {
return err
}
data = strings.Fields(line)
reading, err := strconv.ParseUint(data[1], 10, 64)
if err != nil {
return err
}
writing, err := strconv.ParseUint(data[3], 10, 64)
if err != nil {
return err
}
waiting, err := strconv.ParseUint(data[5], 10, 64)
if err != nil {
return err
}
tags := getTags(addr)
fields := map[string]interface{}{
"active": active,
"accepts": accepts,
"handled": handled,
"requests": requests,
"reading": reading,
"writing": writing,
"waiting": waiting,
}
acc.AddFields("nginx", fields, tags)
return nil
}
// Get tag(s) for the nginx plugin
func getTags(addr *url.URL) map[string]string {
h := addr.Host
host, port, err := net.SplitHostPort(h)
if err != nil {
host = addr.Host
if addr.Scheme == "http" {
port = "80"
} else if addr.Scheme == "https" {
port = "443"
} else {
port = ""
}
}
return map[string]string{"server": host, "port": port}
}
func | () {
inputs.Add("nginx", func() telegraf.Input {
return &Nginx{}
})
}
| init |
ctl.go | package handler
import (
"encoding/base64"
"encoding/json"
"github.com/miky4u2/RAagent/agent/common"
"github.com/miky4u2/RAagent/agent/config"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"time"
)
// Ctl HTTP handler function
//
func Ctl(w http.ResponseWriter, req *http.Request) | {
// Check if IP is allowed, abort if not. (Must be server IP)
if !common.IsIPAllowed(req, config.Settings.ServerIP) {
http.Error(w, http.StatusText(403), http.StatusForbidden)
return
}
// POST method only
if req.Method != "POST" {
http.Error(w, http.StatusText(403), http.StatusForbidden)
return
}
// Prepare response header
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
// Instantiate a ctlReq and ctlRes struct to be populated
ctlReq := struct {
Type string `json:"type"`
}{}
ctlRes := struct {
Status string `json:"status"`
ErrorMsgs []string `json:"errorMsgs"`
Output string `json:"output"`
}{}
// Populate the ctlReq struct with received json request
json.NewDecoder(req.Body).Decode(&ctlReq)
	// If the control Type is invalid, abort now and respond with an error
if ctlReq.Type != `status` && ctlReq.Type != `restart` && ctlReq.Type != `stop` {
log.Println(`Received incorrect Type`)
ctlRes.Status = "failed"
ctlRes.ErrorMsgs = append(ctlRes.ErrorMsgs, `Invalid Type`)
res, err := json.Marshal(ctlRes)
if err != nil {
log.Println(err)
}
w.Write(res)
return
}
// If we get here, the received control Type is valid
ctlRes.Status = `done`
var output string
// If control Type is status
if ctlReq.Type == `status` {
// Get list of available modules
modulePath := filepath.Join(config.AppBasePath, `modules`)
files, _ := ioutil.ReadDir(modulePath)
output = `Version ` + config.Version + ` alive and kicking !!`
		output += "\nAvailable modules: "
for _, f := range files {
output += `[` + f.Name() + `]`
}
output += "\n"
}
// If control Type is restart
if ctlReq.Type == `restart` {
output = `Version ` + config.Version + ` restarting now...` + "\n"
emptyFile, _ := os.Create(filepath.Join(config.AppBasePath, `bin`, `agent_restart`))
emptyFile.Close()
log.Println(`Received Ctl Restart, RAagent will now attempt to restart...`)
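		// Exit after a short delay so the JSON response below can still be written back to the server.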
go func() { time.Sleep(2 * time.Second); os.Exit(0) }()
}
// If control Type is stop
if ctlReq.Type == `stop` {
output = `Version ` + config.Version + ` shutting down now...` + "\n"
		log.Println(`Received Ctl Stop, RAagent will now shut down...`)
go func() { time.Sleep(2 * time.Second); os.Exit(0) }()
}
// Encode output and send response
ctlRes.Output = base64.StdEncoding.EncodeToString([]byte(output))
res, err := json.Marshal(ctlRes)
if err != nil {
log.Println(err)
}
w.Write(res)
return
} |
|
lexical_environment.rs | //! # Lexical Environment
//!
//! <https://tc39.es/ecma262/#sec-lexical-environment-operations>
//!
//! The following operations are used to operate upon lexical environments
//! This is the entrypoint to lexical environments.
use super::global_environment_record::GlobalEnvironmentRecord;
use crate::{
environment::environment_record_trait::EnvironmentRecordTrait, object::JsObject, BoaProfiler,
Context, JsResult, JsValue,
};
use gc::Gc;
use std::{collections::VecDeque, error, fmt};
/// Environments are wrapped in a Box and then in a GC wrapper
pub type Environment = Gc<Box<dyn EnvironmentRecordTrait>>;
/// Give each environment an easy way to declare its own type
/// This helps with comparisons
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum EnvironmentType {
Declarative,
Function,
Global,
Object,
}
/// The scope of a given variable
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum VariableScope {
/// The variable declaration is scoped to the current block (`let` and `const`)
Block,
/// The variable declaration is scoped to the current function (`var`)
Function,
}
#[derive(Debug, Clone)]
pub struct LexicalEnvironment {
environment_stack: VecDeque<Environment>,
}
/// An error that occurred during lexing or compiling of the source input.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct EnvironmentError {
details: String,
}
impl EnvironmentError {
pub fn new(msg: &str) -> Self {
Self {
details: msg.to_string(),
}
}
}
impl fmt::Display for EnvironmentError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.details)
}
}
impl error::Error for EnvironmentError {}
impl LexicalEnvironment {
pub fn new(global: JsObject) -> Self {
let _timer = BoaProfiler::global().start_event("LexicalEnvironment::new", "env");
let global_env = GlobalEnvironmentRecord::new(global.clone(), global);
let mut lexical_env = Self {
environment_stack: VecDeque::new(),
};
// lexical_env.push(global_env);
lexical_env.environment_stack.push_back(global_env.into());
lexical_env
}
}
impl Context {
pub(crate) fn push_environment<T: Into<Environment>>(&mut self, env: T) {
self.realm
.environment
.environment_stack
.push_back(env.into());
}
pub(crate) fn pop_environment(&mut self) -> Option<Environment> {
self.realm.environment.environment_stack.pop_back()
}
pub(crate) fn get_this_binding(&mut self) -> JsResult<JsValue> |
pub(crate) fn get_global_this_binding(&mut self) -> JsResult<JsValue> {
let global = self.realm.global_env.clone();
global.get_this_binding(self)
}
pub(crate) fn create_mutable_binding(
&mut self,
name: &str,
deletion: bool,
scope: VariableScope,
) -> JsResult<()> {
self.get_current_environment()
.recursive_create_mutable_binding(name, deletion, scope, self)
}
pub(crate) fn create_immutable_binding(
&mut self,
name: &str,
deletion: bool,
scope: VariableScope,
) -> JsResult<()> {
self.get_current_environment()
.recursive_create_immutable_binding(name, deletion, scope, self)
}
pub(crate) fn set_mutable_binding(
&mut self,
name: &str,
value: JsValue,
strict: bool,
) -> JsResult<()> {
self.get_current_environment()
.recursive_set_mutable_binding(name, value, strict, self)
}
pub(crate) fn initialize_binding(&mut self, name: &str, value: JsValue) -> JsResult<()> {
self.get_current_environment()
.recursive_initialize_binding(name, value, self)
}
    /// When needing to clone an environment (to link it with another environment),
    /// cloning is preferred. The GC will remove the env once nothing links to it anymore.
pub(crate) fn get_current_environment(&mut self) -> Environment {
self.realm
.environment
.environment_stack
.back_mut()
.expect("Could not get mutable reference to back object")
.clone()
}
pub(crate) fn has_binding(&mut self, name: &str) -> JsResult<bool> {
self.get_current_environment()
.recursive_has_binding(name, self)
}
pub(crate) fn get_binding_value(&mut self, name: &str) -> JsResult<JsValue> {
self.get_current_environment()
.recursive_get_binding_value(name, self)
}
}
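// Illustrative sketch of how these helpers chain together (only methods from
// this impl are used; obtaining a `Context` and a `JsValue` is elided because
// that happens elsewhere in the crate):
//
//     // let mut ctx: Context = ...;
//     // ctx.create_mutable_binding("x", false, VariableScope::Function)?;
//     // ctx.initialize_binding("x", some_js_value)?;
//     // assert!(ctx.has_binding("x")?);
//     // let v = ctx.get_binding_value("x")?;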
#[cfg(test)]
mod tests {
use crate::exec;
#[test]
fn let_is_blockscoped() {
let scenario = r#"
{
let bar = "bar";
}
try{
bar;
} catch (err) {
err.message
}
"#;
assert_eq!(&exec(scenario), "\"bar is not defined\"");
}
#[test]
fn const_is_blockscoped() {
let scenario = r#"
{
const bar = "bar";
}
try{
bar;
} catch (err) {
err.message
}
"#;
assert_eq!(&exec(scenario), "\"bar is not defined\"");
}
#[test]
fn var_not_blockscoped() {
let scenario = r#"
{
var bar = "bar";
}
bar == "bar";
"#;
assert_eq!(&exec(scenario), "true");
}
#[test]
fn functions_use_declaration_scope() {
let scenario = r#"
function foo() {
try {
bar;
} catch (err) {
return err.message;
}
}
{
let bar = "bar";
foo();
}
"#;
assert_eq!(&exec(scenario), "\"bar is not defined\"");
}
#[test]
fn set_outer_var_in_blockscope() {
let scenario = r#"
var bar;
{
bar = "foo";
}
bar == "foo";
"#;
assert_eq!(&exec(scenario), "true");
}
#[test]
fn set_outer_let_in_blockscope() {
let scenario = r#"
let bar;
{
bar = "foo";
}
bar == "foo";
"#;
assert_eq!(&exec(scenario), "true");
}
}
| {
self.get_current_environment()
.recursive_get_this_binding(self)
} |
main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"strconv"
)
func makeResponse(status, message string) map[string]interface{} {
return map[string]interface{}{
"status": status,
"message": message,
}
}
// Init reports the driver status and capabilities; no initialization is required.
func Init() interface{} {
resp := makeResponse("Success", "No Initialization required")
resp["capabilities"] = map[string]interface{}{
"attach": false,
"selinuxRelabel": false,
}
return resp
}
func | (path string) bool {
cmd := exec.Command("mountpoint", path)
err := cmd.Run()
if err != nil {
return false
}
return true
}
// Mount runs goofys for the bucket if it is not already mounted under /mnt/goofys,
// then exposes the requested subPath by replacing the target with a symlink to it.
func Mount(target string, options map[string]string) interface{} {
bucket := options["bucket"]
subPath := options["subPath"]
dirMode, ok := options["dirMode"]
if !ok {
dirMode = "0755"
}
fileMode, ok := options["fileMode"]
if !ok {
fileMode = "0644"
}
args := []string{
"-o", "allow_other",
"--dir-mode", dirMode,
"--file-mode", fileMode,
}
if endpoint, ok := options["endpoint"]; ok {
args = append(args, "--endpoint", endpoint)
}
if region, ok := options["region"]; ok {
args = append(args, "--region", region)
}
if uid, ok := options["uid"]; ok {
args = append(args, "--uid", uid)
}
if gid, ok := options["gid"]; ok {
args = append(args, "--gid", gid)
}
debug_s3, ok := options["debug_s3"]
if ok && debug_s3 == "true" {
args = append(args, "--debug_s3")
}
use_content_type, ok := options["use_content_type"]
if ok && use_content_type == "true" {
args = append(args, "--use-content-type")
}
mountPath := path.Join("/mnt/goofys", bucket)
args = append(args, bucket, mountPath)
if !isMountPoint(mountPath) {
exec.Command("umount", mountPath).Run()
exec.Command("rm", "-rf", mountPath).Run()
os.MkdirAll(mountPath, 0755)
mountCmd := exec.Command("goofys", args...)
mountCmd.Env = os.Environ()
if accessKey, ok := options["access-key"]; ok {
mountCmd.Env = append(mountCmd.Env, "AWS_ACCESS_KEY_ID=" + accessKey)
}
if secretKey, ok := options["secret-key"]; ok {
mountCmd.Env = append(mountCmd.Env, "AWS_SECRET_ACCESS_KEY=" + secretKey)
}
var stderr bytes.Buffer
mountCmd.Stderr = &stderr
err := mountCmd.Run()
if err != nil {
errMsg := err.Error() + ": " + stderr.String()
if debug_s3 == "true" {
			errMsg += "; /var/log/syslog follows"
grepCmd := exec.Command("sh", "-c", "grep goofys /var/log/syslog | tail")
var stdout bytes.Buffer
grepCmd.Stdout = &stdout
grepCmd.Run()
errMsg += stdout.String()
}
return makeResponse("Failure", errMsg)
}
}
srcPath := path.Join(mountPath, subPath)
// Create subpath if it does not exist
intDirMode, _ := strconv.ParseUint(dirMode, 8, 32)
os.MkdirAll(srcPath, os.FileMode(intDirMode))
	// Remove the target and replace it with a symlink to the source path.
err := os.Remove(target)
if err != nil {
return makeResponse("Failure", err.Error())
}
	err = os.Symlink(srcPath, target)
	if err != nil {
		return makeResponse("Failure", err.Error())
	}
	return makeResponse("Success", "Mount completed!")
}
func Unmount(target string) interface{} {
err := os.Remove(target)
if err != nil {
return makeResponse("Failure", err.Error())
}
return makeResponse("Success", "Successfully unmounted")
}
func printJSON(data interface{}) {
jsonBytes, err := json.Marshal(data)
if err != nil {
panic(err)
}
fmt.Printf("%s", string(jsonBytes))
}
func main() {
switch action := os.Args[1]; action {
case "init":
printJSON(Init())
case "mount":
optsString := os.Args[3]
opts := make(map[string]string)
json.Unmarshal([]byte(optsString), &opts)
printJSON(Mount(os.Args[2], opts))
case "unmount":
printJSON(Unmount(os.Args[2]))
default:
printJSON(makeResponse("Not supported", fmt.Sprintf("Operation %s is not supported", action)))
}
}
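// Example invocation (illustrative only; the binary name, target path and
// option values below are assumptions, not part of this file):
//
//	./goofys-flexvolume mount /var/lib/kubelet/pods/<uid>/volumes/<vol> \
//	  '{"bucket":"my-bucket","subPath":"data","dirMode":"0755","fileMode":"0644"}'
//
// On success the driver prints {"message":"Mount completed!","status":"Success"}
// (json.Marshal orders map keys alphabetically).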
| isMountPoint |
transaction.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
import bitcoin
from bitcoin import *
from util import print_error, profiler
import time
import sys
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import struct
import StringIO
import random
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, bytes): # Initialize with string of bytes
if self.input is None:
self.input = bytes
else:
self.input += bytes
def read_string(self):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length)
def write_string(self, string):
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
size = ord(self.input[self.read_cursor])
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(chr(size))
elif size < 2**16:
self.write('\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write('\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write('\xff')
self._write_num('<Q', size)
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
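# A minimal usage sketch (illustrative, not part of the original file) of the
# compact-size encoding documented in read_string()/write_compact_size() above:
#
#     s = BCDataStream()
#     s.write_string('hello')        # writes chr(5) + 'hello'
#     s.read_cursor = 0
#     assert s.read_string() == 'hello'
#     s.clear()
#     s.write_compact_size(300)      # 253 <= 300 < 2**16 -> '\xfd' + 2-byte length
#     s.read_cursor = 0
#     assert s.read_compact_size() == 300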
#
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
#
import types, string, exceptions
class EnumException(exceptions.Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if type(x) == types.TupleType:
x, i = x
if type(x) != types.StringType:
raise EnumException, "enum name is not a string: " + x
if type(i) != types.IntType:
raise EnumException, "enum value is not an integer: " + i
if x in uniqueNames:
raise EnumException, "enum name is not unique: " + x
if i in uniqueValues:
raise EnumException, "enum value is not unique for " + x
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if not self.lookup.has_key(attr):
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
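# Quick illustrative example of how Enumeration is used by the opcodes table
# defined below (values follow from the ("OP_PUSHDATA1", 76) entry):
#
#     opcodes.OP_PUSHDATA1    # -> 76
#     opcodes.whatis(76)      # -> 'OP_PUSHDATA1'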
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_SINGLEBYTE_END", 0xF0),
("OP_DOUBLEBYTE_BEGIN", 0xF000),
"OP_PUBKEY", "OP_PUBKEYHASH",
("OP_INVALIDOPCODE", 0xFFFF),
])
def script_GetOp(bytes):
i = 0
while i < len(bytes):
vch = None
opcode = ord(bytes[i])
i += 1
if opcode >= opcodes.OP_SINGLEBYTE_END:
opcode <<= 8
opcode |= ord(bytes[i])
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = ord(bytes[i])
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', bytes, i)
i += 4
vch = bytes[i:i+nSize]
i += nSize
yield (opcode, vch, i)
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
        return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
s = []
for sig in x_sig:
if sig[-2:] == '01':
s.append(sig[:-2])
else:
assert sig == NO_SIGNATURE
s.append(None)
return s
def is_extended_pubkey(x_pubkey):
return x_pubkey[0:2] in ['fe', 'ff']
def x_to_xpub(x_pubkey):
if x_pubkey[0:2] == 'ff':
from account import BIP32_Account
xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
return xpub
def parse_xpub(x_pubkey):
if x_pubkey[0:2] in ['02','03','04']:
pubkey = x_pubkey
elif x_pubkey[0:2] == 'ff':
from account import BIP32_Account
xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
pubkey = BIP32_Account.derive_pubkey_from_xpub(xpub, s[0], s[1])
elif x_pubkey[0:2] == 'fe':
from account import OldAccount
mpk, s = OldAccount.parse_xpubkey(x_pubkey)
pubkey = OldAccount.get_pubkey_from_mpk(mpk.decode('hex'), s[0], s[1])
elif x_pubkey[0:2] == 'fd':
addrtype = ord(x_pubkey[2:4].decode('hex'))
hash160 = x_pubkey[4:].decode('hex')
pubkey = None
address = hash_160_to_bc_address(hash160, addrtype)
else:
raise BaseException("Cannnot parse pubkey")
if pubkey:
address = public_key_to_bc_address(pubkey.decode('hex'))
return pubkey, address
def parse_scriptSig(d, bytes):
try:
decoded = [ x for x in script_GetOp(bytes) ]
except Exception:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bytes.encode('hex'))
return
# payto_pubkey
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
d['address'] = "(pubkey)"
d['signatures'] = [sig]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
x_pubkey = decoded[1][1].encode('hex')
try:
signatures = parse_sig([sig])
pubkey, address = parse_xpub(x_pubkey)
except:
import traceback
traceback.print_exc(file=sys.stdout)
print_error("cannot find address in input script", bytes.encode('hex'))
return
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bytes.encode('hex'))
return
x_sig = [x[1].encode('hex') for x in decoded[1:-1]]
dec2 = [ x for x in script_GetOp(decoded[-1][1]) ]
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
print_error("cannot find address in input script", bytes.encode('hex'))
return
x_pubkeys = map(lambda x: x[1].encode('hex'), dec2[1:-2])
pubkeys = [parse_xpub(x)[0] for x in x_pubkeys] # xpub, addr = parse_xpub()
redeemScript = Transaction.multisig_script(pubkeys, m)
# write result in d
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = hash_160_to_bc_address(hash_160(redeemScript.decode('hex')), 5)
def get_address_from_output_script(bytes):
decoded = [ x for x in script_GetOp(bytes) ]
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match): | return TYPE_PUBKEY, decoded[0][1].encode('hex')
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_160_to_bc_address(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_160_to_bc_address(decoded[1][1],5)
return TYPE_SCRIPT, bytes
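# Illustrative example (script bytes elided): a standard pay-to-pubkey-hash
# output script is OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG,
# i.e. hex '76a914' + <40 hex chars> + '88ac' (the same template pay_script()
# builds below), and this function maps it to (TYPE_ADDRESS, <base58 address>):
#
#     tx_type, addr = get_address_from_output_script(script_bytes)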
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
d['scriptSig'] = scriptSig.encode('hex')
sequence = vds.read_uint32()
if prevout_hash == '00'*32:
d['is_coinbase'] = True
else:
d['is_coinbase'] = False
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
if scriptSig:
parse_scriptSig(d, scriptSig)
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = scriptPubKey.encode('hex')
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(raw.decode('hex'))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = list(parse_input(vds) for i in xrange(n_vin))
n_vout = vds.read_compact_size()
d['outputs'] = list(parse_output(vds,i) for i in xrange(n_vout))
d['lockTime'] = vds.read_uint32()
d['refheight'] = vds.read_int32()
return d
def push_script(x):
return op_push(len(x)/2) + x
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw):
if raw is None:
self.raw = None
elif type(raw) in [str, unicode]:
self.raw = raw.strip() if raw else None
elif type(raw) is dict:
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
def update_signatures(self, raw):
"""Add new signatures to a transaction"""
d = deserialize(raw)
for i, txin in enumerate(self.inputs()):
sigs1 = txin.get('signatures')
sigs2 = d['inputs'][i].get('signatures')
for sig in sigs2:
if sig in sigs1:
continue
for_sig = Hash(self.tx_for_sig(i).decode('hex'))
# der to string
order = ecdsa.ecdsa.generator_secp256k1.order()
r, s = ecdsa.util.sigdecode_der(sig.decode('hex'), order)
sig_string = ecdsa.util.sigencode_string(r, s, order)
pubkeys = txin.get('pubkeys')
compressed = True
for recid in range(4):
public_key = MyVerifyingKey.from_signature(sig_string, recid, for_sig, curve = SECP256k1)
pubkey = point_to_ser(public_key.pubkey.point, compressed).encode('hex')
if pubkey in pubkeys:
public_key.verify_digest(sig_string, for_sig, sigdecode = ecdsa.util.sigdecode_string)
j = pubkeys.index(pubkey)
print_error("adding sig", i, j, pubkey, sig)
self._inputs[i]['signatures'][j] = sig
self._inputs[i]['x_pubkeys'][j] = pubkey
break
# redo raw
self.raw = self.serialize()
def deserialize(self):
if self.raw is None:
self.raw = self.serialize()
if self._inputs is not None:
return
d = deserialize(self.raw)
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
self.locktime = d['lockTime']
self.refheight = d['refheight']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0, refheight=0):
self = klass(None)
self._inputs = inputs
self._outputs = outputs
self.locktime = locktime
self.refheight = refheight
return self
@classmethod
def sweep(klass, privkeys, network, to_address, fee):
inputs = []
keypairs = {}
for privkey in privkeys:
pubkey = public_key_from_private_key(privkey)
address = address_from_private_key(privkey)
u = network.synchronous_get(('blockchain.address.listunspent',[address]))
pay_script = klass.pay_script(TYPE_ADDRESS, address)
for item in u:
item['scriptPubKey'] = pay_script
item['redeemPubkey'] = pubkey
item['address'] = address
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs += u
keypairs[pubkey] = privkey
if not inputs:
return
total = sum(i.get('value') for i in inputs) - fee
outputs = [(TYPE_ADDRESS, to_address, total)]
self = klass.from_io(inputs, outputs)
self.sign(keypairs)
return self
@classmethod
def multisig_script(klass, public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)/2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
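    # Illustrative sketch (pubkey hex strings elided): for a 2-of-3 multisig,
    # multisig_script([pk1, pk2, pk3], 2) returns
    #     '52' + push(pk1) + push(pk2) + push(pk3) + '53' + 'ae'
    # i.e. OP_2 <pk1> <pk2> <pk3> OP_3 OP_CHECKMULTISIG; parse_scriptSig() above
    # hashes this redeemScript to derive the p2sh address.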
@classmethod
def pay_script(self, output_type, addr):
if output_type == TYPE_SCRIPT:
return addr.encode('hex')
elif output_type == TYPE_ADDRESS:
addrtype, hash_160 = bc_address_to_hash_160(addr)
if addrtype == 0:
script = '76a9' # op_dup, op_hash_160
script += push_script(hash_160.encode('hex'))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == 5:
script = 'a9' # op_hash_160
script += push_script(hash_160.encode('hex'))
script += '87' # op_equal
else:
raise
else:
raise
return script
@classmethod
def input_script(self, txin, i, for_sig):
# for_sig:
# -1 : do not sign, estimate length
# i>=0 : serialized tx for signing input i
# None : add all known signatures
p2sh = txin.get('redeemScript') is not None
num_sig = txin['num_sig'] if p2sh else 1
address = txin['address']
x_signatures = txin['signatures']
signatures = filter(None, x_signatures)
is_complete = len(signatures) == num_sig
if for_sig in [-1, None]:
# if we have enough signatures, we use the actual pubkeys
# use extended pubkeys (with bip32 derivation)
if for_sig == -1:
# we assume that signature will be 0x48 bytes long
pubkeys = txin['pubkeys']
sig_list = [ "00" * 0x48 ] * num_sig
elif is_complete:
pubkeys = txin['pubkeys']
sig_list = ((sig + '01') for sig in signatures)
else:
pubkeys = txin['x_pubkeys']
sig_list = ((sig + '01') if sig else NO_SIGNATURE for sig in x_signatures)
script = ''.join(push_script(x) for x in sig_list)
if not p2sh:
x_pubkey = pubkeys[0]
if x_pubkey is None:
addrtype, h160 = bc_address_to_hash_160(txin['address'])
x_pubkey = 'fd' + (chr(addrtype) + h160).encode('hex')
script += push_script(x_pubkey)
else:
script = '00' + script # put op_0 in front of script
redeem_script = self.multisig_script(pubkeys, num_sig)
script += push_script(redeem_script)
elif for_sig==i:
script = txin['redeemScript'] if p2sh else self.pay_script(TYPE_ADDRESS, address)
else:
script = ''
return script
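        # Summary of the for_sig modes handled above (restating the comment at
        # the top of this method):
        #     input_script(txin, i, -1)   -> dummy fixed-size sigs, for size estimation
        #     input_script(txin, i, None) -> all known signatures (final scriptSig)
        #     input_script(txin, i, i)    -> redeemScript / pay script, used while signing input i
        #     input_script(txin, i, j)    -> '' for j != i (other inputs are blanked)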
@classmethod
def serialize_input(self, txin, i, for_sig):
# Prev hash and index
s = txin['prevout_hash'].decode('hex')[::-1].encode('hex')
s += int_to_hex(txin['prevout_n'], 4)
# Script length, script, sequence
script = self.input_script(txin, i, for_sig)
s += var_int(len(script) / 2)
s += script
s += "ffffffff"
return s
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[0], o[1])))
def serialize(self, for_sig=None):
inputs = self.inputs()
        outputs = self.outputs()
        refheight = self.refheight
s = int_to_hex(2,4) # version
s += var_int( len(inputs) ) # number of inputs
for i, txin in enumerate(inputs):
s += self.serialize_input(txin, i, for_sig)
s += var_int( len(outputs) ) # number of outputs
for output in outputs:
output_type, addr, amount = output
s += int_to_hex( amount, 8) # amount
script = self.pay_script(output_type, addr)
s += var_int( len(script)/2 ) # script length
s += script # script
s += int_to_hex(0,4) # lock time
s += int_to_hex(refheight,4) # refheight
if for_sig is not None and for_sig != -1:
s += int_to_hex(1, 4) # hash type
return s
def tx_for_sig(self,i):
return self.serialize(for_sig = i)
def hash(self):
return Hash(self.raw.decode('hex') )[::-1].encode('hex')
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
return sum(x['value'] for x in self.inputs())
def output_value(self):
return sum( val for tp,addr,val in self.outputs())
def get_fee(self):
return self.input_value() - self.output_value()
def is_final(self):
return not any([x.get('sequence') < 0xffffffff - 1 for x in self.inputs()])
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return len(self.serialize(-1)) / 2 # ASCII hex string
@classmethod
def estimated_input_size(self, txin):
        '''Return an estimate of the serialized input size in bytes.'''
return len(self.serialize_input(txin, -1, -1)) / 2
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin.get('is_coinbase'):
continue
signatures = filter(None, txin.get('signatures',[]))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
def inputs_without_script(self):
out = set()
for i, txin in enumerate(self.inputs()):
if txin.get('scriptSig') == '':
out.add(i)
return out
def inputs_to_sign(self):
out = set()
for txin in self.inputs():
num_sig = txin.get('num_sig')
if num_sig is None:
continue
x_signatures = txin['signatures']
signatures = filter(None, x_signatures)
if len(signatures) == num_sig:
# input is complete
continue
for k, x_pubkey in enumerate(txin['x_pubkeys']):
if x_signatures[k] is not None:
# this pubkey already signed
continue
out.add(x_pubkey)
return out
def sign(self, keypairs):
for i, txin in enumerate(self.inputs()):
num = txin['num_sig']
for x_pubkey in txin['x_pubkeys']:
signatures = filter(None, txin['signatures'])
if len(signatures) == num:
# txin is complete
break
if x_pubkey in keypairs.keys():
print_error("adding signature for", x_pubkey)
# add pubkey to txin
txin = self._inputs[i]
x_pubkeys = txin['x_pubkeys']
ii = x_pubkeys.index(x_pubkey)
sec = keypairs[x_pubkey]
pubkey = public_key_from_private_key(sec)
txin['x_pubkeys'][ii] = pubkey
txin['pubkeys'][ii] = pubkey
self._inputs[i] = txin
# add signature
for_sig = Hash(self.tx_for_sig(i).decode('hex'))
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = bitcoin.MySigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic( for_sig, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der )
assert public_key.verify_digest( sig, for_sig, sigdecode = ecdsa.util.sigdecode_der)
txin['signatures'][ii] = sig.encode('hex')
self._inputs[i] = txin
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, x, v in self.outputs():
if type == TYPE_ADDRESS:
addr = x
elif type == TYPE_PUBKEY:
addr = public_key_to_bc_address(x.decode('hex'))
else:
addr = 'SCRIPT ' + x.encode('hex')
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete()
}
return out
def requires_fee(self, wallet):
# see https://en.bitcoin.it/wiki/Transaction_fees
#
        # size must be smaller than 10000 bytes for a free tx (see the check below)
size = len(self.serialize(-1))/2
if size >= 10000:
return True
# all outputs must be 0.01 BTC or larger for free tx
for addr, value in self.get_outputs():
if value < 1000000:
return True
# priority must be large enough for free tx
threshold = 57600000
weight = 0
for txin in self.inputs():
age = wallet.get_confirmations(txin["prevout_hash"])[0]
weight += txin["value"] * age
priority = weight / size
print_error(priority, threshold)
return priority < threshold
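    # Worked example (illustrative): one 1 BTC input (100,000,000 satoshis) with
    # 144 confirmations spent in a 250-byte transaction gives
    #     priority = 100000000 * 144 / 250 = 57600000,
    # exactly the threshold, so priority < threshold is False and no fee is
    # required, provided the size and output-value checks above also passed.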
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
try:
txt.decode('hex')
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"] | |
basic.rs | use std::fmt::Error as FormatterError;
use std::fmt::{Debug, Display, Formatter};
use super::{
Client, EmptyExtraTokenFields, ErrorResponseType, RequestTokenError, StandardErrorResponse,
StandardTokenResponse, TokenType,
};
///
/// Basic OAuth2 client specialization, suitable for most applications.
///
pub type BasicClient = Client<BasicErrorResponse, BasicTokenResponse, BasicTokenType>;
///
/// Basic OAuth2 authorization token types.
///
#[derive(Clone, Debug, PartialEq)]
pub enum BasicTokenType {
///
/// Bearer token
/// ([OAuth 2.0 Bearer Tokens - RFC 6750](https://tools.ietf.org/html/rfc6750)).
///
Bearer,
///
/// MAC ([OAuth 2.0 Message Authentication Code (MAC)
/// Tokens](https://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-05)).
///
Mac,
///
/// An extension not defined by RFC 6749.
///
Extension(String),
}
impl BasicTokenType {
fn from_str(s: &str) -> Self {
match s {
"bearer" => BasicTokenType::Bearer,
"mac" => BasicTokenType::Mac,
ext => BasicTokenType::Extension(ext.to_string()),
}
}
}
impl AsRef<str> for BasicTokenType {
fn as_ref(&self) -> &str {
match *self {
BasicTokenType::Bearer => "bearer",
BasicTokenType::Mac => "mac",
BasicTokenType::Extension(ref ext) => ext.as_str(),
}
}
}
impl<'de> serde::Deserialize<'de> for BasicTokenType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let variant_str = String::deserialize(deserializer)?;
Ok(Self::from_str(&variant_str))
}
}
impl serde::ser::Serialize for BasicTokenType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
serializer.serialize_str(self.as_ref())
}
}
impl TokenType for BasicTokenType {}
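// A small illustrative sketch of the (de)serialization behaviour above; it
// assumes `serde_json` as the serializer, which is not a dependency declared
// in this file:
//
//     let t: BasicTokenType = serde_json::from_str("\"bearer\"").unwrap();
//     assert_eq!(t, BasicTokenType::Bearer);
//     assert_eq!(serde_json::to_string(&t).unwrap(), "\"bearer\"");
//     // Unknown token types fall back to the Extension variant:
//     let e: BasicTokenType = serde_json::from_str("\"dpop\"").unwrap();
//     assert_eq!(e, BasicTokenType::Extension("dpop".to_string()));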
///
/// Basic OAuth2 token response.
///
pub type BasicTokenResponse = StandardTokenResponse<EmptyExtraTokenFields, BasicTokenType>;
///
/// Basic access token error types.
///
/// These error types are defined in
/// [Section 5.2 of RFC 6749](https://tools.ietf.org/html/rfc6749#section-5.2).
///
#[derive(Clone, PartialEq)]
pub enum BasicErrorResponseType {
///
/// Client authentication failed (e.g., unknown client, no client authentication included,
/// or unsupported authentication method).
///
InvalidClient,
///
/// The provided authorization grant (e.g., authorization code, resource owner credentials)
/// or refresh token is invalid, expired, revoked, does not match the redirection URI used
/// in the authorization request, or was issued to another client.
///
InvalidGrant,
///
/// The request is missing a required parameter, includes an unsupported parameter value
/// (other than grant type), repeats a parameter, includes multiple credentials, utilizes
/// more than one mechanism for authenticating the client, or is otherwise malformed.
///
InvalidRequest,
///
/// The requested scope is invalid, unknown, malformed, or exceeds the scope granted by the
/// resource owner.
///
InvalidScope,
///
/// The authenticated client is not authorized to use this authorization grant type.
///
UnauthorizedClient,
///
/// The authorization grant type is not supported by the authorization server.
///
UnsupportedGrantType,
///
/// An extension not defined by RFC 6749.
///
Extension(String),
}
impl BasicErrorResponseType {
pub(crate) fn from_str(s: &str) -> Self {
match s {
"invalid_client" => BasicErrorResponseType::InvalidClient,
"invalid_grant" => BasicErrorResponseType::InvalidGrant,
"invalid_request" => BasicErrorResponseType::InvalidRequest,
"invalid_scope" => BasicErrorResponseType::InvalidScope,
"unauthorized_client" => BasicErrorResponseType::UnauthorizedClient,
"unsupported_grant_type" => BasicErrorResponseType::UnsupportedGrantType,
ext => BasicErrorResponseType::Extension(ext.to_string()),
}
}
}
impl AsRef<str> for BasicErrorResponseType {
fn as_ref(&self) -> &str {
match *self {
BasicErrorResponseType::InvalidClient => "invalid_client",
BasicErrorResponseType::InvalidGrant => "invalid_grant",
BasicErrorResponseType::InvalidRequest => "invalid_request",
BasicErrorResponseType::InvalidScope => "invalid_scope",
BasicErrorResponseType::UnauthorizedClient => "unauthorized_client",
BasicErrorResponseType::UnsupportedGrantType => "unsupported_grant_type",
BasicErrorResponseType::Extension(ref ext) => ext.as_str(),
}
}
}
impl<'de> serde::Deserialize<'de> for BasicErrorResponseType {
fn | <D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let variant_str = String::deserialize(deserializer)?;
Ok(Self::from_str(&variant_str))
}
}
impl serde::ser::Serialize for BasicErrorResponseType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
serializer.serialize_str(self.as_ref())
}
}
impl ErrorResponseType for BasicErrorResponseType {}
impl Debug for BasicErrorResponseType {
fn fmt(&self, f: &mut Formatter) -> Result<(), FormatterError> {
Display::fmt(self, f)
}
}
impl Display for BasicErrorResponseType {
fn fmt(&self, f: &mut Formatter) -> Result<(), FormatterError> {
write!(f, "{}", self.as_ref())
}
}
///
/// Error response specialization for basic OAuth2 implementation.
///
pub type BasicErrorResponse = StandardErrorResponse<BasicErrorResponseType>;
///
/// Token error specialization for basic OAuth2 implementation.
///
pub type BasicRequestTokenError<RE> = RequestTokenError<RE, BasicErrorResponse>;
| deserialize |
logging-gen.go | // Package logging provides access to the Stackdriver Logging API.
//
// See https://cloud.google.com/logging/docs/
//
// Usage example:
//
// import "google.golang.org/api/logging/v2"
// ...
// loggingService, err := logging.New(oauthHttpClient)
package logging // import "google.golang.org/api/logging/v2"
import (
"bytes"
"encoding/json"
"errors"
"fmt"
context "golang.org/x/net/context"
ctxhttp "golang.org/x/net/context/ctxhttp"
gensupport "google.golang.org/api/gensupport"
googleapi "google.golang.org/api/googleapi"
"io"
"net/http"
"net/url"
"strconv"
"strings"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = ctxhttp.Do
const apiId = "logging:v2"
const apiName = "logging"
const apiVersion = "v2"
const basePath = "https://logging.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// View and manage your data across Google Cloud Platform services
CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
// View your data across Google Cloud Platform services
CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
// Administrate log data for your projects
LoggingAdminScope = "https://www.googleapis.com/auth/logging.admin"
// View log data for your projects
LoggingReadScope = "https://www.googleapis.com/auth/logging.read"
// Submit log data for your projects
LoggingWriteScope = "https://www.googleapis.com/auth/logging.write"
)
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.BillingAccounts = NewBillingAccountsService(s)
s.Entries = NewEntriesService(s)
s.Folders = NewFoldersService(s)
s.MonitoredResourceDescriptors = NewMonitoredResourceDescriptorsService(s)
s.Organizations = NewOrganizationsService(s)
s.Projects = NewProjectsService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
BillingAccounts *BillingAccountsService
Entries *EntriesService
Folders *FoldersService
MonitoredResourceDescriptors *MonitoredResourceDescriptorsService
Organizations *OrganizationsService
Projects *ProjectsService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewBillingAccountsService(s *Service) *BillingAccountsService {
rs := &BillingAccountsService{s: s}
rs.Logs = NewBillingAccountsLogsService(s)
rs.Sinks = NewBillingAccountsSinksService(s)
return rs
}
type BillingAccountsService struct {
s *Service
Logs *BillingAccountsLogsService
Sinks *BillingAccountsSinksService
}
func NewBillingAccountsLogsService(s *Service) *BillingAccountsLogsService {
rs := &BillingAccountsLogsService{s: s}
return rs
}
type BillingAccountsLogsService struct {
s *Service
}
func NewBillingAccountsSinksService(s *Service) *BillingAccountsSinksService {
rs := &BillingAccountsSinksService{s: s}
return rs
}
type BillingAccountsSinksService struct {
s *Service
}
func NewEntriesService(s *Service) *EntriesService {
rs := &EntriesService{s: s}
return rs
}
type EntriesService struct {
s *Service
}
func NewFoldersService(s *Service) *FoldersService {
rs := &FoldersService{s: s}
rs.Logs = NewFoldersLogsService(s)
rs.Sinks = NewFoldersSinksService(s)
return rs
}
type FoldersService struct {
s *Service
Logs *FoldersLogsService
Sinks *FoldersSinksService
}
func NewFoldersLogsService(s *Service) *FoldersLogsService {
rs := &FoldersLogsService{s: s}
return rs
}
type FoldersLogsService struct {
s *Service
}
func NewFoldersSinksService(s *Service) *FoldersSinksService {
rs := &FoldersSinksService{s: s}
return rs
}
type FoldersSinksService struct {
s *Service
}
func NewMonitoredResourceDescriptorsService(s *Service) *MonitoredResourceDescriptorsService {
rs := &MonitoredResourceDescriptorsService{s: s}
return rs
}
type MonitoredResourceDescriptorsService struct {
s *Service
}
func NewOrganizationsService(s *Service) *OrganizationsService {
rs := &OrganizationsService{s: s}
rs.Logs = NewOrganizationsLogsService(s)
rs.Sinks = NewOrganizationsSinksService(s)
return rs
}
type OrganizationsService struct {
s *Service
Logs *OrganizationsLogsService
Sinks *OrganizationsSinksService
}
func NewOrganizationsLogsService(s *Service) *OrganizationsLogsService {
rs := &OrganizationsLogsService{s: s}
return rs
}
type OrganizationsLogsService struct {
s *Service
}
func | (s *Service) *OrganizationsSinksService {
rs := &OrganizationsSinksService{s: s}
return rs
}
type OrganizationsSinksService struct {
s *Service
}
func NewProjectsService(s *Service) *ProjectsService {
rs := &ProjectsService{s: s}
rs.Logs = NewProjectsLogsService(s)
rs.Metrics = NewProjectsMetricsService(s)
rs.Sinks = NewProjectsSinksService(s)
return rs
}
type ProjectsService struct {
s *Service
Logs *ProjectsLogsService
Metrics *ProjectsMetricsService
Sinks *ProjectsSinksService
}
func NewProjectsLogsService(s *Service) *ProjectsLogsService {
rs := &ProjectsLogsService{s: s}
return rs
}
type ProjectsLogsService struct {
s *Service
}
func NewProjectsMetricsService(s *Service) *ProjectsMetricsService {
rs := &ProjectsMetricsService{s: s}
return rs
}
type ProjectsMetricsService struct {
s *Service
}
func NewProjectsSinksService(s *Service) *ProjectsSinksService {
rs := &ProjectsSinksService{s: s}
return rs
}
type ProjectsSinksService struct {
s *Service
}
// Empty: A generic empty message that you can re-use to avoid defining
// duplicated empty messages in your APIs. A typical example is to use
// it as the request or the response type of an API method. For
// instance:
// service Foo {
// rpc Bar(google.protobuf.Empty) returns
// (google.protobuf.Empty);
// }
// The JSON representation for Empty is empty JSON object {}.
type Empty struct {
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
}
// HttpRequest: A common proto for logging HTTP requests. Only contains
// semantics defined by the HTTP specification. Product-specific logging
// information MUST be defined in a separate message.
type HttpRequest struct {
// CacheFillBytes: The number of HTTP response bytes inserted into
// cache. Set only when a cache fill was attempted.
CacheFillBytes int64 `json:"cacheFillBytes,omitempty,string"`
// CacheHit: Whether or not an entity was served from cache (with or
// without validation).
CacheHit bool `json:"cacheHit,omitempty"`
// CacheLookup: Whether or not a cache lookup was attempted.
CacheLookup bool `json:"cacheLookup,omitempty"`
// CacheValidatedWithOriginServer: Whether or not the response was
// validated with the origin server before being served from cache. This
// field is only meaningful if cache_hit is True.
CacheValidatedWithOriginServer bool `json:"cacheValidatedWithOriginServer,omitempty"`
// Latency: The request processing latency on the server, from the time
// the request was received until the response was sent.
Latency string `json:"latency,omitempty"`
// Referer: The referer URL of the request, as defined in HTTP/1.1
// Header Field Definitions
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
Referer string `json:"referer,omitempty"`
// RemoteIp: The IP address (IPv4 or IPv6) of the client that issued the
// HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329".
RemoteIp string `json:"remoteIp,omitempty"`
// RequestMethod: The request method. Examples: "GET", "HEAD", "PUT",
// "POST".
RequestMethod string `json:"requestMethod,omitempty"`
// RequestSize: The size of the HTTP request message in bytes, including
// the request headers and the request body.
RequestSize int64 `json:"requestSize,omitempty,string"`
// RequestUrl: The scheme (http, https), the host name, the path and the
// query portion of the URL that was requested. Example:
// "http://example.com/some/info?color=red".
RequestUrl string `json:"requestUrl,omitempty"`
// ResponseSize: The size of the HTTP response message sent back to the
// client, in bytes, including the response headers and the response
// body.
ResponseSize int64 `json:"responseSize,omitempty,string"`
// ServerIp: The IP address (IPv4 or IPv6) of the origin server that the
// request was sent to.
ServerIp string `json:"serverIp,omitempty"`
// Status: The response code indicating the status of response.
// Examples: 200, 404.
Status int64 `json:"status,omitempty"`
// UserAgent: The user agent sent by the client. Example: "Mozilla/4.0
// (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)".
UserAgent string `json:"userAgent,omitempty"`
// ForceSendFields is a list of field names (e.g. "CacheFillBytes") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CacheFillBytes") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *HttpRequest) MarshalJSON() ([]byte, error) {
type noMethod HttpRequest
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LabelDescriptor: A description of a label.
type LabelDescriptor struct {
// Description: A human-readable description for the label.
Description string `json:"description,omitempty"`
// Key: The label key.
Key string `json:"key,omitempty"`
// ValueType: The type of data that can be assigned to the label.
//
// Possible values:
// "STRING" - A variable-length string. This is the default.
// "BOOL" - Boolean; true or false.
// "INT64" - A 64-bit signed integer.
ValueType string `json:"valueType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LabelDescriptor) MarshalJSON() ([]byte, error) {
type noMethod LabelDescriptor
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListLogEntriesRequest: The parameters to ListLogEntries.
type ListLogEntriesRequest struct {
// Filter: Optional. A filter that chooses which log entries to return.
// See Advanced Logs Filters. Only log entries that match the filter are
// returned. An empty filter matches all log entries in the resources
// listed in resource_names. Referencing a parent resource that is not
// listed in resource_names will cause the filter to return no results.
// The maximum length of the filter is 20000 characters.
Filter string `json:"filter,omitempty"`
// OrderBy: Optional. How the results should be sorted. Presently, the
// only permitted values are "timestamp asc" (default) and "timestamp
// desc". The first option returns entries in order of increasing values
// of LogEntry.timestamp (oldest first), and the second option returns
// entries in order of decreasing timestamps (newest first). Entries
// with equal timestamps are returned in order of their insert_id
// values.
OrderBy string `json:"orderBy,omitempty"`
// PageSize: Optional. The maximum number of results to return from this
// request. Non-positive values are ignored. The presence of
// next_page_token in the response indicates that more results might be
// available.
PageSize int64 `json:"pageSize,omitempty"`
// PageToken: Optional. If present, then retrieve the next batch of
// results from the preceding call to this method. page_token must be
// the value of next_page_token from the previous response. The values
// of other method parameters should be identical to those in the
// previous call.
PageToken string `json:"pageToken,omitempty"`
// ProjectIds: Deprecated. Use resource_names instead. One or more
// project identifiers or project numbers from which to retrieve log
// entries. Example: "my-project-1A". If present, these project
// identifiers are converted to resource name format and added to the
// list of resources in resource_names.
ProjectIds []string `json:"projectIds,omitempty"`
// ResourceNames: Required. Names of one or more parent resources from
// which to retrieve log
// entries:
// "projects/[PROJECT_ID]"
// "organizations/[ORGANIZATION_ID]"
// "bi
// llingAccounts/[BILLING_ACCOUNT_ID]"
// "folders/[FOLDER_ID]"
// Projects listed in the project_ids field are added to this list.
ResourceNames []string `json:"resourceNames,omitempty"`
// ForceSendFields is a list of field names (e.g. "Filter") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Filter") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListLogEntriesRequest) MarshalJSON() ([]byte, error) {
type noMethod ListLogEntriesRequest
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
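// Illustrative sketch of building a ListLogEntriesRequest (field values are
// made up; only fields defined above are used):
//
//	req := &ListLogEntriesRequest{
//		ResourceNames: []string{"projects/my-project"},
//		Filter:        "severity>=ERROR",
//		OrderBy:       "timestamp desc",
//		PageSize:      50,
//	}
//	body, _ := req.MarshalJSON() // empty fields are omitted unless forced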
// ListLogEntriesResponse: Result returned from ListLogEntries.
type ListLogEntriesResponse struct {
// Entries: A list of log entries.
Entries []*LogEntry `json:"entries,omitempty"`
// NextPageToken: If there might be more results than those appearing in
// this response, then nextPageToken is included. To get the next set of
// results, call this method again using the value of nextPageToken as
// pageToken.If a value for next_page_token appears and the entries
// field is empty, it means that the search found no log entries so far
// but it did not have time to search all the possible log entries.
// Retry the method with this value for page_token to continue the
// search. Alternatively, consider speeding up the search by changing
// your filter to specify a single log name or resource type, or to
// narrow the time range of the search.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Entries") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Entries") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListLogEntriesResponse) MarshalJSON() ([]byte, error) {
type noMethod ListLogEntriesResponse
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListLogMetricsResponse: Result returned from ListLogMetrics.
type ListLogMetricsResponse struct {
// Metrics: A list of logs-based metrics.
Metrics []*LogMetric `json:"metrics,omitempty"`
// NextPageToken: If there might be more results than appear in this
// response, then nextPageToken is included. To get the next set of
// results, call this method again using the value of nextPageToken as
// pageToken.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Metrics") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Metrics") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListLogMetricsResponse) MarshalJSON() ([]byte, error) {
type noMethod ListLogMetricsResponse
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListLogsResponse: Result returned from ListLogs.
type ListLogsResponse struct {
// LogNames: A list of log names. For example,
// "projects/my-project/syslog" or
// "organizations/123/cloudresourcemanager.googleapis.com%2Factivity".
LogNames []string `json:"logNames,omitempty"`
// NextPageToken: If there might be more results than those appearing in
// this response, then nextPageToken is included. To get the next set of
// results, call this method again using the value of nextPageToken as
// pageToken.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "LogNames") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "LogNames") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListLogsResponse) MarshalJSON() ([]byte, error) {
type noMethod ListLogsResponse
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListMonitoredResourceDescriptorsResponse: Result returned from
// ListMonitoredResourceDescriptors.
type ListMonitoredResourceDescriptorsResponse struct {
// NextPageToken: If there might be more results than those appearing in
// this response, then nextPageToken is included. To get the next set of
// results, call this method again using the value of nextPageToken as
// pageToken.
NextPageToken string `json:"nextPageToken,omitempty"`
// ResourceDescriptors: A list of resource descriptors.
ResourceDescriptors []*MonitoredResourceDescriptor `json:"resourceDescriptors,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListMonitoredResourceDescriptorsResponse) MarshalJSON() ([]byte, error) {
type noMethod ListMonitoredResourceDescriptorsResponse
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListSinksResponse: Result returned from ListSinks.
type ListSinksResponse struct {
// NextPageToken: If there might be more results than appear in this
// response, then nextPageToken is included. To get the next set of
// results, call the same method again using the value of nextPageToken
// as pageToken.
NextPageToken string `json:"nextPageToken,omitempty"`
// Sinks: A list of sinks.
Sinks []*LogSink `json:"sinks,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListSinksResponse) MarshalJSON() ([]byte, error) {
type noMethod ListSinksResponse
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LogEntry: An individual entry in a log.
type LogEntry struct {
// HttpRequest: Optional. Information about the HTTP request associated
// with this log entry, if applicable.
HttpRequest *HttpRequest `json:"httpRequest,omitempty"`
// InsertId: Optional. A unique identifier for the log entry. If you
// provide a value, then Stackdriver Logging considers other log entries
// in the same project, with the same timestamp, and with the same
// insert_id to be duplicates which can be removed. If omitted in new
// log entries, then Stackdriver Logging will insert its own unique
// identifier. The insert_id is used to order log entries that have the
// same timestamp value.
InsertId string `json:"insertId,omitempty"`
// JsonPayload: The log entry payload, represented as a structure that
// is expressed as a JSON object.
JsonPayload googleapi.RawMessage `json:"jsonPayload,omitempty"`
// Labels: Optional. A set of user-defined (key, value) data that
// provides additional information about the log entry.
Labels map[string]string `json:"labels,omitempty"`
// LogName: Required. The resource name of the log to which this log
// entry belongs:
// "projects/[PROJECT_ID]/logs/[LOG_ID]"
// "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
// "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
// "folders/[FOLDER_ID]/logs/[LOG_ID]"
// [LOG_ID] must be URL-encoded within log_name. Example:
// "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity".
// [LOG_ID] must be less than 512 characters long and can only
// include the following characters: upper and lower case alphanumeric
// characters, forward-slash, underscore, hyphen, and period.For
// backward compatibility, if log_name begins with a forward-slash, such
// as /projects/..., then the log entry is ingested as usual but the
// forward-slash is removed. Listing the log entry will not show the
// leading slash and filtering for a log name with a leading slash will
// never return any results.
LogName string `json:"logName,omitempty"`
// Operation: Optional. Information about an operation associated with
// the log entry, if applicable.
Operation *LogEntryOperation `json:"operation,omitempty"`
// ProtoPayload: The log entry payload, represented as a protocol
// buffer. Some Google Cloud Platform services use this field for their
// log entry payloads.
ProtoPayload googleapi.RawMessage `json:"protoPayload,omitempty"`
// ReceiveTimestamp: Output only. The time the log entry was received by
// Stackdriver Logging.
ReceiveTimestamp string `json:"receiveTimestamp,omitempty"`
// Resource: Required. The monitored resource associated with this log
// entry. Example: a log entry that reports a database error would be
// associated with the monitored resource designating the particular
// database that reported the error.
Resource *MonitoredResource `json:"resource,omitempty"`
// Severity: Optional. The severity of the log entry. The default value
// is LogSeverity.DEFAULT.
//
// Possible values:
// "DEFAULT" - (0) The log entry has no assigned severity level.
// "DEBUG" - (100) Debug or trace information.
// "INFO" - (200) Routine information, such as ongoing status or
// performance.
// "NOTICE" - (300) Normal but significant events, such as start up,
// shut down, or a configuration change.
// "WARNING" - (400) Warning events might cause problems.
// "ERROR" - (500) Error events are likely to cause problems.
// "CRITICAL" - (600) Critical events cause more severe problems or
// outages.
// "ALERT" - (700) A person must take an action immediately.
// "EMERGENCY" - (800) One or more systems are unusable.
Severity string `json:"severity,omitempty"`
// SourceLocation: Optional. Source code location information associated
// with the log entry, if any.
SourceLocation *LogEntrySourceLocation `json:"sourceLocation,omitempty"`
// TextPayload: The log entry payload, represented as a Unicode string
// (UTF-8).
TextPayload string `json:"textPayload,omitempty"`
// Timestamp: Optional. The time the event described by the log entry
// occurred. If omitted in a new log entry, Stackdriver Logging will
// insert the time the log entry is received. Stackdriver Logging might
// reject log entries whose time stamps are more than a couple of hours
// in the future. Log entries with time stamps in the past are accepted.
Timestamp string `json:"timestamp,omitempty"`
// Trace: Optional. Resource name of the trace associated with the log
// entry, if any. If it contains a relative resource name, the name is
// assumed to be relative to //tracing.googleapis.com. Example:
// projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824
Trace string `json:"trace,omitempty"`
// ForceSendFields is a list of field names (e.g. "HttpRequest") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "HttpRequest") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LogEntry) MarshalJSON() ([]byte, error) {
type noMethod LogEntry
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LogEntryOperation: Additional information about a potentially
// long-running operation with which a log entry is associated.
type LogEntryOperation struct {
// First: Optional. Set this to True if this is the first log entry in
// the operation.
First bool `json:"first,omitempty"`
// Id: Optional. An arbitrary operation identifier. Log entries with the
// same identifier are assumed to be part of the same operation.
Id string `json:"id,omitempty"`
// Last: Optional. Set this to True if this is the last log entry in the
// operation.
Last bool `json:"last,omitempty"`
// Producer: Optional. An arbitrary producer identifier. The combination
// of id and producer must be globally unique. Examples for producer:
// "MyDivision.MyBigCompany.com", "github.com/MyProject/MyApplication".
Producer string `json:"producer,omitempty"`
// ForceSendFields is a list of field names (e.g. "First") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "First") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LogEntryOperation) MarshalJSON() ([]byte, error) {
type noMethod LogEntryOperation
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LogEntrySourceLocation: Additional information about the source code
// location that produced the log entry.
type LogEntrySourceLocation struct {
// File: Optional. Source file name. Depending on the runtime
// environment, this might be a simple name or a fully-qualified name.
File string `json:"file,omitempty"`
// Function: Optional. Human-readable name of the function or method
// being invoked, with optional context such as the class or package
// name. This information may be used in contexts such as the logs
// viewer, where a file and line number are less meaningful. The format
// can vary by language. For example: qual.if.ied.Class.method (Java),
// dir/package.func (Go), function (Python).
Function string `json:"function,omitempty"`
// Line: Optional. Line within the source file. 1-based; 0 indicates no
// line number available.
Line int64 `json:"line,omitempty,string"`
// ForceSendFields is a list of field names (e.g. "File") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "File") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LogEntrySourceLocation) MarshalJSON() ([]byte, error) {
type noMethod LogEntrySourceLocation
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LogLine: Application log line emitted while processing a request.
type LogLine struct {
// LogMessage: App-provided log message.
LogMessage string `json:"logMessage,omitempty"`
// Severity: Severity of this log entry.
//
// Possible values:
// "DEFAULT" - (0) The log entry has no assigned severity level.
// "DEBUG" - (100) Debug or trace information.
// "INFO" - (200) Routine information, such as ongoing status or
// performance.
// "NOTICE" - (300) Normal but significant events, such as start up,
// shut down, or a configuration change.
// "WARNING" - (400) Warning events might cause problems.
// "ERROR" - (500) Error events are likely to cause problems.
// "CRITICAL" - (600) Critical events cause more severe problems or
// outages.
// "ALERT" - (700) A person must take an action immediately.
// "EMERGENCY" - (800) One or more systems are unusable.
Severity string `json:"severity,omitempty"`
// SourceLocation: Where in the source code this log message was
// written.
SourceLocation *SourceLocation `json:"sourceLocation,omitempty"`
// Time: Approximate time when this log entry was made.
Time string `json:"time,omitempty"`
// ForceSendFields is a list of field names (e.g. "LogMessage") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "LogMessage") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LogLine) MarshalJSON() ([]byte, error) {
type noMethod LogLine
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LogMetric: Describes a logs-based metric. The value of the metric is
// the number of log entries that match a logs filter in a given time
// interval.
type LogMetric struct {
// Description: Optional. A description of this metric, which is used in
// documentation.
Description string `json:"description,omitempty"`
// Filter: Required. An advanced logs filter which is used to match log
// entries. Example:
// "resource.type=gae_app AND severity>=ERROR"
// The maximum length of the filter is 20000 characters.
Filter string `json:"filter,omitempty"`
// Name: Required. The client-assigned metric identifier. Examples:
// "error_count", "nginx/requests".Metric identifiers are limited to 100
// characters and can include only the following characters: A-Z, a-z,
// 0-9, and the special characters _-.,+!*',()%/. The forward-slash
// character (/) denotes a hierarchy of name pieces, and it cannot be
// the first character of the name.The metric identifier in this field
// must not be URL-encoded
// (https://en.wikipedia.org/wiki/Percent-encoding). However, when the
// metric identifier appears as the [METRIC_ID] part of a metric_name
// API parameter, then the metric identifier must be URL-encoded.
// Example: "projects/my-project/metrics/nginx%2Frequests".
Name string `json:"name,omitempty"`
// Version: Output only. The API version that created or updated this
// metric. The version also dictates the syntax of the filter
// expression. When a value for this field is missing, the default value
// of V2 should be assumed.
//
// Possible values:
// "V2" - Stackdriver Logging API v2.
// "V1" - Stackdriver Logging API v1.
Version string `json:"version,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LogMetric) MarshalJSON() ([]byte, error) {
type noMethod LogMetric
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LogSink: Describes a sink used to export log entries to one of the
// following destinations in any project: a Cloud Storage bucket, a
// BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls
// which log entries are exported. The sink must be created within a
// project, organization, billing account, or folder.
type LogSink struct {
// Destination: Required. The export destination:
// "storage.googleapis.com/[GCS_BUCKET]"
// "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]"
// "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]"
// The sink's writer_identity, set when the sink is created, must have
// permission to write to the destination or else the log entries are
// not exported. For more information, see Exporting Logs With Sinks.
Destination string `json:"destination,omitempty"`
// EndTime: Optional. The time at which this sink will stop exporting
// log entries. Log entries are exported only if their timestamp is
// earlier than the end time. If this field is not supplied, there is no
// end time. If both a start time and an end time are provided, then the
// end time must be later than the start time.
EndTime string `json:"endTime,omitempty"`
// Filter: Optional. An advanced logs filter. The only exported log
// entries are those that are in the resource owning the sink and that
// match the filter. The filter must use the log entry format specified
// by the output_version_format parameter. For example, in the v2
// format:
// logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR
//
Filter string `json:"filter,omitempty"`
// IncludeChildren: Optional. This field applies only to sinks owned by
// organizations and folders. If the field is false, the default, only
// the logs owned by the sink's parent resource are available for
// export. If the field is true, then logs from all the projects,
// folders, and billing accounts contained in the sink's parent resource
// are also available for export. Whether a particular log entry from
// the children is exported depends on the sink's filter expression. For
// example, if this field is true, then the filter
// resource.type=gce_instance would export all Compute Engine VM
// instance log entries from all projects in the sink's parent. To only
// export entries from certain child projects, filter on the project
// part of the log name:
// logName:("projects/test-project1/" OR "projects/test-project2/")
// AND
// resource.type=gce_instance
//
IncludeChildren bool `json:"includeChildren,omitempty"`
// Name: Required. The client-assigned sink identifier, unique within
// the project. Example: "my-syslog-errors-to-pubsub". Sink identifiers
// are limited to 100 characters and can include only the following
// characters: upper and lower-case alphanumeric characters,
// underscores, hyphens, and periods.
Name string `json:"name,omitempty"`
// OutputVersionFormat: Optional. The log entry format to use for this
// sink's exported log entries. The v2 format is used by default. The v1
// format is deprecated and should be used only as part of a migration
// effort to v2. See Migration to the v2 API.
//
// Possible values:
// "VERSION_FORMAT_UNSPECIFIED" - An unspecified format version that
// will default to V2.
// "V2" - LogEntry version 2 format.
// "V1" - LogEntry version 1 format.
OutputVersionFormat string `json:"outputVersionFormat,omitempty"`
// StartTime: Optional. The time at which this sink will begin exporting
// log entries. Log entries are exported only if their timestamp is not
// earlier than the start time. The default value of this field is the
// time the sink is created or updated.
StartTime string `json:"startTime,omitempty"`
// WriterIdentity: Output only. An IAM identity—a service account
// or group—under which Stackdriver Logging writes the exported
// log entries to the sink's destination. This field is set by
// sinks.create and sinks.update, based on the setting of
// unique_writer_identity in those methods.Until you grant this identity
// write-access to the destination, log entry exports from this sink
// will fail. For more information, see Granting access for a resource.
// Consult the destination service's documentation to determine the
// appropriate IAM roles to assign to the identity.
WriterIdentity string `json:"writerIdentity,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Destination") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Destination") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LogSink) MarshalJSON() ([]byte, error) {
type noMethod LogSink
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// MonitoredResource: An object representing a resource that can be used
// for monitoring, logging, billing, or other purposes. Examples include
// virtual machine instances, databases, and storage devices such as
// disks. The type field identifies a MonitoredResourceDescriptor object
// that describes the resource's schema. Information in the labels field
// identifies the actual resource and its attributes according to the
// schema. For example, a particular Compute Engine VM instance could be
// represented by the following object, because the
// MonitoredResourceDescriptor for "gce_instance" has labels
// "instance_id" and "zone":
// { "type": "gce_instance",
// "labels": { "instance_id": "12345678901234",
// "zone": "us-central1-a" }}
//
type MonitoredResource struct {
// Labels: Required. Values for all of the labels listed in the
// associated monitored resource descriptor. For example, Compute Engine
// VM instances use the labels "project_id", "instance_id", and "zone".
Labels map[string]string `json:"labels,omitempty"`
// Type: Required. The monitored resource type. This field must match
// the type field of a MonitoredResourceDescriptor object. For example,
// the type of a Compute Engine VM instance is gce_instance.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "Labels") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Labels") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MonitoredResource) MarshalJSON() ([]byte, error) {
type noMethod MonitoredResource
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// MonitoredResourceDescriptor: An object that describes the schema of a
// MonitoredResource object using a type name and a set of labels. For
// example, the monitored resource descriptor for Google Compute Engine
// VM instances has a type of "gce_instance" and specifies the use of
// the labels "instance_id" and "zone" to identify particular VM
// instances.Different APIs can support different monitored resource
// types. APIs generally provide a list method that returns the
// monitored resource descriptors used by the API.
type MonitoredResourceDescriptor struct {
// Description: Optional. A detailed description of the monitored
// resource type that might be used in documentation.
Description string `json:"description,omitempty"`
// DisplayName: Optional. A concise name for the monitored resource type
// that might be displayed in user interfaces. It should be a Title
// Cased Noun Phrase, without any article or other determiners. For
// example, "Google Cloud SQL Database".
DisplayName string `json:"displayName,omitempty"`
// Labels: Required. A set of labels used to describe instances of this
// monitored resource type. For example, an individual Google Cloud SQL
// database is identified by values for the labels "database_id" and
// "zone".
Labels []*LabelDescriptor `json:"labels,omitempty"`
// Name: Optional. The resource name of the monitored resource
// descriptor:
// "projects/{project_id}/monitoredResourceDescriptors/{type}" where
// {type} is the value of the type field in this object and {project_id}
// is a project ID that provides API-specific context for accessing the
// type. APIs that do not use project information can use the resource
// name format "monitoredResourceDescriptors/{type}".
Name string `json:"name,omitempty"`
// Type: Required. The monitored resource type. For example, the type
// "cloudsql_database" represents databases in Google Cloud SQL. The
// maximum length of this value is 256 characters.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) {
type noMethod MonitoredResourceDescriptor
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// RequestLog: Complete log information about a single HTTP request to
// an App Engine application.
type RequestLog struct {
// AppEngineRelease: App Engine release version.
AppEngineRelease string `json:"appEngineRelease,omitempty"`
// AppId: Application that handled this request.
AppId string `json:"appId,omitempty"`
// Cost: An indication of the relative cost of serving this request.
Cost float64 `json:"cost,omitempty"`
// EndTime: Time when the request finished.
EndTime string `json:"endTime,omitempty"`
// Finished: Whether this request is finished or active.
Finished bool `json:"finished,omitempty"`
// First: Whether this is the first RequestLog entry for this request.
// If an active request has several RequestLog entries written to
// Stackdriver Logging, then this field will be set for one of them.
First bool `json:"first,omitempty"`
// Host: Internet host and port number of the resource being requested.
Host string `json:"host,omitempty"`
// HttpVersion: HTTP version of request. Example: "HTTP/1.1".
HttpVersion string `json:"httpVersion,omitempty"`
// InstanceId: An identifier for the instance that handled the request.
InstanceId string `json:"instanceId,omitempty"`
// InstanceIndex: If the instance processing this request belongs to a
// manually scaled module, then this is the 0-based index of the
// instance. Otherwise, this value is -1.
InstanceIndex int64 `json:"instanceIndex,omitempty"`
// Ip: Origin IP address.
Ip string `json:"ip,omitempty"`
// Latency: Latency of the request.
Latency string `json:"latency,omitempty"`
// Line: A list of log lines emitted by the application while serving
// this request.
Line []*LogLine `json:"line,omitempty"`
// MegaCycles: Number of CPU megacycles used to process request.
MegaCycles int64 `json:"megaCycles,omitempty,string"`
// Method: Request method. Example: "GET", "HEAD", "PUT", "POST",
// "DELETE".
Method string `json:"method,omitempty"`
// ModuleId: Module of the application that handled this request.
ModuleId string `json:"moduleId,omitempty"`
// Nickname: The logged-in user who made the request. Most likely, this
// is the part of the user's email before the @ sign. The field value is
// the same for different requests from the same user, but different
// users can have similar names. This information is also available to
// the application via the App Engine Users API. This field will be
// populated starting with App Engine 1.9.21.
Nickname string `json:"nickname,omitempty"`
// PendingTime: Time this request spent in the pending request queue.
PendingTime string `json:"pendingTime,omitempty"`
// Referrer: Referrer URL of request.
Referrer string `json:"referrer,omitempty"`
// RequestId: Globally unique identifier for a request, which is based
// on the request start time. Request IDs for requests which started
// later will compare greater as strings than those for requests which
// started earlier.
RequestId string `json:"requestId,omitempty"`
// Resource: Contains the path and query portion of the URL that was
// requested. For example, if the URL was
// "http://example.com/app?name=val", the resource would be
// "/app?name=val". The fragment identifier, which is identified by the
// # character, is not included.
Resource string `json:"resource,omitempty"`
// ResponseSize: Size in bytes sent back to client by request.
ResponseSize int64 `json:"responseSize,omitempty,string"`
// SourceReference: Source code for the application that handled this
// request. There can be more than one source reference per deployed
// application if source code is distributed among multiple
// repositories.
SourceReference []*SourceReference `json:"sourceReference,omitempty"`
// StartTime: Time when the request started.
StartTime string `json:"startTime,omitempty"`
// Status: HTTP response status code. Example: 200, 404.
Status int64 `json:"status,omitempty"`
// TaskName: Task name of the request, in the case of an offline
// request.
TaskName string `json:"taskName,omitempty"`
// TaskQueueName: Queue name of the request, in the case of an offline
// request.
TaskQueueName string `json:"taskQueueName,omitempty"`
// TraceId: Stackdriver Trace identifier for this request.
TraceId string `json:"traceId,omitempty"`
// UrlMapEntry: File or class that handled the request.
UrlMapEntry string `json:"urlMapEntry,omitempty"`
// UserAgent: User agent that made the request.
UserAgent string `json:"userAgent,omitempty"`
// VersionId: Version of the application that handled this request.
VersionId string `json:"versionId,omitempty"`
// WasLoadingRequest: Whether this was a loading request for the
// instance.
WasLoadingRequest bool `json:"wasLoadingRequest,omitempty"`
// ForceSendFields is a list of field names (e.g. "AppEngineRelease") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppEngineRelease") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *RequestLog) MarshalJSON() ([]byte, error) {
type noMethod RequestLog
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *RequestLog) UnmarshalJSON(data []byte) error {
type noMethod RequestLog
var s1 struct {
Cost gensupport.JSONFloat64 `json:"cost"`
*noMethod
}
s1.noMethod = (*noMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Cost = float64(s1.Cost)
return nil
}
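// decodeRequestLogExample is a hedged sketch, not part of the generated API
// surface: it exercises the custom UnmarshalJSON above, in which the cost
// field is decoded through gensupport.JSONFloat64 before being copied into
// the float64 Cost field. The JSON payload is illustrative only and relies
// on the encoding/json import already present in this file.
func decodeRequestLogExample() (*RequestLog, error) {
	payload := []byte(`{"appId":"my-app","cost":0.000123,"method":"GET","status":200}`)
	var rl RequestLog
	if err := json.Unmarshal(payload, &rl); err != nil {
		return nil, err
	}
	return &rl, nil
}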
// SourceLocation: Specifies a location in a source code file.
type SourceLocation struct {
// File: Source file name. Depending on the runtime environment, this
// might be a simple name or a fully-qualified name.
File string `json:"file,omitempty"`
// FunctionName: Human-readable name of the function or method being
// invoked, with optional context such as the class or package name.
// This information is used in contexts such as the logs viewer, where a
// file and line number are less meaningful. The format can vary by
// language. For example: qual.if.ied.Class.method (Java),
// dir/package.func (Go), function (Python).
FunctionName string `json:"functionName,omitempty"`
// Line: Line within the source file.
Line int64 `json:"line,omitempty,string"`
// ForceSendFields is a list of field names (e.g. "File") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "File") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SourceLocation) MarshalJSON() ([]byte, error) {
type noMethod SourceLocation
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SourceReference: A reference to a particular snapshot of the source
// tree used to build and deploy an application.
type SourceReference struct {
// Repository: Optional. A URI string identifying the repository.
// Example: "https://github.com/GoogleCloudPlatform/kubernetes.git"
Repository string `json:"repository,omitempty"`
// RevisionId: The canonical and persistent identifier of the deployed
// revision. Example (git): "0035781c50ec7aa23385dc841529ce8a4b70db1b"
RevisionId string `json:"revisionId,omitempty"`
// ForceSendFields is a list of field names (e.g. "Repository") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Repository") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SourceReference) MarshalJSON() ([]byte, error) {
type noMethod SourceReference
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// WriteLogEntriesRequest: The parameters to WriteLogEntries.
type WriteLogEntriesRequest struct {
// Entries: Required. The log entries to write. Values supplied for the
// fields log_name, resource, and labels in this entries.write request
// are inserted into those log entries in this list that do not provide
// their own values.Stackdriver Logging also creates and inserts values
// for timestamp and insert_id if the entries do not provide them. The
// created insert_id for the N'th entry in this list will be greater
// than earlier entries and less than later entries. Otherwise, the
// order of log entries in this list does not matter.To improve
// throughput and to avoid exceeding the quota limit for calls to
// entries.write, you should write multiple log entries at once rather
// than calling this method for each individual log entry.
Entries []*LogEntry `json:"entries,omitempty"`
// Labels: Optional. Default labels that are added to the labels field
// of all log entries in entries. If a log entry already has a label
// with the same key as a label in this parameter, then the log entry's
// label is not changed. See LogEntry.
Labels map[string]string `json:"labels,omitempty"`
// LogName: Optional. A default log resource name that is assigned to
// all log entries in entries that do not specify a value for
// log_name:
// "projects/[PROJECT_ID]/logs/[LOG_ID]"
// "organizations/[ORGANI
// ZATION_ID]/logs/[LOG_ID]"
// "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[
// LOG_ID]"
// "folders/[FOLDER_ID]/logs/[LOG_ID]"
// [LOG_ID] must be URL-encoded. For example,
// "projects/my-project-id/logs/syslog" or
// "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa
// ctivity". For more information about log names, see LogEntry.
LogName string `json:"logName,omitempty"`
// PartialSuccess: Optional. Whether valid entries should be written
// even if some other entries fail due to INVALID_ARGUMENT or
// PERMISSION_DENIED errors. If any entry is not written, then the
// response status is the error associated with one of the failed
// entries and the response includes error details keyed by the entries'
// zero-based index in the entries.write method.
PartialSuccess bool `json:"partialSuccess,omitempty"`
// Resource: Optional. A default monitored resource object that is
// assigned to all log entries in entries that do not specify a value
// for resource. Example:
// { "type": "gce_instance",
// "labels": {
// "zone": "us-central1-a", "instance_id": "00000000000000000000"
// }}
// See LogEntry.
Resource *MonitoredResource `json:"resource,omitempty"`
// ForceSendFields is a list of field names (e.g. "Entries") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Entries") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *WriteLogEntriesRequest) MarshalJSON() ([]byte, error) {
type noMethod WriteLogEntriesRequest
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
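// batchedWriteRequest is a minimal sketch, not part of the generated API
// surface: it shows the batching pattern described in the Entries comment
// above, where a default LogName and Resource are supplied once on the
// request and applied to every entry that omits them. All identifiers are
// placeholders.
func batchedWriteRequest(entries []*LogEntry) *WriteLogEntriesRequest {
	return &WriteLogEntriesRequest{
		LogName: "projects/my-project/logs/app",
		Resource: &MonitoredResource{
			Type:   "global",
			Labels: map[string]string{"project_id": "my-project"},
		},
		Labels:         map[string]string{"service": "frontend"},
		Entries:        entries,
		PartialSuccess: true,
	}
}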
// WriteLogEntriesResponse: Result returned from WriteLogEntries.
type WriteLogEntriesResponse struct {
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
}
// method id "logging.billingAccounts.logs.delete":
type BillingAccountsLogsDeleteCall struct {
s *Service
logName string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes all the log entries in a log. The log reappears if it
// receives new entries. Log entries written shortly before the delete
// operation might not be deleted.
func (r *BillingAccountsLogsService) Delete(logName string) *BillingAccountsLogsDeleteCall {
c := &BillingAccountsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.logName = logName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BillingAccountsLogsDeleteCall) Fields(s ...googleapi.Field) *BillingAccountsLogsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BillingAccountsLogsDeleteCall) Context(ctx context.Context) *BillingAccountsLogsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BillingAccountsLogsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BillingAccountsLogsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"logName": c.logName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.billingAccounts.logs.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.",
// "flatPath": "v2/billingAccounts/{billingAccountsId}/logs/{logsId}",
// "httpMethod": "DELETE",
// "id": "logging.billingAccounts.logs.delete",
// "parameterOrder": [
// "logName"
// ],
// "parameters": {
// "logName": {
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+/logs/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+logName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
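// deleteBillingAccountLogExample is a hedged usage sketch, not part of the
// generated API surface: it wires the delete call above to a caller-supplied
// context. The billing account and log IDs are placeholders.
func deleteBillingAccountLogExample(ctx context.Context, svc *BillingAccountsLogsService) error {
	_, err := svc.Delete("billingAccounts/012345-567890-ABCDEF/logs/syslog").Context(ctx).Do()
	return err
}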
// method id "logging.billingAccounts.logs.list":
type BillingAccountsLogsListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the logs in projects, organizations, folders, or billing
// accounts. Only logs that have entries are listed.
func (r *BillingAccountsLogsService) List(parent string) *BillingAccountsLogsListCall {
c := &BillingAccountsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *BillingAccountsLogsListCall) PageSize(pageSize int64) *BillingAccountsLogsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *BillingAccountsLogsListCall) PageToken(pageToken string) *BillingAccountsLogsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BillingAccountsLogsListCall) Fields(s ...googleapi.Field) *BillingAccountsLogsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BillingAccountsLogsListCall) IfNoneMatch(entityTag string) *BillingAccountsLogsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BillingAccountsLogsListCall) Context(ctx context.Context) *BillingAccountsLogsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BillingAccountsLogsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.billingAccounts.logs.list" call.
// Exactly one of *ListLogsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLogsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListLogsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.",
// "flatPath": "v2/billingAccounts/{billingAccountsId}/logs",
// "httpMethod": "GET",
// "id": "logging.billingAccounts.logs.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/logs",
// "response": {
// "$ref": "ListLogsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *BillingAccountsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
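// listBillingAccountLogNames is a hedged usage sketch, not part of the
// generated API surface: it uses the Pages helper above to walk every page
// of results and collect the log names. The billing account ID is a
// placeholder.
func listBillingAccountLogNames(ctx context.Context, svc *BillingAccountsLogsService) ([]string, error) {
	var names []string
	err := svc.List("billingAccounts/012345-567890-ABCDEF").Pages(ctx, func(page *ListLogsResponse) error {
		names = append(names, page.LogNames...)
		return nil
	})
	return names, err
}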
// method id "logging.billingAccounts.sinks.create":
type BillingAccountsSinksCreateCall struct {
s *Service
parent string
logsink *LogSink
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a sink that exports specified log entries to a
// destination. The export of newly-ingested log entries begins
// immediately, unless the current time is outside the sink's start and
// end times or the sink's writer_identity is not permitted to write to
// the destination. A sink can export log entries only from the resource
// owning the sink.
func (r *BillingAccountsSinksService) Create(parent string, logsink *LogSink) *BillingAccountsSinksCreateCall {
c := &BillingAccountsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.logsink = logsink
return c
}
// UniqueWriterIdentity sets the optional parameter
// "uniqueWriterIdentity": Determines the kind of IAM identity returned
// as writer_identity in the new sink. If this value is omitted or set
// to false, and if the sink's parent is a project, then the value
// returned as writer_identity is the same group or service account used
// by Stackdriver Logging before the addition of writer identities to
// this API. The sink's destination must be in the same project as the
// sink itself.If this field is set to true, or if the sink is owned by
// a non-project resource such as an organization, then the value of
// writer_identity will be a unique service account used only for
// exports from the new sink. For more information, see writer_identity
// in LogSink.
func (c *BillingAccountsSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksCreateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BillingAccountsSinksCreateCall) Fields(s ...googleapi.Field) *BillingAccountsSinksCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BillingAccountsSinksCreateCall) Context(ctx context.Context) *BillingAccountsSinksCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BillingAccountsSinksCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BillingAccountsSinksCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.billingAccounts.sinks.create" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BillingAccountsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.",
// "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks",
// "httpMethod": "POST",
// "id": "logging.billingAccounts.sinks.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v2/{+parent}/sinks",
// "request": {
// "$ref": "LogSink"
// },
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
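// exampleCreateBillingAccountSink is a hypothetical usage sketch, not part of
// the generated API surface. It assumes the usual generated wiring
// s.BillingAccounts.Sinks and the LogSink fields Name, Destination and Filter
// defined elsewhere in this package. It shows how the builder is assembled:
// parent and request body at construction, optional query parameters chained,
// and Do to execute the call.
func exampleCreateBillingAccountSink(ctx context.Context, s *Service) (*LogSink, error) {
	sink := &LogSink{
		Name:        "my-error-sink",                        // sink identifier
		Destination: "storage.googleapis.com/my-log-bucket", // where entries are exported
		Filter:      "severity>=ERROR",                      // only matching entries are exported
	}
	return s.BillingAccounts.Sinks.
		Create("billingAccounts/012345-ABCDEF-678901", sink).
		UniqueWriterIdentity(true). // request a dedicated writer service account
		Context(ctx).
		Do()
}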
// method id "logging.billingAccounts.sinks.delete":
type BillingAccountsSinksDeleteCall struct {
s *Service
sinkNameid string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a sink. If the sink has a unique writer_identity,
// then that service account is also deleted.
func (r *BillingAccountsSinksService) Delete(sinkNameid string) *BillingAccountsSinksDeleteCall {
c := &BillingAccountsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkNameid = sinkNameid
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BillingAccountsSinksDeleteCall) Fields(s ...googleapi.Field) *BillingAccountsSinksDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BillingAccountsSinksDeleteCall) Context(ctx context.Context) *BillingAccountsSinksDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BillingAccountsSinksDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BillingAccountsSinksDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkNameid,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.billingAccounts.sinks.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BillingAccountsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.",
// "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}",
// "httpMethod": "DELETE",
// "id": "logging.billingAccounts.sinks.delete",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+sinkName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
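// exampleDeleteBillingAccountSink is a hypothetical usage sketch, not part of
// the generated API surface, assuming the s.BillingAccounts.Sinks wiring.
// Deleting a sink returns *Empty, so only the error is interesting to the
// caller; if the sink had a unique writer_identity, that service account is
// deleted as well.
func exampleDeleteBillingAccountSink(ctx context.Context, s *Service) error {
	_, err := s.BillingAccounts.Sinks.
		Delete("billingAccounts/012345-ABCDEF-678901/sinks/my-error-sink").
		Context(ctx).
		Do()
	return err
}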
// method id "logging.billingAccounts.sinks.get":
type BillingAccountsSinksGetCall struct {
s *Service
sinkName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a sink.
func (r *BillingAccountsSinksService) Get(sinkName string) *BillingAccountsSinksGetCall {
c := &BillingAccountsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkName = sinkName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BillingAccountsSinksGetCall) Fields(s ...googleapi.Field) *BillingAccountsSinksGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BillingAccountsSinksGetCall) IfNoneMatch(entityTag string) *BillingAccountsSinksGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BillingAccountsSinksGetCall) Context(ctx context.Context) *BillingAccountsSinksGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BillingAccountsSinksGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BillingAccountsSinksGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.billingAccounts.sinks.get" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BillingAccountsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a sink.",
// "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}",
// "httpMethod": "GET",
// "id": "logging.billingAccounts.sinks.get",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+sinkName}",
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
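// exampleGetBillingAccountSinkIfChanged is a hypothetical usage sketch, not
// part of the generated API surface, assuming the s.BillingAccounts.Sinks
// wiring. IfNoneMatch sends an If-None-Match header so the server can answer
// with http.StatusNotModified when the sink is unchanged, and
// googleapi.IsNotModified distinguishes that case from a real failure.
func exampleGetBillingAccountSinkIfChanged(ctx context.Context, s *Service, etag string) (*LogSink, error) {
	sink, err := s.BillingAccounts.Sinks.
		Get("billingAccounts/012345-ABCDEF-678901/sinks/my-error-sink").
		IfNoneMatch(etag). // ETag saved from a previous response
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since the last request; keep the cached copy
	}
	return sink, err
}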
// method id "logging.billingAccounts.sinks.list":
type BillingAccountsSinksListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists sinks.
func (r *BillingAccountsSinksService) List(parent string) *BillingAccountsSinksListCall {
c := &BillingAccountsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *BillingAccountsSinksListCall) PageSize(pageSize int64) *BillingAccountsSinksListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *BillingAccountsSinksListCall) PageToken(pageToken string) *BillingAccountsSinksListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BillingAccountsSinksListCall) Fields(s ...googleapi.Field) *BillingAccountsSinksListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BillingAccountsSinksListCall) IfNoneMatch(entityTag string) *BillingAccountsSinksListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BillingAccountsSinksListCall) Context(ctx context.Context) *BillingAccountsSinksListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BillingAccountsSinksListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BillingAccountsSinksListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.billingAccounts.sinks.list" call.
// Exactly one of *ListSinksResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListSinksResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BillingAccountsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListSinksResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists sinks.",
// "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks",
// "httpMethod": "GET",
// "id": "logging.billingAccounts.sinks.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/sinks",
// "response": {
// "$ref": "ListSinksResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *BillingAccountsSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
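// exampleListBillingAccountSinks is a hypothetical usage sketch, not part of
// the generated API surface, assuming the s.BillingAccounts.Sinks wiring and
// the ListSinksResponse.Sinks field. Pages drives the pageToken/nextPageToken
// loop itself and invokes the callback once per page until the token is
// exhausted or the callback returns an error.
func exampleListBillingAccountSinks(ctx context.Context, s *Service) ([]*LogSink, error) {
	var sinks []*LogSink
	err := s.BillingAccounts.Sinks.
		List("billingAccounts/012345-ABCDEF-678901").
		PageSize(100). // at most 100 sinks per page
		Pages(ctx, func(page *ListSinksResponse) error {
			sinks = append(sinks, page.Sinks...)
			return nil // returning a non-nil error here halts the iteration
		})
	return sinks, err
}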
// method id "logging.billingAccounts.sinks.update":
type BillingAccountsSinksUpdateCall struct {
s *Service
sinkNameid string
logsink *LogSink
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a sink. If the named sink doesn't exist, then this
// method is identical to sinks.create. If the named sink does exist,
// then this method replaces the following fields in the existing sink
// with values from the new sink: destination, filter,
// output_version_format, start_time, and end_time. The updated filter
// might also have a new writer_identity; see the unique_writer_identity
// field.
func (r *BillingAccountsSinksService) Update(sinkNameid string, logsink *LogSink) *BillingAccountsSinksUpdateCall {
c := &BillingAccountsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkNameid = sinkNameid
c.logsink = logsink
return c
}
// UniqueWriterIdentity sets the optional parameter
// "uniqueWriterIdentity": See sinks.create for a description of this
// field. When updating a sink, the effect of this field on the value of
// writer_identity in the updated sink depends on both the old and new
// values of this field:
// If the old and new values of this field are both false or both true,
// then there is no change to the sink's writer_identity.
// If the old value is false and the new value is true, then
// writer_identity is changed to a unique service account.
// It is an error if the old value is true and the new value is false.
func (c *BillingAccountsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *BillingAccountsSinksUpdateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BillingAccountsSinksUpdateCall) Fields(s ...googleapi.Field) *BillingAccountsSinksUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BillingAccountsSinksUpdateCall) Context(ctx context.Context) *BillingAccountsSinksUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BillingAccountsSinksUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BillingAccountsSinksUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkNameid,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.billingAccounts.sinks.update" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BillingAccountsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.",
// "flatPath": "v2/billingAccounts/{billingAccountsId}/sinks/{sinksId}",
// "httpMethod": "PUT",
// "id": "logging.billingAccounts.sinks.update",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^billingAccounts/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v2/{+sinkName}",
// "request": {
// "$ref": "LogSink"
// },
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
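// exampleUpdateBillingAccountSink is a hypothetical usage sketch, not part of
// the generated API surface, assuming the s.BillingAccounts.Sinks wiring.
// Update replaces destination, filter and the other replaceable fields with
// the values from the supplied LogSink; setting UniqueWriterIdentity(true) on
// a sink that previously used the shared identity switches it to a dedicated
// service account, per the uniqueWriterIdentity rules documented above.
func exampleUpdateBillingAccountSink(ctx context.Context, s *Service) (*LogSink, error) {
	sink := &LogSink{
		Name:        "my-error-sink",
		Destination: "storage.googleapis.com/my-log-bucket",
		Filter:      "severity>=WARNING", // widen the exported severities
	}
	return s.BillingAccounts.Sinks.
		Update("billingAccounts/012345-ABCDEF-678901/sinks/my-error-sink", sink).
		UniqueWriterIdentity(true).
		Context(ctx).
		Do()
}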
// method id "logging.entries.list":
type EntriesListCall struct {
s *Service
listlogentriesrequest *ListLogEntriesRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// List: Lists log entries. Use this method to retrieve log entries from
// Stackdriver Logging. For ways to export log entries, see Exporting
// Logs.
func (r *EntriesService) List(listlogentriesrequest *ListLogEntriesRequest) *EntriesListCall {
c := &EntriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.listlogentriesrequest = listlogentriesrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *EntriesListCall) Fields(s ...googleapi.Field) *EntriesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *EntriesListCall) Context(ctx context.Context) *EntriesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *EntriesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *EntriesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.listlogentriesrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/entries:list")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.entries.list" call.
// Exactly one of *ListLogEntriesResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLogEntriesResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *EntriesListCall) Do(opts ...googleapi.CallOption) (*ListLogEntriesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListLogEntriesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. For ways to export log entries, see Exporting Logs.",
// "flatPath": "v2/entries:list",
// "httpMethod": "POST",
// "id": "logging.entries.list",
// "parameterOrder": [],
// "parameters": {},
// "path": "v2/entries:list",
// "request": {
// "$ref": "ListLogEntriesRequest"
// },
// "response": {
// "$ref": "ListLogEntriesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *EntriesListCall) Pages(ctx context.Context, f func(*ListLogEntriesResponse) error) error {
c.ctx_ = ctx
defer func(pt string) { c.listlogentriesrequest.PageToken = pt }(c.listlogentriesrequest.PageToken) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.listlogentriesrequest.PageToken = x.NextPageToken
}
}
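// exampleListLogEntries is a hypothetical usage sketch, not part of the
// generated API surface, assuming the s.Entries wiring and the
// ListLogEntriesRequest/ListLogEntriesResponse fields used below. Unlike the
// sink and log list calls, paging state lives in the request body, and Pages
// updates the request's PageToken between calls.
func exampleListLogEntries(ctx context.Context, s *Service) ([]*LogEntry, error) {
	req := &ListLogEntriesRequest{
		ResourceNames: []string{"projects/my-logging-project"}, // resources to read from
		Filter:        "severity>=ERROR",                       // standard entries filter
		OrderBy:       "timestamp desc",                        // newest entries first
		PageSize:      1000,
	}
	var entries []*LogEntry
	err := s.Entries.List(req).Pages(ctx, func(page *ListLogEntriesResponse) error {
		entries = append(entries, page.Entries...)
		return nil
	})
	return entries, err
}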
// method id "logging.entries.write":
type EntriesWriteCall struct {
s *Service
writelogentriesrequest *WriteLogEntriesRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Write: Writes log entries to Stackdriver Logging.
func (r *EntriesService) Write(writelogentriesrequest *WriteLogEntriesRequest) *EntriesWriteCall {
c := &EntriesWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.writelogentriesrequest = writelogentriesrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *EntriesWriteCall) Fields(s ...googleapi.Field) *EntriesWriteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *EntriesWriteCall) Context(ctx context.Context) *EntriesWriteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *EntriesWriteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *EntriesWriteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/entries:write")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.entries.write" call.
// Exactly one of *WriteLogEntriesResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *WriteLogEntriesResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *EntriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteLogEntriesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &WriteLogEntriesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Writes log entries to Stackdriver Logging.",
// "flatPath": "v2/entries:write",
// "httpMethod": "POST",
// "id": "logging.entries.write",
// "parameterOrder": [],
// "parameters": {},
// "path": "v2/entries:write",
// "request": {
// "$ref": "WriteLogEntriesRequest"
// },
// "response": {
// "$ref": "WriteLogEntriesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.write"
// ]
// }
}
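// exampleWriteLogEntry is a hypothetical usage sketch, not part of the
// generated API surface, assuming the s.Entries wiring and the
// WriteLogEntriesRequest, LogEntry and MonitoredResource fields used below.
// The request carries defaults (log name and monitored resource) that apply to
// every entry in the batch unless an individual entry overrides them.
func exampleWriteLogEntry(ctx context.Context, s *Service, text string) error {
	req := &WriteLogEntriesRequest{
		LogName:  "projects/my-logging-project/logs/my-app", // default log for the batch
		Resource: &MonitoredResource{Type: "global"},        // default monitored resource
		Entries: []*LogEntry{
			{TextPayload: text, Severity: "INFO"},
		},
	}
	_, err := s.Entries.Write(req).Context(ctx).Do()
	return err
}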
// method id "logging.folders.logs.delete":
type FoldersLogsDeleteCall struct {
s *Service
logName string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes all the log entries in a log. The log reappears if it
// receives new entries. Log entries written shortly before the delete
// operation might not be deleted.
func (r *FoldersLogsService) Delete(logName string) *FoldersLogsDeleteCall {
c := &FoldersLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.logName = logName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FoldersLogsDeleteCall) Fields(s ...googleapi.Field) *FoldersLogsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FoldersLogsDeleteCall) Context(ctx context.Context) *FoldersLogsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FoldersLogsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FoldersLogsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"logName": c.logName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.folders.logs.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *FoldersLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.",
// "flatPath": "v2/folders/{foldersId}/logs/{logsId}",
// "httpMethod": "DELETE",
// "id": "logging.folders.logs.delete",
// "parameterOrder": [
// "logName"
// ],
// "parameters": {
// "logName": {
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "location": "path",
// "pattern": "^folders/[^/]+/logs/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+logName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
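// exampleDeleteFolderLog is a hypothetical usage sketch, not part of the
// generated API surface, assuming the s.Folders.Logs wiring. The [LOG_ID]
// segment of the log name must be URL-encoded, as the parameter description
// above notes; entries written shortly before the delete may survive.
func exampleDeleteFolderLog(ctx context.Context, s *Service) error {
	_, err := s.Folders.Logs.
		Delete("folders/1234567890/logs/syslog").
		Context(ctx).
		Do()
	return err
}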
// method id "logging.folders.logs.list":
type FoldersLogsListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the logs in projects, organizations, folders, or billing
// accounts. Only logs that have entries are listed.
func (r *FoldersLogsService) List(parent string) *FoldersLogsListCall {
c := &FoldersLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *FoldersLogsListCall) PageSize(pageSize int64) *FoldersLogsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *FoldersLogsListCall) PageToken(pageToken string) *FoldersLogsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FoldersLogsListCall) Fields(s ...googleapi.Field) *FoldersLogsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FoldersLogsListCall) IfNoneMatch(entityTag string) *FoldersLogsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FoldersLogsListCall) Context(ctx context.Context) *FoldersLogsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FoldersLogsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FoldersLogsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.folders.logs.list" call.
// Exactly one of *ListLogsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLogsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListLogsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.",
// "flatPath": "v2/folders/{foldersId}/logs",
// "httpMethod": "GET",
// "id": "logging.folders.logs.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^folders/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/logs",
// "response": {
// "$ref": "ListLogsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *FoldersLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
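// exampleListFolderLogs is a hypothetical usage sketch, not part of the
// generated API surface, assuming the s.Folders.Logs wiring and the
// ListLogsResponse.LogNames field. Only logs that currently have entries are
// returned, and Pages walks the pageToken chain for the caller.
func exampleListFolderLogs(ctx context.Context, s *Service) ([]string, error) {
	var names []string
	err := s.Folders.Logs.
		List("folders/1234567890").
		PageSize(200).
		Pages(ctx, func(page *ListLogsResponse) error {
			names = append(names, page.LogNames...)
			return nil
		})
	return names, err
}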
// method id "logging.folders.sinks.create":
type FoldersSinksCreateCall struct {
s *Service
parent string
logsink *LogSink
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a sink that exports specified log entries to a
// destination. The export of newly-ingested log entries begins
// immediately, unless the current time is outside the sink's start and
// end times or the sink's writer_identity is not permitted to write to
// the destination. A sink can export log entries only from the resource
// owning the sink.
func (r *FoldersSinksService) Create(parent string, logsink *LogSink) *FoldersSinksCreateCall {
c := &FoldersSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.logsink = logsink
return c
}
// UniqueWriterIdentity sets the optional parameter
// "uniqueWriterIdentity": Determines the kind of IAM identity returned
// as writer_identity in the new sink. If this value is omitted or set
// to false, and if the sink's parent is a project, then the value
// returned as writer_identity is the same group or service account used
// by Stackdriver Logging before the addition of writer identities to
// this API. The sink's destination must be in the same project as the
// sink itself.If this field is set to true, or if the sink is owned by
// a non-project resource such as an organization, then the value of
// writer_identity will be a unique service account used only for
// exports from the new sink. For more information, see writer_identity
// in LogSink.
func (c *FoldersSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksCreateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FoldersSinksCreateCall) Fields(s ...googleapi.Field) *FoldersSinksCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FoldersSinksCreateCall) Context(ctx context.Context) *FoldersSinksCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FoldersSinksCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FoldersSinksCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.folders.sinks.create" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *FoldersSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.",
// "flatPath": "v2/folders/{foldersId}/sinks",
// "httpMethod": "POST",
// "id": "logging.folders.sinks.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".",
// "location": "path",
// "pattern": "^folders/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v2/{+parent}/sinks",
// "request": {
// "$ref": "LogSink"
// },
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
// method id "logging.folders.sinks.delete":
type FoldersSinksDeleteCall struct {
s *Service
sinkNameid string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a sink. If the sink has a unique writer_identity,
// then that service account is also deleted.
func (r *FoldersSinksService) Delete(sinkNameid string) *FoldersSinksDeleteCall {
c := &FoldersSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkNameid = sinkNameid
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FoldersSinksDeleteCall) Fields(s ...googleapi.Field) *FoldersSinksDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FoldersSinksDeleteCall) Context(ctx context.Context) *FoldersSinksDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FoldersSinksDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FoldersSinksDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkNameid,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.folders.sinks.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *FoldersSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.",
// "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}",
// "httpMethod": "DELETE",
// "id": "logging.folders.sinks.delete",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^folders/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+sinkName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
// method id "logging.folders.sinks.get":
type FoldersSinksGetCall struct {
s *Service
sinkName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a sink.
func (r *FoldersSinksService) Get(sinkName string) *FoldersSinksGetCall {
c := &FoldersSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkName = sinkName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FoldersSinksGetCall) Fields(s ...googleapi.Field) *FoldersSinksGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FoldersSinksGetCall) IfNoneMatch(entityTag string) *FoldersSinksGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FoldersSinksGetCall) Context(ctx context.Context) *FoldersSinksGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FoldersSinksGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FoldersSinksGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.folders.sinks.get" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *FoldersSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a sink.",
// "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}",
// "httpMethod": "GET",
// "id": "logging.folders.sinks.get",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^folders/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+sinkName}",
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
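// exampleGetFolderSink is an illustrative sketch, not part of the generated
// API surface: it shows one way FoldersSinksGetCall might be wired together
// (build the call, scope it to a context, execute it). The sink name
// "folders/my-folder-id/sinks/my-sink-id" is a placeholder.
func exampleGetFolderSink(ctx context.Context, svc *Service) (*LogSink, error) {
	return svc.Folders.Sinks.
		Get("folders/my-folder-id/sinks/my-sink-id").
		Context(ctx).
		Do()
}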
// method id "logging.folders.sinks.list":
type FoldersSinksListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists sinks.
func (r *FoldersSinksService) List(parent string) *FoldersSinksListCall {
c := &FoldersSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *FoldersSinksListCall) PageSize(pageSize int64) *FoldersSinksListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *FoldersSinksListCall) PageToken(pageToken string) *FoldersSinksListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FoldersSinksListCall) Fields(s ...googleapi.Field) *FoldersSinksListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *FoldersSinksListCall) IfNoneMatch(entityTag string) *FoldersSinksListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FoldersSinksListCall) Context(ctx context.Context) *FoldersSinksListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FoldersSinksListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FoldersSinksListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.folders.sinks.list" call.
// Exactly one of *ListSinksResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListSinksResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *FoldersSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListSinksResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists sinks.",
// "flatPath": "v2/folders/{foldersId}/sinks",
// "httpMethod": "GET",
// "id": "logging.folders.sinks.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^folders/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/sinks",
// "response": {
// "$ref": "ListSinksResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *FoldersSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
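// exampleListFolderSinks is an illustrative sketch, not part of the generated
// API surface: it pages through every sink under a folder using Pages, which
// follows nextPageToken internally so the caller never handles paging state.
// "folders/my-folder-id" is a placeholder parent, and the LogSink fields
// printed here (Name, Destination) are assumed from the schema in this file.
func exampleListFolderSinks(ctx context.Context, svc *Service) error {
	call := svc.Folders.Sinks.List("folders/my-folder-id").PageSize(50)
	return call.Pages(ctx, func(page *ListSinksResponse) error {
		for _, sink := range page.Sinks {
			fmt.Printf("%s -> %s\n", sink.Name, sink.Destination)
		}
		return nil // returning a non-nil error here would halt the iteration
	})
}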
// method id "logging.folders.sinks.update":
type FoldersSinksUpdateCall struct {
s *Service
sinkNameid string
logsink *LogSink
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a sink. If the named sink doesn't exist, then this
// method is identical to sinks.create. If the named sink does exist,
// then this method replaces the following fields in the existing sink
// with values from the new sink: destination, filter,
// output_version_format, start_time, and end_time. The updated filter
// might also have a new writer_identity; see the unique_writer_identity
// field.
func (r *FoldersSinksService) Update(sinkNameid string, logsink *LogSink) *FoldersSinksUpdateCall {
c := &FoldersSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkNameid = sinkNameid
c.logsink = logsink
return c
}
// UniqueWriterIdentity sets the optional parameter
// "uniqueWriterIdentity": See sinks.create for a description of this
// field. When updating a sink, the effect of this field on the value of
// writer_identity in the updated sink depends on both the old and new
// values of this field:
// If the old and new values of this field are both false or both true,
// then there is no change to the sink's writer_identity.
// If the old value is false and the new value is true, then
// writer_identity is changed to a unique service account.
// It is an error if the old value is true and the new value is false.
func (c *FoldersSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *FoldersSinksUpdateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *FoldersSinksUpdateCall) Fields(s ...googleapi.Field) *FoldersSinksUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *FoldersSinksUpdateCall) Context(ctx context.Context) *FoldersSinksUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *FoldersSinksUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *FoldersSinksUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkNameid,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.folders.sinks.update" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *FoldersSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.",
// "flatPath": "v2/folders/{foldersId}/sinks/{sinksId}",
// "httpMethod": "PUT",
// "id": "logging.folders.sinks.update",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^folders/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v2/{+sinkName}",
// "request": {
// "$ref": "LogSink"
// },
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
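// exampleUpdateFolderSink is an illustrative sketch, not part of the generated
// API surface: it replaces a sink's destination and filter via Update and
// requests a unique writer identity. The resource names, destination bucket,
// and filter expression are all placeholders.
func exampleUpdateFolderSink(ctx context.Context, svc *Service) (*LogSink, error) {
	sink := &LogSink{
		Name:        "my-sink-id",
		Destination: "storage.googleapis.com/my-example-bucket",
		Filter:      "severity>=ERROR",
	}
	return svc.Folders.Sinks.
		Update("folders/my-folder-id/sinks/my-sink-id", sink).
		UniqueWriterIdentity(true).
		Context(ctx).
		Do()
}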
// method id "logging.monitoredResourceDescriptors.list":
type MonitoredResourceDescriptorsListCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the descriptors for monitored resource types used by
// Stackdriver Logging.
func (r *MonitoredResourceDescriptorsService) List() *MonitoredResourceDescriptorsListCall {
c := &MonitoredResourceDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *MonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *MonitoredResourceDescriptorsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *MonitoredResourceDescriptorsListCall) PageToken(pageToken string) *MonitoredResourceDescriptorsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *MonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *MonitoredResourceDescriptorsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *MonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *MonitoredResourceDescriptorsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *MonitoredResourceDescriptorsListCall) Context(ctx context.Context) *MonitoredResourceDescriptorsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *MonitoredResourceDescriptorsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *MonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/monitoredResourceDescriptors")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.monitoredResourceDescriptors.list" call.
// Exactly one of *ListMonitoredResourceDescriptorsResponse or error
// will be non-nil. Any non-2xx status code is an error. Response
// headers are in either
// *ListMonitoredResourceDescriptorsResponse.ServerResponse.Header or
// (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListMonitoredResourceDescriptorsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging.",
// "flatPath": "v2/monitoredResourceDescriptors",
// "httpMethod": "GET",
// "id": "logging.monitoredResourceDescriptors.list",
// "parameterOrder": [],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/monitoredResourceDescriptors",
// "response": {
// "$ref": "ListMonitoredResourceDescriptorsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *MonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func(*ListMonitoredResourceDescriptorsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
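// exampleListResourceDescriptors is an illustrative sketch, not part of the
// generated API surface: it walks every monitored resource descriptor known to
// Stackdriver Logging via Pages. The ResourceDescriptors and Type fields are
// assumed from the ListMonitoredResourceDescriptorsResponse and
// MonitoredResourceDescriptor schemas elsewhere in this file.
func exampleListResourceDescriptors(ctx context.Context, svc *Service) error {
	call := svc.MonitoredResourceDescriptors.List().PageSize(100)
	return call.Pages(ctx, func(page *ListMonitoredResourceDescriptorsResponse) error {
		for _, rd := range page.ResourceDescriptors {
			fmt.Println(rd.Type)
		}
		return nil
	})
}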
// method id "logging.organizations.logs.delete":
type OrganizationsLogsDeleteCall struct {
s *Service
logName string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes all the log entries in a log. The log reappears if it
// receives new entries. Log entries written shortly before the delete
// operation might not be deleted.
func (r *OrganizationsLogsService) Delete(logName string) *OrganizationsLogsDeleteCall {
c := &OrganizationsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.logName = logName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OrganizationsLogsDeleteCall) Fields(s ...googleapi.Field) *OrganizationsLogsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OrganizationsLogsDeleteCall) Context(ctx context.Context) *OrganizationsLogsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OrganizationsLogsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OrganizationsLogsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"logName": c.logName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.organizations.logs.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.",
// "flatPath": "v2/organizations/{organizationsId}/logs/{logsId}",
// "httpMethod": "DELETE",
// "id": "logging.organizations.logs.delete",
// "parameterOrder": [
// "logName"
// ],
// "parameters": {
// "logName": {
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "location": "path",
// "pattern": "^organizations/[^/]+/logs/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+logName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
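// exampleDeleteOrganizationLog is an illustrative sketch, not part of the
// generated API surface: it deletes all entries in one organization-owned log.
// The organization ID and the URL-encoded log ID are placeholders; the call
// returns (*Empty, error), so only the error is interesting to the caller.
func exampleDeleteOrganizationLog(ctx context.Context, svc *Service) error {
	_, err := svc.Organizations.Logs.
		Delete("organizations/1234567890/logs/syslog").
		Context(ctx).
		Do()
	return err
}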
// method id "logging.organizations.logs.list":
type OrganizationsLogsListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the logs in projects, organizations, folders, or billing
// accounts. Only logs that have entries are listed.
func (r *OrganizationsLogsService) List(parent string) *OrganizationsLogsListCall {
c := &OrganizationsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *OrganizationsLogsListCall) PageSize(pageSize int64) *OrganizationsLogsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *OrganizationsLogsListCall) PageToken(pageToken string) *OrganizationsLogsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OrganizationsLogsListCall) Fields(s ...googleapi.Field) *OrganizationsLogsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OrganizationsLogsListCall) IfNoneMatch(entityTag string) *OrganizationsLogsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OrganizationsLogsListCall) Context(ctx context.Context) *OrganizationsLogsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OrganizationsLogsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OrganizationsLogsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.organizations.logs.list" call.
// Exactly one of *ListLogsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLogsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListLogsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.",
// "flatPath": "v2/organizations/{organizationsId}/logs",
// "httpMethod": "GET",
// "id": "logging.organizations.logs.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^organizations/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/logs",
// "response": {
// "$ref": "ListLogsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *OrganizationsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
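// exampleListOrganizationLogs is an illustrative sketch, not part of the
// generated API surface: it pages through the names of logs that currently
// have entries under an organization. The LogNames field is assumed from the
// ListLogsResponse schema, and the parent resource name is a placeholder.
func exampleListOrganizationLogs(ctx context.Context, svc *Service) error {
	call := svc.Organizations.Logs.List("organizations/1234567890")
	return call.Pages(ctx, func(page *ListLogsResponse) error {
		for _, name := range page.LogNames {
			fmt.Println(name)
		}
		return nil
	})
}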
// method id "logging.organizations.sinks.create":
type OrganizationsSinksCreateCall struct {
s *Service
parent string
logsink *LogSink
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a sink that exports specified log entries to a
// destination. The export of newly-ingested log entries begins
// immediately, unless the current time is outside the sink's start and
// end times or the sink's writer_identity is not permitted to write to
// the destination. A sink can export log entries only from the resource
// owning the sink.
func (r *OrganizationsSinksService) Create(parent string, logsink *LogSink) *OrganizationsSinksCreateCall {
c := &OrganizationsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.logsink = logsink
return c
}
// UniqueWriterIdentity sets the optional parameter
// "uniqueWriterIdentity": Determines the kind of IAM identity returned
// as writer_identity in the new sink. If this value is omitted or set
// to false, and if the sink's parent is a project, then the value
// returned as writer_identity is the same group or service account used
// by Stackdriver Logging before the addition of writer identities to
// this API. The sink's destination must be in the same project as the
// sink itself.If this field is set to true, or if the sink is owned by
// a non-project resource such as an organization, then the value of
// writer_identity will be a unique service account used only for
// exports from the new sink. For more information, see writer_identity
// in LogSink.
func (c *OrganizationsSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksCreateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OrganizationsSinksCreateCall) Fields(s ...googleapi.Field) *OrganizationsSinksCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OrganizationsSinksCreateCall) Context(ctx context.Context) *OrganizationsSinksCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OrganizationsSinksCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OrganizationsSinksCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.organizations.sinks.create" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *OrganizationsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.",
// "flatPath": "v2/organizations/{organizationsId}/sinks",
// "httpMethod": "POST",
// "id": "logging.organizations.sinks.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".",
// "location": "path",
// "pattern": "^organizations/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v2/{+parent}/sinks",
// "request": {
// "$ref": "LogSink"
// },
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
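// exampleCreateOrganizationSink is an illustrative sketch, not part of the
// generated API surface: it creates an organization-level sink and asks for a
// unique writer identity, which the caller must later grant write access to
// the destination. The organization ID, sink name, destination bucket, and
// filter are placeholders.
func exampleCreateOrganizationSink(ctx context.Context, svc *Service) (*LogSink, error) {
	sink := &LogSink{
		Name:        "my-org-sink",
		Destination: "storage.googleapis.com/my-example-bucket",
		Filter:      "severity>=WARNING",
	}
	return svc.Organizations.Sinks.
		Create("organizations/1234567890", sink).
		UniqueWriterIdentity(true).
		Context(ctx).
		Do()
}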
// method id "logging.organizations.sinks.delete":
type OrganizationsSinksDeleteCall struct {
s *Service
sinkNameid string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a sink. If the sink has a unique writer_identity,
// then that service account is also deleted.
func (r *OrganizationsSinksService) Delete(sinkNameid string) *OrganizationsSinksDeleteCall {
c := &OrganizationsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkNameid = sinkNameid
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OrganizationsSinksDeleteCall) Fields(s ...googleapi.Field) *OrganizationsSinksDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OrganizationsSinksDeleteCall) Context(ctx context.Context) *OrganizationsSinksDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OrganizationsSinksDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OrganizationsSinksDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkNameid,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.organizations.sinks.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *OrganizationsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.",
// "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}",
// "httpMethod": "DELETE",
// "id": "logging.organizations.sinks.delete",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^organizations/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+sinkName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
// method id "logging.organizations.sinks.get":
type OrganizationsSinksGetCall struct {
s *Service
sinkName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a sink.
func (r *OrganizationsSinksService) Get(sinkName string) *OrganizationsSinksGetCall {
c := &OrganizationsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkName = sinkName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OrganizationsSinksGetCall) Fields(s ...googleapi.Field) *OrganizationsSinksGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OrganizationsSinksGetCall) IfNoneMatch(entityTag string) *OrganizationsSinksGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OrganizationsSinksGetCall) Context(ctx context.Context) *OrganizationsSinksGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OrganizationsSinksGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OrganizationsSinksGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.organizations.sinks.get" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *OrganizationsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a sink.",
// "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}",
// "httpMethod": "GET",
// "id": "logging.organizations.sinks.get",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^organizations/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+sinkName}",
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// method id "logging.organizations.sinks.list":
type OrganizationsSinksListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists sinks.
func (r *OrganizationsSinksService) List(parent string) *OrganizationsSinksListCall {
c := &OrganizationsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *OrganizationsSinksListCall) PageSize(pageSize int64) *OrganizationsSinksListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *OrganizationsSinksListCall) PageToken(pageToken string) *OrganizationsSinksListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OrganizationsSinksListCall) Fields(s ...googleapi.Field) *OrganizationsSinksListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OrganizationsSinksListCall) IfNoneMatch(entityTag string) *OrganizationsSinksListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OrganizationsSinksListCall) Context(ctx context.Context) *OrganizationsSinksListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OrganizationsSinksListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OrganizationsSinksListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.organizations.sinks.list" call.
// Exactly one of *ListSinksResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListSinksResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *OrganizationsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListSinksResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists sinks.",
// "flatPath": "v2/organizations/{organizationsId}/sinks",
// "httpMethod": "GET",
// "id": "logging.organizations.sinks.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^organizations/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/sinks",
// "response": {
// "$ref": "ListSinksResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *OrganizationsSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
// method id "logging.organizations.sinks.update":
type OrganizationsSinksUpdateCall struct {
s *Service
sinkNameid string
logsink *LogSink
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a sink. If the named sink doesn't exist, then this
// method is identical to sinks.create. If the named sink does exist,
// then this method replaces the following fields in the existing sink
// with values from the new sink: destination, filter,
// output_version_format, start_time, and end_time. The updated filter
// might also have a new writer_identity; see the unique_writer_identity
// field.
func (r *OrganizationsSinksService) Update(sinkNameid string, logsink *LogSink) *OrganizationsSinksUpdateCall {
c := &OrganizationsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkNameid = sinkNameid
c.logsink = logsink
return c
}
// UniqueWriterIdentity sets the optional parameter
// "uniqueWriterIdentity": See sinks.create for a description of this
// field. When updating a sink, the effect of this field on the value of
// writer_identity in the updated sink depends on both the old and new
// values of this field:
// If the old and new values of this field are both false or both true,
// then there is no change to the sink's writer_identity.
// If the old value is false and the new value is true, then
// writer_identity is changed to a unique service account.
// It is an error if the old value is true and the new value is false.
func (c *OrganizationsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *OrganizationsSinksUpdateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OrganizationsSinksUpdateCall) Fields(s ...googleapi.Field) *OrganizationsSinksUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OrganizationsSinksUpdateCall) Context(ctx context.Context) *OrganizationsSinksUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OrganizationsSinksUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OrganizationsSinksUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkNameid,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.organizations.sinks.update" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *OrganizationsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.",
// "flatPath": "v2/organizations/{organizationsId}/sinks/{sinksId}",
// "httpMethod": "PUT",
// "id": "logging.organizations.sinks.update",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^organizations/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v2/{+sinkName}",
// "request": {
// "$ref": "LogSink"
// },
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
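// Usage sketch (not generated code): finishing an organization-level sink
// update while opting in to a unique writer identity. The call value is
// assumed to come from the OrganizationsSinksService's Update constructor
// defined earlier in this file; IDs in real use are caller-supplied.
func exampleFinishOrganizationSinkUpdate(ctx context.Context, call *OrganizationsSinksUpdateCall) (*LogSink, error) {
	// Request a dedicated service account as the sink's writer_identity; per
	// the field documentation above, switching back from true to false is an error.
	return call.UniqueWriterIdentity(true).Context(ctx).Do()
}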
// method id "logging.projects.logs.delete":
type ProjectsLogsDeleteCall struct {
s *Service
logName string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes all the log entries in a log. The log reappears if it
// receives new entries. Log entries written shortly before the delete
// operation might not be deleted.
func (r *ProjectsLogsService) Delete(logName string) *ProjectsLogsDeleteCall {
c := &ProjectsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.logName = logName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLogsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLogsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLogsDeleteCall) Context(ctx context.Context) *ProjectsLogsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLogsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLogsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+logName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"logName": c.logName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.logs.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.",
// "flatPath": "v2/projects/{projectsId}/logs/{logsId}",
// "httpMethod": "DELETE",
// "id": "logging.projects.logs.delete",
// "parameterOrder": [
// "logName"
// ],
// "parameters": {
// "logName": {
// "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.",
// "location": "path",
// "pattern": "^projects/[^/]+/logs/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+logName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
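// Usage sketch (not generated code): deleting a single log by its
// URL-encoded resource name. The project and log IDs are hypothetical; per
// the description above, entries written shortly before the call might not
// be removed.
func exampleDeleteProjectLog(ctx context.Context, logs *ProjectsLogsService) error {
	_, err := logs.Delete("projects/my-project-id/logs/syslog").Context(ctx).Do()
	return err
}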
// method id "logging.projects.logs.list":
type ProjectsLogsListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the logs in projects, organizations, folders, or billing
// accounts. Only logs that have entries are listed.
func (r *ProjectsLogsService) List(parent string) *ProjectsLogsListCall {
c := &ProjectsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *ProjectsLogsListCall) PageSize(pageSize int64) *ProjectsLogsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *ProjectsLogsListCall) PageToken(pageToken string) *ProjectsLogsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLogsListCall) Fields(s ...googleapi.Field) *ProjectsLogsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLogsListCall) IfNoneMatch(entityTag string) *ProjectsLogsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLogsListCall) Context(ctx context.Context) *ProjectsLogsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLogsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLogsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/logs")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.logs.list" call.
// Exactly one of *ListLogsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLogsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListLogsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.",
// "flatPath": "v2/projects/{projectsId}/logs",
// "httpMethod": "GET",
// "id": "logging.projects.logs.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/logs",
// "response": {
// "$ref": "ListLogsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
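// Usage sketch (not generated code): paging through every log name in a
// project. The parent and page size are hypothetical, and the LogNames
// field is assumed from the v2 discovery document; Pages repeatedly calls
// Do and stops once NextPageToken is empty.
func examplePrintProjectLogNames(ctx context.Context, logs *ProjectsLogsService) error {
	return logs.List("projects/my-project-id").
		PageSize(100).
		Pages(ctx, func(page *ListLogsResponse) error {
			for _, name := range page.LogNames { // LogNames assumed per discovery doc
				fmt.Println(name)
			}
			return nil
		})
}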
// method id "logging.projects.metrics.create":
type ProjectsMetricsCreateCall struct {
s *Service
parent string
logmetric *LogMetric
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a logs-based metric.
func (r *ProjectsMetricsService) Create(parent string, logmetric *LogMetric) *ProjectsMetricsCreateCall {
c := &ProjectsMetricsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.logmetric = logmetric
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricsCreateCall) Fields(s ...googleapi.Field) *ProjectsMetricsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricsCreateCall) Context(ctx context.Context) *ProjectsMetricsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/metrics")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.metrics.create" call.
// Exactly one of *LogMetric or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *LogMetric.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsMetricsCreateCall) Do(opts ...googleapi.CallOption) (*LogMetric, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogMetric{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a logs-based metric.",
// "flatPath": "v2/projects/{projectsId}/metrics",
// "httpMethod": "POST",
// "id": "logging.projects.metrics.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/metrics",
// "request": {
// "$ref": "LogMetric"
// },
// "response": {
// "$ref": "LogMetric"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.write"
// ]
// }
}
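// Usage sketch (not generated code): creating a logs-based metric that
// counts high-severity entries. The parent, metric name, and filter are
// hypothetical, and the LogMetric field names (Name, Filter) are assumed
// from the v2 API surface rather than shown in this excerpt.
func exampleCreateSeverityMetric(ctx context.Context, metrics *ProjectsMetricsService) (*LogMetric, error) {
	m := &LogMetric{
		Name:   "severe_errors",   // assumed field: metric identifier
		Filter: "severity>=ERROR", // assumed field: advanced logs filter
	}
	return metrics.Create("projects/my-project-id", m).Context(ctx).Do()
}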
// method id "logging.projects.metrics.delete":
type ProjectsMetricsDeleteCall struct {
s *Service
metricName string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a logs-based metric.
func (r *ProjectsMetricsService) Delete(metricName string) *ProjectsMetricsDeleteCall {
c := &ProjectsMetricsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.metricName = metricName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricsDeleteCall) Fields(s ...googleapi.Field) *ProjectsMetricsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricsDeleteCall) Context(ctx context.Context) *ProjectsMetricsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+metricName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"metricName": c.metricName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.metrics.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsMetricsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a logs-based metric.",
// "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}",
// "httpMethod": "DELETE",
// "id": "logging.projects.metrics.delete",
// "parameterOrder": [
// "metricName"
// ],
// "parameters": {
// "metricName": {
// "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n",
// "location": "path",
// "pattern": "^projects/[^/]+/metrics/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+metricName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.write"
// ]
// }
}
// method id "logging.projects.metrics.get":
type ProjectsMetricsGetCall struct {
s *Service
metricName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a logs-based metric.
func (r *ProjectsMetricsService) Get(metricName string) *ProjectsMetricsGetCall {
c := &ProjectsMetricsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.metricName = metricName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricsGetCall) Fields(s ...googleapi.Field) *ProjectsMetricsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsMetricsGetCall) IfNoneMatch(entityTag string) *ProjectsMetricsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricsGetCall) Context(ctx context.Context) *ProjectsMetricsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+metricName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"metricName": c.metricName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.metrics.get" call.
// Exactly one of *LogMetric or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *LogMetric.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsMetricsGetCall) Do(opts ...googleapi.CallOption) (*LogMetric, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogMetric{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a logs-based metric.",
// "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}",
// "httpMethod": "GET",
// "id": "logging.projects.metrics.get",
// "parameterOrder": [
// "metricName"
// ],
// "parameters": {
// "metricName": {
// "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n",
// "location": "path",
// "pattern": "^projects/[^/]+/metrics/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+metricName}",
// "response": {
// "$ref": "LogMetric"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
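// Usage sketch (not generated code): a conditional read of a logs-based
// metric. The metric name is hypothetical and lastETag is whatever ETag a
// previous response carried; googleapi.IsNotModified separates "unchanged"
// from a genuine failure, as the Do documentation above explains.
func exampleGetMetricIfChanged(ctx context.Context, metrics *ProjectsMetricsService, lastETag string) (*LogMetric, error) {
	m, err := metrics.Get("projects/my-project-id/metrics/severe_errors").
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// The cached copy is still current; no new metric to return.
		return nil, nil
	}
	return m, err
}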
// method id "logging.projects.metrics.list":
type ProjectsMetricsListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists logs-based metrics.
func (r *ProjectsMetricsService) List(parent string) *ProjectsMetricsListCall {
c := &ProjectsMetricsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *ProjectsMetricsListCall) PageSize(pageSize int64) *ProjectsMetricsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *ProjectsMetricsListCall) PageToken(pageToken string) *ProjectsMetricsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricsListCall) Fields(s ...googleapi.Field) *ProjectsMetricsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsMetricsListCall) IfNoneMatch(entityTag string) *ProjectsMetricsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricsListCall) Context(ctx context.Context) *ProjectsMetricsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/metrics")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.metrics.list" call.
// Exactly one of *ListLogMetricsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLogMetricsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsMetricsListCall) Do(opts ...googleapi.CallOption) (*ListLogMetricsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListLogMetricsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists logs-based metrics.",
// "flatPath": "v2/projects/{projectsId}/metrics",
// "httpMethod": "GET",
// "id": "logging.projects.metrics.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/metrics",
// "response": {
// "$ref": "ListLogMetricsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsMetricsListCall) Pages(ctx context.Context, f func(*ListLogMetricsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
// method id "logging.projects.metrics.update":
type ProjectsMetricsUpdateCall struct {
s *Service
metricName string
logmetric *LogMetric
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Creates or updates a logs-based metric.
func (r *ProjectsMetricsService) Update(metricName string, logmetric *LogMetric) *ProjectsMetricsUpdateCall {
c := &ProjectsMetricsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.metricName = metricName
c.logmetric = logmetric
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricsUpdateCall) Fields(s ...googleapi.Field) *ProjectsMetricsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricsUpdateCall) Context(ctx context.Context) *ProjectsMetricsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+metricName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"metricName": c.metricName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.metrics.update" call.
// Exactly one of *LogMetric or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *LogMetric.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsMetricsUpdateCall) Do(opts ...googleapi.CallOption) (*LogMetric, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogMetric{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates or updates a logs-based metric.",
// "flatPath": "v2/projects/{projectsId}/metrics/{metricsId}",
// "httpMethod": "PUT",
// "id": "logging.projects.metrics.update",
// "parameterOrder": [
// "metricName"
// ],
// "parameters": {
// "metricName": {
// "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.",
// "location": "path",
// "pattern": "^projects/[^/]+/metrics/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+metricName}",
// "request": {
// "$ref": "LogMetric"
// },
// "response": {
// "$ref": "LogMetric"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.write"
// ]
// }
}
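// Usage sketch (not generated code): upserting a logs-based metric. Per the
// method description, a new metric is created when none exists under the
// given name; the names and filter are hypothetical and the LogMetric field
// names are assumed from the v2 API surface.
func exampleUpsertSeverityMetric(ctx context.Context, metrics *ProjectsMetricsService) (*LogMetric, error) {
	m := &LogMetric{
		Name:   "severe_errors",
		Filter: "severity>=CRITICAL",
	}
	return metrics.Update("projects/my-project-id/metrics/severe_errors", m).
		Context(ctx).
		Do()
}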
// method id "logging.projects.sinks.create":
type ProjectsSinksCreateCall struct {
s *Service
parent string
logsink *LogSink
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a sink that exports specified log entries to a
// destination. The export of newly-ingested log entries begins
// immediately, unless the current time is outside the sink's start and
// end times or the sink's writer_identity is not permitted to write to
// the destination. A sink can export log entries only from the resource
// owning the sink.
func (r *ProjectsSinksService) Create(parent string, logsink *LogSink) *ProjectsSinksCreateCall {
c := &ProjectsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.logsink = logsink
return c
}
// UniqueWriterIdentity sets the optional parameter
// "uniqueWriterIdentity": Determines the kind of IAM identity returned
// as writer_identity in the new sink. If this value is omitted or set
// to false, and if the sink's parent is a project, then the value
// returned as writer_identity is the same group or service account used
// by Stackdriver Logging before the addition of writer identities to
// this API. The sink's destination must be in the same project as the
// sink itself. If this field is set to true, or if the sink is owned by
// a non-project resource such as an organization, then the value of
// writer_identity will be a unique service account used only for
// exports from the new sink. For more information, see writer_identity
// in LogSink.
func (c *ProjectsSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksCreateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsSinksCreateCall) Fields(s ...googleapi.Field) *ProjectsSinksCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsSinksCreateCall) Context(ctx context.Context) *ProjectsSinksCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsSinksCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsSinksCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.sinks.create" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.",
// "flatPath": "v2/projects/{projectsId}/sinks",
// "httpMethod": "POST",
// "id": "logging.projects.sinks.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v2/{+parent}/sinks",
// "request": {
// "$ref": "LogSink"
// },
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
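// Usage sketch (not generated code): creating a project-level sink that
// exports matching entries to a Cloud Storage bucket and requesting a
// unique writer identity. The project, bucket, and filter are hypothetical,
// and the LogSink field names (Name, Destination, Filter) are assumed from
// the sink fields named in the descriptions above.
func exampleCreateStorageSink(ctx context.Context, sinks *ProjectsSinksService) (*LogSink, error) {
	sink := &LogSink{
		Name:        "my-sink-id",
		Destination: "storage.googleapis.com/my-logs-bucket", // assumed destination format
		Filter:      "severity>=WARNING",
	}
	return sinks.Create("projects/my-logging-project", sink).
		UniqueWriterIdentity(true).
		Context(ctx).
		Do()
}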
// method id "logging.projects.sinks.delete":
type ProjectsSinksDeleteCall struct {
s *Service
sinkNameid string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a sink. If the sink has a unique writer_identity,
// then that service account is also deleted.
func (r *ProjectsSinksService) Delete(sinkNameid string) *ProjectsSinksDeleteCall {
c := &ProjectsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkNameid = sinkNameid
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsSinksDeleteCall) Fields(s ...googleapi.Field) *ProjectsSinksDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsSinksDeleteCall) Context(ctx context.Context) *ProjectsSinksDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsSinksDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsSinksDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkNameid,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.sinks.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.",
// "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}",
// "httpMethod": "DELETE",
// "id": "logging.projects.sinks.delete",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^projects/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+sinkName}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
// method id "logging.projects.sinks.get":
type ProjectsSinksGetCall struct {
s *Service
sinkName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a sink.
func (r *ProjectsSinksService) Get(sinkName string) *ProjectsSinksGetCall {
c := &ProjectsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkName = sinkName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsSinksGetCall) Fields(s ...googleapi.Field) *ProjectsSinksGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsSinksGetCall) IfNoneMatch(entityTag string) *ProjectsSinksGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsSinksGetCall) Context(ctx context.Context) *ProjectsSinksGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsSinksGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsSinksGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.sinks.get" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a sink.",
// "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}",
// "httpMethod": "GET",
// "id": "logging.projects.sinks.get",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^projects/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+sinkName}",
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// method id "logging.projects.sinks.list":
type ProjectsSinksListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists sinks.
func (r *ProjectsSinksService) List(parent string) *ProjectsSinksListCall {
c := &ProjectsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return from this request. Non-positive values are
// ignored. The presence of nextPageToken in the response indicates that
// more results might be available.
func (c *ProjectsSinksListCall) PageSize(pageSize int64) *ProjectsSinksListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If present, then
// retrieve the next batch of results from the preceding call to this
// method. pageToken must be the value of nextPageToken from the
// previous response. The values of other method parameters should be
// identical to those in the previous call.
func (c *ProjectsSinksListCall) PageToken(pageToken string) *ProjectsSinksListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsSinksListCall) Fields(s ...googleapi.Field) *ProjectsSinksListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsSinksListCall) IfNoneMatch(entityTag string) *ProjectsSinksListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsSinksListCall) Context(ctx context.Context) *ProjectsSinksListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsSinksListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsSinksListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/sinks")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.sinks.list" call.
// Exactly one of *ListSinksResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListSinksResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListSinksResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists sinks.",
// "flatPath": "v2/projects/{projectsId}/sinks",
// "httpMethod": "GET",
// "id": "logging.projects.sinks.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/sinks",
// "response": {
// "$ref": "ListSinksResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/logging.admin",
// "https://www.googleapis.com/auth/logging.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
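// Usage sketch (not generated code): paging through a project's sinks while
// requesting a partial response. The parent is hypothetical; the field
// selector and the Sinks/Name fields are assumed from the v2 discovery
// document, and nextPageToken is kept in the selector so paging still works.
func exampleListSinkNames(ctx context.Context, sinks *ProjectsSinksService) error {
	return sinks.List("projects/my-project-id").
		Fields("sinks.name", "nextPageToken").
		Pages(ctx, func(page *ListSinksResponse) error {
			for _, s := range page.Sinks { // Sinks assumed per discovery doc
				fmt.Println(s.Name)
			}
			return nil
		})
}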
// method id "logging.projects.sinks.update":
type ProjectsSinksUpdateCall struct {
s *Service
sinkNameid string
logsink *LogSink
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a sink. If the named sink doesn't exist, then this
// method is identical to sinks.create. If the named sink does exist,
// then this method replaces the following fields in the existing sink
// with values from the new sink: destination, filter,
// output_version_format, start_time, and end_time. The updated filter
// might also have a new writer_identity; see the unique_writer_identity
// field.
func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *ProjectsSinksUpdateCall {
c := &ProjectsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.sinkNameid = sinkNameid
c.logsink = logsink
return c
}
// UniqueWriterIdentity sets the optional parameter
// "uniqueWriterIdentity": See sinks.create for a description of this
// field. When updating a sink, the effect of this field on the value of
// writer_identity in the updated sink depends on both the old and new
// values of this field:
// If the old and new values of this field are both false or both true,
// then there is no change to the sink's writer_identity.
// If the old value is false and the new value is true, then
// writer_identity is changed to a unique service account.
// It is an error if the old value is true and the new value is false.
func (c *ProjectsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksUpdateCall {
c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsSinksUpdateCall) Fields(s ...googleapi.Field) *ProjectsSinksUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsSinksUpdateCall) Context(ctx context.Context) *ProjectsSinksUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsSinksUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsSinksUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+sinkName}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"sinkName": c.sinkNameid,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "logging.projects.sinks.update" call.
// Exactly one of *LogSink or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *LogSink.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &LogSink{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.",
// "flatPath": "v2/projects/{projectsId}/sinks/{sinksId}",
// "httpMethod": "PUT",
// "id": "logging.projects.sinks.update",
// "parameterOrder": [
// "sinkName"
// ],
// "parameters": {
// "sinkName": {
// "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".",
// "location": "path",
// "pattern": "^projects/[^/]+/sinks/[^/]+$",
// "required": true,
// "type": "string"
// },
// "uniqueWriterIdentity": {
// "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v2/{+sinkName}",
// "request": {
// "$ref": "LogSink"
// },
// "response": {
// "$ref": "LogSink"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/logging.admin"
// ]
// }
}
| NewOrganizationsSinksService |
mlnx_db_v2.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.orm import exc
from neutron.common import exceptions as q_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import config # noqa
from neutron.plugins.mlnx.db import mlnx_models_v2
LOG = logging.getLogger(__name__)
def initialize():
db.configure_db()
def _remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids):
|
def _add_missing_allocatable_vlans(session, physical_network, vlan_ids):
for vlan_id in sorted(vlan_ids):
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
vlan_id)
session.add(entry)
def _remove_unconfigured_vlans(session, allocations):
for entries in allocations.itervalues():
for entry in entries:
if not entry.allocated:
LOG.debug(_("Removing vlan %(seg_id)s on physical "
"network %(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
session.delete(entry)
def sync_network_states(network_vlan_ranges):
"""Synchronize network_states table with current configured VLAN ranges."""
session = db.get_session()
with session.begin():
# get existing allocations for all physical networks
allocations = dict()
entries = (session.query(mlnx_models_v2.SegmentationIdAllocation).
all())
for entry in entries:
allocations.setdefault(entry.physical_network, set()).add(entry)
# process vlan ranges for each configured physical network
for physical_network, vlan_ranges in network_vlan_ranges.iteritems():
# determine current configured allocatable vlans for this
# physical network
vlan_ids = set()
for vlan_range in vlan_ranges:
vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))
# remove from table unallocated vlans not currently allocatable
_remove_non_allocatable_vlans(session, allocations,
physical_network, vlan_ids)
# add missing allocatable vlans to table
_add_missing_allocatable_vlans(session, physical_network, vlan_ids)
# remove from table unallocated vlans for any unconfigured physical
# networks
_remove_unconfigured_vlans(session, allocations)
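# Illustrative sketch (not part of the original plugin): how a configured
# network_vlan_ranges mapping, e.g. parsed from the plugin config, expands
# into the per-physical-network sets of allocatable VLAN ids that
# sync_network_states() reconciles against the database table. The physnet
# name and ranges below are made-up examples.
def _example_allocatable_vlan_ids():
    network_vlan_ranges = {'physnet1': [(100, 103), (200, 201)]}
    allocatable = {}
    for physical_network, vlan_ranges in network_vlan_ranges.items():
        vlan_ids = set()
        for vlan_range in vlan_ranges:
            vlan_ids |= set(range(vlan_range[0], vlan_range[1] + 1))
        allocatable[physical_network] = vlan_ids
    # -> {'physnet1': set([100, 101, 102, 103, 200, 201])}
    return allocatable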
def get_network_state(physical_network, segmentation_id):
"""Get entry of specified network."""
session = db.get_session()
qry = session.query(mlnx_models_v2.SegmentationIdAllocation)
qry = qry.filter_by(physical_network=physical_network,
segmentation_id=segmentation_id)
return qry.first()
def reserve_network(session):
with session.begin(subtransactions=True):
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if not entry:
raise q_exc.NoNetworkAvailable()
LOG.debug(_("Reserving vlan %(seg_id)s on physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': entry.physical_network})
entry.allocated = True
return (entry.physical_network, entry.segmentation_id)
def reserve_specific_network(session, physical_network, segmentation_id):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
entry = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
if entry.allocated:
raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
physical_network=physical_network)
LOG.debug(_("Reserving specific vlan %(seg_id)s "
"on physical network %(phy_net)s from pool"),
log_args)
entry.allocated = True
except exc.NoResultFound:
LOG.debug(_("Reserving specific vlan %(seg_id)s on "
"physical network %(phy_net)s outside pool"),
log_args)
entry = mlnx_models_v2.SegmentationIdAllocation(physical_network,
segmentation_id)
entry.allocated = True
session.add(entry)
def release_network(session, physical_network,
segmentation_id, network_vlan_ranges):
with session.begin(subtransactions=True):
log_args = {'seg_id': segmentation_id, 'phy_net': physical_network}
try:
state = (session.query(mlnx_models_v2.SegmentationIdAllocation).
filter_by(physical_network=physical_network,
segmentation_id=segmentation_id).
with_lockmode('update').
one())
state.allocated = False
inside = False
for vlan_range in network_vlan_ranges.get(physical_network, []):
if (segmentation_id >= vlan_range[0] and
segmentation_id <= vlan_range[1]):
inside = True
break
if inside:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s to pool"),
log_args)
else:
LOG.debug(_("Releasing vlan %(seg_id)s "
"on physical network "
"%(phy_net)s outside pool"),
log_args)
session.delete(state)
except exc.NoResultFound:
LOG.warning(_("vlan_id %(seg_id)s on physical network "
"%(phy_net)s not found"),
log_args)
def add_network_binding(session, network_id, network_type,
physical_network, vlan_id):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.NetworkBinding(network_id, network_type,
physical_network, vlan_id)
session.add(binding)
def get_network_binding(session, network_id):
return (session.query(mlnx_models_v2.NetworkBinding).
filter_by(network_id=network_id).first())
def add_port_profile_binding(session, port_id, vnic_type):
with session.begin(subtransactions=True):
binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type)
session.add(binding)
def get_port_profile_binding(session, port_id):
return (session.query(mlnx_models_v2.PortProfileBinding).
filter_by(port_id=port_id).first())
def get_port_from_device(device):
"""Get port from database."""
LOG.debug(_("get_port_from_device() called"))
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(device))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_in_db, sg_id in port_and_sgs if sg_id
]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
def get_port_from_device_mac(device_mac):
"""Get port from database."""
LOG.debug(_("Get_port_from_device_mac() called"))
session = db.get_session()
qry = session.query(models_v2.Port).filter_by(mac_address=device_mac)
return qry.first()
def set_port_status(port_id, status):
"""Set the port status."""
LOG.debug(_("Set_port_status as %s called"), status)
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.merge(port)
session.flush()
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
| if physical_network in allocations:
for entry in allocations[physical_network]:
try:
# see if vlan is allocatable
vlan_ids.remove(entry.segmentation_id)
except KeyError:
# it's not allocatable, so check if its allocated
if not entry.allocated:
# it's not, so remove it from table
LOG.debug(_(
"Removing vlan %(seg_id)s on "
"physical network "
"%(net)s from pool"),
{'seg_id': entry.segmentation_id,
'net': physical_network})
session.delete(entry)
del allocations[physical_network] |
FocusDetection.tsx | import React, { useContext, createContext } from 'react';
import { observer } from 'mobx-react';
import { Button, Radio, Input, Select, Switch } from 'hubble-ui';
import { useTranslation } from 'react-i18next';
import { styles } from '../QueryAndAlgorithmLibrary';
import { Tooltip as CustomTooltip } from '../../../common';
import DataAnalyzeStore from '../../../../stores/GraphManagementStore/dataAnalyzeStore/dataAnalyzeStore';
import QuestionMarkIcon from '../../../../assets/imgs/ic_question_mark.svg';
const FocusDetection = observer(() => {
const { t } = useTranslation();
const dataAnalyzeStore = useContext(DataAnalyzeStore);
const algorithmAnalyzerStore = dataAnalyzeStore.algorithmAnalyzerStore;
const isValidExec =
Object.values(
algorithmAnalyzerStore.validateFocusDetectionParamsErrorMessage
).every((value) => value === '') &&
algorithmAnalyzerStore.focusDetectionParams.source !== '' &&
algorithmAnalyzerStore.focusDetectionParams.target !== '' &&
algorithmAnalyzerStore.focusDetectionParams.max_depth !== '';
return (
<div className="query-tab-content-form">
<div className="query-tab-content-form-row">
<div className="query-tab-content-form-item">
<div className="query-tab-content-form-item-title">
<i>*</i>
<span>
{t('data-analyze.algorithm-forms.focus-detection.options.source')}
</span>
</div>
<Input
width={400}
size="medium"
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
placeholder={t(
'data-analyze.algorithm-forms.focus-detection.placeholder.input-source-id'
)}
errorLocation="layer"
errorMessage={
algorithmAnalyzerStore.validateFocusDetectionParamsErrorMessage
.source
}
value={algorithmAnalyzerStore.focusDetectionParams.source}
onChange={(e: any) => {
algorithmAnalyzerStore.mutateFocusDetectionParams(
'source',
e.value as string
);
algorithmAnalyzerStore.validateFocusDetectionParams('source');
}}
originInputProps={{
onBlur() {
algorithmAnalyzerStore.validateFocusDetectionParams('source');
}
}}
/>
</div>
<div className="query-tab-content-form-item">
<div className="query-tab-content-form-item-title">
<span>
{t('data-analyze.algorithm-forms.focus-detection.options.label')}
</span>
</div>
<Select
size="medium"
trigger="click"
value={algorithmAnalyzerStore.focusDetectionParams.label}
notFoundContent={t(
'data-analyze.algorithm-forms.focus-detection.placeholder.no-edge-types'
)}
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
width={400}
onChange={(value: string) => {
algorithmAnalyzerStore.mutateFocusDetectionParams('label', value);
}}
>
<Select.Option value="__all__" key="__all__">
{t('data-analyze.algorithm-forms.focus-detection.pre-value')}
</Select.Option>
{dataAnalyzeStore.edgeTypes.map(({ name }) => (
<Select.Option value={name} key={name}>
{name}
</Select.Option>
))}
</Select>
</div>
</div>
<div className="query-tab-content-form-row">
<div className="query-tab-content-form-item">
<div className="query-tab-content-form-item-title">
<i>*</i>
<span>
{t('data-analyze.algorithm-forms.focus-detection.options.target')}
</span>
</div>
<Input
width={400}
size="medium"
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
placeholder={t(
'data-analyze.algorithm-forms.focus-detection.placeholder.input-target-id'
)}
errorLocation="layer"
errorMessage={
algorithmAnalyzerStore.validateFocusDetectionParamsErrorMessage
.target
}
value={algorithmAnalyzerStore.focusDetectionParams.target}
onChange={(e: any) => {
algorithmAnalyzerStore.mutateFocusDetectionParams(
'target',
e.value as string
);
algorithmAnalyzerStore.validateFocusDetectionParams('target');
}}
originInputProps={{
onBlur() {
algorithmAnalyzerStore.validateFocusDetectionParams('target');
}
}}
/>
</div>
<div className="query-tab-content-form-item">
<div className="query-tab-content-form-item-title">
<span>
{t(
'data-analyze.algorithm-forms.focus-detection.options.max_degree'
)}
</span>
</div>
<Input
width={400}
size="medium"
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
placeholder={t(
'data-analyze.algorithm-forms.focus-detection.placeholder.input-integer'
)}
errorLocation="layer"
errorMessage={
algorithmAnalyzerStore.validateFocusDetectionParamsErrorMessage
.max_degree
}
value={algorithmAnalyzerStore.focusDetectionParams.max_degree}
onChange={(e: any) => {
algorithmAnalyzerStore.mutateFocusDetectionParams(
'max_degree',
e.value as string
);
algorithmAnalyzerStore.validateFocusDetectionParams('max_degree');
}}
originInputProps={{
onBlur() {
algorithmAnalyzerStore.validateFocusDetectionParams(
'max_degree'
);
}
}}
/>
</div>
</div>
<div className="query-tab-content-form-row">
<div className="query-tab-content-form-item">
<div className="query-tab-content-form-item-title">
<i>*</i>
<span>
{t(
'data-analyze.algorithm-forms.focus-detection.options.direction'
)}
</span>
</div>
<Radio.Group
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
value={algorithmAnalyzerStore.focusDetectionParams.direction}
onChange={(e: React.ChangeEvent<HTMLSelectElement>) => {
algorithmAnalyzerStore.mutateFocusDetectionParams(
'direction',
e.target.value
);
}}
>
<Radio value="BOTH">both</Radio>
<Radio value="OUT">out</Radio>
<Radio value="IN">in</Radio>
</Radio.Group>
</div>
<div className="query-tab-content-form-item">
<div className="query-tab-content-form-item-title">
<span>
{t('data-analyze.algorithm-forms.focus-detection.options.limit')}
</span>
</div>
<Input
width={400}
size="medium"
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
placeholder={t(
'data-analyze.algorithm-forms.focus-detection.placeholder.input-positive-integer'
)}
errorLocation="layer"
errorMessage={
algorithmAnalyzerStore.validateFocusDetectionParamsErrorMessage
.limit
}
value={algorithmAnalyzerStore.focusDetectionParams.limit}
onChange={(e: any) => {
algorithmAnalyzerStore.mutateFocusDetectionParams(
'limit',
e.value as string
);
algorithmAnalyzerStore.validateFocusDetectionParams('limit');
}}
originInputProps={{
onBlur() {
algorithmAnalyzerStore.validateFocusDetectionParams('limit');
}
}}
/>
</div>
</div>
<div className="query-tab-content-form-row">
<div className="query-tab-content-form-item">
<div className="query-tab-content-form-item-title">
<i>*</i>
<span>
{t(
'data-analyze.algorithm-forms.focus-detection.options.max_depth'
)}
</span>
<CustomTooltip
trigger="hover"
placement="bottom-start"
modifiers={{
offset: {
offset: '0, 8'
}
}}
tooltipWrapperProps={{
className: 'tooltips-dark',
style: {
zIndex: 7
}
}}
tooltipWrapper={t(
'data-analyze.algorithm-forms.focus-detection.hint.max-depth'
)}
childrenProps={{
src: QuestionMarkIcon,
alt: 'hint',
style: {
marginLeft: 5
}
}}
childrenWrapperElement="img"
/>
</div>
<Input
width={400}
size="medium"
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
placeholder={t(
'data-analyze.algorithm-forms.focus-detection.placeholder.input-positive-integer'
)}
errorLocation="layer"
errorMessage={
algorithmAnalyzerStore.validateFocusDetectionParamsErrorMessage
.max_depth
}
value={algorithmAnalyzerStore.focusDetectionParams.max_depth}
onChange={(e: any) => {
algorithmAnalyzerStore.mutateFocusDetectionParams(
'max_depth',
e.value as string
);
algorithmAnalyzerStore.validateFocusDetectionParams('max_depth');
}}
originInputProps={{
onBlur() {
algorithmAnalyzerStore.validateFocusDetectionParams(
'max_depth'
);
}
}}
/>
</div>
<div className="query-tab-content-form-item">
<div className="query-tab-content-form-item-title">
<span>
{t(
'data-analyze.algorithm-forms.focus-detection.options.capacity'
)}
</span>
</div>
<Input
width={400}
size="medium"
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
placeholder={t(
'data-analyze.algorithm-forms.focus-detection.placeholder.input-positive-integer'
)}
errorLocation="layer"
errorMessage={
algorithmAnalyzerStore.validateFocusDetectionParamsErrorMessage
.capacity
}
value={algorithmAnalyzerStore.focusDetectionParams.capacity}
onChange={(e: any) => {
algorithmAnalyzerStore.mutateFocusDetectionParams(
'capacity',
e.value as string
);
algorithmAnalyzerStore.validateFocusDetectionParams('capacity');
}}
originInputProps={{
onBlur() {
algorithmAnalyzerStore.validateFocusDetectionParams('capacity');
}
}}
/>
</div>
</div>
<div
className="query-tab-content-form-row"
style={{ marginLeft: 92, justifyContent: 'flex-start' }}
>
<Button
type="primary"
style={styles.primaryButton}
disabled={
!isValidExec ||
dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'
}
onClick={async () => {
algorithmAnalyzerStore.switchCollapse(true);
dataAnalyzeStore.switchGraphLoaded(false);
const timerId = dataAnalyzeStore.addTempExecLog();
await dataAnalyzeStore.fetchGraphs({
url: 'crosspoints',
type: 'focus-detection'
});
await dataAnalyzeStore.fetchExecutionLogs();
window.clearTimeout(timerId); | }}
>
{t('data-analyze.manipulations.execution')}
</Button>
<Button
style={styles.primaryButton}
disabled={dataAnalyzeStore.requestStatus.fetchGraphs === 'pending'}
onClick={() => {
algorithmAnalyzerStore.resetFocusDetectionParams();
}}
>
{t('data-analyze.manipulations.reset')}
</Button>
</div>
</div>
);
});
export default FocusDetection; | |
config.js | module.exports = {
//API: 'http://192.168.1.3:3001/api',
//IMGPath: 'http://192.168.1.3:3001/api/images/',
API: 'http://localhost:3001/api',
IMGPath: 'http://localhost:3001/api/images/',
MSG: {
'authError': 'Authentication Failed.' | },
RecordsPerPage:10
}; |
|
about_classes.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutClasses(Koan):
class Dog(object):
"Dogs need regular walkies. Never, ever let them drive."
def test_instances_of_classes_can_be_created_adding_parentheses(self):
fido = self.Dog()
self.assertEqual(AboutClasses.Dog, fido.__class__)
def test_classes_have_docstrings(self):
self.assertMatch('Dogs', self.Dog.__doc__)
# ------------------------------------------------------------------
class Dog2(object):
def __init__(self):
self._name = 'Paul'
def set_name(self, a_name):
self._name = a_name
def test_init_method_is_the_constructor(self):
dog = self.Dog2()
self.assertEqual('Paul', dog._name)
def test_private_attributes_are_not_really_private(self):
dog = self.Dog2()
dog.set_name("Fido")
self.assertEqual('Fido', dog._name)
# The _ prefix in _name implies private ownership, but nothing is truly
# private in Python.
def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
fido = self.Dog2()
fido.set_name("Fido")
self.assertEqual("Fido", getattr(fido, "_name"))
# getattr(), setattr() and delattr() are a way of accessing attributes
# by method rather than through assignment operators
self.assertEqual("Fido", fido.__dict__["_name"])
# Yes, this works here, but don't rely on the __dict__ object! Some
# class implementations use optimization which result in __dict__ not
# showing everything.
# ------------------------------------------------------------------
class Dog3(object):
def __init__(self):
self._name = None
def set_name(self, a_name):
self._name = a_name
def get_name(self):
return self._name
name = property(get_name, set_name)
def test_that_name_can_be_read_as_a_property(self):
fido = self.Dog3()
fido.set_name("Fido")
self.assertEqual("Fido", fido.get_name()) # access as method
self.assertEqual("Fido", fido.name) # access as property | # ------------------------------------------------------------------
class Dog4(object):
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, a_name):
self._name = a_name
def test_creating_properties_with_decorators_is_slightly_easier(self):
fido = self.Dog4()
fido.name = "Fido"
self.assertEqual('Fido', fido.name)
# ------------------------------------------------------------------
class Dog5(object):
def __init__(self, initial_name):
self._name = initial_name
@property
def name(self):
return self._name
def test_init_provides_initial_values_for_instance_variables(self):
fido = self.Dog5("Fido")
self.assertEqual('Fido', fido.name)
def test_args_must_match_init(self):
self.assertRaises(TypeError, self.Dog5) # Evaluates self.Dog5()
# THINK ABOUT IT:
# Why is this so?
    def test_different_objects_have_different_instance_variables(self):
fido = self.Dog5("Fido")
rover = self.Dog5("Rover")
self.assertEqual(False, rover.name == fido.name)
# ------------------------------------------------------------------
class Dog6(object):
def __init__(self, initial_name):
self._name = initial_name
def get_self(self):
return self
def __str__(self):
return self._name
def __repr__(self):
return "<Dog named '" + self._name + "'>"
def test_inside_a_method_self_refers_to_the_containing_object(self):
fido = self.Dog6("Fido")
self.assertEqual(fido, fido.get_self()) # Not a string!
def test_str_provides_a_string_version_of_the_object(self):
fido = self.Dog6("Fido")
self.assertEqual("Fido", str(fido))
def test_str_is_used_explicitly_in_string_interpolation(self):
fido = self.Dog6("Fido")
self.assertEqual("My dog is Fido", "My dog is " + str(fido))
def test_repr_provides_a_more_complete_string_version(self):
fido = self.Dog6("Fido")
self.assertEqual("<Dog named 'Fido'>", repr(fido))
def test_all_objects_support_str_and_repr(self):
seq = [1, 2, 3]
self.assertEqual('[1, 2, 3]', str(seq))
self.assertEqual('[1, 2, 3]', repr(seq))
self.assertEqual("STRING", str("STRING"))
self.assertEqual("'STRING'", repr("STRING")) | |
actor_critic.py | import torch
import torch.nn as nn
class ValueNet(nn.Module):
|
class ActionNet(nn.Module):
"""
The part of the actor critic network that computes the action value.
"""
def __init__(self, n_action_inputs: int, n_value_hidden: int,
n_action_hidden: int = None):
"""
Takes as input the action features and the hidden values from the value
net. Returns a value for the action.
"""
super(ActionNet, self).__init__()
if n_action_hidden is None:
n_action_hidden = (n_action_inputs + n_value_hidden + 2) // 2
self.hidden = nn.Sequential(
nn.Linear(n_action_inputs + n_value_hidden, n_action_hidden),
nn.ReLU()
)
self.action_value = nn.Linear(n_action_hidden, 1)
def forward(self, action_x, value_hidden):
"""
        Returns the value of the action, computed from the action features and
        the value network's hidden layer values.
"""
x = self.hidden(torch.cat((action_x, value_hidden), 1))
return self.action_value(x)
| """
The part of the actor critic network that computes the state value. Also,
    returns the hidden layer before state valuation, for use in the action network.
"""
def __init__(self, n_inputs: int, n_hidden: int = None):
"""
Specify the number of inputs. Also, specify the number of nodes in each
hidden layer. If no value is provided for the number of hidden, then
it is set to half the number of inputs.
"""
super(ValueNet, self).__init__()
if n_hidden is None:
n_hidden = (n_inputs + 2) // 2
self.n_hidden = n_hidden
self.hidden = nn.Sequential(
nn.Linear(n_inputs, n_hidden),
nn.ReLU()
)
self.value = nn.Linear(n_hidden, 1)
def forward(self, x):
"""
Returns the value of the state and the hidden layer values.
"""
x = self.hidden(x)
return self.value(x), x |
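# Illustrative sketch (not part of the original module): one way to wire
# ValueNet and ActionNet together for a single scoring pass. The feature
# sizes and the batch of candidate actions below are made-up examples.
if __name__ == "__main__":
    value_net = ValueNet(n_inputs=6)
    action_net = ActionNet(n_action_inputs=4, n_value_hidden=value_net.n_hidden)
    state = torch.randn(1, 6)    # one state with 6 features
    actions = torch.randn(3, 4)  # three candidate actions, 4 features each
    state_value, hidden = value_net(state)
    # Reuse the state's hidden activations for every candidate action.
    action_values = action_net(actions, hidden.expand(actions.shape[0], -1))
    print(state_value.shape, action_values.shape)  # (1, 1) and (3, 1)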
family-beneficiaries.class.js | /* eslint-disable no-unused-vars */
const logger = require('winston');
const jsend = require('jsend');
class | {
constructor(options) {
this.options = options || {};
}
async find(params) {
console.log(params.query);
const familyCoverService = this.app.service('families');
const _families = await familyCoverService.find({
query: params.query
});
console.log(_families);
let families = [];
        _families.data.forEach(x => {
            x.familyCovers.forEach(y => {
                y.familyId = x._id;
                families.push(y);
            });
        });
return jsend.success(families);
}
get(id, params) {
return Promise.resolve({
id,
text: `A new message with ID: ${id}!`
});
}
create(data, params) {
const operation = data;
let app = this.app;
return new Promise(function (resolve, reject) {
app.service('families').find({
query: {
'facilityId': operation.facilityId
}
}).then(payload => {
if (payload.data.length > 0) {
let facFamilyCover = payload.data[0];
if (operation.operation === 'update') {
let model = {
filNo: operation.model.filNo,
surname: operation.model.surname,
othernames: operation.model.othernames,
gender: operation.model.gender,
serial: operation.model.serial,
address: operation.model.address,
email: operation.model.email,
phone: operation.model.phone,
status: operation.model.status
};
const indexEnrollee = facFamilyCover.familyCovers.findIndex(x => x.serial === model.serial);
facFamilyCover.familyCovers[indexEnrollee] = model;
operation.dependants.forEach((dependant, i) => {
if (dependant.operation === 'update') {
let model = {
category: 'DEPENDANT',
filNo: dependant.filNo,
othernames: dependant.othernames,
gender: dependant.gender,
serial: dependant.serial,
surname: dependant.surname,
address: operation.model.address,
email: dependant.email,
phone: dependant.phone,
status: dependant.status
};
const indexEnrollee = facFamilyCover.familyCovers.findIndex(x => x.serial === model.serial);
facFamilyCover.familyCovers[indexEnrollee] = model;
} else if (dependant.operation === 'save') {
let model = {
category: 'DEPENDANT',
filNo: operation.model.filNo + String.fromCharCode(65 + i),
othernames: dependant.othernames,
gender: dependant.gender,
serial: facFamilyCover.familyCovers.length + 1,
surname: dependant.surname,
address: operation.model.address,
email: dependant.email,
phone: dependant.phone,
status: dependant.status
};
facFamilyCover.familyCovers.push(model);
}
});
app.service('families').update(facFamilyCover._id, facFamilyCover).then(pay => {
resolve(pay);
});
} else if (operation.operation === 'save') {
let model = {
category: 'PRINCIPAL',
filNo: operation.model.filNo,
othernames: operation.model.othernames,
gender: operation.model.gender,
serial: facFamilyCover.familyCovers.length + 1,
surname: operation.model.surname,
address: operation.model.address,
email: operation.model.email,
phone: operation.model.phone,
status: operation.model.status
};
facFamilyCover.familyCovers.push(model);
operation.dependants.forEach((dependant, i) => {
if (dependant.operation === 'update') {
let model = {
category: 'DEPENDANT',
filNo: dependant.filNo,
othernames: dependant.othernames,
gender: dependant.gender,
serial: dependant.serial,
surname: dependant.surname,
address: operation.model.address,
email: dependant.email,
phone: dependant.phone,
status: dependant.status
};
const indexEnrollee = facFamilyCover.familyCovers.findIndex(x => x.serial === model.serial);
facFamilyCover.familyCovers[indexEnrollee] = model;
} else if (dependant.operation === 'save') {
let model = {
category: 'DEPENDANT',
filNo: operation.model.filNo + String.fromCharCode(65 + i),
othernames: dependant.othernames,
gender: dependant.gender,
serial: facFamilyCover.familyCovers.length + 1,
surname: dependant.surname,
address: operation.model.address,
email: dependant.email,
phone: dependant.phone,
status: dependant.status
};
facFamilyCover.familyCovers.push(model);
}
});
app.service('families').update(facFamilyCover._id, facFamilyCover).then(pay => {
resolve(pay);
});
}
} else {
let familyCover = {
facilityId: operation.facilityId,
familyCovers: []
};
app.service('families').create(familyCover).then(pay => {
let facFamilyCover = pay;
if (operation.operation === 'update') {
let model = {
filNo: operation.model.filNo,
surname: operation.model.surname,
othernames: operation.model.othernames,
gender: operation.model.gender,
serial: operation.model.serial,
address: operation.model.address,
email: operation.model.email,
phone: operation.model.phone,
status: operation.model.status
};
const indexEnrollee = facFamilyCover.familyCovers.findIndex(x => x.serial === model.serial);
facFamilyCover.familyCovers[indexEnrollee] = model;
operation.dependants.forEach((dependant, i) => {
if (dependant.operation === 'update') {
let model = {
category: 'DEPENDANT',
filNo: dependant.filNo,
othernames: dependant.othernames,
gender: dependant.gender,
serial: dependant.serial,
surname: dependant.surname,
address: operation.model.address,
email: dependant.email,
phone: dependant.phone,
status: dependant.status
};
const indexEnrollee = facFamilyCover.familyCovers.findIndex(x => x.serial === model.serial);
facFamilyCover.familyCovers[indexEnrollee] = model;
} else if (dependant.operation === 'save') {
let model = {
category: 'DEPENDANT',
filNo: operation.model.filNo + String.fromCharCode(65 + i),
othernames: dependant.othernames,
gender: dependant.gender,
serial: facFamilyCover.familyCovers.length + 1,
surname: dependant.surname,
address: operation.model.address,
email: dependant.email,
phone: dependant.phone,
status: dependant.status
};
facFamilyCover.familyCovers.push(model);
}
});
app.service('families').update(facFamilyCover._id, facFamilyCover).then(pay => {
resolve(pay);
});
} else if (operation.operation === 'save') {
let model = {
category: 'PRINCIPAL',
filNo: operation.model.filNo,
othernames: operation.model.othernames,
gender: operation.model.gender,
serial: facFamilyCover.familyCovers.length + 1,
surname: operation.model.surname,
address: operation.model.address,
email: operation.model.email,
phone: operation.model.phone,
status: operation.model.status
};
facFamilyCover.familyCovers.push(model);
operation.dependants.forEach((dependant, i) => {
if (dependant.operation === 'update') {
let model = {
category: 'DEPENDANT',
filNo: dependant.filNo,
othernames: dependant.othernames,
gender: dependant.gender,
serial: dependant.serial,
surname: dependant.surname,
address: operation.model.address,
email: dependant.email,
phone: dependant.phone,
status: dependant.status
};
const indexEnrollee = facFamilyCover.familyCovers.findIndex(x => x.serial === model.serial);
facFamilyCover.familyCovers[indexEnrollee] = model;
} else if (dependant.operation === 'save') {
let model = {
category: 'DEPENDANT',
filNo: operation.model.filNo + String.fromCharCode(65 + i),
othernames: dependant.othernames,
gender: dependant.gender,
serial: facFamilyCover.familyCovers.length + 1,
surname: dependant.surname,
address: operation.model.address,
email: dependant.email,
phone: dependant.phone,
status: dependant.status
};
facFamilyCover.familyCovers.push(model);
}
});
app.service('families').update(facFamilyCover._id, facFamilyCover).then(pay => {
resolve(pay);
});
}
});
}
});
});
}
update(id, data, params) {
return Promise.resolve(data);
}
patch(id, data, params) {
return Promise.resolve(data);
}
remove(id, params) {
return Promise.resolve({
id
});
}
setup(app) {
this.app = app;
}
}
module.exports = function (options) {
return new Service(options);
};
module.exports.Service = Service;
| Service |
secure.py | # -*-coding:utf-8-*-
from flask import Flask
__author__ = 'ZeroLoo' |
||
naive_scheduler.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{PresentParameters, PresentationInfo, SchedulingLib},
async_trait::async_trait,
fuchsia_async::Time as fasync_time, | };
// Schedule a frame at each on_next_frame_begin, whether a frame has been requested or not.
//
// Note:
// Does not track present credits. Since it's limited to one Present() call per OnNextFrameBegin()
// event, we're guaranteed not to run out of credits.
pub struct NaiveScheduler {
next_expected_times: Cell<PresentationInfo>,
next_frame_begin: Cell<bool>,
}
impl NaiveScheduler {
pub fn new() -> NaiveScheduler {
let now = fasync_time::now().into_zx();
NaiveScheduler {
next_expected_times: Cell::new(PresentationInfo {
latch_point: now,
presentation_time: now,
}),
next_frame_begin: Cell::new(true),
}
}
}
#[async_trait(?Send)]
impl SchedulingLib for NaiveScheduler {
fn on_next_frame_begin(
&self,
_additional_present_credits: u32,
future_presentation_infos: Vec<PresentationInfo>,
) {
assert!(!future_presentation_infos.is_empty());
self.next_expected_times.set(future_presentation_infos[0]);
self.next_frame_begin.set(true);
}
// Waits until the next on_next_frame_begin() after a request_frame().
async fn wait_to_update(&self) -> PresentParameters {
// Async tracing for the waiting period
let _trace_guard =
trace::async_enter!(trace::generate_nonce(), "gfx", "NaiveScheduler::WaitForPresent");
// Loops until ready, yielding for 500 microseconds each loop.
while !self.next_frame_begin.get() {
const YIELD_TIME: zx::Duration = zx::Duration::from_micros(500);
fuchsia_async::Timer::new(zx::Time::after(YIELD_TIME)).await;
}
// Reset for next frame.
self.next_frame_begin.set(false);
let PresentationInfo { latch_point, presentation_time } = self.next_expected_times.get();
PresentParameters {
expected_latch_point: latch_point,
expected_presentation_time: presentation_time,
requested_presentation_time: zx::Time::from_nanos(0),
unsquashable: false,
}
}
}
#[cfg(test)]
mod tests {
use {super::*, fuchsia_async as fasync, matches::assert_matches, std::task::Poll};
#[fasync::run_until_stalled(test)]
async fn first_wait_completes_immediately() {
let sched = NaiveScheduler::new();
assert_matches!(
sched.wait_to_update().await,
PresentParameters {
expected_latch_point: _,
expected_presentation_time: _,
requested_presentation_time: _,
unsquashable: false,
}
);
}
#[test]
fn following_waits_never_completes_without_on_next_frame_begin() {
let mut exec = fasync::TestExecutor::new().unwrap();
let sched = NaiveScheduler::new();
// Initial wait always completes immediately.
exec.run_until_stalled(&mut sched.wait_to_update()).is_ready();
let mut fut = sched.wait_to_update();
assert!(exec.run_until_stalled(&mut fut).is_pending()); // Will never complete.
exec.wake_next_timer();
assert!(exec.run_until_stalled(&mut fut).is_pending()); // Still didn't complete.
}
#[fasync::run_until_stalled(test)]
async fn on_next_frame_begin_before_wait_makes_wait_return_immediately() {
let sched = NaiveScheduler::new();
sched.wait_to_update().await; // Initial wait always completes immediately.
sched.on_next_frame_begin(
1,
vec![PresentationInfo {
latch_point: zx::Time::from_nanos(1),
presentation_time: zx::Time::from_nanos(1),
}],
);
assert_eq!(
sched.wait_to_update().await,
PresentParameters {
expected_latch_point: zx::Time::from_nanos(1),
expected_presentation_time: zx::Time::from_nanos(1),
requested_presentation_time: zx::Time::from_nanos(0),
unsquashable: false,
}
);
}
#[test]
fn wait_completes_after_on_next_frame_begin() {
let mut exec = fasync::TestExecutor::new().unwrap();
let sched = NaiveScheduler::new();
// Initial wait always completes immediately.
let mut fut = sched.wait_to_update();
exec.run_until_stalled(&mut fut).is_ready();
// Next wait should not fire until on_next_frame_begin() has been called.
let mut fut = sched.wait_to_update();
assert!(exec.run_until_stalled(&mut fut).is_pending());
sched.on_next_frame_begin(
10,
vec![PresentationInfo {
latch_point: zx::Time::from_nanos(1),
presentation_time: zx::Time::from_nanos(1),
}],
);
exec.wake_next_timer();
assert_eq!(
exec.run_until_stalled(&mut fut),
Poll::Ready(PresentParameters {
expected_latch_point: zx::Time::from_nanos(1),
expected_presentation_time: zx::Time::from_nanos(1),
requested_presentation_time: zx::Time::from_nanos(0),
unsquashable: false,
})
);
}
} | fuchsia_trace as trace, fuchsia_zircon as zx,
std::cell::Cell, |
logical.rs | // Copyright 2020 The OctoSQL Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use crate::physical::aggregate;
use crate::physical::trigger;
use crate::physical::csv::CSVSource;
use crate::physical::expression;
use crate::physical::expression::WildcardExpression;
use crate::physical::filter::Filter;
use crate::physical::functions::BUILTIN_FUNCTIONS;
use crate::physical::group_by::GroupBy;
use crate::physical::json::JSONSource;
use crate::physical::map;
use crate::physical::physical;
use crate::physical::physical::Identifier;
use crate::physical::requalifier::Requalifier;
use crate::physical::stream_join::StreamJoin;
#[derive(Debug)]
pub enum | {
Unexpected(String),
}
#[derive(Debug)]
pub enum Node {
Source {
name: Identifier,
alias: Option<Identifier>,
},
Filter {
source: Box<Node>,
filter_expr: Box<Expression>,
},
Map {
source: Box<Node>,
expressions: Vec<(Box<Expression>, Identifier)>,
wildcards: Vec<Option<String>>,
keep_source_fields: bool,
},
GroupBy {
source: Box<Node>,
key_exprs: Vec<Box<Expression>>,
aggregates: Vec<Aggregate>,
aggregated_exprs: Vec<Box<Expression>>,
output_fields: Vec<Identifier>,
trigger: Vec<Trigger>,
},
Join {
source: Box<Node>,
source_key: Vec<Box<Expression>>,
joined: Box<Node>,
joined_key: Vec<Box<Expression>>,
},
Requalifier {
source: Box<Node>,
alias: String,
},
}
#[derive(Debug)]
pub enum Expression {
Variable(Identifier),
Constant(physical::ScalarValue),
Function(Identifier, Vec<Box<Expression>>),
Wildcard(Option<String>),
Subquery(Box<Node>),
}
#[derive(Debug)]
pub enum Aggregate {
KeyPart,
Count,
Sum,
}
#[derive(Debug)]
pub enum Trigger {
Counting(u64),
}
pub struct MaterializationContext {}
impl Node {
pub fn physical(
&self,
mat_ctx: &MaterializationContext,
) -> Result<Arc<dyn physical::Node>, Error> {
match self {
Node::Source { name, alias: _ } => {
let path = name.to_string();
if path.contains(".json") {
Ok(Arc::new(JSONSource::new(path)))
} else if path.contains(".csv") {
Ok(Arc::new(CSVSource::new(path)))
} else {
unimplemented!()
}
}
Node::Filter {
source,
filter_expr,
} => {
Ok(Arc::new(Filter::new(
filter_expr.physical(mat_ctx)?,
source.physical(mat_ctx)?,
)))
}
Node::Map {
source,
expressions,
wildcards,
keep_source_fields,
} => {
let expr_vec_res = expressions
.iter()
.map(|(expr, ident)|
expr.physical(mat_ctx).map(|expr| (expr, ident.clone())))
.collect::<Vec<_>>();
let mut expr_vec = Vec::with_capacity(expr_vec_res.len());
let mut name_vec = Vec::with_capacity(expr_vec_res.len());
for expr_res in expr_vec_res {
let (expr, name) = expr_res?;
expr_vec.push(expr);
name_vec.push(name);
}
Ok(Arc::new(map::Map::new(source.physical(mat_ctx)?, expr_vec, name_vec, wildcards.clone(), *keep_source_fields)))
}
Node::GroupBy {
source,
key_exprs,
aggregates,
aggregated_exprs,
output_fields,
trigger,
} => {
let key_exprs_physical = key_exprs
.into_iter()
.map(|expr| expr.physical(mat_ctx))
.collect::<Result<_, _>>()?;
let (aggregates_no_key_part, aggregated_exprs_no_key_part): (Vec<_>, Vec<_>) = aggregates.iter()
.zip(aggregated_exprs.iter())
.filter(|(aggregate, _aggregated_expr)| if let Aggregate::KeyPart = **aggregate { false } else { true })
.unzip();
let aggregate_vec = aggregates_no_key_part
.iter()
.map(|expr| expr.physical(mat_ctx))
.collect::<Result<_, _>>()?;
let aggregated_exprs_physical = aggregated_exprs_no_key_part
.into_iter()
.map(|expr| expr.physical(mat_ctx))
.collect::<Result<_, _>>()?;
let aggregated_exprs_key_part = aggregates.iter()
.zip(aggregated_exprs.iter())
.filter(|(aggregate, _aggregated_expr)| if let Aggregate::KeyPart = **aggregate { true } else { false })
.map(|(_aggregate, aggregated_expr)| aggregated_expr)
.collect::<Vec<_>>();
let aggregate_output_names = aggregates.iter()
.enumerate()
.filter(|(_i, aggregate)| if let Aggregate::KeyPart = **aggregate { false } else { true })
.map(|(i, _)| output_fields[i].clone())
.collect();
let mut output_key_indices = Vec::with_capacity(aggregated_exprs_key_part.len());
for expr in aggregated_exprs_key_part {
if let Expression::Variable(var_name) = expr.as_ref() {
let mut found = false;
for i in 0..key_exprs.len() {
if let Expression::Variable(key_var_name) = key_exprs[i].as_ref() {
if var_name == key_var_name {
output_key_indices.push(i);
found = true;
break;
}
}
}
if !found {
return Err(Error::Unexpected(format!("key part variable {} not found in key", var_name.to_string())));
}
} else {
return Err(Error::Unexpected("key part can only contain variables".to_string()));
}
}
let trigger_prototypes = trigger.iter()
.map(|t| t.physical(mat_ctx))
.collect::<Result<_, _>>()?;
Ok(Arc::new(GroupBy::new(
key_exprs_physical,
output_key_indices,
aggregated_exprs_physical,
aggregate_vec,
aggregate_output_names,
trigger_prototypes,
source.physical(mat_ctx)?,
)))
}
Node::Join {
source,
source_key,
joined,
joined_key,
} => {
let source_key_exprs = source_key
.into_iter()
.map(|expr| expr.physical(mat_ctx))
.collect::<Result<_, _>>()?;
let joined_key_exprs = joined_key
.into_iter()
.map(|expr| expr.physical(mat_ctx))
.collect::<Result<_, _>>()?;
Ok(Arc::new(StreamJoin::new(
source.physical(mat_ctx)?,
source_key_exprs,
joined.physical(mat_ctx)?,
joined_key_exprs,
)))
}
Node::Requalifier { source, alias } => {
Ok(Arc::new(Requalifier::new(alias.clone(), source.physical(mat_ctx)?)))
}
}
}
}
impl Expression {
pub fn physical(
&self,
mat_ctx: &MaterializationContext,
) -> Result<Arc<dyn expression::Expression>, Error> {
match self {
Expression::Variable(name) => Ok(Arc::new(expression::FieldExpression::new(name.clone()))),
Expression::Constant(value) => Ok(Arc::new(expression::Constant::new(value.clone()))),
Expression::Function(name, args) => {
let args_physical = args
.into_iter()
.map(|expr| expr.physical(mat_ctx))
.collect::<Result<_, _>>()?;
match name {
Identifier::SimpleIdentifier(ident) => {
match BUILTIN_FUNCTIONS.get(ident.to_lowercase().as_str()) {
None => { Err(Error::Unexpected(format!("unknown function: {}", ident.as_str()))) }
Some(fn_constructor) => Ok(fn_constructor(args_physical)),
}
}
_ => unimplemented!(),
}
}
Expression::Wildcard(qualifier) => Ok(Arc::new(WildcardExpression::new(qualifier.as_ref().map(|s| s.as_str())))),
Expression::Subquery(query) => Ok(Arc::new(expression::Subquery::new(query.physical(mat_ctx)?))),
}
}
}
impl Aggregate {
pub fn physical(
&self,
_mat_ctx: &MaterializationContext,
) -> Result<Arc<dyn aggregate::Aggregate>, Error> {
match self {
Aggregate::Count => Ok(Arc::new(aggregate::Count {})),
Aggregate::Sum => Ok(Arc::new(aggregate::Sum {})),
_ => unimplemented!(),
}
}
}
impl Trigger {
pub fn physical(
&self,
_mat_ctx: &MaterializationContext,
) -> Result<Arc<dyn trigger::TriggerPrototype>, Error> {
match self {
Trigger::Counting(n) => Ok(Arc::new(trigger::CountingTriggerPrototype::new(n.clone()))),
_ => unimplemented!(),
}
}
}
| Error |
ifs.rs | pub fn ifs() {
println!("***Ifs***");
ifs_one();
println!("");
}
fn ifs_one() | {
let x = 5;
if x == 5 {
println!("x is five!");
}
else if x == 6 {
println!("x is six!");
}
else {
println!("x is neither five nor six");
}
let y = if x == 5 { 2 } else { 8 };
println!("y is: {}", y);
} |
|
mock-adapter-pair-device-command.go | // Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT.
package messages
import "fmt"
import "encoding/json"
// Message-specific data
type MockAdapterPairDeviceCommandJsonData struct {
// ID of the adapter
AdapterId string `json:"adapterId" yaml:"adapterId"`
// Description of the device
DeviceDescr MockAdapterPairDeviceCommandJsonDataDeviceDescr `json:"deviceDescr" yaml:"deviceDescr"`
// ID of the device
DeviceId string `json:"deviceId" yaml:"deviceId"` |
// ID of the plugin
PluginId string `json:"pluginId" yaml:"pluginId"`
}
// Description of the device
type MockAdapterPairDeviceCommandJsonDataDeviceDescr map[string]any
// UnmarshalJSON implements json.Unmarshaler.
func (j *MockAdapterPairDeviceCommandJsonData) UnmarshalJSON(b []byte) error {
var raw map[string]any
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["adapterId"]; !ok || v == nil {
return fmt.Errorf("field adapterId in MockAdapterPairDeviceCommandJsonData: required")
}
if v, ok := raw["deviceDescr"]; !ok || v == nil {
return fmt.Errorf("field deviceDescr in MockAdapterPairDeviceCommandJsonData: required")
}
if v, ok := raw["deviceId"]; !ok || v == nil {
return fmt.Errorf("field deviceId in MockAdapterPairDeviceCommandJsonData: required")
}
if v, ok := raw["pluginId"]; !ok || v == nil {
return fmt.Errorf("field pluginId in MockAdapterPairDeviceCommandJsonData: required")
}
type Plain MockAdapterPairDeviceCommandJsonData
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
*j = MockAdapterPairDeviceCommandJsonData(plain)
return nil
}
// Tell the mock adapter to pair a device
type MockAdapterPairDeviceCommandJson struct {
// Message-specific data
Data MockAdapterPairDeviceCommandJsonData `json:"data" yaml:"data"`
// The message type, used by the IPC client and api to differentiate messages
MessageType int `json:"messageType" yaml:"messageType"`
}
// UnmarshalJSON implements json.Unmarshaler.
func (j *MockAdapterPairDeviceCommandJson) UnmarshalJSON(b []byte) error {
var raw map[string]any
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["data"]; !ok || v == nil {
return fmt.Errorf("field data in MockAdapterPairDeviceCommandJson: required")
}
if v, ok := raw["messageType"]; !ok || v == nil {
return fmt.Errorf("field messageType in MockAdapterPairDeviceCommandJson: required")
}
type Plain MockAdapterPairDeviceCommandJson
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
*j = MockAdapterPairDeviceCommandJson(plain)
return nil
} | |
lex.go | package logql
import (
"strings"
"text/scanner"
"time"
"unicode"
"github.com/dustin/go-humanize"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/util/strutil"
)
var tokens = map[string]int{
",": COMMA,
".": DOT,
"{": OPEN_BRACE,
"}": CLOSE_BRACE,
"=": EQ,
OpTypeNEQ: NEQ,
"=~": RE,
"!~": NRE,
"|=": PIPE_EXACT,
"|~": PIPE_MATCH,
OpPipe: PIPE,
OpUnwrap: UNWRAP,
"(": OPEN_PARENTHESIS,
")": CLOSE_PARENTHESIS,
"by": BY,
"without": WITHOUT,
"bool": BOOL,
"[": OPEN_BRACKET,
"]": CLOSE_BRACKET,
OpLabelReplace: LABEL_REPLACE,
// binops
OpTypeOr: OR,
OpTypeAnd: AND,
OpTypeUnless: UNLESS,
OpTypeAdd: ADD,
OpTypeSub: SUB,
OpTypeMul: MUL,
OpTypeDiv: DIV,
OpTypeMod: MOD,
OpTypePow: POW,
// comparison binops
OpTypeCmpEQ: CMP_EQ,
OpTypeGT: GT,
OpTypeGTE: GTE,
OpTypeLT: LT,
OpTypeLTE: LTE,
// parsers
OpParserTypeJSON: JSON,
OpParserTypeRegexp: REGEXP,
OpParserTypeLogfmt: LOGFMT,
// fmt
OpFmtLabel: LABEL_FMT,
OpFmtLine: LINE_FMT,
}
| // functionTokens are tokens that needs to be suffixes with parenthesis
var functionTokens = map[string]int{
// range vec ops
OpRangeTypeRate: RATE,
OpRangeTypeCount: COUNT_OVER_TIME,
OpRangeTypeBytesRate: BYTES_RATE,
OpRangeTypeBytes: BYTES_OVER_TIME,
OpRangeTypeAvg: AVG_OVER_TIME,
OpRangeTypeSum: SUM_OVER_TIME,
OpRangeTypeMin: MIN_OVER_TIME,
OpRangeTypeMax: MAX_OVER_TIME,
OpRangeTypeStdvar: STDVAR_OVER_TIME,
OpRangeTypeStddev: STDDEV_OVER_TIME,
OpRangeTypeQuantile: QUANTILE_OVER_TIME,
OpRangeTypeAbsent: ABSENT_OVER_TIME,
// vec ops
OpTypeSum: SUM,
OpTypeAvg: AVG,
OpTypeMax: MAX,
OpTypeMin: MIN,
OpTypeCount: COUNT,
OpTypeStddev: STDDEV,
OpTypeStdvar: STDVAR,
OpTypeBottomK: BOTTOMK,
OpTypeTopK: TOPK,
// conversion Op
OpConvBytes: BYTES_CONV,
OpConvDuration: DURATION_CONV,
OpConvDurationSeconds: DURATION_SECONDS_CONV,
}
type lexer struct {
scanner.Scanner
errs []ParseError
}
func (l *lexer) Lex(lval *exprSymType) int {
r := l.Scan()
switch r {
case '#':
// Scan until a newline or EOF is encountered
for next := l.Peek(); !(next == '\n' || next == scanner.EOF); next = l.Next() {
}
return l.Lex(lval)
case scanner.EOF:
return 0
case scanner.Int, scanner.Float:
numberText := l.TokenText()
duration, ok := tryScanDuration(numberText, &l.Scanner)
if ok {
lval.duration = duration
return DURATION
}
bytes, ok := tryScanBytes(numberText, &l.Scanner)
if ok {
lval.bytes = bytes
return BYTES
}
lval.str = numberText
return NUMBER
case scanner.String, scanner.RawString:
var err error
lval.str, err = strutil.Unquote(l.TokenText())
if err != nil {
l.Error(err.Error())
return 0
}
return STRING
}
// scanning duration tokens
if r == '[' {
d := ""
for r := l.Next(); r != scanner.EOF; r = l.Next() {
if string(r) == "]" {
i, err := model.ParseDuration(d)
if err != nil {
l.Error(err.Error())
return 0
}
lval.duration = time.Duration(i)
return RANGE
}
d += string(r)
}
l.Error("missing closing ']' in duration")
return 0
}
tokenText := l.TokenText()
tokenNext := tokenText + string(l.Peek())
if tok, ok := functionTokens[tokenNext]; ok {
// create a copy to advance to the entire token for testing suffix
sc := l.Scanner
sc.Next()
if isFunction(sc) {
l.Next()
return tok
}
}
if tok, ok := functionTokens[tokenText]; ok && isFunction(l.Scanner) {
return tok
}
if tok, ok := tokens[tokenNext]; ok {
l.Next()
return tok
}
if tok, ok := tokens[tokenText]; ok {
return tok
}
lval.str = tokenText
return IDENTIFIER
}
func (l *lexer) Error(msg string) {
l.errs = append(l.errs, newParseError(msg, l.Line, l.Column))
}
func tryScanDuration(number string, l *scanner.Scanner) (time.Duration, bool) {
var sb strings.Builder
sb.WriteString(number)
//copy the scanner to avoid advancing it in case it's not a duration.
s := *l
consumed := 0
for r := s.Peek(); r != scanner.EOF && !unicode.IsSpace(r); r = s.Peek() {
if !unicode.IsNumber(r) && !isDurationRune(r) && r != '.' {
break
}
_, _ = sb.WriteRune(r)
_ = s.Next()
consumed++
}
if consumed == 0 {
return 0, false
}
// we've found more characters before a whitespace or the end
d, err := time.ParseDuration(sb.String())
if err != nil {
return 0, false
}
// we need to consume the scanner, now that we know this is a duration.
for i := 0; i < consumed; i++ {
_ = l.Next()
}
return d, true
}
func isDurationRune(r rune) bool {
// "ns", "us" (or "µs"), "ms", "s", "m", "h".
switch r {
case 'n', 's', 'u', 'm', 'h', 'µ':
return true
default:
return false
}
}
func tryScanBytes(number string, l *scanner.Scanner) (uint64, bool) {
var sb strings.Builder
sb.WriteString(number)
	// copy the scanner to avoid advancing it in case it's not a byte size.
s := *l
consumed := 0
for r := s.Peek(); r != scanner.EOF && !unicode.IsSpace(r); r = s.Peek() {
if !unicode.IsNumber(r) && !isBytesSizeRune(r) && r != '.' {
break
}
_, _ = sb.WriteRune(r)
_ = s.Next()
consumed++
}
if consumed == 0 {
return 0, false
}
// we've found more characters before a whitespace or the end
b, err := humanize.ParseBytes(sb.String())
if err != nil {
return 0, false
}
	// we need to consume the scanner, now that we know this is a byte size.
for i := 0; i < consumed; i++ {
_ = l.Next()
}
return b, true
}
func isBytesSizeRune(r rune) bool {
// Accept: B, kB, MB, GB, TB, PB, KB, KiB, MiB, GiB, TiB, PiB
	// Do not accept: EB, ZB, YB, EiB, ZiB and YiB. They are not supported since the value might not be representable in a uint64
switch r {
case 'B', 'i', 'k', 'K', 'M', 'G', 'T', 'P':
return true
default:
return false
}
}
// isFunction check if the next runes are either an open parenthesis
// or by/without tokens. This allows to dissociate functions and identifier correctly.
func isFunction(sc scanner.Scanner) bool {
var sb strings.Builder
sc = trimSpace(sc)
for r := sc.Next(); r != scanner.EOF; r = sc.Next() {
sb.WriteRune(r)
switch sb.String() {
case "(":
return true
case "by", "without":
sc = trimSpace(sc)
return sc.Next() == '('
}
}
return false
}
func trimSpace(l scanner.Scanner) scanner.Scanner {
for n := l.Peek(); n != scanner.EOF; n = l.Peek() {
if unicode.IsSpace(n) {
l.Next()
continue
}
return l
}
return l
} | |
lexicon.rs | pub mod trie;
pub mod word_id_table;
pub mod word_infos;
pub mod word_params;
use nom::le_u32;
use self::trie::Trie;
use self::word_id_table::WordIdTable;
use self::word_infos::{WordInfo, WordInfos};
use self::word_params::WordParams;
use pyo3::prelude::*;
#[pyclass]
pub struct Lexicon {
trie: Trie,
word_id_table: WordIdTable,
word_params: WordParams,
word_infos: WordInfos,
}
#[pymethods]
impl Lexicon {
#[new]
pub fn new(buf: &[u8], original_offset: usize) -> Self {
let mut offset = original_offset;
let (_rest, trie_size) = parse_size(buf, offset).unwrap();
offset += 4;
let (_rest, trie_array) = parse_trie_array(buf, offset, trie_size).unwrap();
let trie = Trie::new(trie_array, trie_size);
offset += trie.total_size();
let (_rest, word_id_table_size) = parse_size(buf, offset).unwrap();
let word_id_table = WordIdTable::new(buf.to_vec(), word_id_table_size, offset + 4);
offset += word_id_table.storage_size();
let (_rest, word_params_size) = parse_size(buf, offset).unwrap();
let word_params = WordParams::new(buf.to_vec(), word_params_size, offset + 4);
offset += word_params.storage_size();
let word_infos = WordInfos::new(buf.to_vec(), offset, word_params.size());
Lexicon {
trie,
word_id_table,
word_params,
word_infos,
}
}
pub fn lookup(&self, input: &[u8], offset: usize) -> Vec<(u32, usize)> {
let result = self.trie.common_prefix_search(input, offset);
let mut l: Vec<(u32, usize)> = Vec::new(); // (word_id, length)
for item in result {
let length = item.1;
for word_id in self.word_id_table.get(item.0) {
l.push((word_id, length));
}
}
l
}
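// Illustration with hypothetical ids: for the UTF-8 input "東京都" starting at
// offset 0, the trie could match "東", "東京" and "東京都", so lookup would yield
// pairs such as [(4, 3), (5, 6), (6, 9)] -- (word_id, matched byte length) --
// where a single trie hit may expand to several word ids via the word id table.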
pub fn get_word_info(&self, word_id: usize) -> WordInfo {
self.word_infos.get_word_info(word_id)
}
pub fn get_word_param(&self, word_id: usize) -> (i16, i16, i16) {
let left_id = self.word_params.get_left_id(word_id);
let right_id = self.word_params.get_right_id(word_id);
let cost = self.word_params.get_cost(word_id);
(left_id, right_id, cost)
}
}
named_args!(
parse_size(offset: usize)<&[u8], u32>, | (size)
)
);
named_args!(
parse_trie_array(offset: usize, trie_size: u32)<&[u8], Vec<u32>>,
do_parse!(
_seek: take!(offset) >>
trie_array: count!(le_u32, trie_size as usize) >>
(trie_array)
// TODO: can we borrow this as &[u32] from the bytes without copying? Java uses `bytes.asIntBuffer();`
)
); | do_parse!(
_seek: take!(offset) >>
size: le_u32 >>
|
e_animations_2axis.py | # encoding: utf-8
##################################################
# This script shows how to create animated plots using matplotlib and a basic dataset
# Multiple tutorials inspired the current design but they mostly came from:
# https://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1
# Note: the project is updated for each course, roughly once a year
##################################################
#
##################################################
# Author: Diego Pajarito
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: [email protected]
# Status: development
##################################################
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
# We need to import the numpy and matplotlib libraries
# importing libraries
import pandas as pd
import seaborn as sns
# Read files and prepare data
data = pd.read_csv('../data/2021_seguiment-covid19-bcn.csv')
#data = pd.read_csv('https://opendata-ajuntament.barcelona.cat/data/dataset/4f3ffbda-d5be-4f2a-a836-26a77be6df1a/resource/f627ac0a-d05f-416d-9773-eeb464a3fc44/download')
data.columns = ['date_indicator', 'frequency_indicator', 'place', 'name_indicator',
'name_variable', 'value', 'unit', 'source']
# We will use two datasets to generate plots
data_daily = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (diari)']
data_accumulated = data[data['name_indicator'] == 'Casos de COVID-19 a Barcelona (acumulat)']
# We need the data to be in time format to calculate values in days after day zero
data_daily.loc[:, 'date_indicator'] = pd.to_datetime(data_daily['date_indicator'])
initial_day = data_daily['date_indicator'].min()
data_daily.loc[:, 'day_after_zero'] = data_daily['date_indicator'] - initial_day
data_daily.loc[:, 'day_after_zero'] = data_daily['day_after_zero']/np.timedelta64(1, 'D')
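# For example (dates are illustrative): if initial_day were 2020-02-26, a row dated
# 2020-03-01 would end up with day_after_zero = 4.0 after dividing the timedelta by one day.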
# We need the data to be in time format to calculate values in days after day zero
data_accumulated.loc[:, 'date_indicator'] = pd.to_datetime(data_accumulated['date_indicator'])
data_accumulated.loc[:, 'day_after_zero'] = data_accumulated['date_indicator'] - initial_day
data_accumulated.loc[:, 'day_after_zero'] = data_accumulated['day_after_zero']/np.timedelta64(1, 'D')
# we also extract some values to set the plot limits
max_day = data_daily['day_after_zero'].max().astype(int)
max_cases_daily = data_daily['value'].max()
max_cases_accumulated = data_accumulated['value'].max()
title = 'Barcelona: '
# We then prepare the writer and animation file options
Writer = animation.writers['ffmpeg']
writer = Writer(fps=20, metadata=dict(artist='MaCTResearcher'), bitrate=1800)
# If you get an error when using Anaconda, try installing ffmpeg:
# conda install -c conda-forge ffmpeg
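# At fps=20, an animation rendered with frames=max_day lasts max_day/20 seconds,
# e.g. 300 daily frames would give a 15-second clip.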
# We create an initial plot with a basic configuration and a single line
fig, ax1 = plt.subplots()
fig.set_size_inches(10, 6)
plt.title(title + 'Covid-19 cases', fontsize=18)
plt.xlabel('Day after case 1', fontsize=14)
plt.ylim(0, max_cases_accumulated)
plt.ylabel('Accumulated', fontsize=18)
# now we configure the secondary axis
ax2 = ax1.twinx()
plt.ylim(0, max_cases_daily*2)
cases_ticks = np.arange(0, max_day, 50)
# We need to define an animation function that handles the behaviour of each individual frame
# the variable "i" is the frame id and can be used in queries or filters over your data
def | (i):
frame_data_daily = data_daily[data_daily['day_after_zero'] <= i]
frame_data_accumulated = data_accumulated[data_accumulated['day_after_zero'] <= i]
sns.lineplot(x='day_after_zero', y='value', data=frame_data_accumulated, color="r", ax=ax1)
sns.barplot(x='day_after_zero', y='value', data=frame_data_daily, color='b', ax=ax2)
plt.ylabel('Daily', fontsize=18)
plt.xlim(0, max_day)
plt.xticks(cases_ticks)
plt.xlabel('Day after case 1', fontsize=18)
# Handling secondary axis implies different management in the animate function
ani = matplotlib.animation.FuncAnimation(fig, animate, frames=max_day, repeat=True)
ani.save('covid_cases_bcn_2axis.mp4', writer=writer)
print('end')
| animate |
lib.rs | use on_wire::{witness, FromWire, IntoWire, NewType};
use prost::Message;
pub struct ProtoBuf<A>(pub A);
impl<A> ProtoBuf<A> {
pub fn new(a: A) -> Self {
ProtoBuf(a)
}
pub fn get(self) -> A {
self.0
}
}
/// This is the witness for protobuf types (types with a prost::Message
/// implementation) and types that convert to a protobuf type (types with a
/// ToProto implementation).
pub fn protobuf<A, B>(a: ProtoBuf<A>, b: B) -> (A, ProtoBuf<B>) | B: ToProto,
{
witness(a, b)
}
/// This is deliberately less flexible than From/TryFrom because they can have
/// multiple types they can be transformed into, preventing type inference. Each
/// type can only have one proto type it maps to.
pub trait ToProto: Sized {
type Proto: Message + Default;
fn from_proto(_: Self::Proto) -> Result<Self, String>;
fn to_proto(self) -> Self::Proto;
}
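// Sketch of a manual implementation (hypothetical `Height` type and generated `pb`
// module, not part of this crate): a domain type that is not itself a prost::Message
// can opt in by converting to its generated message, e.g.
//
//     impl ToProto for Height {
//         type Proto = pb::Height;
//         fn from_proto(p: pb::Height) -> Result<Self, String> {
//             Ok(Height { value: p.value })
//         }
//         fn to_proto(self) -> pb::Height {
//             pb::Height { value: self.value }
//         }
//     }
//
// after which ProtoBuf(Height { value: 1 }) round-trips through
// IntoWire::into_bytes / FromWire::from_bytes below.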
impl<Type: Message + Default> ToProto for Type {
type Proto = Type;
fn from_proto(pt: Self::Proto) -> Result<Self, String> {
Ok(pt)
}
fn to_proto(self) -> Self::Proto {
self
}
}
impl<Type: ToProto> FromWire for ProtoBuf<Type> {
fn from_bytes(bytes: Vec<u8>) -> Result<Self, String> {
let ty = Type::Proto::decode(&bytes[..]).map_err(|e| e.to_string())?;
Ok(ProtoBuf(Type::from_proto(ty)?))
}
}
impl<Type: ToProto> IntoWire for ProtoBuf<Type> {
fn into_bytes(self) -> Result<Vec<u8>, String> {
let proto_type = self.0.to_proto();
let mut buf = Vec::with_capacity(proto_type.encoded_len());
proto_type.encode(&mut buf).map_err(|e| e.to_string())?;
Ok(buf)
}
}
impl<T> NewType for ProtoBuf<T> {
type Inner = T;
fn into_inner(self) -> T {
self.0
}
fn from_inner(t: T) -> Self {
ProtoBuf::new(t)
}
} | where
A: ToProto, |
benchmark.rs | use crate::{sys, Error, LargePage, LocalCPtr, Result, Word, TCB};
pub fn reset_log() -> Result<()> {
Error::wrap(unsafe { sys::seL4_BenchmarkResetLog() })
}
pub fn finalize_log() -> Word {
unsafe { sys::seL4_BenchmarkFinalizeLog() }
}
pub fn set_log_buffer(frame: LargePage) -> Result<()> {
Error::wrap(unsafe { sys::seL4_BenchmarkSetLogBuffer(frame.raw()) })
}
pub fn get_thread_utilisation(tcb: TCB) {
unsafe { sys::seL4_BenchmarkGetThreadUtilisation(tcb.raw()) }
}
pub fn reset_thread_utilisation(tcb: TCB) {
unsafe { sys::seL4_BenchmarkResetThreadUtilisation(tcb.raw()) }
} | }
pub fn reset_all_thread_utilisation() {
unsafe { sys::seL4_BenchmarkResetAllThreadsUtilisation() }
} |
pub fn dump_all_thread_utilisation() {
unsafe { sys::seL4_BenchmarkDumpAllThreadsUtilisation() } |
wsd.py | import abc
import csv
from collections import namedtuple, defaultdict, OrderedDict, Counter
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity as sim
from sklearn.pipeline import Pipeline
STOP_POS = {'CONJ', 'INTJ', 'PART', 'PR', 'UNKNOWN'}
Synset = namedtuple('Synset', 'id synonyms hypernyms bag')
class Inventory(object):
"""Sense inventory representation and loader."""
synsets = {}
index = defaultdict(list)
def __init__(self, inventory_path):
"""
During the construction, BaseWSD parses the given sense inventory file.
"""
def field_to_bag(field):
return {word: freq for record in field.split(', ')
for word, freq in (self.lexeme(record),)
if record}
with open(inventory_path, 'r', encoding='utf-8', newline='') as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
id = row[0]
synonyms = field_to_bag(row[2])
hypernyms = field_to_bag(row[4])
self.synsets[id] = Synset(
id=id,
synonyms=synonyms,
hypernyms=hypernyms,
bag={**synonyms, **hypernyms}
)
for word in self.synsets[id].bag:
self.index[word].append(id)
def lexeme(self, record):
"""
Parse the sense representations like 'word#sid:freq'.
Actually, we do not care about the sid field because
we use synset identifiers instead.
"""
if '#' in record:
word, tail = record.split('#', 1)
else:
word, tail = record, None
if tail:
if ':' in tail:
sid, tail = tail.split(':', 1)
else:
sid, tail = tail, None
if tail:
freq = float(tail)
else:
freq = 1
return word, freq
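# For illustration, the three record shapes handled above are:
# lexeme('замок#2:5.3') -> ('замок', 5.3), lexeme('замок#2') -> ('замок', 1)
# and lexeme('замок') -> ('замок', 1); the words shown are hypothetical.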
Span = namedtuple('Span', 'token pos lemma index')
class BaseWSD(object):
"""
Base class for word sense disambiguation routines. Should not be used directly.
Descendant classes must implement the disambiguate_word() method.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, inventory):
self.inventory = inventory
def lemmatize(self, sentence):
"""
This method transforms the given sentence into the dict that
maps the word indices to their lemmas. It also excludes those
words whose part of speech is in the stop list.
"""
return {i: lemma for i, (_, lemma, pos) in enumerate(sentence)
if pos not in STOP_POS}
@abc.abstractmethod
def disambiguate_word(self, sentence, index):
"""
Return word sense identifier for the given word in the sentence.
"""
if not sentence or not isinstance(sentence, list):
raise ValueError('sentence should be a list')
if not isinstance(index, int) or index < 0 or index >= len(sentence):
raise ValueError('index should be in [0...%d]' % len(sentence))
def disambiguate(self, sentence):
"""
Return word sense identifiers corresponding to the words
in the given sentence.
"""
result = OrderedDict()
for index, span in enumerate(sentence):
# here, span is (token, pos, lemma), but we also need index
span = Span(*span, index)
result[span] = self.disambiguate_word(sentence, index)
return result
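# For illustration (hypothetical sense ids): the result is an OrderedDict that maps
# each Span(token, pos, lemma, index) to the sense id string returned by
# disambiguate_word(), or None when the subclass cannot choose a sense.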
class OneBaseline(BaseWSD):
"""
A simple baseline that treats every word as monosemous. Not thread-safe.
"""
counter = {}
def __init__(self):
super().__init__(None)
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
word, _, _ = sentence[index]
if word not in self.counter:
self.counter[word] = len(self.counter)
return str(self.counter[word])
class SingletonsBaseline(BaseWSD):
"""
A simple baseline that puts every instance into a different cluster. Not thread-safe.
"""
counter = 0
def __init__(self):
super().__init__(None)
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
self.counter += 1
return str(self.counter)
class SparseWSD(BaseWSD):
"""
A simple sparse word sense disambiguation.
"""
sparse = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())])
def __init__(self, inventory):
super().__init__(inventory)
self.sparse.fit([synset.bag for synset in self.inventory.synsets.values()])
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
lemmas = self.lemmatize(sentence)
if index not in lemmas:
return
svector = self.sparse.transform(Counter(lemmas.values())) # sentence vector
def search(query):
"""
Map synset identifiers to the cosine similarity value.
This function calls the function query(id) that retrieves
the corresponding dict of words.
"""
return Counter({id: sim(svector, self.sparse.transform(query(id))).item(0)
for id in self.inventory.index[lemmas[index]]})
candidates = search(lambda id: self.inventory.synsets[id].synonyms)
# give the hypernyms a chance if nothing is found
if not candidates:
candidates = search(lambda id: self.inventory.synsets[id].bag)
if not candidates:
return
for id, _ in candidates.most_common(1):
return id
class DenseWSD(BaseWSD):
"""
A word sense disambiguation approach that is based on SenseGram.
"""
class densedict(dict):
"""
A handy dict that transforms a synset into its dense representation.
"""
def __init__(self, synsets, sensegram):
self.synsets = synsets
self.sensegram = sensegram
def __missing__(self, id):
value = self[id] = self.sensegram(self.synsets[id].bag.keys())
return value
def __init__(self, inventory, wv):
super().__init__(inventory)
self.wv = wv
self.dense = self.densedict(self.inventory.synsets, self.sensegram)
def sensegram(self, words):
"""
This is a simple implementation of SenseGram.
It just averages the embeddings corresponding to the given words.
"""
vectors = self.words_vec(set(words))
if not vectors:
return
return np.mean(np.vstack(tuple(vectors.values())), axis=0).reshape(1, -1)
def words_vec(self, words, use_norm=False):
"""
Return a dict that maps the given words to their embeddings.
"""
if callable(getattr(self.wv, 'words_vec', None)):
return self.wv.words_vec(words, use_norm)
return {word: self.wv.word_vec(word, use_norm) for word in words if word in self.wv}
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
lemmas = self.lemmatize(sentence)
if index not in lemmas:
return
svector = self.sensegram(lemmas.values()) # sentence vector
if svector is None:
return
# map synset identifiers to the cosine similarity value
candidates = Counter({id: sim(svector, self.dense[id]).item(0)
for id in self.inventory.index[lemmas[index]]
if self.dense[id] is not None})
if not candidates:
return
for id, _ in candidates.most_common(1):
return id
class LeskWSD(BaseWSD):
"""
A word sense disambiguation approach that is based on Lesk method.
"""
def __init__(self, inventory):
|
def disambiguate_word(self, sentence, word_index):
super().disambiguate_word(sentence, word_index)
lemmas = self.lemmatize(sentence)
if word_index not in lemmas:
return
mentions_dict = dict()
for synset_number in self.inventory.index[lemmas[word_index]]:
mentions_dict[synset_number] = 0
for context_word in lemmas.values():
if context_word != lemmas[word_index]:
if context_word in self.inventory.synsets[synset_number].synonyms:
mentions_dict[synset_number] = mentions_dict[synset_number] + 1
elif context_word in self.inventory.synsets[synset_number].hypernyms:
mentions_dict[synset_number] = mentions_dict[synset_number] + \
self.inventory.synsets[synset_number].hypernyms[context_word]
if len(mentions_dict) > 0:
return max(mentions_dict, key=mentions_dict.get)
else:
return
| super().__init__(inventory) |
lib.rs | //!
//! Macros for Rust Bento-boxes
//!
//! Copyright (c) 2020, Arm Limited. All rights reserved.
//! SPDX-License-Identifier: BSD-3-Clause
//!
extern crate proc_macro;
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use proc_macro2::TokenTree as TokenTree2;
use proc_macro2::Span;
use syn::parse_macro_input;
use syn::parse_quote;
use syn::parse::Parse;
use syn::parse::ParseStream;
use syn::spanned::Spanned;
use quote::quote;
use std::sync::atomic;
static SEEN_EXPORTS: atomic::AtomicBool
= atomic::AtomicBool::new(false);
#[derive(Clone)]
struct ExportExportAttrs {
link_name: Option<syn::LitStr>,
ty: syn::Type,
}
impl Parse for ExportExportAttrs {
fn parse(input: ParseStream) -> syn::Result<Self> |
}
#[proc_macro_attribute]
pub fn export_export(attrs: TokenStream, input: TokenStream) -> TokenStream {
// parse
let attrs = parse_macro_input!(attrs as ExportExportAttrs);
let f = parse_macro_input!(input as syn::ItemFn);
let name = f.sig.ident.clone();
let link_name = attrs.link_name.unwrap_or_else(||
syn::LitStr::new(&name.to_string(), name.span()));
let export_name = syn::Ident::new(
&format!("__box_export_{}", name),
name.span());
let ty = attrs.ty;
let export_f = syn::ItemFn{
attrs: vec![
parse_quote!{#[export_name=#link_name]}
],
vis: syn::Visibility::Inherited,
sig: syn::Signature{
abi: parse_quote!{extern "C"},
..f.sig.clone()
},
..f
};
let extern_f = syn::ForeignItemFn{
attrs: f.attrs,
vis: f.vis,
sig: f.sig,
semi_token: parse_quote!{;},
};
// convert to tokens so we can replace the reference with a
// macro identifier
fn replace_ident(
tokens: TokenStream2,
from: &syn::Ident,
to: &TokenStream2
) -> TokenStream2 {
let mut ntokens = vec![];
for token in tokens {
match token {
TokenTree2::Ident(ident) if &ident == from => {
ntokens.extend(to.clone());
}
TokenTree2::Group(group) => {
ntokens.push(TokenTree2::Group(proc_macro2::Group::new(
group.delimiter(),
replace_ident(group.stream(), from, to))));
}
_ => {
ntokens.push(token);
}
}
}
ntokens.into_iter().collect()
}
let predeclarations = if !SEEN_EXPORTS.swap(
true, atomic::Ordering::SeqCst) {
Some(quote!{pub use crate as __box_exports;})
} else {
None
};
let ma = quote! {
($name:ident) => {
const _: #ty = $name;
#export_f
};
};
let ma = replace_ident(
ma, &export_name, "e!{$name});
let ma = replace_ident(
ma, &syn::Ident::new("__box_exports", Span::call_site()),
"e!{$crate});
let ma = replace_ident(
ma, &name, "e!{#export_name});
let q = quote! {
// need to re-declare because macros are placed in crate root
#predeclarations
// macro that generates the export
#[macro_export]
macro_rules! #export_name {
#ma
}
// expose linkage here
extern "C" {
#[link_name=#link_name]
#extern_f
}
};
if cfg!(feature = "debug-gen") {
println!("export_export gen => {}", q);
}
q.into()
}
#[proc_macro_attribute]
pub fn export(attrs: TokenStream, input: TokenStream) -> TokenStream {
let path = parse_macro_input!(attrs as syn::Path);
let f = parse_macro_input!(input as syn::ItemFn);
let mut export_path = path.clone();
let x = export_path.segments.pop().unwrap().into_value();
export_path.segments.push(syn::PathSegment::from(syn::Ident::new(
"__box_exports",
x.span())));
export_path.segments.push(syn::PathSegment::from(syn::Ident::new(
&format!("__box_export_{}", x.ident),
x.span())));
let name = f.sig.ident.clone();
let q = quote!{
#export_path!(#name);
#f
};
if cfg!(feature = "debug-gen") {
println!("export gen => {}", q);
}
q.into()
}
| {
let mut link_name = None;
let mut ty = None;
while !input.is_empty() {
// parse attributes
let attr = if input.peek(syn::Token![type]) {
let token = input.parse::<syn::Token![type]>()?;
syn::Ident::new("type", token.span())
} else {
input.parse::<syn::Ident>()?
};
match attr.to_string().as_ref() {
"link_name" => {
input.parse::<syn::Token![=]>()?;
let s = input.parse::<syn::LitStr>()?;
link_name = Some(s);
}
"type" => {
input.parse::<syn::Token![=]>()?;
let f = input.parse::<syn::Type>()?;
ty = Some(f);
}
_ => {
Err(syn::Error::new(attr.span(),
format!("unknown export attribute `{}`", attr)))?;
}
};
input.parse::<Option<syn::Token![,]>>()?;
}
// require type
let ty = ty.ok_or_else(|| syn::Error::new(
Span::call_site(), "export missing `type`"))?;
Ok(ExportExportAttrs{link_name, ty})
} |
main.rs | //! An introduction to storing and retrieving session data, in a type safe way, with the Gotham
//! web framework.
#![cfg_attr(feature = "cargo-clippy", allow(clippy::get_unwrap))]
use gotham::middleware::session::{NewSessionMiddleware, SessionData};
use gotham::pipeline::new_pipeline;
use gotham::pipeline::single::single_pipeline;
use gotham::router::builder::*;
use gotham::router::Router;
use gotham::state::{FromState, State};
/// Handler function for `GET` requests directed to `/`
///
/// Each request made will increment a counter of requests which have been made,
/// and tell you how many times you've visited the page.
fn get_handler(mut state: State) -> (State, String) |
/// Create a `Router`
fn router() -> Router {
// Install middleware which handles session creation before, and updating after, our handler is
// called.
// The default NewSessionMiddleware stores session data in an in-memory map, which means that
// server restarts will throw the data away, but it can be customized as needed.
let middleware = NewSessionMiddleware::default()
// Configure the type of data which we want to store in the session.
// See the custom_data_type example for storing more complex data.
.with_session_type::<usize>()
// By default, the cookies used are only sent over secure connections. For our test server,
// we don't set up an HTTPS certificate, so we allow the cookies to be sent over insecure
// connections. This should not be done in real applications.
.insecure();
let (chain, pipelines) = single_pipeline(new_pipeline().add(middleware).build());
build_router(chain, pipelines, |route| {
route.get("/").to(get_handler);
})
}
/// Start a server and use a `Router` to dispatch requests
pub fn main() {
let addr = "127.0.0.1:7878";
println!("Listening for requests at http://{}", addr);
gotham::start(addr, router())
}
#[cfg(test)]
mod tests {
use super::*;
use cookie::Cookie;
use gotham::hyper::header::{COOKIE, SET_COOKIE};
use gotham::hyper::StatusCode;
use gotham::test::TestServer;
#[test]
fn cookie_is_set_and_counter_increments() {
let test_server = TestServer::new(router()).unwrap();
let response = test_server
.client()
.get("http://localhost/")
.perform()
.unwrap();
assert_eq!(response.status(), StatusCode::OK);
let cookie: Cookie = {
let set_cookie: Vec<_> = response
.headers()
.get_all(SET_COOKIE)
.iter()
.flat_map(|hv| hv.to_str())
.collect();
assert!(set_cookie.len() == 1);
set_cookie.get(0).unwrap().to_string().parse().unwrap()
};
let body = response.read_body().unwrap();
assert_eq!(
&body[..],
"You have visited this page 0 time(s) before\n".as_bytes()
);
let response = test_server
.client()
.get("http://localhost/")
.with_header(COOKIE, (&cookie.to_string()).parse().unwrap())
.perform()
.unwrap();
assert_eq!(response.status(), StatusCode::OK);
let body = response.read_body().unwrap();
assert_eq!(
&body[..],
"You have visited this page 1 time(s) before\n".as_bytes()
);
}
}
| {
// Define a narrow scope so that state can be borrowed/moved later in the function.
let visits = {
// Borrow a reference to the usize stored for the session (keyed by a cookie) from state.
// We don't need to worry about the underlying cookie mechanics, we just ask for our usize.
let visits: &usize = SessionData::<usize>::borrow_from(&state);
*visits
};
let message = format!("You have visited this page {} time(s) before\n", visits);
{
// Mutably borrow the usize, so we can increment it.
let visits: &mut usize = SessionData::<usize>::borrow_mut_from(&mut state);
*visits += 1;
}
(state, message)
} |
entropymodel.py | import math
import scipy
import csv
class EntropyModel:
def training_to_bigrams(self, training):
"""
Transform a list of training instances into a list of bigrams.
Each training instance X is changed to bXe.
'b' denotes the start symbol.
'e' denotes the end symbol
E.g.
training: ['ABB','BBC', 'BCA']
bigram_list: ['bA', 'AB', 'BB', 'Be', 'bB', BB', 'BC', 'Ce', 'bB', 'BC', 'CA', 'Ae']
:param training: a list of training instance strings
:return: a list of bigrams
"""
# transform into bigrams
bigram_list = []
for item in training:
bigram_list.extend(self.string_to_bigram(item))
return bigram_list
def string_to_bigram(self, str):
"""
Change a string into a list of bigrams.
The string str is changed to bstre. |
E.g.
str: 'ABB'
bigrams: ['bA', 'AB', 'BB', 'Be']
"""
str = 'b' + str + 'e'
bigrams = []
for i in range(0, len(str)-1):
bg = str[i: i+2]
bigrams.append(bg)
return bigrams
def bigram_frequency(self, start_symbol, bigram_list, symbols):
"""
Calculate the frequency of each bigram that starts with a given start symbol.
E.g.
start_symbol = 'A'
symbols = ['A', 'B', 'C', 'e']
bigram_list: ['bA', 'AB', 'BB', 'Be', 'bB', BB', 'BC', 'Ce', 'bB', 'BC', 'CA', 'Ae']
freq_list = {'AA': 0, 'AB':1, 'AC': 0, 'Ae': 1}
"""
# init the frequency dictionary
freq_list = {}
for s in symbols[1:]:
key = start_symbol + s
freq_list[key] = 0
# add frequency
for item in bigram_list:
if item.startswith(start_symbol):
freq_list[item] = freq_list[item] + 1
return freq_list
def bigram_prob(self, freq_dict):
"""
Calculate the probability of each unique bigram starting with a given symbol
:param freq_dict: frequency of each unique bigram starting with a given symbol
E.g.
freq_list = {'AA': 0, 'AB':1, 'AC': 0, 'Ae': 1}
prob_list = {'AA': 0, 'AB':0.5, 'AC': 0, 'Ae': 0.5}
"""
# calculate the sum of all bigrams start with a given symbol
freq_sum = sum([freq_dict[key] for key in freq_dict.keys()])
# convert frequency to probability
prob_list = {}
for key in freq_dict.keys():
prob_list[key] = freq_dict[key] / freq_sum
return prob_list
def bigram_entropy(self, prob_dict):
"""
Calculate the entropy of bigrams starting with a given symbol
:param prob_dict: probability of each unique bigram starting with a given symbol
:return: an entropy value
E.g.
prob_list = {'AA': 0, 'AB':0.5, 'AC': 0, 'Ae': 0.5}
entropy = - (0*2 + 0.5*log2(0.5)*2) = 1
"""
entropy = 0
for bigram in prob_dict.keys():
prob = prob_dict[bigram]
if (prob == 0):
continue
else:
entropy += (prob * (math.log2(prob)))
return -1 * entropy
def sum_entropy(self, test_item, edict, bigram_prob):
"""
Sum of bigram entropy for a test item
:param test_item: test item
:param edict: bigram entropy list
:return: an entropy value
E.g.
test_item = 'ABB'
edict = {'bA':1.2, 'AB': 1.3, 'BB': 0.9, 'Be': 0.4,....}
entropy = E('bA') + E('AB') + E('BB') + E('Be') = 1.2 + 1.3 + 0.9 + 0.4 = 3.8
"""
sum_entropy = 0
test_bigram = self.string_to_bigram(test_item)
for bigram in test_bigram:
if bigram_prob[bigram] == 0:
prob = 1/len(edict)
sum_entropy += (-len(edict) * prob * math.log2(prob))
else:
start_symbol = bigram[0]
sum_entropy += edict[start_symbol]
return sum_entropy
def average_entropy(self, test_item, edict, bigram_prob):
"""
Calculate the average bigram entropy of a test item
"""
return self.sum_entropy(test_item, edict, bigram_prob) / (len(test_item) + 1)
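# For example, for test_item 'ABB' the sum runs over the four bigrams
# 'bA', 'AB', 'BB' and 'Be', so the average divides that sum by len('ABB') + 1 = 4.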
def avg_prob(self, test_item, prob_dict):
"""
:param test_item:
:param prob_dict:
:return:
"""
bigram_list = self.string_to_bigram(test_item)
sum_prob = 0
for bigram in bigram_list:
sum_prob += prob_dict[bigram]
return sum_prob/(len(bigram_list)) | 'b' denotes the start symbol.
'e' denotes the end symbol |