| patch (string, 17-31.2k chars) | y (int64, 1-1) | oldf (string, 0-2.21M chars) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (string, 8-843 chars) | proj (string, 212 classes) | lang (string, 9 classes) |
|---|---|---|---|---|---|---|---|
@@ -66,6 +66,7 @@ type OnewayOutbound interface {
// Outbounds encapsulates outbound types for a service
type Outbounds struct {
- Unary UnaryOutbound
- Oneway OnewayOutbound
+ ServiceName string
+ Unary UnaryOutbound
+ Oneway OnewayOutbound
} | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package transport
import "context"
// Outbound is the common interface for all outbounds
type Outbound interface {
Lifecycle
// Transports returns the transports that are used by this outbound, so they
// can be collected for lifecycle management, typically by a Dispatcher.
//
// Though most outbounds only use a single transport, composite outbounds
// may use multiple transport protocols, particularly for shadowing traffic
// across multiple transport protocols during a transport protocol
// migration.
Transports() []Transport
}
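// As a sketch of the composite case mentioned above (hypothetical type, not
// part of this package; the remaining Outbound methods are omitted), a
// shadowing outbound would report every transport it wraps:
//
//	type shadowingOutbound struct {
//		primary, shadow UnaryOutbound
//	}
//
//	func (o *shadowingOutbound) Transports() []Transport {
//		return append(o.primary.Transports(), o.shadow.Transports()...)
//	}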
// UnaryOutbound is a transport that knows how to send unary requests for procedure
// calls.
type UnaryOutbound interface {
Outbound
// Call sends the given request through this transport and returns its
// response.
//
// This MUST NOT be called before Start() has been called successfully. This
// MAY panic if called without calling Start(). This MUST be safe to call
// concurrently.
Call(ctx context.Context, request *Request) (*Response, error)
}
// OnewayOutbound is a transport that knows how to send oneway requests for
// procedure calls.
type OnewayOutbound interface {
Outbound
// CallOneway sends the given request through this transport and returns an
// ack.
//
// This MUST NOT be called before Start() has been called successfully. This
// MAY panic if called without calling Start(). This MUST be safe to call
// concurrently.
CallOneway(ctx context.Context, request *Request) (Ack, error)
}
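// Illustrative usage only (hypothetical variable names, not part of this
// package): the outbound must already have been started, typically by a
// Dispatcher, before Call or CallOneway is invoked, and the context normally
// carries the deadline for the call:
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//	defer cancel()
//	res, err := unaryOutbound.Call(ctx, req)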
// Outbounds encapsulates outbound types for a service
type Outbounds struct {
Unary UnaryOutbound
Oneway OnewayOutbound
}
| 1 | 12,304 | We definitely want some detailed and thoughtful docs being added to this type. | yarpc-yarpc-go | go |
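The review message in this row asks for detailed docs on the Outbounds type; a minimal sketch of what such doc comments could look like on the patched struct (the wording is illustrative, not taken from the yarpc repository):

// Outbounds encapsulates the outbound transports used to call a single
// remote service. ServiceName is the name of that remote service; Unary and
// Oneway hold the outbounds used for request/response and fire-and-forget
// calls respectively, and either may be nil if the corresponding RPC type is
// not configured.
type Outbounds struct {
	ServiceName string
	Unary       UnaryOutbound
	Oneway      OnewayOutbound
}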
@@ -467,7 +467,7 @@ func NewIntDataplaneDriver(config Config) *InternalDataplane {
}
// TODO Integrate XDP and BPF infra.
- if !config.BPFEnabled && dp.xdpState == nil {
+ if !config.BPFEnabled && config.XDPEnabled && dp.xdpState == nil {
xdpState, err := NewXDPState(config.XDPAllowGeneric)
if err == nil {
if err := xdpState.WipeXDP(); err != nil { | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intdataplane
import (
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"k8s.io/client-go/kubernetes"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/arp"
"github.com/projectcalico/felix/bpf/conntrack"
"github.com/projectcalico/felix/bpf/failsafes"
bpfipsets "github.com/projectcalico/felix/bpf/ipsets"
"github.com/projectcalico/felix/bpf/nat"
bpfproxy "github.com/projectcalico/felix/bpf/proxy"
"github.com/projectcalico/felix/bpf/routes"
"github.com/projectcalico/felix/bpf/state"
"github.com/projectcalico/felix/bpf/tc"
"github.com/projectcalico/felix/config"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/jitter"
"github.com/projectcalico/felix/labelindex"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/routetable"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/felix/throttle"
"github.com/projectcalico/felix/wireguard"
"github.com/projectcalico/libcalico-go/lib/health"
lclogutils "github.com/projectcalico/libcalico-go/lib/logutils"
cprometheus "github.com/projectcalico/libcalico-go/lib/prometheus"
"github.com/projectcalico/libcalico-go/lib/set"
)
const (
// msgPeekLimit is the maximum number of messages we'll try to grab from the to-dataplane
// channel before we apply the changes. Higher values allow us to batch up more work on
// the channel for greater throughput when we're under load (at cost of higher latency).
msgPeekLimit = 100
// Interface name used by kube-proxy to bind service ips.
KubeIPVSInterface = "kube-ipvs0"
)
var (
countDataplaneSyncErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_int_dataplane_failures",
Help: "Number of times dataplane updates failed and will be retried.",
})
countMessages = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "felix_int_dataplane_messages",
Help: "Number dataplane messages by type.",
}, []string{"type"})
summaryApplyTime = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_apply_time_seconds",
Help: "Time in seconds that it took to apply a dataplane update.",
})
summaryBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_msg_batch_size",
Help: "Number of messages processed in each batch. Higher values indicate we're " +
"doing more batching to try to keep up.",
})
summaryIfaceBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_iface_msg_batch_size",
Help: "Number of interface state messages processed in each batch. Higher " +
"values indicate we're doing more batching to try to keep up.",
})
summaryAddrBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_addr_msg_batch_size",
Help: "Number of interface address messages processed in each batch. Higher " +
"values indicate we're doing more batching to try to keep up.",
})
processStartTime time.Time
zeroKey = wgtypes.Key{}
)
func init() {
prometheus.MustRegister(countDataplaneSyncErrors)
prometheus.MustRegister(summaryApplyTime)
prometheus.MustRegister(countMessages)
prometheus.MustRegister(summaryBatchSize)
prometheus.MustRegister(summaryIfaceBatchSize)
prometheus.MustRegister(summaryAddrBatchSize)
processStartTime = time.Now()
}
type Config struct {
Hostname string
IPv6Enabled bool
RuleRendererOverride rules.RuleRenderer
IPIPMTU int
VXLANMTU int
VXLANPort int
MaxIPSetSize int
IptablesBackend string
IPSetsRefreshInterval time.Duration
RouteRefreshInterval time.Duration
DeviceRouteSourceAddress net.IP
DeviceRouteProtocol int
RemoveExternalRoutes bool
IptablesRefreshInterval time.Duration
IptablesPostWriteCheckInterval time.Duration
IptablesInsertMode string
IptablesLockFilePath string
IptablesLockTimeout time.Duration
IptablesLockProbeInterval time.Duration
XDPRefreshInterval time.Duration
Wireguard wireguard.Config
NetlinkTimeout time.Duration
RulesConfig rules.Config
IfaceMonitorConfig ifacemonitor.Config
StatusReportingInterval time.Duration
ConfigChangedRestartCallback func()
FatalErrorRestartCallback func(error)
PostInSyncCallback func()
HealthAggregator *health.HealthAggregator
RouteTableManager *idalloc.IndexAllocator
DebugSimulateDataplaneHangAfter time.Duration
ExternalNodesCidrs []string
BPFEnabled bool
BPFDisableUnprivileged bool
BPFKubeProxyIptablesCleanupEnabled bool
BPFLogLevel string
BPFExtToServiceConnmark int
BPFDataIfacePattern *regexp.Regexp
XDPEnabled bool
XDPAllowGeneric bool
BPFConntrackTimeouts conntrack.Timeouts
BPFCgroupV2 string
BPFConnTimeLBEnabled bool
BPFMapRepin bool
BPFNodePortDSREnabled bool
KubeProxyMinSyncPeriod time.Duration
KubeProxyEndpointSlicesEnabled bool
SidecarAccelerationEnabled bool
LookPathOverride func(file string) (string, error)
KubeClientSet *kubernetes.Clientset
FeatureDetectOverrides map[string]string
// Populated with the smallest host MTU based on auto-detection.
hostMTU int
MTUIfacePattern *regexp.Regexp
RouteSource string
KubernetesProvider config.Provider
}
type UpdateBatchResolver interface {
// Opportunity for a manager component to resolve state that depends jointly on the updates
// that it has seen since the preceding CompleteDeferredWork call. Processing here can
// include passing resolved state to other managers. It should not include any actual
// dataplane updates yet. (Those should be actioned in CompleteDeferredWork.)
ResolveUpdateBatch() error
}
// InternalDataplane implements an in-process Felix dataplane driver based on iptables
// and ipsets. It communicates with the datastore-facing part of Felix via the
// Send/RecvMessage methods, which operate on the protobuf-defined API objects.
//
// Architecture
//
// The internal dataplane driver is organised around a main event loop, which handles
// update events from the datastore and dataplane.
//
// Each pass around the main loop has two phases. In the first phase, updates are fanned
// out to "manager" objects, which calculate the changes that are needed and pass them to
// the dataplane programming layer. In the second phase, the dataplane layer applies the
// updates in a consistent sequence. The second phase is skipped until the datastore is
// in sync; this ensures that the first update to the dataplane applies a consistent
// snapshot.
//
// Having the dataplane layer batch updates has several advantages. It is much more
// efficient to batch updates, since each call to iptables/ipsets has a high fixed cost.
// In addition, it allows for different managers to make updates without having to
// coordinate on their sequencing.
//
// Requirements on the API
//
// The internal dataplane does not do consistency checks on the incoming data (as the
// old Python-based driver used to do). It expects to be told about dependent resources
// before they are needed and for their lifetime to exceed that of the resources that
// depend on them. For example, it is important that the datastore layer send an
// IP set create event before it sends a rule that references that IP set.
type InternalDataplane struct {
toDataplane chan interface{}
fromDataplane chan interface{}
allIptablesTables []*iptables.Table
iptablesMangleTables []*iptables.Table
iptablesNATTables []*iptables.Table
iptablesRawTables []*iptables.Table
iptablesFilterTables []*iptables.Table
ipSets []ipsetsDataplane
ipipManager *ipipManager
wireguardManager *wireguardManager
ifaceMonitor *ifacemonitor.InterfaceMonitor
ifaceUpdates chan *ifaceUpdate
ifaceAddrUpdates chan *ifaceAddrsUpdate
endpointStatusCombiner *endpointStatusCombiner
allManagers []Manager
managersWithRouteTables []ManagerWithRouteTables
ruleRenderer rules.RuleRenderer
// dataplaneNeedsSync is set if the dataplane is dirty in some way, i.e. we need to
// call apply().
dataplaneNeedsSync bool
// forceIPSetsRefresh is set by the IP sets refresh timer to indicate that we should
// check the IP sets in the dataplane.
forceIPSetsRefresh bool
// forceRouteRefresh is set by the route refresh timer to indicate that we should
// check the routes in the dataplane.
forceRouteRefresh bool
// forceXDPRefresh is set by the XDP refresh timer to indicate that we should
// check the XDP state in the dataplane.
forceXDPRefresh bool
// doneFirstApply is set after we finish the first update to the dataplane. It indicates
// that the dataplane should now be in sync.
doneFirstApply bool
reschedTimer *time.Timer
reschedC <-chan time.Time
applyThrottle *throttle.Throttle
config Config
debugHangC <-chan time.Time
xdpState *xdpState
sockmapState *sockmapState
endpointsSourceV4 endpointsSource
ipsetsSourceV4 ipsetsSource
callbacks *callbacks
loopSummarizer *logutils.Summarizer
}
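// Schematic of one pass of the main loop described above (heavily simplified
// from loopUpdatingDataplane and apply below; throttling, refresh timers and
// error handling omitted):
//
//	// Phase 1: fan the batch of updates out to the manager objects.
//	for _, msg := range batch {
//		for _, mgr := range dp.allManagers {
//			mgr.OnUpdate(msg)
//		}
//	}
//	// Phase 2: skipped until the datastore is in sync; then flush the
//	// queued work to the dataplane in one consistent sequence.
//	if datastoreInSync && dp.dataplaneNeedsSync {
//		dp.apply()
//	}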
const (
healthName = "int_dataplane"
healthInterval = 10 * time.Second
ipipMTUOverhead = 20
vxlanMTUOverhead = 50
wireguardMTUOverhead = 60
aksMTUOverhead = 100
)
func NewIntDataplaneDriver(config Config) *InternalDataplane {
log.WithField("config", config).Info("Creating internal dataplane driver.")
ruleRenderer := config.RuleRendererOverride
if ruleRenderer == nil {
ruleRenderer = rules.NewRenderer(config.RulesConfig)
}
epMarkMapper := rules.NewEndpointMarkMapper(
config.RulesConfig.IptablesMarkEndpoint,
config.RulesConfig.IptablesMarkNonCaliEndpoint)
// Auto-detect host MTU.
hostMTU, err := findHostMTU(config.MTUIfacePattern)
if err != nil {
log.WithError(err).Fatal("Unable to detect host MTU, shutting down")
return nil
}
ConfigureDefaultMTUs(hostMTU, &config)
podMTU := determinePodMTU(config)
if err := writeMTUFile(podMTU); err != nil {
log.WithError(err).Error("Failed to write MTU file, pod MTU may not be properly set")
}
dp := &InternalDataplane{
toDataplane: make(chan interface{}, msgPeekLimit),
fromDataplane: make(chan interface{}, 100),
ruleRenderer: ruleRenderer,
ifaceMonitor: ifacemonitor.New(config.IfaceMonitorConfig, config.FatalErrorRestartCallback),
ifaceUpdates: make(chan *ifaceUpdate, 100),
ifaceAddrUpdates: make(chan *ifaceAddrsUpdate, 100),
config: config,
applyThrottle: throttle.New(10),
loopSummarizer: logutils.NewSummarizer("dataplane reconciliation loops"),
}
dp.applyThrottle.Refill() // Allow the first apply() immediately.
dp.ifaceMonitor.StateCallback = dp.onIfaceStateChange
dp.ifaceMonitor.AddrCallback = dp.onIfaceAddrsChange
backendMode := iptables.DetectBackend(config.LookPathOverride, iptables.NewRealCmd, config.IptablesBackend)
// Most iptables tables need the same options.
iptablesOptions := iptables.TableOptions{
HistoricChainPrefixes: rules.AllHistoricChainNamePrefixes,
InsertMode: config.IptablesInsertMode,
RefreshInterval: config.IptablesRefreshInterval,
PostWriteInterval: config.IptablesPostWriteCheckInterval,
LockTimeout: config.IptablesLockTimeout,
LockProbeInterval: config.IptablesLockProbeInterval,
BackendMode: backendMode,
LookPathOverride: config.LookPathOverride,
OnStillAlive: dp.reportHealth,
OpRecorder: dp.loopSummarizer,
}
if config.BPFEnabled && config.BPFKubeProxyIptablesCleanupEnabled {
// If BPF-mode is enabled, clean up kube-proxy's rules too.
log.Info("BPF enabled, configuring iptables layer to clean up kube-proxy's rules.")
iptablesOptions.ExtraCleanupRegexPattern = rules.KubeProxyInsertRuleRegex
iptablesOptions.HistoricChainPrefixes = append(iptablesOptions.HistoricChainPrefixes, rules.KubeProxyChainPrefixes...)
}
// However, the NAT tables need an extra cleanup regex.
iptablesNATOptions := iptablesOptions
if iptablesNATOptions.ExtraCleanupRegexPattern == "" {
iptablesNATOptions.ExtraCleanupRegexPattern = rules.HistoricInsertedNATRuleRegex
} else {
iptablesNATOptions.ExtraCleanupRegexPattern += "|" + rules.HistoricInsertedNATRuleRegex
}
featureDetector := iptables.NewFeatureDetector(config.FeatureDetectOverrides)
iptablesFeatures := featureDetector.GetFeatures()
var iptablesLock sync.Locker
if iptablesFeatures.RestoreSupportsLock {
log.Debug("Calico implementation of iptables lock disabled (because detected version of " +
"iptables-restore will use its own implementation).")
iptablesLock = dummyLock{}
} else if config.IptablesLockTimeout <= 0 {
log.Debug("Calico implementation of iptables lock disabled (by configuration).")
iptablesLock = dummyLock{}
} else {
// Create the shared iptables lock. This allows us to block other processes from
// manipulating iptables while we make our updates. We use a shared lock because we
// actually do multiple updates in parallel (but to different tables), which is safe.
log.WithField("timeout", config.IptablesLockTimeout).Debug(
"Calico implementation of iptables lock enabled")
iptablesLock = iptables.NewSharedLock(
config.IptablesLockFilePath,
config.IptablesLockTimeout,
config.IptablesLockProbeInterval,
)
}
mangleTableV4 := iptables.NewTable(
"mangle",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
natTableV4 := iptables.NewTable(
"nat",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesNATOptions,
)
rawTableV4 := iptables.NewTable(
"raw",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
filterTableV4 := iptables.NewTable(
"filter",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
ipSetsConfigV4 := config.RulesConfig.IPSetConfigV4
ipSetsV4 := ipsets.NewIPSets(ipSetsConfigV4, dp.loopSummarizer)
dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV4)
dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV4)
dp.iptablesMangleTables = append(dp.iptablesMangleTables, mangleTableV4)
dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV4)
dp.ipSets = append(dp.ipSets, ipSetsV4)
if config.RulesConfig.VXLANEnabled {
routeTableVXLAN := routetable.New([]string{"^vxlan.calico$"}, 4, true, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, true, 0,
dp.loopSummarizer)
vxlanManager := newVXLANManager(
ipSetsV4,
routeTableVXLAN,
"vxlan.calico",
config,
dp.loopSummarizer,
)
go vxlanManager.KeepVXLANDeviceInSync(config.VXLANMTU, iptablesFeatures.ChecksumOffloadBroken, 10*time.Second)
dp.RegisterManager(vxlanManager)
} else {
cleanUpVXLANDevice()
}
dp.endpointStatusCombiner = newEndpointStatusCombiner(dp.fromDataplane, config.IPv6Enabled)
callbacks := newCallbacks()
dp.callbacks = callbacks
if !config.BPFEnabled && config.XDPEnabled {
if err := bpf.SupportsXDP(); err != nil {
log.WithError(err).Warn("Can't enable XDP acceleration.")
} else {
st, err := NewXDPState(config.XDPAllowGeneric)
if err != nil {
log.WithError(err).Warn("Can't enable XDP acceleration.")
} else {
dp.xdpState = st
dp.xdpState.PopulateCallbacks(callbacks)
log.Info("XDP acceleration enabled.")
}
}
} else {
log.Info("XDP acceleration disabled.")
}
// TODO Integrate XDP and BPF infra.
if !config.BPFEnabled && dp.xdpState == nil {
xdpState, err := NewXDPState(config.XDPAllowGeneric)
if err == nil {
if err := xdpState.WipeXDP(); err != nil {
log.WithError(err).Warn("Failed to cleanup preexisting XDP state")
}
}
// if we can't create an XDP state it means we couldn't get a working
// bpffs so there's nothing to clean up
}
if config.SidecarAccelerationEnabled {
if err := bpf.SupportsSockmap(); err != nil {
log.WithError(err).Warn("Can't enable Sockmap acceleration.")
} else {
st, err := NewSockmapState()
if err != nil {
log.WithError(err).Warn("Can't enable Sockmap acceleration.")
} else {
dp.sockmapState = st
dp.sockmapState.PopulateCallbacks(callbacks)
if err := dp.sockmapState.SetupSockmapAcceleration(); err != nil {
dp.sockmapState = nil
log.WithError(err).Warn("Failed to set up Sockmap acceleration")
} else {
log.Info("Sockmap acceleration enabled.")
}
}
}
}
if dp.sockmapState == nil {
st, err := NewSockmapState()
if err == nil {
st.WipeSockmap(bpf.FindInBPFFSOnly)
}
// if we can't create a sockmap state it means we couldn't get a working
// bpffs so there's nothing to clean up
}
if !config.BPFEnabled {
// BPF mode disabled, create the iptables-only managers.
ipsetsManager := newIPSetsManager(ipSetsV4, config.MaxIPSetSize, callbacks)
dp.RegisterManager(ipsetsManager)
dp.ipsetsSourceV4 = ipsetsManager
// TODO Connect host IP manager to BPF
dp.RegisterManager(newHostIPManager(
config.RulesConfig.WorkloadIfacePrefixes,
rules.IPSetIDThisHostIPs,
ipSetsV4,
config.MaxIPSetSize))
dp.RegisterManager(newPolicyManager(rawTableV4, mangleTableV4, filterTableV4, ruleRenderer, 4, callbacks))
// Clean up any leftover BPF state.
err := nat.RemoveConnectTimeLoadBalancer("")
if err != nil {
log.WithError(err).Info("Failed to remove BPF connect-time load balancer, ignoring.")
}
tc.CleanUpProgramsAndPins()
}
interfaceRegexes := make([]string, len(config.RulesConfig.WorkloadIfacePrefixes))
for i, r := range config.RulesConfig.WorkloadIfacePrefixes {
interfaceRegexes[i] = "^" + r + ".*"
}
bpfMapContext := &bpf.MapContext{
RepinningEnabled: config.BPFMapRepin,
}
var (
bpfEndpointManager *bpfEndpointManager
)
if config.BPFEnabled {
log.Info("BPF enabled, starting BPF endpoint manager and map manager.")
// Register map managers first since they create the maps that will be used by the endpoint manager.
// Important that we create the maps before we load a BPF program with TC since we make sure the map
// metadata name is set whereas TC doesn't set that field.
ipSetIDAllocator := idalloc.New()
ipSetsMap := bpfipsets.Map(bpfMapContext)
err := ipSetsMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create ipsets BPF map.")
}
ipSetsV4 := bpfipsets.NewBPFIPSets(
ipSetsConfigV4,
ipSetIDAllocator,
ipSetsMap,
dp.loopSummarizer,
)
dp.ipSets = append(dp.ipSets, ipSetsV4)
dp.RegisterManager(newIPSetsManager(ipSetsV4, config.MaxIPSetSize, callbacks))
bpfRTMgr := newBPFRouteManager(config.Hostname, config.ExternalNodesCidrs, bpfMapContext, dp.loopSummarizer)
dp.RegisterManager(bpfRTMgr)
// Forwarding into an IPIP tunnel fails silently because IPIP tunnels are L3 devices and support for
// L3 devices in BPF is not available yet. Disable the FIB lookup in that case.
fibLookupEnabled := !config.RulesConfig.IPIPEnabled
stateMap := state.Map(bpfMapContext)
err = stateMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create state BPF map.")
}
arpMap := arp.Map(bpfMapContext)
err = arpMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create ARP BPF map.")
}
// The failsafe manager sets up the failsafe port map. It's important that it is registered before the
// endpoint managers so that the map is brought up to date before they run for the first time.
failsafesMap := failsafes.Map(bpfMapContext)
err = failsafesMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create failsafe port BPF map.")
}
failsafeMgr := failsafes.NewManager(
failsafesMap,
config.RulesConfig.FailsafeInboundHostPorts,
config.RulesConfig.FailsafeOutboundHostPorts,
dp.loopSummarizer,
)
dp.RegisterManager(failsafeMgr)
workloadIfaceRegex := regexp.MustCompile(strings.Join(interfaceRegexes, "|"))
bpfEndpointManager = newBPFEndpointManager(
config.BPFLogLevel,
config.Hostname,
fibLookupEnabled,
config.RulesConfig.EndpointToHostAction,
config.BPFDataIfacePattern,
workloadIfaceRegex,
ipSetIDAllocator,
config.VXLANMTU,
uint16(config.VXLANPort),
config.BPFNodePortDSREnabled,
config.BPFExtToServiceConnmark,
ipSetsMap,
stateMap,
ruleRenderer,
filterTableV4,
dp.reportHealth,
dp.loopSummarizer,
)
dp.RegisterManager(bpfEndpointManager)
// Pre-create the NAT maps so that later operations can assume access.
frontendMap := nat.FrontendMap(bpfMapContext)
err = frontendMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT frontend BPF map.")
}
backendMap := nat.BackendMap(bpfMapContext)
err = backendMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT backend BPF map.")
}
backendAffinityMap := nat.AffinityMap(bpfMapContext)
err = backendAffinityMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT backend affinity BPF map.")
}
routeMap := routes.Map(bpfMapContext)
err = routeMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create routes BPF map.")
}
ctMap := conntrack.Map(bpfMapContext)
err = ctMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create conntrack BPF map.")
}
conntrackScanner := conntrack.NewScanner(ctMap,
conntrack.NewLivenessScanner(config.BPFConntrackTimeouts, config.BPFNodePortDSREnabled))
// Before we start, scan for all finished / timed out connections to
// free up the conntrack table asap as it may take time to sync up the
// proxy and kick off the first full cleaner scan.
conntrackScanner.Scan()
bpfproxyOpts := []bpfproxy.Option{
bpfproxy.WithMinSyncPeriod(config.KubeProxyMinSyncPeriod),
}
if config.KubeProxyEndpointSlicesEnabled {
bpfproxyOpts = append(bpfproxyOpts, bpfproxy.WithEndpointsSlices())
}
if config.BPFNodePortDSREnabled {
bpfproxyOpts = append(bpfproxyOpts, bpfproxy.WithDSREnabled())
}
if config.KubeClientSet != nil {
// We have a Kubernetes connection, start watching services and populating the NAT maps.
kp, err := bpfproxy.StartKubeProxy(
config.KubeClientSet,
config.Hostname,
frontendMap,
backendMap,
backendAffinityMap,
ctMap,
bpfproxyOpts...,
)
if err != nil {
log.WithError(err).Panic("Failed to start kube-proxy.")
}
bpfRTMgr.setHostIPUpdatesCallBack(kp.OnHostIPsUpdate)
bpfRTMgr.setRoutesCallBacks(kp.OnRouteUpdate, kp.OnRouteDelete)
conntrackScanner.AddUnlocked(conntrack.NewStaleNATScanner(kp))
conntrackScanner.Start()
} else {
log.Info("BPF enabled but no Kubernetes client available, unable to run kube-proxy module.")
}
if config.BPFConnTimeLBEnabled {
// Activate the connect-time load balancer.
err = nat.InstallConnectTimeLoadBalancer(frontendMap, backendMap, routeMap, config.BPFCgroupV2, config.BPFLogLevel)
if err != nil {
log.WithError(err).Panic("BPFConnTimeLBEnabled but failed to attach connect-time load balancer, bailing out.")
}
} else {
// Deactivate the connect-time load balancer.
err = nat.RemoveConnectTimeLoadBalancer(config.BPFCgroupV2)
if err != nil {
log.WithError(err).Warn("Failed to detach connect-time load balancer. Ignoring.")
}
}
}
routeTableV4 := routetable.New(interfaceRegexes, 4, false, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, config.RemoveExternalRoutes, 0,
dp.loopSummarizer)
epManager := newEndpointManager(
rawTableV4,
mangleTableV4,
filterTableV4,
ruleRenderer,
routeTableV4,
4,
epMarkMapper,
config.RulesConfig.KubeIPVSSupportEnabled,
config.RulesConfig.WorkloadIfacePrefixes,
dp.endpointStatusCombiner.OnEndpointStatusUpdate,
config.BPFEnabled,
bpfEndpointManager,
callbacks)
dp.RegisterManager(epManager)
dp.endpointsSourceV4 = epManager
dp.RegisterManager(newFloatingIPManager(natTableV4, ruleRenderer, 4))
dp.RegisterManager(newMasqManager(ipSetsV4, natTableV4, ruleRenderer, config.MaxIPSetSize, 4))
if config.RulesConfig.IPIPEnabled {
// Add a manager to keep the all-hosts IP set up to date.
dp.ipipManager = newIPIPManager(ipSetsV4, config.MaxIPSetSize, config.ExternalNodesCidrs)
dp.RegisterManager(dp.ipipManager) // IPv4-only
}
// Add a manager for wireguard configuration. This is added irrespective of whether wireguard is actually enabled
// because it may need to tidy up some of the routing rules when disabled.
cryptoRouteTableWireguard := wireguard.New(config.Hostname, &config.Wireguard, config.NetlinkTimeout,
config.DeviceRouteProtocol, func(publicKey wgtypes.Key) error {
if publicKey == zeroKey {
dp.fromDataplane <- &proto.WireguardStatusUpdate{PublicKey: ""}
} else {
dp.fromDataplane <- &proto.WireguardStatusUpdate{PublicKey: publicKey.String()}
}
return nil
},
dp.loopSummarizer)
dp.wireguardManager = newWireguardManager(cryptoRouteTableWireguard, config)
dp.RegisterManager(dp.wireguardManager) // IPv4-only
dp.RegisterManager(newServiceLoopManager(filterTableV4, ruleRenderer, 4))
if config.IPv6Enabled {
mangleTableV6 := iptables.NewTable(
"mangle",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
natTableV6 := iptables.NewTable(
"nat",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesNATOptions,
)
rawTableV6 := iptables.NewTable(
"raw",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
filterTableV6 := iptables.NewTable(
"filter",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
ipSetsConfigV6 := config.RulesConfig.IPSetConfigV6
ipSetsV6 := ipsets.NewIPSets(ipSetsConfigV6, dp.loopSummarizer)
dp.ipSets = append(dp.ipSets, ipSetsV6)
dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV6)
dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV6)
dp.iptablesMangleTables = append(dp.iptablesMangleTables, mangleTableV6)
dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV6)
routeTableV6 := routetable.New(
interfaceRegexes, 6, false, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, config.RemoveExternalRoutes, 0,
dp.loopSummarizer)
if !config.BPFEnabled {
dp.RegisterManager(newIPSetsManager(ipSetsV6, config.MaxIPSetSize, callbacks))
dp.RegisterManager(newHostIPManager(
config.RulesConfig.WorkloadIfacePrefixes,
rules.IPSetIDThisHostIPs,
ipSetsV6,
config.MaxIPSetSize))
dp.RegisterManager(newPolicyManager(rawTableV6, mangleTableV6, filterTableV6, ruleRenderer, 6, callbacks))
}
dp.RegisterManager(newEndpointManager(
rawTableV6,
mangleTableV6,
filterTableV6,
ruleRenderer,
routeTableV6,
6,
epMarkMapper,
config.RulesConfig.KubeIPVSSupportEnabled,
config.RulesConfig.WorkloadIfacePrefixes,
dp.endpointStatusCombiner.OnEndpointStatusUpdate,
config.BPFEnabled,
nil,
callbacks))
dp.RegisterManager(newFloatingIPManager(natTableV6, ruleRenderer, 6))
dp.RegisterManager(newMasqManager(ipSetsV6, natTableV6, ruleRenderer, config.MaxIPSetSize, 6))
dp.RegisterManager(newServiceLoopManager(filterTableV6, ruleRenderer, 6))
}
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesMangleTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesNATTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesFilterTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesRawTables...)
// Register that we will report liveness and readiness.
if config.HealthAggregator != nil {
log.Info("Registering to report health.")
config.HealthAggregator.RegisterReporter(
healthName,
&health.HealthReport{Live: true, Ready: true},
healthInterval*2,
)
}
if config.DebugSimulateDataplaneHangAfter != 0 {
log.WithField("delay", config.DebugSimulateDataplaneHangAfter).Warn(
"Simulating a dataplane hang.")
dp.debugHangC = time.After(config.DebugSimulateDataplaneHangAfter)
}
return dp
}
// findHostMTU auto-detects the smallest host interface MTU.
func findHostMTU(matchRegex *regexp.Regexp) (int, error) {
// Find all the interfaces on the host.
links, err := netlink.LinkList()
if err != nil {
log.WithError(err).Error("Failed to list interfaces. Unable to auto-detect MTU")
return 0, err
}
// Iterate through them, keeping track of the lowest MTU.
smallest := 0
for _, l := range links {
// Skip links that we know are not external interfaces.
fields := log.Fields{"mtu": l.Attrs().MTU, "name": l.Attrs().Name}
if matchRegex == nil || !matchRegex.MatchString(l.Attrs().Name) {
log.WithFields(fields).Debug("Skipping interface for MTU detection")
continue
}
log.WithFields(fields).Debug("Examining link for MTU calculation")
if l.Attrs().MTU < smallest || smallest == 0 {
smallest = l.Attrs().MTU
}
}
if smallest == 0 {
// We failed to find a usable interface. Default the MTU of the host
// to 1460 - the smallest among common cloud providers.
log.Warn("Failed to auto-detect host MTU - no interfaces matched the MTU interface pattern. To use auto-MTU, set mtuIfacePattern to match your host's interfaces")
return 1460, nil
}
return smallest, nil
}
// writeMTUFile writes the smallest MTU among enabled encapsulation types to disk
// for use by other components (e.g., CNI plugin).
func writeMTUFile(mtu int) error {
// Make sure directory exists.
if err := os.MkdirAll("/var/lib/calico", os.ModePerm); err != nil {
return fmt.Errorf("failed to create directory /var/lib/calico: %s", err)
}
// Write the smallest MTU to disk so other components can rely on this calculation consistently.
filename := "/var/lib/calico/mtu"
log.Debugf("Writing %d to "+filename, mtu)
if err := ioutil.WriteFile(filename, []byte(fmt.Sprintf("%d", mtu)), 0644); err != nil {
log.WithError(err).Error("Unable to write to " + filename)
return err
}
return nil
}
// determinePodMTU looks at the configured MTUs and enabled encapsulations to determine which
// value for MTU should be used for pod interfaces.
func determinePodMTU(config Config) int {
// Determine the smallest MTU among enabled encap methods. If none of the encap methods are
// enabled, we'll just use the host's MTU.
mtu := 0
type mtuState struct {
mtu int
enabled bool
}
for _, s := range []mtuState{
{config.IPIPMTU, config.RulesConfig.IPIPEnabled},
{config.VXLANMTU, config.RulesConfig.VXLANEnabled},
{config.Wireguard.MTU, config.Wireguard.Enabled},
} {
if s.enabled && s.mtu != 0 && (s.mtu < mtu || mtu == 0) {
mtu = s.mtu
}
}
if mtu == 0 {
// No enabled encapsulation. Just use the host MTU.
mtu = config.hostMTU
} else if mtu > config.hostMTU {
fields := logrus.Fields{"mtu": mtu, "hostMTU": config.hostMTU}
log.WithFields(fields).Warn("Configured MTU is larger than detected host interface MTU")
}
log.WithField("mtu", mtu).Info("Determined pod MTU")
return mtu
}
// ConfigureDefaultMTUs defaults any MTU configurations that have not been set.
// We default the values even if the encap is not enabled, in order to match behavior from earlier versions of Calico.
// However, the MTU will only be considered for allocation to pod interfaces if the encap is enabled.
func ConfigureDefaultMTUs(hostMTU int, c *Config) {
c.hostMTU = hostMTU
if c.IPIPMTU == 0 {
log.Debug("Defaulting IPIP MTU based on host")
c.IPIPMTU = hostMTU - ipipMTUOverhead
}
if c.VXLANMTU == 0 {
log.Debug("Defaulting VXLAN MTU based on host")
c.VXLANMTU = hostMTU - vxlanMTUOverhead
}
if c.Wireguard.MTU == 0 {
if c.KubernetesProvider == config.ProviderAKS && c.RouteSource == "WorkloadIPs" {
// The default MTU on Azure is 1500, but the underlying network stack will fragment packets at 1400 bytes,
// see https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu
// for details.
// Additionally, Wireguard sets the DF bit on its packets, and so if the MTU is set too high large packets
// will be dropped. Therefore it is necessary to allow for the difference between the MTU of the host and
// the underlying network.
log.Debug("Defaulting Wireguard MTU based on host and AKS with WorkloadIPs")
c.Wireguard.MTU = hostMTU - aksMTUOverhead - wireguardMTUOverhead
} else {
log.Debug("Defaulting Wireguard MTU based on host")
c.Wireguard.MTU = hostMTU - wireguardMTUOverhead
}
}
}
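// Worked example (assumed host MTU of 1500, for illustration only): with no
// explicit overrides the defaults above give IPIPMTU = 1500-20 = 1480,
// VXLANMTU = 1500-50 = 1450 and Wireguard.MTU = 1500-60 = 1440 (or
// 1500-100-60 = 1340 on AKS with RouteSource "WorkloadIPs"). determinePodMTU
// then takes the smallest value among the encapsulations that are actually
// enabled, so with only VXLAN enabled the pod MTU written by writeMTUFile
// would be 1450.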
func cleanUpVXLANDevice() {
// If VXLAN is not enabled, check to see if there is a VXLAN device and delete it if there is.
log.Debug("Checking if we need to clean up the VXLAN device")
link, err := netlink.LinkByName("vxlan.calico")
if err != nil {
if _, ok := err.(netlink.LinkNotFoundError); ok {
log.Debug("VXLAN disabled and no VXLAN device found")
return
}
log.WithError(err).Warnf("VXLAN disabled and failed to query VXLAN device. Ignoring.")
return
}
if err = netlink.LinkDel(link); err != nil {
log.WithError(err).Error("VXLAN disabled and failed to delete unwanted VXLAN device. Ignoring.")
}
}
type Manager interface {
// OnUpdate is called for each protobuf message from the datastore. May either directly
// send updates to the IPSets and iptables.Table objects (which will queue the updates
// until the main loop instructs them to act) or (for efficiency) may wait until
// a call to CompleteDeferredWork() to flush updates to the dataplane.
OnUpdate(protoBufMsg interface{})
// Called before the main loop flushes updates to the dataplane to allow for batched
// work to be completed.
CompleteDeferredWork() error
}
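// A minimal sketch of the Manager contract (hypothetical; not a real Felix
// manager): updates are only queued in OnUpdate, and the dataplane is only
// touched when the main loop calls CompleteDeferredWork.
type exampleCountingManager struct {
	pendingUpdates int
}

func (m *exampleCountingManager) OnUpdate(protoBufMsg interface{}) {
	// Record the update; a real manager queues the specific changes it needs
	// to make and defers the actual programming.
	m.pendingUpdates++
}

func (m *exampleCountingManager) CompleteDeferredWork() error {
	log.WithField("pendingUpdates", m.pendingUpdates).Debug("Example manager flushing queued work")
	m.pendingUpdates = 0
	return nil
}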
type ManagerWithRouteTables interface {
Manager
GetRouteTableSyncers() []routeTableSyncer
}
func (d *InternalDataplane) routeTableSyncers() []routeTableSyncer {
var rts []routeTableSyncer
for _, mrts := range d.managersWithRouteTables {
rts = append(rts, mrts.GetRouteTableSyncers()...)
}
return rts
}
func (d *InternalDataplane) RegisterManager(mgr Manager) {
switch mgr := mgr.(type) {
case ManagerWithRouteTables:
// Used to log the whole manager out here but if we do that then we cause races if the manager has
// other threads or locks.
log.WithField("manager", reflect.TypeOf(mgr).Name()).Debug("registering ManagerWithRouteTables")
d.managersWithRouteTables = append(d.managersWithRouteTables, mgr)
}
d.allManagers = append(d.allManagers, mgr)
}
func (d *InternalDataplane) Start() {
// Do our start-of-day configuration.
d.doStaticDataplaneConfig()
// Then, start the worker threads.
go d.loopUpdatingDataplane()
go d.loopReportingStatus()
go d.ifaceMonitor.MonitorInterfaces()
go d.monitorHostMTU()
}
// onIfaceStateChange is our interface monitor callback. It gets called from the monitor's thread.
func (d *InternalDataplane) onIfaceStateChange(ifaceName string, state ifacemonitor.State, ifIndex int) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"ifIndex": ifIndex,
"state": state,
}).Info("Linux interface state changed.")
d.ifaceUpdates <- &ifaceUpdate{
Name: ifaceName,
State: state,
Index: ifIndex,
}
}
type ifaceUpdate struct {
Name string
State ifacemonitor.State
Index int
}
// Check if current felix ipvs config is correct when felix gets a kube-ipvs0 interface update.
// If KubeIPVSInterface is UP and felix ipvs support is disabled (kube-proxy switched from iptables to ipvs mode),
// or if KubeIPVSInterface is DOWN and felix ipvs support is enabled (kube-proxy switched from ipvs to iptables mode),
// restart felix to pick up correct ipvs support mode.
func (d *InternalDataplane) checkIPVSConfigOnStateUpdate(state ifacemonitor.State) {
if (!d.config.RulesConfig.KubeIPVSSupportEnabled && state == ifacemonitor.StateUp) ||
(d.config.RulesConfig.KubeIPVSSupportEnabled && state == ifacemonitor.StateDown) {
log.WithFields(log.Fields{
"ipvsIfaceState": state,
"ipvsSupport": d.config.RulesConfig.KubeIPVSSupportEnabled,
}).Info("kube-proxy mode changed. Restart felix.")
d.config.ConfigChangedRestartCallback()
}
}
// onIfaceAddrsChange is our interface address monitor callback. It gets called
// from the monitor's thread.
func (d *InternalDataplane) onIfaceAddrsChange(ifaceName string, addrs set.Set) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"addrs": addrs,
}).Info("Linux interface addrs changed.")
d.ifaceAddrUpdates <- &ifaceAddrsUpdate{
Name: ifaceName,
Addrs: addrs,
}
}
type ifaceAddrsUpdate struct {
Name string
Addrs set.Set
}
func (d *InternalDataplane) SendMessage(msg interface{}) error {
d.toDataplane <- msg
return nil
}
func (d *InternalDataplane) RecvMessage() (interface{}, error) {
return <-d.fromDataplane, nil
}
func (d *InternalDataplane) monitorHostMTU() {
for {
mtu, err := findHostMTU(d.config.MTUIfacePattern)
if err != nil {
log.WithError(err).Error("Error detecting host MTU")
} else if d.config.hostMTU != mtu {
// Since log writing is done on a background thread, we set the force-flush flag on this log to ensure that
// all the in-flight logs get written before we exit.
log.WithFields(log.Fields{lclogutils.FieldForceFlush: true}).Info("Host MTU changed")
d.config.ConfigChangedRestartCallback()
}
time.Sleep(30 * time.Second)
}
}
// doStaticDataplaneConfig sets up the kernel and our static iptables chains. Should be called
// once at start of day before starting the main loop. The actual iptables programming is deferred
// to the main loop.
func (d *InternalDataplane) doStaticDataplaneConfig() {
// Check/configure global kernel parameters.
d.configureKernel()
if d.config.BPFEnabled {
d.setUpIptablesBPF()
} else {
d.setUpIptablesNormal()
}
if d.config.RulesConfig.IPIPEnabled {
log.Info("IPIP enabled, starting thread to keep tunnel configuration in sync.")
go d.ipipManager.KeepIPIPDeviceInSync(
d.config.IPIPMTU,
d.config.RulesConfig.IPIPTunnelAddress,
)
} else {
log.Info("IPIP disabled. Not starting tunnel update thread.")
}
}
func (d *InternalDataplane) setUpIptablesBPF() {
rulesConfig := d.config.RulesConfig
for _, t := range d.iptablesFilterTables {
fwdRules := []iptables.Rule{
{
// Bypass is a strong signal from the BPF program, it means that the flow is approved
// by the program at both ingress and egress.
Comment: []string{"Pre-approved by BPF programs."},
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypass, tc.MarkSeenBypassMask),
Action: iptables.AcceptAction{},
},
}
var inputRules, outputRules []iptables.Rule
// Handle packets for flows that pre-date the BPF programs. The BPF program doesn't have any conntrack
// state for these so it allows them to fall through to iptables with a mark set.
inputRules = append(inputRules,
iptables.Rule{
Match: iptables.Match().
MarkMatchesWithMask(tc.MarkSeenFallThrough, tc.MarkSeenFallThroughMask).
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Accept packets from flows that pre-date BPF."},
Action: iptables.AcceptAction{},
},
iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenFallThrough, tc.MarkSeenFallThroughMask),
Comment: []string{"Drop packets from unknown flows."},
Action: iptables.DropAction{},
},
)
// Mark traffic leaving the host that already has an established linux conntrack entry.
outputRules = append(outputRules,
iptables.Rule{
Match: iptables.Match().
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Mark pre-established host flows."},
Action: iptables.SetMaskedMarkAction{
Mark: tc.MarkLinuxConntrackEstablished,
Mask: tc.MarkLinuxConntrackEstablishedMask,
},
},
)
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
fwdRules = append(fwdRules,
// Drop packets that have come from a workload but have not been through our BPF program.
iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").NotMarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.DropAction{},
Comment: []string{"From workload without BPF seen mark"},
},
)
if rulesConfig.EndpointToHostAction == "ACCEPT" {
// Only need to worry about ACCEPT here. Drop gets compiled into the BPF program and
// RETURN would be a no-op since there's nothing to RETURN from.
inputRules = append(inputRules, iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").MarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.AcceptAction{},
})
}
// Catch any workload to host packets that haven't been through the BPF program.
inputRules = append(inputRules, iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").NotMarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.DropAction{},
})
}
if t.IPVersion == 6 {
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// In BPF mode, we don't support IPv6 yet. Drop it.
fwdRules = append(fwdRules, iptables.Rule{
Match: iptables.Match().OutInterface(prefix + "+"),
Action: iptables.DropAction{},
Comment: []string{"To workload, drop IPv6."},
})
}
} else {
// Let the BPF programs know if Linux conntrack knows about the flow.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Mark pre-established flows."},
Action: iptables.SetMaskedMarkAction{
Mark: tc.MarkLinuxConntrackEstablished,
Mask: tc.MarkLinuxConntrackEstablishedMask,
},
},
)
// The packet may be about to go to a local workload. However, the local workload may not have a BPF
// program attached (yet). To catch that case, we send the packet through a dispatch chain. We only
// add interfaces to the dispatch chain if the BPF program is in place.
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// Make sure iptables rules don't drop packets that we're about to process through BPF.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().OutInterface(prefix + "+"),
Action: iptables.JumpAction{Target: rules.ChainToWorkloadDispatch},
Comment: []string{"To workload, check workload is known."},
},
)
}
// Need a final rule to accept traffic that is from a workload and going somewhere else.
// Otherwise, if iptables has a DROP policy on the forward chain, the packet will get dropped.
// This rule must come after the to-workload jump rules above to ensure that we don't accept too
// early before the destination is checked.
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// Make sure iptables rules don't drop packets that we're about to process through BPF.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().InInterface(prefix + "+"),
Action: iptables.AcceptAction{},
Comment: []string{"To workload, mark has already been verified."},
},
)
}
}
t.InsertOrAppendRules("INPUT", inputRules)
t.InsertOrAppendRules("FORWARD", fwdRules)
t.InsertOrAppendRules("OUTPUT", outputRules)
}
for _, t := range d.iptablesNATTables {
t.UpdateChains(d.ruleRenderer.StaticNATPostroutingChains(t.IPVersion))
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPostrouting},
}})
}
for _, t := range d.iptablesRawTables {
// Do not RPF check what is marked as to be skipped by RPF check.
rpfRules := []iptables.Rule{{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypassSkipRPF, tc.MarkSeenBypassSkipRPFMask),
Action: iptables.ReturnAction{},
}}
// For anything we approved for forward, permit accept_local as it is
// traffic encapped for NodePort, ICMP replies etc. - stuff we trust.
rpfRules = append(rpfRules, iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypassForward, tc.MarksMask).RPFCheckPassed(true),
Action: iptables.ReturnAction{},
})
// Do the full RPF check and dis-allow accept_local for anything else.
rpfRules = append(rpfRules, rules.RPFilter(t.IPVersion, tc.MarkSeen, tc.MarkSeenMask,
rulesConfig.OpenStackSpecialCasesEnabled, false)...)
rpfChain := []*iptables.Chain{{
Name: rules.ChainNamePrefix + "RPF",
Rules: rpfRules,
}}
t.UpdateChains(rpfChain)
var rawRules []iptables.Rule
if t.IPVersion == 4 && rulesConfig.WireguardEnabled && len(rulesConfig.WireguardInterfaceName) > 0 &&
rulesConfig.RouteSource == "WorkloadIPs" {
// Set a mark on packets coming from any interface except for lo, wireguard, or pod veths to ensure the RPF
// check allows it.
log.Debug("Adding Wireguard iptables rule chain")
rawRules = append(rawRules, iptables.Rule{
Match: nil,
Action: iptables.JumpAction{Target: rules.ChainSetWireguardIncomingMark},
})
t.UpdateChain(d.ruleRenderer.WireguardIncomingMarkChain())
}
rawRules = append(rawRules, iptables.Rule{
Action: iptables.JumpAction{Target: rpfChain[0].Name},
})
rawChains := []*iptables.Chain{{
Name: rules.ChainRawPrerouting,
Rules: rawRules,
}}
t.UpdateChains(rawChains)
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawPrerouting},
}})
}
if d.config.BPFExtToServiceConnmark != 0 {
mark := uint32(d.config.BPFExtToServiceConnmark)
for _, t := range d.iptablesMangleTables {
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Match: iptables.Match().MarkMatchesWithMask(
tc.MarkSeen|mark,
tc.MarkSeenMask|mark,
),
Comment: []string{"Mark connections with ExtToServiceConnmark"},
Action: iptables.SetConnMarkAction{Mark: mark, Mask: mark},
}})
}
}
}
func (d *InternalDataplane) setUpIptablesNormal() {
for _, t := range d.iptablesRawTables {
rawChains := d.ruleRenderer.StaticRawTableChains(t.IPVersion)
t.UpdateChains(rawChains)
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawPrerouting},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawOutput},
}})
}
for _, t := range d.iptablesFilterTables {
filterChains := d.ruleRenderer.StaticFilterTableChains(t.IPVersion)
t.UpdateChains(filterChains)
t.InsertOrAppendRules("FORWARD", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterForward},
}})
t.InsertOrAppendRules("INPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterInput},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterOutput},
}})
// Include rules which should be appended to the filter table forward chain.
t.AppendRules("FORWARD", d.ruleRenderer.StaticFilterForwardAppendRules())
}
for _, t := range d.iptablesNATTables {
t.UpdateChains(d.ruleRenderer.StaticNATTableChains(t.IPVersion))
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPrerouting},
}})
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPostrouting},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATOutput},
}})
}
for _, t := range d.iptablesMangleTables {
t.UpdateChains(d.ruleRenderer.StaticMangleTableChains(t.IPVersion))
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainManglePrerouting},
}})
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainManglePostrouting},
}})
}
if d.xdpState != nil {
if err := d.setXDPFailsafePorts(); err != nil {
log.Warnf("failed to set XDP failsafe ports, disabling XDP: %v", err)
if err := d.shutdownXDPCompletely(); err != nil {
log.Warnf("failed to disable XDP: %v, will proceed anyway.", err)
}
}
}
}
func stringToProtocol(protocol string) (labelindex.IPSetPortProtocol, error) {
switch protocol {
case "tcp":
return labelindex.ProtocolTCP, nil
case "udp":
return labelindex.ProtocolUDP, nil
case "sctp":
return labelindex.ProtocolSCTP, nil
}
return labelindex.ProtocolNone, fmt.Errorf("unknown protocol %q", protocol)
}
func (d *InternalDataplane) setXDPFailsafePorts() error {
inboundPorts := d.config.RulesConfig.FailsafeInboundHostPorts
if _, err := d.xdpState.common.bpfLib.NewFailsafeMap(); err != nil {
return err
}
for _, p := range inboundPorts {
proto, err := stringToProtocol(p.Protocol)
if err != nil {
return err
}
if err := d.xdpState.common.bpfLib.UpdateFailsafeMap(uint8(proto), p.Port); err != nil {
return err
}
}
log.Infof("Set XDP failsafe ports: %+v", inboundPorts)
return nil
}
// shutdownXDPCompletely attempts to disable XDP state. This could fail in cases where XDP isn't working properly.
func (d *InternalDataplane) shutdownXDPCompletely() error {
if d.xdpState == nil {
return nil
}
if d.callbacks != nil {
d.xdpState.DepopulateCallbacks(d.callbacks)
}
// spend 1 second attempting to wipe XDP, in case of a hiccup.
maxTries := 10
waitInterval := 100 * time.Millisecond
var err error
for i := 0; i < maxTries; i++ {
err = d.xdpState.WipeXDP()
if err == nil {
d.xdpState = nil
return nil
}
log.WithError(err).WithField("try", i).Warn("failed to wipe the XDP state")
time.Sleep(waitInterval)
}
return fmt.Errorf("Failed to wipe the XDP state after %v tries over %v seconds: Error %v", maxTries, waitInterval, err)
}
func (d *InternalDataplane) loopUpdatingDataplane() {
log.Info("Started internal iptables dataplane driver loop")
healthTicks := time.NewTicker(healthInterval).C
d.reportHealth()
// Retry any failed operations every 10s.
retryTicker := time.NewTicker(10 * time.Second)
// If configured, start tickers to refresh the IP sets and routing table entries.
var ipSetsRefreshC <-chan time.Time
if d.config.IPSetsRefreshInterval > 0 {
log.WithField("interval", d.config.IptablesRefreshInterval).Info(
"Will refresh IP sets on timer")
refreshTicker := jitter.NewTicker(
d.config.IPSetsRefreshInterval,
d.config.IPSetsRefreshInterval/10,
)
ipSetsRefreshC = refreshTicker.C
}
var routeRefreshC <-chan time.Time
if d.config.RouteRefreshInterval > 0 {
log.WithField("interval", d.config.RouteRefreshInterval).Info(
"Will refresh routes on timer")
refreshTicker := jitter.NewTicker(
d.config.RouteRefreshInterval,
d.config.RouteRefreshInterval/10,
)
routeRefreshC = refreshTicker.C
}
var xdpRefreshC <-chan time.Time
if d.config.XDPRefreshInterval > 0 && d.xdpState != nil {
log.WithField("interval", d.config.XDPRefreshInterval).Info(
"Will refresh XDP on timer")
refreshTicker := jitter.NewTicker(
d.config.XDPRefreshInterval,
d.config.XDPRefreshInterval/10,
)
xdpRefreshC = refreshTicker.C
}
// Fill the apply throttle leaky bucket.
throttleC := jitter.NewTicker(100*time.Millisecond, 10*time.Millisecond).C
beingThrottled := false
datastoreInSync := false
processMsgFromCalcGraph := func(msg interface{}) {
log.WithField("msg", proto.MsgStringer{Msg: msg}).Infof(
"Received %T update from calculation graph", msg)
d.recordMsgStat(msg)
for _, mgr := range d.allManagers {
mgr.OnUpdate(msg)
}
switch msg.(type) {
case *proto.InSync:
log.WithField("timeSinceStart", time.Since(processStartTime)).Info(
"Datastore in sync, flushing the dataplane for the first time...")
datastoreInSync = true
}
}
processIfaceUpdate := func(ifaceUpdate *ifaceUpdate) {
log.WithField("msg", ifaceUpdate).Info("Received interface update")
if ifaceUpdate.Name == KubeIPVSInterface {
d.checkIPVSConfigOnStateUpdate(ifaceUpdate.State)
return
}
for _, mgr := range d.allManagers {
mgr.OnUpdate(ifaceUpdate)
}
for _, mgr := range d.managersWithRouteTables {
for _, routeTable := range mgr.GetRouteTableSyncers() {
routeTable.OnIfaceStateChanged(ifaceUpdate.Name, ifaceUpdate.State)
}
}
}
processAddrsUpdate := func(ifaceAddrsUpdate *ifaceAddrsUpdate) {
log.WithField("msg", ifaceAddrsUpdate).Info("Received interface addresses update")
for _, mgr := range d.allManagers {
mgr.OnUpdate(ifaceAddrsUpdate)
}
}
for {
select {
case msg := <-d.toDataplane:
// Process the message we received, then opportunistically process any other
// pending messages.
batchSize := 1
processMsgFromCalcGraph(msg)
msgLoop1:
for i := 0; i < msgPeekLimit; i++ {
select {
case msg := <-d.toDataplane:
processMsgFromCalcGraph(msg)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop1
}
}
d.dataplaneNeedsSync = true
summaryBatchSize.Observe(float64(batchSize))
case ifaceUpdate := <-d.ifaceUpdates:
// Process the message we received, then opportunistically process any other
// pending messages.
batchSize := 1
processIfaceUpdate(ifaceUpdate)
msgLoop2:
for i := 0; i < msgPeekLimit; i++ {
select {
case ifaceUpdate := <-d.ifaceUpdates:
processIfaceUpdate(ifaceUpdate)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop2
}
}
d.dataplaneNeedsSync = true
summaryIfaceBatchSize.Observe(float64(batchSize))
case ifaceAddrsUpdate := <-d.ifaceAddrUpdates:
batchSize := 1
processAddrsUpdate(ifaceAddrsUpdate)
msgLoop3:
for i := 0; i < msgPeekLimit; i++ {
select {
case ifaceAddrsUpdate := <-d.ifaceAddrUpdates:
processAddrsUpdate(ifaceAddrsUpdate)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop3
}
}
summaryAddrBatchSize.Observe(float64(batchSize))
d.dataplaneNeedsSync = true
case <-ipSetsRefreshC:
log.Debug("Refreshing IP sets state")
d.forceIPSetsRefresh = true
d.dataplaneNeedsSync = true
case <-routeRefreshC:
log.Debug("Refreshing routes")
d.forceRouteRefresh = true
d.dataplaneNeedsSync = true
case <-xdpRefreshC:
log.Debug("Refreshing XDP")
d.forceXDPRefresh = true
d.dataplaneNeedsSync = true
case <-d.reschedC:
log.Debug("Reschedule kick received")
d.dataplaneNeedsSync = true
// nil out the channel to record that the timer is now inactive.
d.reschedC = nil
case <-throttleC:
d.applyThrottle.Refill()
case <-healthTicks:
d.reportHealth()
case <-retryTicker.C:
case <-d.debugHangC:
log.Warning("Debug hang simulation timer popped, hanging the dataplane!!")
time.Sleep(1 * time.Hour)
log.Panic("Woke up after 1 hour, something's probably wrong with the test.")
}
if datastoreInSync && d.dataplaneNeedsSync {
// Dataplane is out-of-sync, check if we're throttled.
if d.applyThrottle.Admit() {
if beingThrottled && d.applyThrottle.WouldAdmit() {
log.Info("Dataplane updates no longer throttled")
beingThrottled = false
}
log.Debug("Applying dataplane updates")
applyStart := time.Now()
// Actually apply the changes to the dataplane.
d.apply()
// Record stats.
applyTime := time.Since(applyStart)
summaryApplyTime.Observe(applyTime.Seconds())
if d.dataplaneNeedsSync {
// Dataplane is still dirty, record an error.
countDataplaneSyncErrors.Inc()
}
d.loopSummarizer.EndOfIteration(applyTime)
if !d.doneFirstApply {
log.WithField(
"secsSinceStart", time.Since(processStartTime).Seconds(),
).Info("Completed first update to dataplane.")
d.loopSummarizer.RecordOperation("first-update")
d.doneFirstApply = true
if d.config.PostInSyncCallback != nil {
d.config.PostInSyncCallback()
}
}
d.reportHealth()
} else {
if !beingThrottled {
log.Info("Dataplane updates throttled")
beingThrottled = true
}
}
}
}
}
func (d *InternalDataplane) configureKernel() {
// Attempt to modprobe nf_conntrack_proto_sctp. In some kernels this is a
// module that needs to be loaded, otherwise all SCTP packets are marked
// INVALID by conntrack and dropped by Calico's rules. However, some kernels
// (confirmed in Ubuntu 19.10's build of 5.3.0-24-generic) include this
// conntrack without it being a kernel module, and so modprobe will fail.
// Log result at INFO level for troubleshooting, but otherwise ignore any
// failed modprobe calls.
mp := newModProbe(moduleConntrackSCTP, newRealCmd)
out, err := mp.Exec()
log.WithError(err).WithField("output", out).Infof("attempted to modprobe %s", moduleConntrackSCTP)
log.Info("Making sure IPv4 forwarding is enabled.")
err = writeProcSys("/proc/sys/net/ipv4/ip_forward", "1")
if err != nil {
log.WithError(err).Error("Failed to set IPv4 forwarding sysctl")
}
if d.config.IPv6Enabled {
log.Info("Making sure IPv6 forwarding is enabled.")
err = writeProcSys("/proc/sys/net/ipv6/conf/all/forwarding", "1")
if err != nil {
log.WithError(err).Error("Failed to set IPv6 forwarding sysctl")
}
}
if d.config.BPFEnabled && d.config.BPFDisableUnprivileged {
log.Info("BPF enabled, disabling unprivileged BPF usage.")
err := writeProcSys("/proc/sys/kernel/unprivileged_bpf_disabled", "1")
if err != nil {
log.WithError(err).Error("Failed to set unprivileged_bpf_disabled sysctl")
}
}
if d.config.Wireguard.Enabled {
// wireguard module is available in linux kernel >= 5.6
mpwg := newModProbe(moduleWireguard, newRealCmd)
out, err = mpwg.Exec()
log.WithError(err).WithField("output", out).Infof("attempted to modprobe %s", moduleWireguard)
}
}
func (d *InternalDataplane) recordMsgStat(msg interface{}) {
typeName := reflect.ValueOf(msg).Elem().Type().Name()
countMessages.WithLabelValues(typeName).Inc()
}
func (d *InternalDataplane) apply() {
// Update sequencing is important here because iptables rules have dependencies on ipsets.
// Creating a rule that references an unknown IP set fails, as does deleting an IP set that
// is in use.
// Unset the needs-sync flag, we'll set it again if something fails.
d.dataplaneNeedsSync = false
// First, give the managers a chance to resolve any state based on the preceding batch of
// updates. In some cases, e.g. EndpointManager, this can result in an update to another
// manager (BPFEndpointManager.OnHEPUpdate) that must happen before either of those managers
// begins its dataplane programming updates.
for _, mgr := range d.allManagers {
if handler, ok := mgr.(UpdateBatchResolver); ok {
err := handler.ResolveUpdateBatch()
if err != nil {
log.WithField("manager", reflect.TypeOf(mgr).Name()).WithError(err).Debug(
"couldn't resolve update batch for manager, will try again later")
d.dataplaneNeedsSync = true
}
d.reportHealth()
}
}
// Now allow managers to complete the dataplane programming updates that they need.
for _, mgr := range d.allManagers {
err := mgr.CompleteDeferredWork()
if err != nil {
log.WithField("manager", reflect.TypeOf(mgr).Name()).WithError(err).Debug(
"couldn't complete deferred work for manager, will try again later")
d.dataplaneNeedsSync = true
}
d.reportHealth()
}
if d.xdpState != nil {
if d.forceXDPRefresh {
// Refresh timer popped.
d.xdpState.QueueResync()
d.forceXDPRefresh = false
}
var applyXDPError error
d.xdpState.ProcessPendingDiffState(d.endpointsSourceV4)
if err := d.applyXDPActions(); err != nil {
applyXDPError = err
} else {
err := d.xdpState.ProcessMemberUpdates()
d.xdpState.DropPendingDiffState()
if err != nil {
log.WithError(err).Warning("Failed to process XDP member updates, will resync later...")
if err := d.applyXDPActions(); err != nil {
applyXDPError = err
}
}
d.xdpState.UpdateState()
}
if applyXDPError != nil {
log.WithError(applyXDPError).Info("Applying XDP actions did not succeed, disabling XDP")
if err := d.shutdownXDPCompletely(); err != nil {
log.Warnf("failed to disable XDP: %v, will proceed anyway.", err)
}
}
}
d.reportHealth()
if d.forceRouteRefresh {
// Refresh timer popped.
for _, r := range d.routeTableSyncers() {
// Queue a resync on the next Apply().
r.QueueResync()
}
d.forceRouteRefresh = false
}
if d.forceIPSetsRefresh {
// Refresh timer popped.
for _, r := range d.ipSets {
// Queue a resync on the next Apply().
r.QueueResync()
}
d.forceIPSetsRefresh = false
}
// Next, create/update IP sets. We defer deletions of IP sets until after we update
// iptables.
var ipSetsWG sync.WaitGroup
for _, ipSets := range d.ipSets {
ipSetsWG.Add(1)
go func(ipSets ipsetsDataplane) {
ipSets.ApplyUpdates()
d.reportHealth()
ipSetsWG.Done()
}(ipSets)
}
// Update the routing table in parallel with the other updates. We'll wait for it to finish
// before we return.
var routesWG sync.WaitGroup
for _, r := range d.routeTableSyncers() {
routesWG.Add(1)
go func(r routeTableSyncer) {
err := r.Apply()
if err != nil {
log.Warn("Failed to synchronize routing table, will retry...")
d.dataplaneNeedsSync = true
}
d.reportHealth()
routesWG.Done()
}(r)
}
// Wait for the IP sets update to finish. We can't update iptables until it has.
ipSetsWG.Wait()
// Update iptables, this should sever any references to now-unused IP sets.
var reschedDelayMutex sync.Mutex
var reschedDelay time.Duration
var iptablesWG sync.WaitGroup
for _, t := range d.allIptablesTables {
iptablesWG.Add(1)
go func(t *iptables.Table) {
tableReschedAfter := t.Apply()
reschedDelayMutex.Lock()
defer reschedDelayMutex.Unlock()
if tableReschedAfter != 0 && (reschedDelay == 0 || tableReschedAfter < reschedDelay) {
reschedDelay = tableReschedAfter
}
d.reportHealth()
iptablesWG.Done()
}(t)
}
iptablesWG.Wait()
// Now clean up any left-over IP sets.
for _, ipSets := range d.ipSets {
ipSetsWG.Add(1)
go func(s ipsetsDataplane) {
s.ApplyDeletions()
d.reportHealth()
ipSetsWG.Done()
}(ipSets)
}
ipSetsWG.Wait()
// Wait for the route updates to finish.
routesWG.Wait()
// And publish any status updates.
d.endpointStatusCombiner.Apply()
// Set up any needed rescheduling kick.
if d.reschedC != nil {
// We have an active rescheduling timer, stop it so we can restart it with a
// different timeout below if it is still needed.
// This snippet comes from the docs for Timer.Stop().
if !d.reschedTimer.Stop() {
// Timer had already popped, drain its channel.
<-d.reschedC
}
// Nil out our copy of the channel to record that the timer is inactive.
d.reschedC = nil
}
if reschedDelay != 0 {
// We need to reschedule.
log.WithField("delay", reschedDelay).Debug("Asked to reschedule.")
if d.reschedTimer == nil {
// First time, create the timer.
d.reschedTimer = time.NewTimer(reschedDelay)
} else {
// Have an existing timer, reset it.
d.reschedTimer.Reset(reschedDelay)
}
d.reschedC = d.reschedTimer.C
}
}
func (d *InternalDataplane) applyXDPActions() error {
var err error = nil
for i := 0; i < 10; i++ {
err = d.xdpState.ResyncIfNeeded(d.ipsetsSourceV4)
if err != nil {
return err
}
if err = d.xdpState.ApplyBPFActions(d.ipsetsSourceV4); err == nil {
return nil
} else {
log.WithError(err).Info("Applying XDP BPF actions did not succeed, will retry with resync...")
}
}
return err
}
func (d *InternalDataplane) loopReportingStatus() {
log.Info("Started internal status report thread")
if d.config.StatusReportingInterval <= 0 {
log.Info("Process status reports disabled")
return
}
// Wait before first report so that we don't check in if we're in a tight cyclic restart.
time.Sleep(10 * time.Second)
for {
uptimeSecs := time.Since(processStartTime).Seconds()
d.fromDataplane <- &proto.ProcessStatusUpdate{
IsoTimestamp: time.Now().UTC().Format(time.RFC3339),
Uptime: uptimeSecs,
}
time.Sleep(d.config.StatusReportingInterval)
}
}
// iptablesTable is a shim interface for iptables.Table.
type iptablesTable interface {
UpdateChain(chain *iptables.Chain)
UpdateChains([]*iptables.Chain)
RemoveChains([]*iptables.Chain)
RemoveChainByName(name string)
}
func (d *InternalDataplane) reportHealth() {
if d.config.HealthAggregator != nil {
d.config.HealthAggregator.Report(
healthName,
&health.HealthReport{Live: true, Ready: d.doneFirstApply},
)
}
}
type dummyLock struct{}
func (d dummyLock) Lock() {
}
func (d dummyLock) Unlock() {
}
| 1 | 19,174 | We would like this code to run, even when `config.XDPEnabled` is false, so that Felix can clean up its own XDP state after a restart. | projectcalico-felix | go |
@@ -25,6 +25,18 @@ DEFINE_int32(clean_wal_interval_secs, 600, "interval to trigger clean expired wal
namespace nebula {
namespace kvstore {
+#define SPACE_EXIST_RETURN_IF_ERROR(...) \
+ do { \
+ auto s = (__VA_ARGS__); \
+ if (!ok(s)) { \
+ if (spaceRet.left() == ResultCode::ERR_SPACE_NOT_FOUND) { \
+ LOG(INFO) << "Space " << spaceId << " does not exist, skip it."; \
+ return ResultCode::SUCCEEDED; \
+ } \
+ return error(s); \
+ } \
+ } while (0) \
+
NebulaStore::~NebulaStore() {
LOG(INFO) << "Cut off the relationship with meta client";
options_.partMan_.reset(); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/NebulaStore.h"
#include <folly/Likely.h>
#include <algorithm>
#include <cstdint>
#include "network/NetworkUtils.h"
#include "fs/FileUtils.h"
#include "kvstore/RocksEngine.h"
#include "kvstore/SnapshotManagerImpl.h"
#include <folly/ScopeGuard.h>
DEFINE_string(engine_type, "rocksdb", "rocksdb, memory...");
DEFINE_int32(custom_filter_interval_secs, 24 * 3600,
"interval to trigger custom compaction, < 0 means always do default minor compaction");
DEFINE_int32(num_workers, 4, "Number of worker threads");
DEFINE_bool(check_leader, true, "Check leader or not");
DEFINE_int32(clean_wal_interval_secs, 600, "interval to trigger clean expired wal");
namespace nebula {
namespace kvstore {
NebulaStore::~NebulaStore() {
LOG(INFO) << "Cut off the relationship with meta client";
options_.partMan_.reset();
LOG(INFO) << "Stop the raft service...";
raftService_->stop();
LOG(INFO) << "Waiting for the raft service stop...";
raftService_->waitUntilStop();
spaces_.clear();
bgWorkers_->stop();
bgWorkers_->wait();
LOG(INFO) << "~NebulaStore()";
}
bool NebulaStore::init() {
LOG(INFO) << "Start the raft service...";
bgWorkers_ = std::make_shared<thread::GenericThreadPool>();
bgWorkers_->start(FLAGS_num_workers, "nebula-bgworkers");
snapshot_.reset(new SnapshotManagerImpl(this));
raftService_ = raftex::RaftexService::createService(ioPool_,
workers_,
raftAddr_.second);
if (!raftService_->start()) {
LOG(ERROR) << "Start the raft service failed";
return false;
}
CHECK(!!options_.partMan_);
LOG(INFO) << "Scan the local path, and init the spaces_";
{
std::unordered_set<std::pair<GraphSpaceID, PartitionID>> spacePartIdSet;
for (auto& path : options_.dataPaths_) {
auto rootPath = folly::stringPrintf("%s/nebula", path.c_str());
auto dirs = fs::FileUtils::listAllDirsInDir(rootPath.c_str());
for (auto& dir : dirs) {
LOG(INFO) << "Scan path \"" << path << "/" << dir << "\"";
try {
GraphSpaceID spaceId;
try {
spaceId = folly::to<GraphSpaceID>(dir);
} catch (const std::exception& ex) {
LOG(ERROR) << "Data path invalid: " << ex.what();
return false;
}
if (!options_.partMan_->spaceExist(storeSvcAddr_, spaceId).ok()) {
// TODO We might want to have a second thought here.
// Removing the data directly feels a little strong
LOG(INFO) << "Space " << spaceId
<< " does not exist any more, remove the data!";
auto dataPath = folly::stringPrintf("%s/%s",
rootPath.c_str(),
dir.c_str());
CHECK(fs::FileUtils::remove(dataPath.c_str(), true));
continue;
}
KVEngine* enginePtr = nullptr;
{
folly::RWSpinLock::WriteHolder wh(&lock_);
auto engine = newEngine(spaceId, path);
auto spaceIt = this->spaces_.find(spaceId);
if (spaceIt == this->spaces_.end()) {
LOG(INFO) << "Load space " << spaceId << " from disk";
spaceIt = this->spaces_.emplace(
spaceId,
std::make_unique<SpacePartInfo>()).first;
}
spaceIt->second->engines_.emplace_back(std::move(engine));
enginePtr = spaceIt->second->engines_.back().get();
}
// partIds is the partition in this host waiting to open
std::vector<PartitionID> partIds;
for (auto& partId : enginePtr->allParts()) {
if (!options_.partMan_->partExist(storeSvcAddr_, spaceId, partId).ok()) {
LOG(INFO) << "Part " << partId
<< " does not exist any more, remove it!";
enginePtr->removePart(partId);
continue;
} else {
auto spacePart = std::make_pair(spaceId, partId);
if (spacePartIdSet.find(spacePart) != spacePartIdSet.end()) {
LOG(INFO) << "Part " << partId
<< " has been loaded, skip current one, remove it!";
enginePtr->removePart(partId);
} else {
spacePartIdSet.emplace(spacePart);
partIds.emplace_back(partId);
}
}
}
if (partIds.empty()) {
continue;
}
std::atomic<size_t> counter(partIds.size());
folly::Baton<true, std::atomic> baton;
LOG(INFO) << "Need to open " << partIds.size() << " parts of space " << spaceId;
for (auto& partId : partIds) {
bgWorkers_->addTask([
spaceId, partId, enginePtr, &counter, &baton, this] () mutable {
auto part = std::make_shared<Part>(spaceId,
partId,
raftAddr_,
folly::stringPrintf("%s/wal/%d",
enginePtr->getDataRoot(),
partId),
enginePtr,
ioPool_,
bgWorkers_,
workers_,
snapshot_);
auto status = options_.partMan_->partMeta(spaceId, partId);
if (!status.ok()) {
LOG(WARNING) << status.status().toString();
return;
}
auto partMeta = status.value();
std::vector<HostAddr> peers;
for (auto& h : partMeta.peers_) {
if (h != storeSvcAddr_) {
peers.emplace_back(getRaftAddr(h));
VLOG(1) << "Add peer " << peers.back();
}
}
raftService_->addPartition(part);
part->start(std::move(peers), false);
LOG(INFO) << "Load part " << spaceId << ", " << partId << " from disk";
{
folly::RWSpinLock::WriteHolder holder(&lock_);
auto iter = spaces_.find(spaceId);
CHECK(iter != spaces_.end());
iter->second->parts_.emplace(partId, part);
}
counter.fetch_sub(1);
if (counter.load() == 0) {
baton.post();
}
});
}
baton.wait();
LOG(INFO) << "Load space " << spaceId << " complete";
} catch (std::exception& e) {
LOG(FATAL) << "Invalid data directory \"" << dir << "\"";
}
}
}
}
LOG(INFO) << "Init data from partManager for " << storeSvcAddr_;
auto partsMap = options_.partMan_->parts(storeSvcAddr_);
for (auto& entry : partsMap) {
auto spaceId = entry.first;
addSpace(spaceId);
std::vector<PartitionID> partIds;
for (auto it = entry.second.begin(); it != entry.second.end(); it++) {
partIds.emplace_back(it->first);
}
std::sort(partIds.begin(), partIds.end());
for (auto& partId : partIds) {
addPart(spaceId, partId, false);
}
}
bgWorkers_->addDelayTask(FLAGS_clean_wal_interval_secs * 1000, &NebulaStore::cleanWAL, this);
LOG(INFO) << "Register handler...";
options_.partMan_->registerHandler(this);
return true;
}
void NebulaStore::stop() {
for (const auto& space : spaces_) {
for (const auto& engine : space.second->engines_) {
engine->stop();
}
}
}
std::unique_ptr<KVEngine> NebulaStore::newEngine(GraphSpaceID spaceId,
const std::string& path) {
if (FLAGS_engine_type == "rocksdb") {
std::shared_ptr<KVCompactionFilterFactory> cfFactory = nullptr;
if (options_.cffBuilder_ != nullptr) {
cfFactory = options_.cffBuilder_->buildCfFactory(spaceId);
}
return std::make_unique<RocksEngine>(spaceId,
path,
options_.mergeOp_,
cfFactory);
} else {
LOG(FATAL) << "Unknown engine type " << FLAGS_engine_type;
return nullptr;
}
}
ErrorOr<ResultCode, HostAddr> NebulaStore::partLeader(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return getStoreAddr(partIt->second->leader());
}
void NebulaStore::addSpace(GraphSpaceID spaceId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
if (this->spaces_.find(spaceId) != this->spaces_.end()) {
LOG(INFO) << "Space " << spaceId << " has existed!";
return;
}
LOG(INFO) << "Create space " << spaceId;
this->spaces_[spaceId] = std::make_unique<SpacePartInfo>();
for (auto& path : options_.dataPaths_) {
this->spaces_[spaceId]->engines_.emplace_back(newEngine(spaceId, path));
}
}
void NebulaStore::addPart(GraphSpaceID spaceId,
PartitionID partId,
bool asLearner,
const std::vector<HostAddr>& peers) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
CHECK(spaceIt != this->spaces_.end()) << "Space should exist!";
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
LOG(INFO) << "[Space: " << spaceId << ", Part: " << partId << "] has existed!";
if (!peers.empty()) {
LOG(INFO) << "[Space: " << spaceId << ", Part: " << partId << "] check peers...";
partIt->second->checkAndResetPeers(peers);
}
return;
}
int32_t minIndex = -1;
int32_t index = 0;
int32_t minPartsNum = 0x7FFFFFFF;
auto& engines = spaceIt->second->engines_;
for (auto& engine : engines) {
if (engine->totalPartsNum() < minPartsNum) {
minPartsNum = engine->totalPartsNum();
minIndex = index;
}
index++;
}
CHECK_GE(minIndex, 0) << "engines number:" << engines.size();
const auto& targetEngine = engines[minIndex];
// Write the information into related engine.
targetEngine->addPart(partId);
spaceIt->second->parts_.emplace(
partId,
newPart(spaceId, partId, targetEngine.get(), asLearner, peers));
LOG(INFO) << "Space " << spaceId << ", part " << partId
<< " has been added, asLearner " << asLearner;
}
std::shared_ptr<Part> NebulaStore::newPart(GraphSpaceID spaceId,
PartitionID partId,
KVEngine* engine,
bool asLearner,
const std::vector<HostAddr>& defaultPeers) {
auto part = std::make_shared<Part>(spaceId,
partId,
raftAddr_,
folly::stringPrintf("%s/wal/%d",
engine->getDataRoot(),
partId),
engine,
ioPool_,
bgWorkers_,
workers_,
snapshot_);
std::vector<HostAddr> peers;
if (defaultPeers.empty()) {
// pull the information from meta
auto metaStatus = options_.partMan_->partMeta(spaceId, partId);
if (!metaStatus.ok()) {
LOG(ERROR) << "options_.partMan_->partMeta(spaceId, partId); error: "
<< metaStatus.status().toString()
<< " spaceId: " << spaceId << ", partId: " << partId;
return nullptr;
}
auto partMeta = metaStatus.value();
for (auto& h : partMeta.peers_) {
if (h != storeSvcAddr_) {
peers.emplace_back(getRaftAddr(h));
VLOG(1) << "Add peer " << peers.back();
}
}
} else {
for (auto& h : defaultPeers) {
if (h != raftAddr_) {
peers.emplace_back(h);
}
}
}
raftService_->addPartition(part);
part->start(std::move(peers), asLearner);
return part;
}
void NebulaStore::removeSpace(GraphSpaceID spaceId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
auto& engines = spaceIt->second->engines_;
for (auto& engine : engines) {
auto parts = engine->allParts();
for (auto& partId : parts) {
engine->removePart(partId);
}
CHECK_EQ(0, engine->totalPartsNum());
}
this->spaces_.erase(spaceIt);
// TODO(dangleptr): Should we delete the data?
LOG(INFO) << "Space " << spaceId << " has been removed!";
}
void NebulaStore::removePart(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
if (spaceIt != this->spaces_.end()) {
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
auto* e = partIt->second->engine();
CHECK_NOTNULL(e);
raftService_->removePartition(partIt->second);
partIt->second->reset();
spaceIt->second->parts_.erase(partId);
e->removePart(partId);
}
}
LOG(INFO) << "Space " << spaceId << ", part " << partId << " has been removed!";
}
void NebulaStore::updateSpaceOption(GraphSpaceID spaceId,
const std::unordered_map<std::string, std::string>& options,
bool isDbOption) {
if (isDbOption) {
for (const auto& kv : options) {
setDBOption(spaceId, kv.first, kv.second);
}
} else {
for (const auto& kv : options) {
setOption(spaceId, kv.first, kv.second);
}
}
}
ResultCode NebulaStore::get(GraphSpaceID spaceId,
PartitionID partId,
const std::string& key,
std::string* value) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->get(key, value);
}
std::pair<ResultCode, std::vector<Status>> NebulaStore::multiGet(
GraphSpaceID spaceId,
PartitionID partId,
const std::vector<std::string>& keys,
std::vector<std::string>* values) {
std::vector<Status> status;
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return {error(ret), status};
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return {ResultCode::ERR_LEADER_CHANGED, status};
}
status = part->engine()->multiGet(keys, values);
auto allExist = std::all_of(status.begin(), status.end(),
[] (const auto& s) {
return s.ok();
});
if (allExist) {
return {ResultCode::SUCCEEDED, status};
} else {
return {ResultCode::ERR_PARTIAL_RESULT, status};
}
}
ResultCode NebulaStore::range(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& end,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->range(start, end, iter);
}
ResultCode NebulaStore::prefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& prefix,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->prefix(prefix, iter);
}
ResultCode NebulaStore::rangeWithPrefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& prefix,
std::unique_ptr<KVIterator>* iter) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto part = nebula::value(ret);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
return part->engine()->rangeWithPrefix(start, prefix, iter);
}
ResultCode NebulaStore::sync(GraphSpaceID spaceId,
PartitionID partId) {
auto partRet = part(spaceId, partId);
if (!ok(partRet)) {
return error(partRet);
}
auto part = nebula::value(partRet);
if (!checkLeader(part)) {
return ResultCode::ERR_LEADER_CHANGED;
}
auto ret = ResultCode::SUCCEEDED;
folly::Baton<true, std::atomic> baton;
part->sync([&] (kvstore::ResultCode code) {
ret = code;
baton.post();
});
baton.wait();
return ret;
}
void NebulaStore::asyncMultiPut(GraphSpaceID spaceId,
PartitionID partId,
std::vector<KV> keyValues,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncMultiPut(std::move(keyValues), std::move(cb));
}
void NebulaStore::asyncRemove(GraphSpaceID spaceId,
PartitionID partId,
const std::string& key,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncRemove(key, std::move(cb));
}
void NebulaStore::asyncMultiRemove(GraphSpaceID spaceId,
PartitionID partId,
std::vector<std::string> keys,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncMultiRemove(std::move(keys), std::move(cb));
}
void NebulaStore::asyncRemoveRange(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& end,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncRemoveRange(start, end, std::move(cb));
}
void NebulaStore::asyncAtomicOp(GraphSpaceID spaceId,
PartitionID partId,
raftex::AtomicOp op,
KVCallback cb) {
auto ret = part(spaceId, partId);
if (!ok(ret)) {
cb(error(ret));
return;
}
auto part = nebula::value(ret);
part->asyncAtomicOp(std::move(op), std::move(cb));
}
ErrorOr<ResultCode, std::shared_ptr<Part>> NebulaStore::part(GraphSpaceID spaceId,
PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return partIt->second;
}
ResultCode NebulaStore::ingest(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto parts = engine->allParts();
for (auto part : parts) {
auto ret = this->engine(spaceId, part);
if (!ok(ret)) {
return error(ret);
}
auto path = folly::stringPrintf("%s/download/%d", value(ret)->getDataRoot(), part);
if (!fs::FileUtils::exist(path)) {
LOG(INFO) << path << " not existed";
continue;
}
auto files = nebula::fs::FileUtils::listAllFilesInDir(path.c_str(), true, "*.sst");
for (auto file : files) {
LOG(INFO) << "Ingesting extra file: " << file;
auto code = engine->ingest(std::vector<std::string>({file}));
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setOption(GraphSpaceID spaceId,
const std::string& configKey,
const std::string& configValue) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->setOption(configKey, configValue);
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setDBOption(GraphSpaceID spaceId,
const std::string& configKey,
const std::string& configValue) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->setDBOption(configKey, configValue);
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::compact(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
auto code = ResultCode::SUCCEEDED;
std::vector<std::thread> threads;
LOG(INFO) << "Space " << spaceId << " start compaction.";
for (auto& engine : space->engines_) {
threads.emplace_back(std::thread([&engine, &code] {
auto ret = engine->compact();
if (ret != ResultCode::SUCCEEDED) {
code = ret;
}
}));
}
// Wait for all threads to finish
for (auto& t : threads) {
t.join();
}
LOG(INFO) << "Space " << spaceId << " compaction done.";
return code;
}
ResultCode NebulaStore::flush(GraphSpaceID spaceId) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->flush();
if (code != ResultCode::SUCCEEDED) {
return code;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::createCheckpoint(GraphSpaceID spaceId, const std::string& name) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto code = engine->createCheckpoint(name);
if (code != ResultCode::SUCCEEDED) {
return code;
}
// create wal hard link for all parts
auto parts = engine->allParts();
for (auto& part : parts) {
auto ret = this->part(spaceId, part);
if (!ok(ret)) {
LOG(ERROR) << "Part not found. space : " << spaceId << " Part : " << part;
return error(ret);
}
auto walPath = folly::stringPrintf("%s/checkpoints/%s/wal/%d",
engine->getDataRoot(), name.c_str(), part);
auto p = nebula::value(ret);
if (!p->linkCurrentWAL(walPath.data())) {
return ResultCode::ERR_CHECKPOINT_ERROR;
}
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::dropCheckpoint(GraphSpaceID spaceId, const std::string& name) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
/**
* Drop checkpoint and wal together
**/
auto checkpointPath = folly::stringPrintf("%s/checkpoints/%s",
engine->getDataRoot(),
name.c_str());
LOG(INFO) << "Drop checkpoint : " << checkpointPath;
if (!fs::FileUtils::exist(checkpointPath)) {
continue;
}
if (!fs::FileUtils::remove(checkpointPath.data(), true)) {
LOG(ERROR) << "Drop checkpoint dir failed : " << checkpointPath;
return ResultCode::ERR_IO_ERROR;
}
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setWriteBlocking(GraphSpaceID spaceId, bool sign) {
auto spaceRet = space(spaceId);
if (!ok(spaceRet)) {
LOG(ERROR) << "Get Space " << spaceId << " Failed";
return error(spaceRet);
}
auto space = nebula::value(spaceRet);
for (auto& engine : space->engines_) {
auto parts = engine->allParts();
for (auto& part : parts) {
auto partRet = this->part(spaceId, part);
if (!ok(partRet)) {
LOG(ERROR) << "Part not found. space : " << spaceId << " Part : " << part;
return error(partRet);
}
auto p = nebula::value(partRet);
if (p->isLeader()) {
auto ret = ResultCode::SUCCEEDED;
p->setBlocking(sign);
if (sign) {
folly::Baton<true, std::atomic> baton;
p->sync([&ret, &baton] (kvstore::ResultCode code) {
if (kvstore::ResultCode::SUCCEEDED != code) {
ret = code;
}
baton.post();
});
baton.wait();
}
if (ret != ResultCode::SUCCEEDED) {
LOG(ERROR) << "Part sync failed. space : " << spaceId << " Part : " << part;
return ret;
}
}
}
}
return ResultCode::SUCCEEDED;
}
bool NebulaStore::isLeader(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto spaceIt = spaces_.find(spaceId);
if (spaceIt != this->spaces_.end()) {
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
return partIt->second->isLeader();
} else {
return false;
}
}
return false;
}
ErrorOr<ResultCode, KVEngine*> NebulaStore::engine(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return partIt->second->engine();
}
ErrorOr<ResultCode, std::shared_ptr<SpacePartInfo>> NebulaStore::space(GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
return it->second;
}
int32_t NebulaStore::allLeader(std::unordered_map<GraphSpaceID,
std::vector<PartitionID>>& leaderIds) {
folly::RWSpinLock::ReadHolder rh(&lock_);
int32_t count = 0;
for (const auto& spaceIt : spaces_) {
auto spaceId = spaceIt.first;
for (const auto& partIt : spaceIt.second->parts_) {
auto partId = partIt.first;
if (partIt.second->isLeader()) {
leaderIds[spaceId].emplace_back(partId);
++count;
}
}
}
return count;
}
bool NebulaStore::checkLeader(std::shared_ptr<Part> part) const {
return !FLAGS_check_leader || (part->isLeader() && part->leaseValid());
}
void NebulaStore::cleanWAL() {
SCOPE_EXIT {
bgWorkers_->addDelayTask(FLAGS_clean_wal_interval_secs * 1000,
&NebulaStore::cleanWAL,
this);
};
for (const auto& spaceEntry : spaces_) {
for (const auto& engine : spaceEntry.second->engines_) {
engine->flush();
}
for (const auto& partEntry : spaceEntry.second->parts_) {
auto& part = partEntry.second;
if (part->needToCleanWal()) {
part->wal()->cleanWAL();
}
}
}
}
} // namespace kvstore
} // namespace nebula
| 1 | 29,835 | If the status is `ERR_SPACE_NOT_FOUND` why return `SUCCEEDED` ? | vesoft-inc-nebula | cpp |
@@ -17,6 +17,8 @@ class ResLayer(nn.Sequential):
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
+ reverse (bool): Reverse the order of planes in layer sequence.
+ False for ResNet, True for Hourglass. Default: False
"""
def __init__(self, | 1 | from mmcv.cnn import build_conv_layer, build_norm_layer
from torch import nn as nn
class ResLayer(nn.Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down and stride != 1:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
| 1 | 19,760 | It is is more appropriate to use `downsample_first`. If `downsample_first=True`, the downsample block is the first block and it is used for ResNet. If `downsample_first=False`, the downsample block is the last block, which is used by Hourglass network. | open-mmlab-mmdetection | py |
@@ -198,6 +198,10 @@ class Driver extends webdriver.WebDriver {
* @return {!Driver} A new driver instance.
*/
static createSession(options, service = getDefaultService()) {
+ if (!service) {
+ service = getDefaultService();
+ }
+
let client = service.start().then(url => new http.HttpClient(url));
let executor = new http.Executor(client);
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview Defines a {@linkplain Driver WebDriver} client for
* Microsoft's Edge web browser. Before using this module,
* you must download and install the latest
* [MicrosoftEdgeDriver](http://go.microsoft.com/fwlink/?LinkId=619687) server.
* Ensure that the MicrosoftEdgeDriver is on your
* [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).
*
* There are three primary classes exported by this module:
*
* 1. {@linkplain ServiceBuilder}: configures the
* {@link ./remote.DriverService remote.DriverService}
* that manages the [MicrosoftEdgeDriver] child process.
*
* 2. {@linkplain Options}: defines configuration options for each new
* MicrosoftEdgeDriver session, such as which
* {@linkplain Options#setProxy proxy} to use when starting the browser.
*
* 3. {@linkplain Driver}: the WebDriver client; each new instance will control
* a unique browser session.
*
* __Customizing the MicrosoftEdgeDriver Server__ <a id="custom-server"></a>
*
* By default, every MicrosoftEdge session will use a single driver service,
* which is started the first time a {@link Driver} instance is created and
* terminated when this process exits. The default service will inherit its
* environment from the current process.
* You may obtain a handle to this default service using
* {@link #getDefaultService getDefaultService()} and change its configuration
* with {@link #setDefaultService setDefaultService()}.
*
* You may also create a {@link Driver} with its own driver service. This is
* useful if you need to capture the server's log output for a specific session:
*
* var edge = require('selenium-webdriver/edge');
*
* var service = new edge.ServiceBuilder()
* .setPort(55555)
* .build();
*
* var options = new edge.Options();
* // configure browser options ...
*
* var driver = edge.Driver.createSession(options, service);
*
* Users should only instantiate the {@link Driver} class directly when they
* need a custom driver service configuration (as shown above). For normal
* operation, users should start MicrosoftEdge using the
* {@link ./builder.Builder selenium-webdriver.Builder}.
*
* [MicrosoftEdgeDriver]: https://msdn.microsoft.com/en-us/library/mt188085(v=vs.85).aspx
*/
'use strict';
const fs = require('fs');
const util = require('util');
const http = require('./http');
const io = require('./io');
const portprober = require('./net/portprober');
const promise = require('./lib/promise');
const remote = require('./remote');
const Symbols = require('./lib/symbols');
const webdriver = require('./lib/webdriver');
const {Browser, Capabilities} = require('./lib/capabilities');
const EDGEDRIVER_EXE = 'MicrosoftWebDriver.exe';
/**
* _Synchronously_ attempts to locate the edge driver executable on the current
* system.
*
* @return {?string} the located executable, or `null`.
*/
function locateSynchronously() {
return process.platform === 'win32'
? io.findInPath(EDGEDRIVER_EXE, true) : null;
}
/**
* Class for managing MicrosoftEdgeDriver specific options.
*/
class Options extends Capabilities {
/**
* @param {(Capabilities|Map<string, ?>|Object)=} other Another set of
* capabilities to initialize this instance from.
*/
constructor(other = undefined) {
super(other);
this.setBrowserName(Browser.EDGE);
}
}
/**
* Creates {@link remote.DriverService} instances that manage a
* MicrosoftEdgeDriver server in a child process.
*/
class ServiceBuilder extends remote.DriverService.Builder {
/**
* @param {string=} opt_exe Path to the server executable to use. If omitted,
* the builder will attempt to locate the MicrosoftEdgeDriver on the current
* PATH.
* @throws {Error} If provided executable does not exist, or the
* MicrosoftEdgeDriver cannot be found on the PATH.
*/
constructor(opt_exe) {
let exe = opt_exe || locateSynchronously();
if (!exe) {
throw Error(
'The ' + EDGEDRIVER_EXE + ' could not be found on the current PATH. ' +
'Please download the latest version of the MicrosoftEdgeDriver from ' +
'https://www.microsoft.com/en-us/download/details.aspx?id=48212 and ' +
'ensure it can be found on your PATH.');
}
super(exe);
// Binding to the loopback address will fail if not running with
// administrator privileges. Since we cannot test for that in script
// (or can we?), force the DriverService to use "localhost".
this.setHostname('localhost');
}
/**
* Enables verbose logging.
* @return {!ServiceBuilder} A self reference.
*/
enableVerboseLogging() {
return this.addArguments('--verbose');
}
}
/** @type {remote.DriverService} */
var defaultService = null;
/**
* Sets the default service to use for new MicrosoftEdgeDriver instances.
* @param {!remote.DriverService} service The service to use.
* @throws {Error} If the default service is currently running.
*/
function setDefaultService(service) {
if (defaultService && defaultService.isRunning()) {
throw Error(
'The previously configured EdgeDriver service is still running. ' +
'You must shut it down before you may adjust its configuration.');
}
defaultService = service;
}
/**
* Returns the default MicrosoftEdgeDriver service. If such a service has
* not been configured, one will be constructed using the default configuration
* for an MicrosoftEdgeDriver executable found on the system PATH.
* @return {!remote.DriverService} The default MicrosoftEdgeDriver service.
*/
function getDefaultService() {
if (!defaultService) {
defaultService = new ServiceBuilder().build();
}
return defaultService;
}
/**
* Creates a new WebDriver client for Microsoft's Edge.
*/
class Driver extends webdriver.WebDriver {
/**
* Creates a new browser session for Microsoft's Edge browser.
*
* @param {(Capabilities|Options)=} options The configuration options.
* @param {remote.DriverService=} service The service to use; will use
* the {@linkplain #getDefaultService default service} by default.
* @return {!Driver} A new driver instance.
*/
static createSession(options, service = getDefaultService()) {
let client = service.start().then(url => new http.HttpClient(url));
let executor = new http.Executor(client);
options = options || new Options();
return /** @type {!Driver} */(super.createSession(
executor, options, () => service.kill()));
}
/**
* This function is a no-op as file detectors are not supported by this
* implementation.
* @override
*/
setFileDetector() {}
}
// PUBLIC API
exports.Driver = Driver;
exports.Options = Options;
exports.ServiceBuilder = ServiceBuilder;
exports.getDefaultService = getDefaultService;
exports.setDefaultService = setDefaultService;
exports.locateSynchronously = locateSynchronously;
| 1 | 15,395 | Would you mind removing the default parameter above? (I doubt I'll ever use defaults again since you still have to protect against callers explicitly passing `null` or `undefined`) | SeleniumHQ-selenium | java |
@@ -273,6 +273,11 @@ class RefactoringChecker(checkers.BaseTokenChecker):
"consider using the list, dict or set constructor. "
"It is faster and simpler.",
),
+ "R1722": (
+ "Consider using sys.exit()",
+ "consider-using-sys-exit",
+ "Instead of using exit() or quit(), consider using the sys.exit().",
+ ),
}
options = (
( | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 Claudiu Popa <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2016 Alexander Todorov <[email protected]>
# Copyright (c) 2017-2018 hippo91 <[email protected]>
# Copyright (c) 2017 Ville Skyttä <[email protected]>
# Copyright (c) 2017-2018 Bryce Guinta <[email protected]>
# Copyright (c) 2017 Hugo <[email protected]>
# Copyright (c) 2017 Łukasz Sznuk <[email protected]>
# Copyright (c) 2017 Alex Hearn <[email protected]>
# Copyright (c) 2017 Antonio Ossa <[email protected]>
# Copyright (c) 2018 Konstantin Manna <[email protected]>
# Copyright (c) 2018 Konstantin <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Matej Marušák <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Mr. Senko <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Looks for code which can be refactored."""
import builtins
import collections
import itertools
import tokenize
from functools import reduce
import astroid
from astroid import decorators
from pylint import checkers, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
KNOWN_INFINITE_ITERATORS = {"itertools.count"}
def _if_statement_is_always_returning(if_node, returning_node_class):
for node in if_node.body:
if isinstance(node, returning_node_class):
return True
return False
def _is_len_call(node):
"""Checks if node is len(SOMETHING)."""
return (
isinstance(node, astroid.Call)
and isinstance(node.func, astroid.Name)
and node.func.name == "len"
)
def _is_constant_zero(node):
return isinstance(node, astroid.Const) and node.value == 0
def _node_is_test_condition(node):
""" Checks if node is an if, while, assert or if expression statement."""
return isinstance(node, (astroid.If, astroid.While, astroid.Assert, astroid.IfExp))
def _is_trailing_comma(tokens, index):
"""Check if the given token is a trailing comma
:param tokens: Sequence of modules tokens
:type tokens: list[tokenize.TokenInfo]
:param int index: Index of token under check in tokens
:returns: True if the token is a comma which trails an expression
:rtype: bool
"""
token = tokens[index]
if token.exact_type != tokenize.COMMA:
return False
# Must have remaining tokens on the same line such as NEWLINE
left_tokens = itertools.islice(tokens, index + 1, None)
same_line_remaining_tokens = list(
itertools.takewhile(
lambda other_token, _token=token: other_token.start[0] == _token.start[0],
left_tokens,
)
)
# Note: If the newline is tokenize.NEWLINE and not tokenize.NL
# then the newline denotes the end of expression
is_last_element = all(
other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
for other_token in same_line_remaining_tokens
)
if not same_line_remaining_tokens or not is_last_element:
return False
def get_curline_index_start():
"""Get the index denoting the start of the current line"""
for subindex, token in enumerate(reversed(tokens[:index])):
# See Lib/tokenize.py and Lib/token.py in cpython for more info
if token.type in (tokenize.NEWLINE, tokenize.NL):
return index - subindex
return 0
curline_start = get_curline_index_start()
expected_tokens = {"return", "yield"}
for prevtoken in tokens[curline_start:index]:
if "=" in prevtoken.string or prevtoken.string in expected_tokens:
return True
return False
class RefactoringChecker(checkers.BaseTokenChecker):
"""Looks for code which can be refactored
This checker also mixes the astroid and the token approaches
in order to create knowledge about whether an "else if" node
is a true "else if" node, or an "elif" node.
"""
__implements__ = (interfaces.ITokenChecker, interfaces.IAstroidChecker)
name = "refactoring"
msgs = {
"R1701": (
"Consider merging these isinstance calls to isinstance(%s, (%s))",
"consider-merging-isinstance",
"Used when multiple consecutive isinstance calls can be merged into one.",
),
"R1706": (
"Consider using ternary (%s)",
"consider-using-ternary",
"Used when one of known pre-python 2.5 ternary syntax is used.",
),
"R1709": (
"Boolean expression may be simplified to %s",
"simplify-boolean-expression",
"Emitted when redundant pre-python 2.5 ternary syntax is used.",
),
"R1702": (
"Too many nested blocks (%s/%s)",
"too-many-nested-blocks",
"Used when a function or a method has too many nested "
"blocks. This makes the code less understandable and "
"maintainable.",
{"old_names": [("R0101", "too-many-nested-blocks")]},
),
"R1703": (
"The if statement can be replaced with %s",
"simplifiable-if-statement",
"Used when an if statement can be replaced with 'bool(test)'. ",
{"old_names": [("R0102", "simplifiable-if-statement")]},
),
"R1704": (
"Redefining argument with the local name %r",
"redefined-argument-from-local",
"Used when a local name is redefining an argument, which might "
"suggest a potential error. This is taken in account only for "
"a handful of name binding operations, such as for iteration, "
"with statement assignment and exception handler assignment.",
),
"R1705": (
'Unnecessary "%s" after "return"',
"no-else-return",
"Used in order to highlight an unnecessary block of "
"code following an if containing a return statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"return statement.",
),
"R1707": (
"Disallow trailing comma tuple",
"trailing-comma-tuple",
"In Python, a tuple is actually created by the comma symbol, "
"not by the parentheses. Unfortunately, one can actually create a "
"tuple by misplacing a trailing comma, which can lead to potential "
"weird bugs in your code. You should always use parentheses "
"explicitly for creating a tuple.",
),
"R1708": (
"Do not raise StopIteration in generator, use return statement instead",
"stop-iteration-return",
"According to PEP479, the raise of StopIteration to end the loop of "
"a generator may lead to hard to find bugs. This PEP specify that "
"raise StopIteration has to be replaced by a simple return statement",
),
"R1710": (
"Either all return statements in a function should return an expression, "
"or none of them should.",
"inconsistent-return-statements",
"According to PEP8, if any return statement returns an expression, "
"any return statements where no value is returned should explicitly "
"state this as return None, and an explicit return statement "
"should be present at the end of the function (if reachable)",
),
"R1711": (
"Useless return at end of function or method",
"useless-return",
'Emitted when a single "return" or "return None" statement is found '
"at the end of function or method definition. This statement can safely be "
"removed because Python will implicitly return None",
),
"R1712": (
"Consider using tuple unpacking for swapping variables",
"consider-swap-variables",
"You do not have to use a temporary variable in order to "
'swap variables. Using "tuple unpacking" to directly swap '
"variables makes the intention more clear.",
),
"R1713": (
"Consider using str.join(sequence) for concatenating "
"strings from an iterable",
"consider-using-join",
"Using str.join(sequence) is faster, uses less memory "
"and increases readability compared to for-loop iteration.",
),
"R1714": (
'Consider merging these comparisons with "in" to %r',
"consider-using-in",
"To check if a variable is equal to one of many values,"
'combine the values into a tuple and check if the variable is contained "in" it '
"instead of checking for equality against each of the values."
"This is faster and less verbose.",
),
"R1715": (
"Consider using dict.get for getting values from a dict "
"if a key is present or a default if not",
"consider-using-get",
"Using the builtin dict.get for getting a value from a dictionary "
"if a key is present or a default if not, is simpler and considered "
"more idiomatic, although sometimes a bit slower",
),
"R1716": (
"Simplify chained comparison between the operands",
"chained-comparison",
"This message is emitted when pylint encounters boolean operation like"
'"a < b and b < c", suggesting instead to refactor it to "a < b < c"',
),
"R1717": (
"Consider using a dictionary comprehension",
"consider-using-dict-comprehension",
"Emitted when we detect the creation of a dictionary "
"using the dict() callable and a transient list. "
"Although there is nothing syntactically wrong with this code, "
"it is hard to read and can be simplified to a dict comprehension."
"Also it is faster since you don't need to create another "
"transient list",
),
"R1718": (
"Consider using a set comprehension",
"consider-using-set-comprehension",
"Although there is nothing syntactically wrong with this code, "
"it is hard to read and can be simplified to a set comprehension."
"Also it is faster since you don't need to create another "
"transient list",
),
"R1719": (
"The if expression can be replaced with %s",
"simplifiable-if-expression",
"Used when an if expression can be replaced with 'bool(test)'. ",
),
"R1720": (
'Unnecessary "%s" after "raise"',
"no-else-raise",
"Used in order to highlight an unnecessary block of "
"code following an if containing a raise statement. "
"As such, it will warn when it encounters an else "
"following a chain of ifs, all of them containing a "
"raise statement.",
),
"R1721": (
"Unnecessary use of a comprehension",
"unnecessary-comprehension",
"Instead of using an identitiy comprehension, "
"consider using the list, dict or set constructor. "
"It is faster and simpler.",
),
}
options = (
(
"max-nested-blocks",
{
"default": 5,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of nested blocks for function / method body",
},
),
(
"never-returning-functions",
{
"default": ("sys.exit",),
"type": "csv",
"help": "Complete name of functions that never returns. When checking "
"for inconsistent-return-statements if a never returning function is "
"called then it will be considered as an explicit return statement "
"and no message will be printed.",
},
),
)
priority = 0
def __init__(self, linter=None):
checkers.BaseTokenChecker.__init__(self, linter)
self._return_nodes = {}
self._init()
self._never_returning_functions = None
def _init(self):
self._nested_blocks = []
self._elifs = []
self._nested_blocks_msg = None
self._reported_swap_nodes = set()
def open(self):
# do this in open since config not fully initialized in __init__
self._never_returning_functions = set(self.config.never_returning_functions)
@decorators.cachedproperty
def _dummy_rgx(self):
return lint_utils.get_global_option(self, "dummy-variables-rgx", default=None)
@staticmethod
def _is_bool_const(node):
return isinstance(node.value, astroid.Const) and isinstance(
node.value.value, bool
)
def _is_actual_elif(self, node):
"""Check if the given node is an actual elif
This is a problem we're having with the builtin ast module,
which splits `elif` branches into a separate if statement.
Unfortunately we need to know the exact type in certain
cases.
"""
if isinstance(node.parent, astroid.If):
orelse = node.parent.orelse
# current if node must directly follow an "else"
if orelse and orelse == [node]:
if (node.lineno, node.col_offset) in self._elifs:
return True
return False
def _check_simplifiable_if(self, node):
"""Check if the given if node can be simplified.
The if statement can be reduced to a boolean expression
in some cases. For instance, if there are two branches
and both of them return a boolean value that depends on
the result of the statement's test, then this can be reduced
to `bool(test)` without losing any functionality.
"""
if self._is_actual_elif(node):
# Not interested in if statements with multiple branches.
return
if len(node.orelse) != 1 or len(node.body) != 1:
return
# Check if both branches can be reduced.
first_branch = node.body[0]
else_branch = node.orelse[0]
if isinstance(first_branch, astroid.Return):
if not isinstance(else_branch, astroid.Return):
return
first_branch_is_bool = self._is_bool_const(first_branch)
else_branch_is_bool = self._is_bool_const(else_branch)
reduced_to = "'return bool(test)'"
elif isinstance(first_branch, astroid.Assign):
if not isinstance(else_branch, astroid.Assign):
return
# Check if we assign to the same value
first_branch_targets = [
target.name
for target in first_branch.targets
if isinstance(target, astroid.AssignName)
]
else_branch_targets = [
target.name
for target in else_branch.targets
if isinstance(target, astroid.AssignName)
]
if not first_branch_targets or not else_branch_targets:
return
if sorted(first_branch_targets) != sorted(else_branch_targets):
return
first_branch_is_bool = self._is_bool_const(first_branch)
else_branch_is_bool = self._is_bool_const(else_branch)
reduced_to = "'var = bool(test)'"
else:
return
if not first_branch_is_bool or not else_branch_is_bool:
return
if not first_branch.value.value:
# This is a case that can't be easily simplified and
# if it can be simplified, it will usually result in a
# code that's harder to understand and comprehend.
# Let's take for instance `arg and arg <= 3`. This could theoretically be
# reduced to `not arg or arg > 3`, but the net result is that now the
# condition is harder to understand, because it requires understanding of
# an extra clause:
# * first, there is the negation of truthness with `not arg`
# * the second clause is `arg > 3`, which occurs when arg has
# a truth value, but it implies that `arg > 3` is equivalent
# with `arg and arg > 3`, which means that the user must
# think about this assumption when evaluating `arg > 3`.
# The original form is easier to grasp.
return
self.add_message("simplifiable-if-statement", node=node, args=(reduced_to,))
def process_tokens(self, tokens):
# Process tokens and look for 'if' or 'elif'
for index, token in enumerate(tokens):
token_string = token[1]
if token_string == "elif":
# AST exists by the time process_tokens is called, so
# it's safe to assume tokens[index+1]
# exists. tokens[index+1][2] is the elif's position as
# reported by CPython and PyPy,
# tokens[index][2] is the actual position and also is
# reported by IronPython.
self._elifs.extend([tokens[index][2], tokens[index + 1][2]])
elif _is_trailing_comma(tokens, index):
if self.linter.is_message_enabled("trailing-comma-tuple"):
self.add_message("trailing-comma-tuple", line=token.start[0])
def leave_module(self, _):
self._init()
@utils.check_messages("too-many-nested-blocks")
def visit_tryexcept(self, node):
self._check_nested_blocks(node)
visit_tryfinally = visit_tryexcept
visit_while = visit_tryexcept
def _check_redefined_argument_from_local(self, name_node):
if self._dummy_rgx and self._dummy_rgx.match(name_node.name):
return
if not name_node.lineno:
# Unknown position, maybe it is a manually built AST?
return
scope = name_node.scope()
if not isinstance(scope, astroid.FunctionDef):
return
for defined_argument in scope.args.nodes_of_class(
astroid.AssignName, skip_klass=(astroid.Lambda,)
):
if defined_argument.name == name_node.name:
self.add_message(
"redefined-argument-from-local",
node=name_node,
args=(name_node.name,),
)
@utils.check_messages("redefined-argument-from-local", "too-many-nested-blocks")
def visit_for(self, node):
self._check_nested_blocks(node)
for name in node.target.nodes_of_class(astroid.AssignName):
self._check_redefined_argument_from_local(name)
@utils.check_messages("redefined-argument-from-local")
def visit_excepthandler(self, node):
if node.name and isinstance(node.name, astroid.AssignName):
self._check_redefined_argument_from_local(node.name)
@utils.check_messages("redefined-argument-from-local")
def visit_with(self, node):
for _, names in node.items:
if not names:
continue
for name in names.nodes_of_class(astroid.AssignName):
self._check_redefined_argument_from_local(name)
def _check_superfluous_else(self, node, msg_id, returning_node_class):
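# For example, in "if cond: return x" followed by "else: return y" the else
# is superfluous: the if branch always returns, so the else branch can be
# dedented and the else dropped.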
if not node.orelse:
# Not interested in if statements without else.
return
if self._is_actual_elif(node):
# Not interested in elif nodes; only if
return
if _if_statement_is_always_returning(node, returning_node_class):
orelse = node.orelse[0]
followed_by_elif = (orelse.lineno, orelse.col_offset) in self._elifs
self.add_message(
msg_id, node=node, args="elif" if followed_by_elif else "else"
)
def _check_superfluous_else_return(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-return", returning_node_class=astroid.Return
)
def _check_superfluous_else_raise(self, node):
return self._check_superfluous_else(
node, msg_id="no-else-raise", returning_node_class=astroid.Raise
)
def _check_consider_get(self, node):
def type_and_name_are_equal(node_a, node_b):
for _type in [astroid.Name, astroid.AssignName]:
if all(isinstance(_node, _type) for _node in [node_a, node_b]):
return node_a.name == node_b.name
if all(isinstance(_node, astroid.Const) for _node in [node_a, node_b]):
return node_a.value == node_b.value
return False
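# For example, the shape being matched is:
# if key in dictionary:
#     variable = dictionary[key]
# else:
#     variable = default_value
# which dictionary.get(key, default_value) expresses in a single call.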
if_block_ok = (
isinstance(node.test, astroid.Compare)
and len(node.body) == 1
and isinstance(node.body[0], astroid.Assign)
and isinstance(node.body[0].value, astroid.Subscript)
and type_and_name_are_equal(node.body[0].value.value, node.test.ops[0][1])
and isinstance(node.body[0].value.slice, astroid.Index)
and type_and_name_are_equal(node.body[0].value.slice.value, node.test.left)
and len(node.body[0].targets) == 1
and isinstance(node.body[0].targets[0], astroid.AssignName)
and isinstance(utils.safe_infer(node.test.ops[0][1]), astroid.Dict)
)
if if_block_ok and not node.orelse:
self.add_message("consider-using-get", node=node)
elif (
if_block_ok
and len(node.orelse) == 1
and isinstance(node.orelse[0], astroid.Assign)
and type_and_name_are_equal(
node.orelse[0].targets[0], node.body[0].targets[0]
)
and len(node.orelse[0].targets) == 1
):
self.add_message("consider-using-get", node=node)
@utils.check_messages(
"too-many-nested-blocks",
"simplifiable-if-statement",
"no-else-return",
"no-else-raise",
"consider-using-get",
)
def visit_if(self, node):
self._check_simplifiable_if(node)
self._check_nested_blocks(node)
self._check_superfluous_else_return(node)
self._check_superfluous_else_raise(node)
self._check_consider_get(node)
@utils.check_messages("simplifiable-if-expression")
def visit_ifexp(self, node):
self._check_simplifiable_ifexp(node)
def _check_simplifiable_ifexp(self, node):
if not isinstance(node.body, astroid.Const) or not isinstance(
node.orelse, astroid.Const
):
return
if not isinstance(node.body.value, bool) or not isinstance(
node.orelse.value, bool
):
return
if isinstance(node.test, astroid.Compare):
test_reduced_to = "test"
else:
test_reduced_to = "bool(test)"
if (node.body.value, node.orelse.value) == (True, False):
reduced_to = "'{}'".format(test_reduced_to)
elif (node.body.value, node.orelse.value) == (False, True):
reduced_to = "'not test'"
else:
return
self.add_message("simplifiable-if-expression", node=node, args=(reduced_to,))
@utils.check_messages(
"too-many-nested-blocks", "inconsistent-return-statements", "useless-return"
)
def leave_functiondef(self, node):
# check left-over nested blocks stack
self._emit_nested_blocks_message_if_needed(self._nested_blocks)
# new scope = reinitialize the stack of nested blocks
self._nested_blocks = []
# check consistent return statements
self._check_consistent_returns(node)
# check for single return or return None at the end
self._check_return_at_the_end(node)
self._return_nodes[node.name] = []
@utils.check_messages("stop-iteration-return")
def visit_raise(self, node):
self._check_stop_iteration_inside_generator(node)
def _check_stop_iteration_inside_generator(self, node):
"""Check if an exception of type StopIteration is raised inside a generator"""
frame = node.frame()
if not isinstance(frame, astroid.FunctionDef) or not frame.is_generator():
return
if utils.node_ignores_exception(node, StopIteration):
return
if not node.exc:
return
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return
if self._check_exception_inherit_from_stopiteration(exc):
self.add_message("stop-iteration-return", node=node)
@staticmethod
def _check_exception_inherit_from_stopiteration(exc):
"""Return True if the exception node in argument inherit from StopIteration"""
stopiteration_qname = "{}.StopIteration".format(utils.EXCEPTIONS_MODULE)
return any(_class.qname() == stopiteration_qname for _class in exc.mro())
def _check_consider_using_comprehension_constructor(self, node):
if (
isinstance(node.func, astroid.Name)
and node.args
and isinstance(node.args[0], astroid.ListComp)
):
if node.func.name == "dict" and not isinstance(
node.args[0].elt, astroid.Call
):
message_name = "consider-using-dict-comprehension"
self.add_message(message_name, node=node)
elif node.func.name == "set":
message_name = "consider-using-set-comprehension"
self.add_message(message_name, node=node)
@utils.check_messages(
"stop-iteration-return",
"consider-using-dict-comprehension",
"consider-using-set-comprehension",
)
def visit_call(self, node):
self._check_raising_stopiteration_in_generator_next_call(node)
self._check_consider_using_comprehension_constructor(node)
def _check_raising_stopiteration_in_generator_next_call(self, node):
"""Check if a StopIteration exception is raised by the call to next function
If the next value has a default value, then do not add message.
:param node: Check to see if this Call node is a next function
:type node: :class:`astroid.node_classes.Call`
"""
def _looks_like_infinite_iterator(param):
inferred = utils.safe_infer(param)
if inferred:
return inferred.qname() in KNOWN_INFINITE_ITERATORS
return False
if isinstance(node.func, astroid.Attribute):
# A next() method, which is not what we want.
return
inferred = utils.safe_infer(node.func)
if getattr(inferred, "name", "") == "next":
frame = node.frame()
# The next builtin can only have up to two
# positional arguments and no keyword arguments
has_sentinel_value = len(node.args) > 1
if (
isinstance(frame, astroid.FunctionDef)
and frame.is_generator()
and not has_sentinel_value
and not utils.node_ignores_exception(node, StopIteration)
and not _looks_like_infinite_iterator(node.args[0])
):
self.add_message("stop-iteration-return", node=node)
def _check_nested_blocks(self, node):
"""Update and check the number of nested blocks
"""
# only check block levels inside functions or methods
if not isinstance(node.scope(), astroid.FunctionDef):
return
# messages are triggered on leaving the nested block. Here we save the
# stack in case the current node isn't nested in the previous one
nested_blocks = self._nested_blocks[:]
if node.parent == node.scope():
self._nested_blocks = [node]
else:
# go through ancestors from the most nested to the less
for ancestor_node in reversed(self._nested_blocks):
if ancestor_node == node.parent:
break
self._nested_blocks.pop()
# if the node is an elif, this should not be another nesting level
if isinstance(node, astroid.If) and self._is_actual_elif(node):
if self._nested_blocks:
self._nested_blocks.pop()
self._nested_blocks.append(node)
# send message only once per group of nested blocks
if len(nested_blocks) > len(self._nested_blocks):
self._emit_nested_blocks_message_if_needed(nested_blocks)
def _emit_nested_blocks_message_if_needed(self, nested_blocks):
if len(nested_blocks) > self.config.max_nested_blocks:
self.add_message(
"too-many-nested-blocks",
node=nested_blocks[0],
args=(len(nested_blocks), self.config.max_nested_blocks),
)
@staticmethod
def _duplicated_isinstance_types(node):
"""Get the duplicated types from the underlying isinstance calls.
:param astroid.BoolOp node: Node which should contain a bunch of isinstance calls.
:returns: Dictionary of the comparison objects from the isinstance calls,
to duplicate values from consecutive calls.
:rtype: dict
"""
duplicated_objects = set()
all_types = collections.defaultdict(set)
for call in node.values:
if not isinstance(call, astroid.Call) or len(call.args) != 2:
continue
inferred = utils.safe_infer(call.func)
if not inferred or not utils.is_builtin_object(inferred):
continue
if inferred.name != "isinstance":
continue
isinstance_object = call.args[0].as_string()
isinstance_types = call.args[1]
if isinstance_object in all_types:
duplicated_objects.add(isinstance_object)
if isinstance(isinstance_types, astroid.Tuple):
elems = [
class_type.as_string() for class_type in isinstance_types.itered()
]
else:
elems = [isinstance_types.as_string()]
all_types[isinstance_object].update(elems)
# Remove all keys which are not duplicated
return {
key: value for key, value in all_types.items() if key in duplicated_objects
}
def _check_consider_merging_isinstance(self, node):
"""Check isinstance calls which can be merged together."""
if node.op != "or":
return
first_args = self._duplicated_isinstance_types(node)
for duplicated_name, class_names in first_args.items():
names = sorted(name for name in class_names)
self.add_message(
"consider-merging-isinstance",
node=node,
args=(duplicated_name, ", ".join(names)),
)
def _check_consider_using_in(self, node):
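# For example, "x == 1 or x == 2" can be rewritten as "x in (1, 2)" and
# "x != 1 and x != 2" as "x not in (1, 2)".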
allowed_ops = {"or": "==", "and": "!="}
if node.op not in allowed_ops or len(node.values) < 2:
return
for value in node.values:
if (
not isinstance(value, astroid.Compare)
or len(value.ops) != 1
or value.ops[0][0] not in allowed_ops[node.op]
):
return
for comparable in value.left, value.ops[0][1]:
if isinstance(comparable, astroid.Call):
return
# Gather variables and values from comparisons
variables, values = [], []
for value in node.values:
variable_set = set()
for comparable in value.left, value.ops[0][1]:
if isinstance(comparable, astroid.Name):
variable_set.add(comparable.as_string())
values.append(comparable.as_string())
variables.append(variable_set)
# Look for (common-)variables that occur in all comparisons
common_variables = reduce(lambda a, b: a.intersection(b), variables)
if not common_variables:
return
# Gather information for the suggestion
common_variable = sorted(list(common_variables))[0]
comprehension = "in" if node.op == "or" else "not in"
values = list(collections.OrderedDict.fromkeys(values))
values.remove(common_variable)
values_string = ", ".join(values) if len(values) != 1 else values[0] + ","
suggestion = "%s %s (%s)" % (common_variable, comprehension, values_string)
self.add_message("consider-using-in", node=node, args=(suggestion,))
def _check_chained_comparison(self, node):
"""Check if there is any chained comparison in the expression.
Add a refactoring message if a boolOp contains comparison like a < b and b < c,
which can be chained as a < b < c.
Care is taken to avoid simplifying a < b < c and b < d.
"""
if node.op != "and" or len(node.values) < 2:
return
def _find_lower_upper_bounds(comparison_node, uses):
left_operand = comparison_node.left
for operator, right_operand in comparison_node.ops:
for operand in (left_operand, right_operand):
value = None
if isinstance(operand, astroid.Name):
value = operand.name
elif isinstance(operand, astroid.Const):
value = operand.value
if value is None:
continue
if operator in ("<", "<="):
if operand is left_operand:
uses[value]["lower_bound"].add(comparison_node)
elif operand is right_operand:
uses[value]["upper_bound"].add(comparison_node)
elif operator in (">", ">="):
if operand is left_operand:
uses[value]["upper_bound"].add(comparison_node)
elif operand is right_operand:
uses[value]["lower_bound"].add(comparison_node)
left_operand = right_operand
uses = collections.defaultdict(
lambda: {"lower_bound": set(), "upper_bound": set()}
)
for comparison_node in node.values:
if isinstance(comparison_node, astroid.Compare):
_find_lower_upper_bounds(comparison_node, uses)
for _, bounds in uses.items():
num_shared = len(bounds["lower_bound"].intersection(bounds["upper_bound"]))
num_lower_bounds = len(bounds["lower_bound"])
num_upper_bounds = len(bounds["upper_bound"])
if num_shared < num_lower_bounds and num_shared < num_upper_bounds:
self.add_message("chained-comparison", node=node)
break
@utils.check_messages(
"consider-merging-isinstance", "consider-using-in", "chained-comparison"
)
def visit_boolop(self, node):
self._check_consider_merging_isinstance(node)
self._check_consider_using_in(node)
self._check_chained_comparison(node)
@staticmethod
def _is_simple_assignment(node):
return (
isinstance(node, astroid.Assign)
and len(node.targets) == 1
and isinstance(node.targets[0], astroid.node_classes.AssignName)
and isinstance(node.value, astroid.node_classes.Name)
)
def _check_swap_variables(self, node):
if not node.next_sibling() or not node.next_sibling().next_sibling():
return
assignments = [node, node.next_sibling(), node.next_sibling().next_sibling()]
if not all(self._is_simple_assignment(node) for node in assignments):
return
if any(node in self._reported_swap_nodes for node in assignments):
return
left = [node.targets[0].name for node in assignments]
right = [node.value.name for node in assignments]
if left[0] == right[-1] and left[1:] == right[:-1]:
self._reported_swap_nodes.update(assignments)
message = "consider-swap-variables"
self.add_message(message, node=node)
@utils.check_messages(
"simplify-boolean-expression",
"consider-using-ternary",
"consider-swap-variables",
)
def visit_assign(self, node):
self._check_swap_variables(node)
if self._is_and_or_ternary(node.value):
cond, truth_value, false_value = self._and_or_ternary_arguments(node.value)
else:
return
if all(
isinstance(value, astroid.Compare) for value in (truth_value, false_value)
):
return
inferred_truth_value = utils.safe_infer(truth_value)
if inferred_truth_value in (None, astroid.Uninferable):
truth_boolean_value = True
else:
truth_boolean_value = truth_value.bool_value()
if truth_boolean_value is False:
message = "simplify-boolean-expression"
suggestion = false_value.as_string()
else:
message = "consider-using-ternary"
suggestion = "{truth} if {cond} else {false}".format(
truth=truth_value.as_string(),
cond=cond.as_string(),
false=false_value.as_string(),
)
self.add_message(message, node=node, args=(suggestion,))
visit_return = visit_assign
def _check_consider_using_join(self, aug_assign):
"""
We start with the augmented assignment and work our way upwards.
Names of variables for nodes if match successful:
result = '' # assign
for number in ['1', '2', '3'] # for_loop
result += number # aug_assign
"""
for_loop = aug_assign.parent
if not isinstance(for_loop, astroid.For) or len(for_loop.body) > 1:
return
assign = for_loop.previous_sibling()
if not isinstance(assign, astroid.Assign):
return
result_assign_names = {
target.name
for target in assign.targets
if isinstance(target, astroid.AssignName)
}
is_concat_loop = (
aug_assign.op == "+="
and isinstance(aug_assign.target, astroid.AssignName)
and len(for_loop.body) == 1
and aug_assign.target.name in result_assign_names
and isinstance(assign.value, astroid.Const)
and isinstance(assign.value.value, str)
and isinstance(aug_assign.value, astroid.Name)
and aug_assign.value.name == for_loop.target.name
)
if is_concat_loop:
self.add_message("consider-using-join", node=aug_assign)
@utils.check_messages("consider-using-join")
def visit_augassign(self, node):
self._check_consider_using_join(node)
@utils.check_messages("unnecessary-comprehension")
def visit_comprehension(self, node):
self._check_unnecessary_comprehension(node)
def _check_unnecessary_comprehension(self, node):
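# For example, "[x for x in iterable]" or "{k: v for k, v in d.items()}"
# merely copy their input and are reported as unnecessary comprehensions.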
if (
isinstance(node.parent, astroid.GeneratorExp)
or len(node.ifs) != 0
or len(node.parent.generators) != 1
or node.is_async
):
return
if (
isinstance(node.parent, astroid.DictComp)
and isinstance(node.parent.key, astroid.Name)
and isinstance(node.parent.value, astroid.Name)
and isinstance(node.target, astroid.Tuple)
and all(isinstance(elt, astroid.AssignName) for elt in node.target.elts)
):
expr_list = [node.parent.key.name, node.parent.value.name]
target_list = [elt.name for elt in node.target.elts]
elif isinstance(node.parent, (astroid.ListComp, astroid.SetComp)):
expr = node.parent.elt
expr_list = (
expr.name
if isinstance(expr, astroid.Name)
else (
[elt.name for elt in expr.elts if isinstance(elt, astroid.Name)]
if isinstance(expr, astroid.Tuple)
else []
)
)
target = node.parent.generators[0].target
target_list = (
target.name
if isinstance(target, astroid.AssignName)
else (
[
elt.name
for elt in target.elts
if isinstance(elt, astroid.AssignName)
]
if isinstance(target, astroid.Tuple)
else []
)
)
else:
return
if expr_list == target_list != []:
self.add_message("unnecessary-comprehension", node=node)
@staticmethod
def _is_and_or_ternary(node):
"""
Returns true if node is 'condition and true_value or false_value' form.
All of: condition, true_value and false_value should not be a complex boolean expression
"""
return (
isinstance(node, astroid.BoolOp)
and node.op == "or"
and len(node.values) == 2
and isinstance(node.values[0], astroid.BoolOp)
and not isinstance(node.values[1], astroid.BoolOp)
and node.values[0].op == "and"
and not isinstance(node.values[0].values[1], astroid.BoolOp)
and len(node.values[0].values) == 2
)
@staticmethod
def _and_or_ternary_arguments(node):
false_value = node.values[1]
condition, true_value = node.values[0].values
return condition, true_value, false_value
def visit_functiondef(self, node):
self._return_nodes[node.name] = list(
node.nodes_of_class(astroid.Return, skip_klass=astroid.FunctionDef)
)
def _check_consistent_returns(self, node):
"""Check that all return statements inside a function are consistent.
Return statements are consistent if:
- all returns are explicit and there is no implicit return;
- all returns are bare (carry no value), in which case an implicit return may also be present.
Args:
node (astroid.FunctionDef): the function holding the return statements.
"""
# explicit return statements are those with a not None value
explicit_returns = [
_node for _node in self._return_nodes[node.name] if _node.value is not None
]
if not explicit_returns:
return
if len(explicit_returns) == len(
self._return_nodes[node.name]
) and self._is_node_return_ended(node):
return
self.add_message("inconsistent-return-statements", node=node)
def _is_node_return_ended(self, node):
"""Check if the node ends with an explicit return statement.
Args:
node (astroid.NodeNG): node to be checked.
Returns:
bool: True if the node ends with an explicit return statement, False otherwise.
"""
# Recursion base case
if isinstance(node, astroid.Return):
return True
if isinstance(node, astroid.Call):
try:
funcdef_node = node.func.inferred()[0]
if self._is_function_def_never_returning(funcdef_node):
return True
except astroid.InferenceError:
pass
# Avoid the check inside while loops as we don't know
# if they will be completed
if isinstance(node, astroid.While):
return True
if isinstance(node, astroid.Raise):
# a Raise statement doesn't need to end with a return statement
# but if the exception raised is handled, then the handler has to
# end with a return statement
if not node.exc:
# Ignore bare raises
return True
if not utils.is_node_inside_try_except(node):
# If the raise statement is not inside a try/except statement
# then the exception is raised and cannot be caught. No need
# to infer it.
return True
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return False
exc_name = exc.pytype().split(".")[-1]
handlers = utils.get_exception_handlers(node, exc_name)
handlers = list(handlers) if handlers is not None else []
if handlers:
# among all the handlers handling the exception at least one
# must end with a return statement
return any(
self._is_node_return_ended(_handler) for _handler in handlers
)
# if no handlers handle the exception then it's ok
return True
if isinstance(node, astroid.If):
# if statement is returning if there are exactly two return statements in its
# children: one for the body part, the other for the orelse part
# Do not check if inner function definitions are return ended.
is_orelse_returning = any(
self._is_node_return_ended(_ore)
for _ore in node.orelse
if not isinstance(_ore, astroid.FunctionDef)
)
is_if_returning = any(
self._is_node_return_ended(_ifn)
for _ifn in node.body
if not isinstance(_ifn, astroid.FunctionDef)
)
return is_if_returning and is_orelse_returning
# recurses on the children of the node except for those which are except handler
# because one cannot be sure that the handler will really be used
return any(
self._is_node_return_ended(_child)
for _child in node.get_children()
if not isinstance(_child, astroid.ExceptHandler)
)
def _is_function_def_never_returning(self, node):
"""Return True if the function never returns. False otherwise.
Args:
node (astroid.FunctionDef): function definition node to be analyzed.
Returns:
bool: True if the function never returns, False otherwise.
"""
try:
return node.qname() in self._never_returning_functions
except TypeError:
return False
def _check_return_at_the_end(self, node):
"""Check for presence of a *single* return statement at the end of a
function. "return" or "return None" are useless because None is the
default return value if they are missing.
NOTE: produces a message only if there is a single return statement
in the function body. Otherwise _check_consistent_returns() is called!
Per its implementation and PEP8 we can have a "return None" at the end
of the function body if there are other return statements before that!
"""
if len(self._return_nodes[node.name]) > 1:
return
if len(node.body) <= 1:
return
last = node.body[-1]
if isinstance(last, astroid.Return):
# e.g. "return"
if last.value is None:
self.add_message("useless-return", node=node)
# e.g. "return None"
elif isinstance(last.value, astroid.Const) and (last.value.value is None):
self.add_message("useless-return", node=node)
class RecommandationChecker(checkers.BaseChecker):
__implements__ = (interfaces.IAstroidChecker,)
name = "refactoring"
msgs = {
"C0200": (
"Consider using enumerate instead of iterating with range and len",
"consider-using-enumerate",
"Emitted when code that iterates with range and len is "
"encountered. Such code can be simplified by using the "
"enumerate builtin.",
),
"C0201": (
"Consider iterating the dictionary directly instead of calling .keys()",
"consider-iterating-dictionary",
"Emitted when the keys of a dictionary are iterated through the .keys() "
"method. It is enough to just iterate through the dictionary itself, as "
'in "for key in dictionary".',
),
}
@staticmethod
def _is_builtin(node, function):
inferred = utils.safe_infer(node)
if not inferred:
return False
return utils.is_builtin_object(inferred) and inferred.name == function
@utils.check_messages("consider-iterating-dictionary")
def visit_call(self, node):
inferred = utils.safe_infer(node.func)
if not inferred:
return
if not isinstance(inferred, astroid.BoundMethod):
return
if not isinstance(inferred.bound, astroid.Dict) or inferred.name != "keys":
return
if isinstance(node.parent, (astroid.For, astroid.Comprehension)):
self.add_message("consider-iterating-dictionary", node=node)
@utils.check_messages("consider-using-enumerate")
def visit_for(self, node):
"""Emit a convention whenever range and len are used for indexing."""
# Verify that we have a `range([start], len(...), [stop])` call and
# that the object which is iterated is used as a subscript in the
# body of the for.
# Is it a proper range call?
if not isinstance(node.iter, astroid.Call):
return
if not self._is_builtin(node.iter.func, "range"):
return
if len(node.iter.args) == 2 and not _is_constant_zero(node.iter.args[0]):
return
if len(node.iter.args) > 2:
return
# Is it a proper len call?
if not isinstance(node.iter.args[-1], astroid.Call):
return
second_func = node.iter.args[-1].func
if not self._is_builtin(second_func, "len"):
return
len_args = node.iter.args[-1].args
if not len_args or len(len_args) != 1:
return
iterating_object = len_args[0]
if not isinstance(iterating_object, astroid.Name):
return
# If we're defining __iter__ on self, enumerate won't work
scope = node.scope()
if iterating_object.name == "self" and scope.name == "__iter__":
return
# Verify that the body of the for loop uses a subscript
# with the object that was iterated. This uses some heuristics
# in order to make sure that the same object is used in the
# for body.
for child in node.body:
for subscript in child.nodes_of_class(astroid.Subscript):
if not isinstance(subscript.value, astroid.Name):
continue
if not isinstance(subscript.slice, astroid.Index):
continue
if not isinstance(subscript.slice.value, astroid.Name):
continue
if subscript.slice.value.name != node.target.name:
continue
if iterating_object.name != subscript.value.name:
continue
if subscript.value.scope() != node.scope():
# Ignore this subscript if it's not in the same
# scope. This means that in the body of the for
# loop, another scope was created, where the same
# name for the iterating object was used.
continue
self.add_message("consider-using-enumerate", node=node)
return
class NotChecker(checkers.BaseChecker):
"""checks for too many not in comparison expressions
- "not not" should trigger a warning
- "not" followed by a comparison should trigger a warning
"""
__implements__ = (interfaces.IAstroidChecker,)
msgs = {
"C0113": (
'Consider changing "%s" to "%s"',
"unneeded-not",
"Used when a boolean expression contains an unneeded negation.",
)
}
name = "refactoring"
reverse_op = {
"<": ">=",
"<=": ">",
">": "<=",
">=": "<",
"==": "!=",
"!=": "==",
"in": "not in",
"is": "is not",
}
# sets are not ordered, so for example "not set(LEFT_VALS) <= set(RIGHT_VALS)" is
# not equivalent to "set(LEFT_VALS) > set(RIGHT_VALS)"
skipped_nodes = (astroid.Set,)
# 'builtins' py3, '__builtin__' py2
skipped_classnames = [
"%s.%s" % (builtins.__name__, qname) for qname in ("set", "frozenset")
]
@utils.check_messages("unneeded-not")
def visit_unaryop(self, node):
if node.op != "not":
return
operand = node.operand
if isinstance(operand, astroid.UnaryOp) and operand.op == "not":
self.add_message(
"unneeded-not",
node=node,
args=(node.as_string(), operand.operand.as_string()),
)
elif isinstance(operand, astroid.Compare):
left = operand.left
# ignore multiple comparisons
if len(operand.ops) > 1:
return
operator, right = operand.ops[0]
if operator not in self.reverse_op:
return
# Ignore __ne__ as function of __eq__
frame = node.frame()
if frame.name == "__ne__" and operator == "==":
return
for _type in (utils.node_type(left), utils.node_type(right)):
if not _type:
return
if isinstance(_type, self.skipped_nodes):
return
if (
isinstance(_type, astroid.Instance)
and _type.qname() in self.skipped_classnames
):
return
suggestion = "%s %s %s" % (
left.as_string(),
self.reverse_op[operator],
right.as_string(),
)
self.add_message(
"unneeded-not", node=node, args=(node.as_string(), suggestion)
)
class LenChecker(checkers.BaseChecker):
"""Checks for incorrect usage of len() inside conditions.
Pep8 states:
For sequences, (strings, lists, tuples), use the fact that empty sequences are false.
Yes: if not seq:
if seq:
No: if len(seq):
if not len(seq):
Problems detected:
* if len(sequence):
* if not len(sequence):
* elif len(sequence):
* elif not len(sequence):
* while len(sequence):
* while not len(sequence):
* assert len(sequence):
* assert not len(sequence):
"""
__implements__ = (interfaces.IAstroidChecker,)
# configuration section name
name = "refactoring"
msgs = {
"C1801": (
"Do not use `len(SEQUENCE)` without comparison to determine if a sequence is empty",
"len-as-condition",
"Used when Pylint detects that len(sequence) is being used "
"without explicit comparison inside a condition to determine if a sequence is empty. "
"Instead of coercing the length to a boolean, either "
"rely on the fact that empty sequences are false or "
"compare the length against a scalar.",
)
}
priority = -2
options = ()
@utils.check_messages("len-as-condition")
def visit_call(self, node):
# a len(S) call is used inside a test condition
# could be if, while, assert or if expression statement
# e.g. `if len(S):`
if _is_len_call(node):
# the len() call could also be nested together with other
# boolean operations, e.g. `if z or len(x):`
parent = node.parent
while isinstance(parent, astroid.BoolOp):
parent = parent.parent
# we're finally out of any nested boolean operations so check if
# this len() call is part of a test condition
if not _node_is_test_condition(parent):
return
if not (node is parent.test or parent.test.parent_of(node)):
return
self.add_message("len-as-condition", node=node)
@utils.check_messages("len-as-condition")
def visit_unaryop(self, node):
"""`not len(S)` must become `not S` regardless if the parent block
is a test condition or something else (boolean expression)
e.g. `if not len(S):`"""
if (
isinstance(node, astroid.UnaryOp)
and node.op == "not"
and _is_len_call(node.operand)
):
self.add_message("len-as-condition", node=node)
def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(RefactoringChecker(linter))
linter.register_checker(NotChecker(linter))
linter.register_checker(RecommandationChecker(linter))
linter.register_checker(LenChecker(linter))
| 1 | 11,332 | I don't know if there are strict conventions about this, but I think the name of the warning should be the diagnosis, not the suggested course of action. In this case, that would mean changing the name of the warning to `interactive-exit` or something like that. | PyCQA-pylint | py |
@@ -315,7 +315,7 @@ func (c *controller) certificateRequiresIssuance(ctx context.Context, log logr.L
}
// validate the common name is correct
- expectedCN := pki.CommonNameForCertificate(crt)
+ expectedCN := crt.Spec.CommonName
if expectedCN != cert.Subject.CommonName {
log.Info("certificate common name is not as expected, re-issuing")
return true | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhookbootstrap
import (
"context"
"crypto"
"crypto/x509"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/clock"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
controllerpkg "github.com/jetstack/cert-manager/pkg/controller"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/scheduler"
"github.com/jetstack/cert-manager/pkg/util"
"github.com/jetstack/cert-manager/pkg/util/pki"
)
// The webhook bootstrapper is responsible for managing the CA used
// by cert-manager's own CRD conversion/validation webhook.
// This is required because whilst the conversion webhook is unavailable, it is
// not guaranteed that certificate issuance can proceed so we have a 'bootstrap
// problem'.
// This controller relies on static configuration passed as arguments in order
// to issue certificates without interacting with cert-manager CRDs:
// - --webhook-ca-secret
// - --webhook-serving-secret
// - --webhook-dns-names
// - --webhook-namespace
type controller struct {
webhookCASecret string
webhookServingSecret string
webhookDNSNames []string
webhookNamespace string
scheduledWorkQueue scheduler.ScheduledWorkQueue
secretLister corelisters.SecretLister
kubeClient kubernetes.Interface
clock clock.Clock
// certificateNeedsRenew is a function that can be used to determine whether
// a certificate currently requires renewal.
// This is a field on the controller struct to avoid having to maintain a reference
// to the controller context, and to make it easier to fake out this call during tests.
certificateNeedsRenew func(ctx context.Context, cert *x509.Certificate, crt *cmapi.Certificate) bool
// defined as a field to make it easy to stub out for testing purposes
generatePrivateKeyBytes generatePrivateKeyBytesFn
signCertificate signCertificateFunc
}
type signCertificateFunc func(crt *cmapi.Certificate, signeeKey, signerKey crypto.Signer, signerCert *x509.Certificate) ([]byte, error)
func signCertificateImpl(crt *cmapi.Certificate, signeeKey, signerKey crypto.Signer, signerCert *x509.Certificate) ([]byte, error) {
cert, err := pki.GenerateTemplate(crt)
if err != nil {
return nil, err
}
if signerCert == nil {
signerCert = cert
}
crtData, _, err := pki.SignCertificate(cert, signerCert, signeeKey.Public(), signerKey)
if err != nil {
return nil, err
}
return crtData, nil
}
// Register registers and constructs the controller using the provided context.
// It returns the workqueue to be used to enqueue items, a list of
// InformerSynced functions that must be synced, or an error.
func (c *controller) Register(ctx *controllerpkg.Context) (workqueue.RateLimitingInterface, []cache.InformerSynced, []controllerpkg.RunFunc, error) {
// create a queue used to queue up items to be processed
queue := workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(time.Second*5, time.Minute*30), ControllerName)
// obtain references to all the informers used by this controller
// don't use the SharedInformerFactory here as it is configured to watch
// *all* namespaces, whereas we only want to watch the webhook bootstrap
// namespace for secret resources.
secretsInformer := coreinformers.NewSecretInformer(ctx.Client, ctx.WebhookBootstrapOptions.Namespace, time.Minute*5, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
// build a list of InformerSynced functions that will be returned by the Register method.
// the controller will only begin processing items once all of these informers have synced.
mustSync := []cache.InformerSynced{
secretsInformer.HasSynced,
}
// set all the references to the listers for use by the Sync function
c.secretLister = corelisters.NewSecretLister(secretsInformer.GetIndexer())
// register handler functions
secretsInformer.AddEventHandler(&controllerpkg.QueuingEventHandler{Queue: queue})
c.kubeClient = ctx.Client
// Create a scheduled work queue that calls the ctrl.queue.Add method for
// each object in the queue. This is used to schedule re-checks of
// Certificate resources when they get near to expiry
c.scheduledWorkQueue = scheduler.NewScheduledWorkQueue(queue.Add)
c.webhookDNSNames = ctx.WebhookBootstrapOptions.DNSNames
c.webhookCASecret = ctx.WebhookBootstrapOptions.CASecretName
c.webhookServingSecret = ctx.WebhookBootstrapOptions.ServingSecretName
c.webhookNamespace = ctx.WebhookBootstrapOptions.Namespace
c.certificateNeedsRenew = ctx.IssuerOptions.CertificateNeedsRenew
c.generatePrivateKeyBytes = generatePrivateKeyBytesImpl
c.signCertificate = signCertificateImpl
c.clock = ctx.Clock
return queue, mustSync, []controllerpkg.RunFunc{secretsInformer.Run}, nil
}
func (c *controller) ProcessItem(ctx context.Context, key string) error {
ctx = logf.NewContext(ctx, nil, ControllerName)
log := logf.FromContext(ctx)
if len(c.webhookDNSNames) == 0 {
log.Info("no webhook DNS names provided on start-up, not processing any resources.")
return nil
}
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
log.Error(err, "error parsing resource key in queue")
return nil
}
if c.webhookNamespace != namespace || !(c.webhookCASecret == name || c.webhookServingSecret == name) {
return nil
}
secret, err := c.secretLister.Secrets(namespace).Get(name)
if apierrors.IsNotFound(err) {
log.Info("secret resource no longer exists", "key", key)
return nil
}
if err != nil {
return err
}
switch name {
case c.webhookCASecret:
return c.syncCASecret(ctx, secret)
case c.webhookServingSecret:
return c.syncServingSecret(ctx, secret)
}
return nil
}
func (c *controller) syncCASecret(ctx context.Context, secret *corev1.Secret) error {
log := logf.FromContext(ctx, "ca-secret")
log = logf.WithResource(log, secret)
crt := buildCACertificate(secret)
// read the existing private key
pkData := readSecretDataKey(secret, corev1.TLSPrivateKeyKey)
if pkData == nil {
log.Info("generating new private key")
return c.generatePrivateKey(crt, secret)
}
pk, err := pki.DecodePrivateKeyBytes(pkData)
if err != nil {
log.Info("regenerating new private key")
return c.generatePrivateKey(crt, secret)
}
// read the existing certificate
if !c.certificateRequiresIssuance(ctx, log, secret, pk, crt) {
c.scheduleRenewal(log, secret)
log.Info("ca certificate already up to date")
return nil
}
signedCert, err := c.selfSignCertificate(crt, pk)
if err != nil {
log.Error(err, "error signing certificate")
return err
}
return c.updateSecret(secret, pkData, signedCert, signedCert)
}
func (c *controller) syncServingSecret(ctx context.Context, secret *corev1.Secret) error {
log := logf.FromContext(ctx, "serving-secret")
log = logf.WithResource(log, secret)
crt := buildServingCertificate(secret, c.webhookDNSNames)
// first fetch the CA private key & certificate
caSecret, err := c.secretLister.Secrets(c.webhookNamespace).Get(c.webhookCASecret)
if apierrors.IsNotFound(err) {
log.Error(err, "ca secret does not yet exist")
// TODO: automatically sync the serving secret when the ca secret
// is updated and return nil here instead
return err
}
if err != nil {
return err
}
caPKData := readSecretDataKey(caSecret, corev1.TLSPrivateKeyKey)
caPK, err := pki.DecodePrivateKeyBytes(caPKData)
if err != nil {
log.Error(err, "error decoding CA private key")
return err
}
caCertData := readSecretDataKey(caSecret, corev1.TLSCertKey)
caCert, err := pki.DecodeX509CertificateBytes(caCertData)
if err != nil {
log.Error(err, "error decoding CA certificate data")
return err
}
// read the existing private key
pkData := readSecretDataKey(secret, corev1.TLSPrivateKeyKey)
if pkData == nil {
log.Info("generating new private key")
return c.generatePrivateKey(crt, secret)
}
pk, err := pki.DecodePrivateKeyBytes(pkData)
if err != nil {
log.Info("regenerating new private key")
return c.generatePrivateKey(crt, secret)
}
// read the existing certificate
if !c.certificateRequiresIssuance(ctx, log, secret, pk, crt) {
c.scheduleRenewal(log, secret)
log.Info("serving certificate already up to date")
return nil
}
// TODO: check to make sure the serving certificate is signed by the CA
certData, err := c.signCertificate(crt, pk, caPK, caCert)
if err != nil {
log.Error(err, "error signing certificate")
return err
}
return c.updateSecret(secret, pkData, caCertData, certData)
}
func (c *controller) scheduleRenewal(log logr.Logger, s *corev1.Secret) {
log = logf.WithResource(log, s)
// read the existing certificate
crtData := readSecretDataKey(s, corev1.TLSCertKey)
if crtData == nil {
log.Info("no certificate data found in secret")
return
}
cert, err := pki.DecodeX509CertificateBytes(crtData)
if err != nil {
log.Error(err, "failed to decode certificate data in secret")
return
}
key, err := controllerpkg.KeyFunc(s)
if err != nil {
log.Error(err, "internal error determining string key for secret")
return
}
// renew 30d before expiry
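// For example, with NotAfter of 2021-03-01 and a current time of
// 2021-01-01, renewIn works out to roughly 29 days.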
renewIn := cert.NotAfter.Add(-1 * time.Hour * 24 * 30).Sub(c.clock.Now())
c.scheduledWorkQueue.Add(key, renewIn)
}
func (c *controller) certificateRequiresIssuance(ctx context.Context, log logr.Logger, secret *corev1.Secret, pk crypto.Signer, crt *cmapi.Certificate) bool {
// read the existing certificate
crtData := readSecretDataKey(secret, corev1.TLSCertKey)
if crtData == nil {
log.Info("issuing webhook certificate")
return true
}
cert, err := pki.DecodeX509CertificateBytes(crtData)
if err != nil {
log.Info("re-issuing webhook certificate")
return true
}
// ensure private key is valid for certificate
matches, err := pki.PublicKeyMatchesCertificate(pk.Public(), cert)
if err != nil {
log.Error(err, "internal error checking certificate, re-issuing certificate")
return true
}
if !matches {
log.Info("certificate does not match private key, re-issuing")
return true
}
// validate the common name is correct
expectedCN := pki.CommonNameForCertificate(crt)
if expectedCN != cert.Subject.CommonName {
log.Info("certificate common name is not as expected, re-issuing")
return true
}
// validate the dns names are correct
expectedDNSNames := pki.DNSNamesForCertificate(crt)
if !util.EqualUnsorted(cert.DNSNames, expectedDNSNames) {
log.Info("certificate dns names are not as expected, re-issuing")
return true
}
// validate the ip addresses are correct
if !util.EqualUnsorted(pki.IPAddressesToString(cert.IPAddresses), crt.Spec.IPAddresses) {
log.Info("certificate ip addresses are not as expected, re-issuing")
return true
}
if c.certificateNeedsRenew(ctx, cert, crt) {
log.Info("certificate requires renewal, re-issuing")
return true
}
return false
}
func readSecretDataKey(secret *corev1.Secret, key string) []byte {
if secret.Data == nil {
return nil
}
d, ok := secret.Data[key]
if !ok {
return nil
}
return d
}
func (c *controller) generatePrivateKey(crt *cmapi.Certificate, secret *corev1.Secret) error {
pk, err := c.generatePrivateKeyBytes(crt)
if err != nil {
return err
}
return c.updateSecret(secret, pk, nil, nil)
}
func (c *controller) selfSignCertificate(crt *cmapi.Certificate, signeeKey crypto.Signer) ([]byte, error) {
return c.signCertificate(crt, signeeKey, signeeKey, nil)
}
func (c *controller) updateSecret(secret *corev1.Secret, pk, ca, crt []byte) error {
secret = secret.DeepCopy()
if secret.Annotations == nil {
secret.Annotations = make(map[string]string)
}
secret.Annotations[cmapi.AllowsInjectionFromSecretAnnotation] = "true"
if secret.Data == nil {
secret.Data = make(map[string][]byte)
}
secret.Data[corev1.TLSPrivateKeyKey] = pk
secret.Data[corev1.TLSCertKey] = crt
secret.Data[cmmeta.TLSCAKey] = ca
_, err := c.kubeClient.CoreV1().Secrets(secret.Namespace).Update(secret)
return err
}
// ensureSecretsExist ensures that the webhook secrets actually exist.
// This is to ensure that the ProcessItem function is actually called with the
// webhook's Secret resource, so that it can be provisioned.
func (c *controller) ensureSecretsExist(ctx context.Context) {
// TODO: we should be able to just not run the controller at all if these
// are not set, but for now we add this hacky check.
if c.webhookNamespace == "" || c.webhookCASecret == "" || c.webhookServingSecret == "" {
return
}
c.ensureSecretExists(ctx, c.webhookCASecret)
c.ensureSecretExists(ctx, c.webhookServingSecret)
}
func (c *controller) ensureSecretExists(ctx context.Context, name string) {
log := logf.FromContext(ctx)
log = log.WithValues(logf.ResourceNameKey, name, logf.ResourceNamespaceKey, c.webhookNamespace, logf.ResourceKindKey, "Secret")
_, err := c.secretLister.Secrets(c.webhookNamespace).Get(name)
if apierrors.IsNotFound(err) {
log.Info("existing Secret does not exist, creating new empty secret")
c.createEmptySecret(ctx, log, name)
return
}
if err != nil {
log.Error(err, "failed to GET existing Secret resource")
return
}
}
func (c *controller) createEmptySecret(ctx context.Context, log logr.Logger, name string) {
s := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: c.webhookNamespace,
Annotations: map[string]string{
cmapi.AllowsInjectionFromSecretAnnotation: "true",
},
},
Data: map[string][]byte{
corev1.TLSCertKey: nil,
corev1.TLSPrivateKeyKey: nil,
cmmeta.TLSCAKey: nil,
},
Type: corev1.SecretTypeTLS,
}
if _, err := c.kubeClient.CoreV1().Secrets(c.webhookNamespace).Create(s); err != nil {
log.Error(err, "failed to create new empty Secret")
}
return
}
const (
selfSignedIssuerName = "cert-manager-webhook-selfsigner"
caIssuerName = "cert-manager-webhook-ca"
caKeyAlgorithm = cmapi.RSAKeyAlgorithm
caKeySize = 2048
caKeyEncoding = cmapi.PKCS1
servingKeyAlgorithm = cmapi.RSAKeyAlgorithm
servingKeySize = 2048
servingKeyEncoding = cmapi.PKCS1
)
func buildCACertificate(secret *corev1.Secret) *cmapi.Certificate {
return &cmapi.Certificate{
ObjectMeta: metav1.ObjectMeta{
Name: secret.Name,
Namespace: secret.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(secret, corev1.SchemeGroupVersion.WithKind("Secret"))},
},
Spec: cmapi.CertificateSpec{
SecretName: secret.Name,
Organization: []string{"cert-manager.system"},
CommonName: "cert-manager.webhook.ca",
// root CA is valid for 5 years as we don't currently handle
// rotating the root properly
Duration: &metav1.Duration{Duration: time.Hour * 24 * 365 * 5},
IssuerRef: cmmeta.ObjectReference{
Name: selfSignedIssuerName,
},
IsCA: true,
KeyAlgorithm: caKeyAlgorithm,
KeySize: caKeySize,
KeyEncoding: caKeyEncoding,
},
}
}
func buildServingCertificate(secret *corev1.Secret, dnsNames []string) *cmapi.Certificate {
return &cmapi.Certificate{
ObjectMeta: metav1.ObjectMeta{
Name: secret.Name,
Namespace: secret.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(secret, corev1.SchemeGroupVersion.WithKind("Secret"))},
},
Spec: cmapi.CertificateSpec{
SecretName: secret.Name,
Organization: []string{"cert-manager.system"},
DNSNames: dnsNames,
Duration: &metav1.Duration{Duration: time.Hour * 24 * 365 * 1},
IssuerRef: cmmeta.ObjectReference{
Name: caIssuerName,
},
KeyAlgorithm: servingKeyAlgorithm,
KeySize: servingKeySize,
KeyEncoding: servingKeyEncoding,
},
}
}
type generatePrivateKeyBytesFn func(*cmapi.Certificate) ([]byte, error)
func generatePrivateKeyBytesImpl(crt *cmapi.Certificate) ([]byte, error) {
signer, err := pki.GeneratePrivateKeyForCertificate(crt)
if err != nil {
return nil, err
}
keyData, err := pki.EncodePrivateKey(signer, crt.Spec.KeyEncoding)
if err != nil {
return nil, err
}
return keyData, nil
}
const (
ControllerName = "webhook-bootstrap"
)
func init() {
controllerpkg.Register(ControllerName, func(ctx *controllerpkg.Context) (controllerpkg.Interface, error) {
ctrl := &controller{}
return controllerpkg.NewBuilder(ctx, ControllerName).
For(ctrl).
With(ctrl.ensureSecretsExist, time.Second*10).
Complete()
})
}
| 1 | 18,980 | If `crt.Spec.CommonName` is not set, and `cert.Subject.CommonName` *is* set, we are not correctly handling it here. | jetstack-cert-manager | go |
@@ -23,8 +23,15 @@ import pytest
from PyQt5.QtCore import Qt
from qutebrowser.mainwindow import prompt as promptmod
-from qutebrowser.utils import usertypes
-
+from qutebrowser.utils import usertypes, objreg
+from qutebrowser.misc import cmdhistory
+
[email protected](autouse=True)
+def test_init(fake_save_manager, data_tmpdir, config_stub):
+ cmdhistory.init()
+ yield
+ objreg.delete('command-history')
+ objreg.delete('fprompt-history')
class TestFileCompletion:
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
import os
import pytest
from PyQt5.QtCore import Qt
from qutebrowser.mainwindow import prompt as promptmod
from qutebrowser.utils import usertypes
class TestFileCompletion:
@pytest.fixture
def get_prompt(self, qtbot, config_stub, key_config_stub):
"""Get a function to display a prompt with a path."""
config_stub.val.bindings.default = {}
def _get_prompt_func(path):
question = usertypes.Question()
question.title = "test"
question.default = path
prompt = promptmod.DownloadFilenamePrompt(question)
qtbot.add_widget(prompt)
with qtbot.wait_signal(prompt._file_model.directoryLoaded):
pass
assert prompt._lineedit.text() == path
return prompt
return _get_prompt_func
@pytest.mark.parametrize('steps, where, subfolder', [
(1, 'next', '..'),
(1, 'prev', 'c'),
(2, 'next', 'a'),
(2, 'prev', 'b'),
])
def test_simple_completion(self, tmp_path, get_prompt, steps, where,
subfolder):
"""Simply trying to tab through items."""
testdir = tmp_path / 'test'
for directory in 'abc':
(testdir / directory).mkdir(parents=True)
prompt = get_prompt(str(testdir) + os.sep)
for _ in range(steps):
prompt.item_focus(where)
assert prompt._lineedit.text() == str((testdir / subfolder).resolve())
def test_backspacing_path(self, qtbot, tmp_path, get_prompt):
"""When we start deleting a path we want to see the subdir."""
testdir = tmp_path / 'test'
for directory in ['bar', 'foo']:
(testdir / directory).mkdir(parents=True)
prompt = get_prompt(str(testdir / 'foo') + os.sep)
# Deleting /f[oo/]
with qtbot.wait_signal(prompt._file_model.directoryLoaded):
for _ in range(3):
qtbot.keyPress(prompt._lineedit, Qt.Key_Backspace)
# For some reason, this isn't always called when using qtbot.keyPress.
prompt._set_fileview_root(prompt._lineedit.text())
# '..' and 'foo' should get completed from 'f'
prompt.item_focus('next')
assert prompt._lineedit.text() == str(tmp_path)
prompt.item_focus('next')
assert prompt._lineedit.text() == str(testdir / 'foo')
# Deleting /[foo]
for _ in range(3):
qtbot.keyPress(prompt._lineedit, Qt.Key_Backspace)
# We should now show / again, so tabbing twice gives us .. -> bar
prompt.item_focus('next')
prompt.item_focus('next')
assert prompt._lineedit.text() == str(testdir / 'bar')
@pytest.mark.parametrize("keys, expected", [
([], ['..', 'bar', 'bat', 'foo']),
([Qt.Key_F], ['..', 'foo']),
([Qt.Key_A], ['..', 'bar', 'bat']),
])
def test_filtering_path(self, qtbot, tmp_path, get_prompt, keys, expected):
testdir = tmp_path / 'test'
for directory in ['bar', 'foo', 'bat']:
(testdir / directory).mkdir(parents=True)
prompt = get_prompt(str(testdir) + os.sep)
for key in keys:
qtbot.keyPress(prompt._lineedit, key)
prompt._set_fileview_root(prompt._lineedit.text())
num_rows = prompt._file_model.rowCount(prompt._file_view.rootIndex())
visible = []
for row in range(num_rows):
parent = prompt._file_model.index(
os.path.dirname(prompt._lineedit.text()))
index = prompt._file_model.index(row, 0, parent)
if not prompt._file_view.isRowHidden(index.row(), index.parent()):
visible.append(index.data())
assert visible == expected
@pytest.mark.linux
def test_root_path(self, get_prompt):
"""With / as path, show root contents."""
prompt = get_prompt('/')
assert prompt._file_model.rootPath() == '/'
| 1 | 26,397 | This isn't a test, so it shouldn't be named `test_init`. You could name it `cmdhistory_init` or so. | qutebrowser-qutebrowser | py |
@@ -292,6 +292,10 @@ class PySparkTask(SparkSubmitTask):
if self.deploy_mode == "cluster":
return [self.run_pickle]
+ @property
+ def pickle_protocol(self):
+ return configuration.get_config().getint(self.spark_version, "pickle-protocol", pickle.DEFAULT_PROTOCOL)
+
def setup(self, conf):
"""
Called by the pyspark_runner with a SparkConf instance that will be used to instantiate the SparkContext | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import logging
import os
import re
import sys
import tempfile
import shutil
import importlib
import tarfile
import inspect
import pickle
from luigi.contrib.external_program import ExternalProgramTask
from luigi import configuration
logger = logging.getLogger('luigi-interface')
class SparkSubmitTask(ExternalProgramTask):
"""
Template task for running a Spark job
Supports running jobs on Spark local, standalone, Mesos or Yarn
See http://spark.apache.org/docs/latest/submitting-applications.html
for more information
"""
# Application (.jar or .py file)
name = None
entry_class = None
app = None
# Only log stderr if spark fails (since stderr is normally quite verbose)
always_log_stderr = False
# Spark applications write its logs into stderr
stream_for_searching_tracking_url = 'stderr'
@property
def tracking_url_pattern(self):
if self.deploy_mode == "cluster":
# in cluster mode the client only receives the application status periodically
return r"tracking URL: (https?://.*)\s"
else:
return r"Bound (?:.*) to (?:.*), and started at (https?://.*)\s"
def app_options(self):
"""
Override this method to map your task parameters to the app's arguments
"""
return []
@property
def pyspark_python(self):
return None
@property
def pyspark_driver_python(self):
return None
@property
def hadoop_user_name(self):
return None
@property
def spark_version(self):
return "spark"
@property
def spark_submit(self):
return configuration.get_config().get(self.spark_version, 'spark-submit', 'spark-submit')
@property
def master(self):
return configuration.get_config().get(self.spark_version, "master", None)
@property
def deploy_mode(self):
return configuration.get_config().get(self.spark_version, "deploy-mode", None)
@property
def jars(self):
return self._list_config(configuration.get_config().get(self.spark_version, "jars", None))
@property
def packages(self):
return self._list_config(configuration.get_config().get(
self.spark_version, "packages", None))
@property
def py_files(self):
return self._list_config(configuration.get_config().get(
self.spark_version, "py-files", None))
@property
def files(self):
return self._list_config(configuration.get_config().get(self.spark_version, "files", None))
@property
def _conf(self):
conf = collections.OrderedDict(self.conf or {})
if self.pyspark_python:
conf['spark.pyspark.python'] = self.pyspark_python
if self.pyspark_driver_python:
conf['spark.pyspark.driver.python'] = self.pyspark_driver_python
return conf
@property
def conf(self):
return self._dict_config(configuration.get_config().get(self.spark_version, "conf", None))
@property
def properties_file(self):
return configuration.get_config().get(self.spark_version, "properties-file", None)
@property
def driver_memory(self):
return configuration.get_config().get(self.spark_version, "driver-memory", None)
@property
def driver_java_options(self):
return configuration.get_config().get(self.spark_version, "driver-java-options", None)
@property
def driver_library_path(self):
return configuration.get_config().get(self.spark_version, "driver-library-path", None)
@property
def driver_class_path(self):
return configuration.get_config().get(self.spark_version, "driver-class-path", None)
@property
def executor_memory(self):
return configuration.get_config().get(self.spark_version, "executor-memory", None)
@property
def driver_cores(self):
return configuration.get_config().get(self.spark_version, "driver-cores", None)
@property
def supervise(self):
return bool(configuration.get_config().get(self.spark_version, "supervise", False))
@property
def total_executor_cores(self):
return configuration.get_config().get(self.spark_version, "total-executor-cores", None)
@property
def executor_cores(self):
return configuration.get_config().get(self.spark_version, "executor-cores", None)
@property
def queue(self):
return configuration.get_config().get(self.spark_version, "queue", None)
@property
def num_executors(self):
return configuration.get_config().get(self.spark_version, "num-executors", None)
@property
def archives(self):
return self._list_config(configuration.get_config().get(
self.spark_version, "archives", None))
@property
def hadoop_conf_dir(self):
return configuration.get_config().get(self.spark_version, "hadoop-conf-dir", None)
def get_environment(self):
env = os.environ.copy()
for prop in ('HADOOP_CONF_DIR', 'HADOOP_USER_NAME'):
var = getattr(self, prop.lower(), None)
if var:
env[prop] = var
return env
def program_environment(self):
return self.get_environment()
def program_args(self):
return self.spark_command() + self.app_command()
def spark_command(self):
command = [self.spark_submit]
command += self._text_arg('--master', self.master)
command += self._text_arg('--deploy-mode', self.deploy_mode)
command += self._text_arg('--name', self.name)
command += self._text_arg('--class', self.entry_class)
command += self._list_arg('--jars', self.jars)
command += self._list_arg('--packages', self.packages)
command += self._list_arg('--py-files', self.py_files)
command += self._list_arg('--files', self.files)
command += self._list_arg('--archives', self.archives)
command += self._dict_arg('--conf', self._conf)
command += self._text_arg('--properties-file', self.properties_file)
command += self._text_arg('--driver-memory', self.driver_memory)
command += self._text_arg('--driver-java-options', self.driver_java_options)
command += self._text_arg('--driver-library-path', self.driver_library_path)
command += self._text_arg('--driver-class-path', self.driver_class_path)
command += self._text_arg('--executor-memory', self.executor_memory)
command += self._text_arg('--driver-cores', self.driver_cores)
command += self._flag_arg('--supervise', self.supervise)
command += self._text_arg('--total-executor-cores', self.total_executor_cores)
command += self._text_arg('--executor-cores', self.executor_cores)
command += self._text_arg('--queue', self.queue)
command += self._text_arg('--num-executors', self.num_executors)
return command
def app_command(self):
if not self.app:
raise NotImplementedError("subclass should define an app (.jar or .py file)")
return [self.app] + self.app_options()
def _list_config(self, config):
if config and isinstance(config, str):
return list(map(lambda x: x.strip(), config.split(',')))
def _dict_config(self, config):
if config and isinstance(config, str):
return dict(map(lambda i: i.split('=', 1), config.split('|')))
def _text_arg(self, name, value):
if value:
return [name, value]
return []
def _list_arg(self, name, value):
if value and isinstance(value, (list, tuple)):
return [name, ','.join(value)]
return []
def _dict_arg(self, name, value):
command = []
if value and isinstance(value, dict):
for prop, value in value.items():
command += [name, '{0}={1}'.format(prop, value)]
return command
def _flag_arg(self, name, value):
if value:
return [name]
return []
class PySparkTask(SparkSubmitTask):
"""
Template task for running an inline PySpark job.
Simply implement the ``main`` method in your subclass.
You can optionally define package names to be distributed to the cluster
with ``py_packages`` (uses luigi's global py-packages configuration by default).
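
A minimal illustrative subclass (names below are made up and assume
``import luigi``)::

    class InlineWordCount(PySparkTask):
        src = luigi.Parameter()

        def main(self, sc, *args):
            counts = (sc.textFile(self.src)
                      .flatMap(lambda line: line.split())
                      .map(lambda word: (word, 1))
                      .reduceByKey(lambda a, b: a + b))
            print(counts.take(10))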
"""
# Path to the pyspark program passed to spark-submit
app = os.path.join(os.path.dirname(__file__), 'pyspark_runner.py')
@property
def name(self):
return self.__class__.__name__
@property
def py_packages(self):
packages = configuration.get_config().get('spark', 'py-packages', None)
if packages:
return map(lambda s: s.strip(), packages.split(','))
@property
def files(self):
if self.deploy_mode == "cluster":
return [self.run_pickle]
def setup(self, conf):
"""
Called by the pyspark_runner with a SparkConf instance that will be used to instantiate the SparkContext
:param conf: SparkConf
"""
def setup_remote(self, sc):
self._setup_packages(sc)
def main(self, sc, *args):
"""
Called by the pyspark_runner with a SparkContext and any arguments returned by ``app_options()``
:param sc: SparkContext
:param args: arguments list
"""
raise NotImplementedError("subclass should define a main method")
def app_command(self):
if self.deploy_mode == "cluster":
pickle_loc = os.path.basename(self.run_pickle)
else:
pickle_loc = self.run_pickle
return [self.app, pickle_loc] + self.app_options()
def run(self):
path_name_fragment = re.sub(r'[^\w]', '_', self.name)
self.run_path = tempfile.mkdtemp(prefix=path_name_fragment)
self.run_pickle = os.path.join(self.run_path, '.'.join([path_name_fragment, 'pickle']))
with open(self.run_pickle, 'wb') as fd:
# Copy module file to run path.
module_path = os.path.abspath(inspect.getfile(self.__class__))
shutil.copy(module_path, os.path.join(self.run_path, '.'))
self._dump(fd)
try:
super(PySparkTask, self).run()
finally:
shutil.rmtree(self.run_path)
def _dump(self, fd):
with self.no_unpicklable_properties():
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace(b'c__main__', b'c' + module_name.encode('ascii'))
fd.write(d)
else:
pickle.dump(self, fd)
def _setup_packages(self, sc):
"""
This method compresses and uploads packages to the cluster
"""
packages = self.py_packages
if not packages:
return
for package in packages:
mod = importlib.import_module(package)
try:
mod_path = mod.__path__[0]
except AttributeError:
mod_path = mod.__file__
tar_path = os.path.join(self.run_path, package + '.tar.gz')
tar = tarfile.open(tar_path, "w:gz")
tar.add(mod_path, os.path.basename(mod_path))
tar.close()
sc.addPyFile(tar_path)
| 1 | 19,931 | why is this pulling from `self.spark_version` config section rather than the `spark` config section (`py-packages` appears to pull from a config section called `spark`) | spotify-luigi | py |
@@ -31,6 +31,18 @@ func TestDefaultOptions(t *testing.T) {
}
}
+func TestOptions_RandomPort(t *testing.T) {
+ opts := &Options{
+ Port: RANDOM_PORT,
+ }
+ processOptions(opts)
+
+ if opts.Port != 0 {
+ t.Fatalf("Process of options should have resolved random port to "+
+ "zero.\nexpected: %d\ngot: %d\n", 0, opts.Port)
+ }
+}
+
func TestConfigFile(t *testing.T) {
golden := &Options{
Host: "apcera.me", | 1 | // Copyright 2013-2014 Apcera Inc. All rights reserved.
package server
import (
"reflect"
"testing"
"time"
)
func TestDefaultOptions(t *testing.T) {
golden := &Options{
Host: DEFAULT_HOST,
Port: DEFAULT_PORT,
MaxConn: DEFAULT_MAX_CONNECTIONS,
PingInterval: DEFAULT_PING_INTERVAL,
MaxPingsOut: DEFAULT_PING_MAX_OUT,
SslTimeout: float64(SSL_TIMEOUT) / float64(time.Second),
AuthTimeout: float64(AUTH_TIMEOUT) / float64(time.Second),
MaxControlLine: MAX_CONTROL_LINE_SIZE,
MaxPayload: MAX_PAYLOAD_SIZE,
ClusterAuthTimeout: float64(AUTH_TIMEOUT) / float64(time.Second),
}
opts := &Options{}
processOptions(opts)
if !reflect.DeepEqual(golden, opts) {
t.Fatalf("Default Options are incorrect.\nexpected: %+v\ngot: %+v",
golden, opts)
}
}
func TestConfigFile(t *testing.T) {
golden := &Options{
Host: "apcera.me",
Port: 4242,
Username: "derek",
Password: "bella",
AuthTimeout: 1.0,
Debug: false,
Trace: true,
Logtime: false,
HTTPPort: 8222,
LogFile: "/tmp/gnatsd.log",
PidFile: "/tmp/gnatsd.pid",
ProfPort: 6543,
}
opts, err := ProcessConfigFile("./configs/test.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v\n", err)
}
if !reflect.DeepEqual(golden, opts) {
t.Fatalf("Options are incorrect.\nexpected: %+v\ngot: %+v",
golden, opts)
}
}
func TestMergeOverrides(t *testing.T) {
golden := &Options{
Host: "apcera.me",
Port: 2222,
Username: "derek",
Password: "spooky",
AuthTimeout: 1.0,
Debug: true,
Trace: true,
Logtime: false,
HTTPPort: DEFAULT_HTTP_PORT,
LogFile: "/tmp/gnatsd.log",
PidFile: "/tmp/gnatsd.pid",
ProfPort: 6789,
}
fopts, err := ProcessConfigFile("./configs/test.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v\n", err)
}
// Overrides via flags
opts := &Options{
Port: 2222,
Password: "spooky",
Debug: true,
HTTPPort: DEFAULT_HTTP_PORT,
ProfPort: 6789,
}
merged := MergeOptions(fopts, opts)
if !reflect.DeepEqual(golden, merged) {
t.Fatalf("Options are incorrect.\nexpected: %+v\ngot: %+v",
golden, merged)
}
}
| 1 | 5,977 | nit: Think it can be one line.. | nats-io-nats-server | go |
@@ -12,10 +12,7 @@
*/
package org.camunda.bpm.application;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.ServiceLoader;
+import java.util.*;
import java.util.concurrent.Callable;
import javax.script.ScriptEngine; | 1 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.application;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.concurrent.Callable;
import javax.script.ScriptEngine;
import org.camunda.bpm.application.impl.DefaultElResolverLookup;
import org.camunda.bpm.application.impl.ProcessApplicationLogger;
import org.camunda.bpm.application.impl.ProcessApplicationScriptEnvironment;
import org.camunda.bpm.container.RuntimeContainerDelegate;
import org.camunda.bpm.engine.delegate.ExecutionListener;
import org.camunda.bpm.engine.delegate.TaskListener;
import org.camunda.bpm.engine.impl.ProcessEngineLogger;
import org.camunda.bpm.engine.impl.javax.el.BeanELResolver;
import org.camunda.bpm.engine.impl.javax.el.ELResolver;
import org.camunda.bpm.engine.impl.scripting.ExecutableScript;
import org.camunda.bpm.engine.impl.util.ClassLoaderUtil;
import org.camunda.bpm.engine.impl.variable.serializer.VariableSerializers;
import org.camunda.bpm.engine.repository.DeploymentBuilder;
/**
* @author Daniel Meyer
*
*/
public abstract class AbstractProcessApplication implements ProcessApplicationInterface {
private static ProcessApplicationLogger LOG = ProcessEngineLogger.PROCESS_APPLICATION_LOGGER;
protected ELResolver processApplicationElResolver;
protected BeanELResolver processApplicationBeanElResolver;
protected ProcessApplicationScriptEnvironment processApplicationScriptEnvironment;
protected VariableSerializers variableSerializers;
protected boolean isDeployed = false;
// deployment /////////////////////////////////////////////////////
public void deploy() {
if(isDeployed) {
LOG.alreadyDeployed();
}
else {
// deploy the application
RuntimeContainerDelegate.INSTANCE.get().deployProcessApplication(this);
isDeployed = true;
}
}
public void undeploy() {
if(!isDeployed) {
LOG.notDeployed();
} else {
// delegate stopping of the process application to the runtime container.
RuntimeContainerDelegate.INSTANCE.get().undeployProcessApplication(this);
isDeployed = false;
}
}
public void createDeployment(String processArchiveName, DeploymentBuilder deploymentBuilder) {
// default implementation does nothing
}
// Runtime ////////////////////////////////////////////
public String getName() {
Class<? extends AbstractProcessApplication> processApplicationClass = getClass();
String name = null;
ProcessApplication annotation = processApplicationClass.getAnnotation(ProcessApplication.class);
if(annotation != null) {
name = annotation.value();
if (name == null || name.length() == 0) {
name = annotation.name();
}
}
if(name == null || name.length()==0) {
name = autodetectProcessApplicationName();
}
return name;
}
/**
* Override this method to autodetect an application name in case the
* {@link ProcessApplication} annotation was used but without a parameter.
*/
protected abstract String autodetectProcessApplicationName();
public <T> T execute(Callable<T> callable) throws ProcessApplicationExecutionException {
ClassLoader originalClassloader = ClassLoaderUtil.getContextClassloader();
ClassLoader processApplicationClassloader = getProcessApplicationClassloader();
try {
ClassLoaderUtil.setContextClassloader(processApplicationClassloader);
return callable.call();
}
catch(Exception e) {
throw LOG.processApplicationExecutionException(e);
}
finally {
ClassLoaderUtil.setContextClassloader(originalClassloader);
}
}
public <T> T execute(Callable<T> callable, InvocationContext invocationContext) throws ProcessApplicationExecutionException {
// allows to hook into the invocation
return execute(callable);
}
public ClassLoader getProcessApplicationClassloader() {
// the default implementation uses the classloader that loaded
// the application-provided subclass of this class.
return ClassLoaderUtil.getClassloader(getClass());
}
public ProcessApplicationInterface getRawObject() {
return this;
}
public Map<String, String> getProperties() {
return Collections.<String, String>emptyMap();
}
public ELResolver getElResolver() {
if(processApplicationElResolver == null) {
synchronized (this) {
if(processApplicationElResolver == null) {
processApplicationElResolver = initProcessApplicationElResolver();
}
}
}
return processApplicationElResolver;
}
public BeanELResolver getBeanElResolver() {
if(processApplicationBeanElResolver == null) {
synchronized (this) {
if(processApplicationBeanElResolver == null) {
processApplicationBeanElResolver = new BeanELResolver();
}
}
}
return processApplicationBeanElResolver;
}
/**
* <p>Initializes the process application provided ElResolver. This implementation uses the
* Java SE {@link ServiceLoader} facilities for resolving implementations of {@link ProcessApplicationElResolver}.</p>
*
* <p>If you want to provide a custom implementation in your application, place a file named
* <code>META-INF/org.camunda.bpm.application.ProcessApplicationElResolver</code> inside your application
* which contains the fully qualified classname of your implementation. Or simply override this method.</p>
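*
* <p>For example (illustrative only; <code>MyElResolver</code> is a hypothetical class in your application):</p>
* <pre>
* &#64;Override
* protected ELResolver initProcessApplicationElResolver() {
*   return new MyElResolver();
* }
* </pre>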
*
* @return the process application ElResolver.
*/
protected ELResolver initProcessApplicationElResolver() {
return DefaultElResolverLookup.lookupResolver(this);
}
public ExecutionListener getExecutionListener() {
return null;
}
public TaskListener getTaskListener() {
return null;
}
/**
* see {@link ProcessApplicationScriptEnvironment#getScriptEngineForName(String, boolean)}
*/
public ScriptEngine getScriptEngineForName(String name, boolean cache) {
return getProcessApplicationScriptEnvironment().getScriptEngineForName(name, cache);
}
/**
* see {@link ProcessApplicationScriptEnvironment#getEnvironmentScripts()}
*/
public Map<String, List<ExecutableScript>> getEnvironmentScripts() {
return getProcessApplicationScriptEnvironment().getEnvironmentScripts();
}
protected ProcessApplicationScriptEnvironment getProcessApplicationScriptEnvironment() {
if (processApplicationScriptEnvironment == null) {
synchronized (this) {
if (processApplicationScriptEnvironment == null) {
processApplicationScriptEnvironment = new ProcessApplicationScriptEnvironment(this);
}
}
}
return processApplicationScriptEnvironment;
}
public VariableSerializers getVariableSerializers() {
return variableSerializers;
}
public void setVariableSerializers(VariableSerializers variableSerializers) {
this.variableSerializers = variableSerializers;
}
}
| 1 | 8,992 | please inline imports | camunda-camunda-bpm-platform | java |
@@ -85,8 +85,7 @@ func (p *setnsProcess) start() (err error) {
if err = p.execSetns(); err != nil {
return newSystemErrorWithCause(err, "executing setns process")
}
- // We can't join cgroups if we're in a rootless container.
- if !p.config.Rootless && len(p.cgroupPaths) > 0 {
+ if len(p.cgroupPaths) > 0 {
if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil {
return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid())
} | 1 | // +build linux
package libcontainer
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall" // only for Signal
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/utils"
"golang.org/x/sys/unix"
)
type parentProcess interface {
// pid returns the pid for the running process.
pid() int
// start starts the process execution.
start() error
// send a SIGKILL to the process and wait for the exit.
terminate() error
// wait waits on the process returning the process state.
wait() (*os.ProcessState, error)
// startTime returns the process start time.
startTime() (uint64, error)
signal(os.Signal) error
externalDescriptors() []string
setExternalDescriptors(fds []string)
}
type setnsProcess struct {
cmd *exec.Cmd
parentPipe *os.File
childPipe *os.File
cgroupPaths map[string]string
intelRdtPath string
config *initConfig
fds []string
process *Process
bootstrapData io.Reader
}
func (p *setnsProcess) startTime() (uint64, error) {
stat, err := system.Stat(p.pid())
return stat.StartTime, err
}
func (p *setnsProcess) signal(sig os.Signal) error {
s, ok := sig.(syscall.Signal)
if !ok {
return errors.New("os: unsupported signal type")
}
return unix.Kill(p.pid(), s)
}
func (p *setnsProcess) start() (err error) {
defer p.parentPipe.Close()
err = p.cmd.Start()
p.childPipe.Close()
if err != nil {
return newSystemErrorWithCause(err, "starting setns process")
}
if p.bootstrapData != nil {
if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
}
}
if err = p.execSetns(); err != nil {
return newSystemErrorWithCause(err, "executing setns process")
}
// We can't join cgroups if we're in a rootless container.
if !p.config.Rootless && len(p.cgroupPaths) > 0 {
if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil {
return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid())
}
}
if p.intelRdtPath != "" {
// if Intel RDT "resource control" filesystem path exists
_, err := os.Stat(p.intelRdtPath)
if err == nil {
if err := intelrdt.WriteIntelRdtTasks(p.intelRdtPath, p.pid()); err != nil {
return newSystemErrorWithCausef(err, "adding pid %d to Intel RDT resource control filesystem", p.pid())
}
}
}
// set rlimits, this has to be done here because we lose permissions
// to raise the limits once we enter a user-namespace
if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
return newSystemErrorWithCause(err, "setting rlimits for process")
}
if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
return newSystemErrorWithCause(err, "writing config to pipe")
}
ierr := parseSync(p.parentPipe, func(sync *syncT) error {
switch sync.Type {
case procReady:
// This shouldn't happen.
panic("unexpected procReady in setns")
case procHooks:
// This shouldn't happen.
panic("unexpected procHooks in setns")
default:
return newSystemError(fmt.Errorf("invalid JSON payload from child"))
}
})
if err := unix.Shutdown(int(p.parentPipe.Fd()), unix.SHUT_WR); err != nil {
return newSystemErrorWithCause(err, "calling shutdown on init pipe")
}
// Must be done after Shutdown so the child will exit and we can wait for it.
if ierr != nil {
p.wait()
return ierr
}
return nil
}
// execSetns runs the process that executes C code to perform the setns calls.
// Because setns support requires the C process to fork off a child and perform
// the setns before the Go runtime boots, we wait on the process to die and
// receive the child's pid over the provided pipe.
func (p *setnsProcess) execSetns() error {
status, err := p.cmd.Process.Wait()
if err != nil {
p.cmd.Wait()
return newSystemErrorWithCause(err, "waiting on setns process to finish")
}
if !status.Success() {
p.cmd.Wait()
return newSystemError(&exec.ExitError{ProcessState: status})
}
var pid *pid
if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
p.cmd.Wait()
return newSystemErrorWithCause(err, "reading pid from init pipe")
}
// Clean up the zombie parent process
firstChildProcess, err := os.FindProcess(pid.PidFirstChild)
if err != nil {
return err
}
// Ignore the error in case the child has already been reaped for any reason
_, _ = firstChildProcess.Wait()
process, err := os.FindProcess(pid.Pid)
if err != nil {
return err
}
p.cmd.Process = process
p.process.ops = p
return nil
}
// terminate sends a SIGKILL to the forked process for the setns routine then waits to
// avoid the process becoming a zombie.
func (p *setnsProcess) terminate() error {
if p.cmd.Process == nil {
return nil
}
err := p.cmd.Process.Kill()
if _, werr := p.wait(); err == nil {
err = werr
}
return err
}
func (p *setnsProcess) wait() (*os.ProcessState, error) {
err := p.cmd.Wait()
// Return actual ProcessState even on Wait error
return p.cmd.ProcessState, err
}
func (p *setnsProcess) pid() int {
return p.cmd.Process.Pid
}
func (p *setnsProcess) externalDescriptors() []string {
return p.fds
}
func (p *setnsProcess) setExternalDescriptors(newFds []string) {
p.fds = newFds
}
type initProcess struct {
cmd *exec.Cmd
parentPipe *os.File
childPipe *os.File
config *initConfig
manager cgroups.Manager
intelRdtManager intelrdt.Manager
container *linuxContainer
fds []string
process *Process
bootstrapData io.Reader
sharePidns bool
}
func (p *initProcess) pid() int {
return p.cmd.Process.Pid
}
func (p *initProcess) externalDescriptors() []string {
return p.fds
}
// execSetns runs the process that executes C code to perform the setns calls.
// Because setns support requires the C process to fork off a child and perform
// the setns before the Go runtime boots, we wait on the process to die and
// receive the child's pid over the provided pipe.
// This is called by the initProcess.start function.
func (p *initProcess) execSetns() error {
status, err := p.cmd.Process.Wait()
if err != nil {
p.cmd.Wait()
return err
}
if !status.Success() {
p.cmd.Wait()
return &exec.ExitError{ProcessState: status}
}
var pid *pid
if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
p.cmd.Wait()
return err
}
// Clean up the zombie parent process
firstChildProcess, err := os.FindProcess(pid.PidFirstChild)
if err != nil {
return err
}
// Ignore the error in case the child has already been reaped for any reason
_, _ = firstChildProcess.Wait()
process, err := os.FindProcess(pid.Pid)
if err != nil {
return err
}
p.cmd.Process = process
p.process.ops = p
return nil
}
func (p *initProcess) start() error {
defer p.parentPipe.Close()
err := p.cmd.Start()
p.process.ops = p
p.childPipe.Close()
if err != nil {
p.process.ops = nil
return newSystemErrorWithCause(err, "starting init process command")
}
// Do this before syncing with child so that no children can escape the
// cgroup. We don't need to worry about not doing this and not being root
// because we'd be using the rootless cgroup manager in that case.
if err := p.manager.Apply(p.pid()); err != nil {
return newSystemErrorWithCause(err, "applying cgroup configuration for process")
}
if p.intelRdtManager != nil {
if err := p.intelRdtManager.Apply(p.pid()); err != nil {
return newSystemErrorWithCause(err, "applying Intel RDT configuration for process")
}
}
defer func() {
if err != nil {
// TODO: should not be the responsibility to call here
p.manager.Destroy()
if p.intelRdtManager != nil {
p.intelRdtManager.Destroy()
}
}
}()
if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
}
if err := p.execSetns(); err != nil {
return newSystemErrorWithCause(err, "running exec setns process for init")
}
// Save the standard descriptor names before the container process
// can potentially move them (e.g., via dup2()). If we don't do this now,
// we won't know at checkpoint time which file descriptor to look up.
fds, err := getPipeFds(p.pid())
if err != nil {
return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", p.pid())
}
p.setExternalDescriptors(fds)
if err := p.createNetworkInterfaces(); err != nil {
return newSystemErrorWithCause(err, "creating network interfaces")
}
if err := p.sendConfig(); err != nil {
return newSystemErrorWithCause(err, "sending config to init process")
}
var (
sentRun bool
sentResume bool
)
ierr := parseSync(p.parentPipe, func(sync *syncT) error {
switch sync.Type {
case procReady:
// set rlimits, this has to be done here because we lose permissions
// to raise the limits once we enter a user-namespace
if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
return newSystemErrorWithCause(err, "setting rlimits for ready process")
}
// call prestart hooks
if !p.config.Config.Namespaces.Contains(configs.NEWNS) {
// Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
if err := p.manager.Set(p.config.Config); err != nil {
return newSystemErrorWithCause(err, "setting cgroup config for ready process")
}
if p.intelRdtManager != nil {
if err := p.intelRdtManager.Set(p.config.Config); err != nil {
return newSystemErrorWithCause(err, "setting Intel RDT config for ready process")
}
}
if p.config.Config.Hooks != nil {
s := configs.HookState{
Version: p.container.config.Version,
ID: p.container.id,
Pid: p.pid(),
Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"),
}
for i, hook := range p.config.Config.Hooks.Prestart {
if err := hook.Run(s); err != nil {
return newSystemErrorWithCausef(err, "running prestart hook %d", i)
}
}
}
}
// Sync with child.
if err := writeSync(p.parentPipe, procRun); err != nil {
return newSystemErrorWithCause(err, "writing syncT 'run'")
}
sentRun = true
case procHooks:
// Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
if err := p.manager.Set(p.config.Config); err != nil {
return newSystemErrorWithCause(err, "setting cgroup config for procHooks process")
}
if p.intelRdtManager != nil {
if err := p.intelRdtManager.Set(p.config.Config); err != nil {
return newSystemErrorWithCause(err, "setting Intel RDT config for procHooks process")
}
}
if p.config.Config.Hooks != nil {
s := configs.HookState{
Version: p.container.config.Version,
ID: p.container.id,
Pid: p.pid(),
Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"),
}
for i, hook := range p.config.Config.Hooks.Prestart {
if err := hook.Run(s); err != nil {
return newSystemErrorWithCausef(err, "running prestart hook %d", i)
}
}
}
// Sync with child.
if err := writeSync(p.parentPipe, procResume); err != nil {
return newSystemErrorWithCause(err, "writing syncT 'resume'")
}
sentResume = true
default:
return newSystemError(fmt.Errorf("invalid JSON payload from child"))
}
return nil
})
if !sentRun {
return newSystemErrorWithCause(ierr, "container init")
}
if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume {
return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process"))
}
if err := unix.Shutdown(int(p.parentPipe.Fd()), unix.SHUT_WR); err != nil {
return newSystemErrorWithCause(err, "shutting down init pipe")
}
// Must be done after Shutdown so the child will exit and we can wait for it.
if ierr != nil {
p.wait()
return ierr
}
return nil
}
func (p *initProcess) wait() (*os.ProcessState, error) {
err := p.cmd.Wait()
if err != nil {
return p.cmd.ProcessState, err
}
// we should kill all processes in the cgroup when init has died if we use the host PID namespace
if p.sharePidns {
signalAllProcesses(p.manager, unix.SIGKILL)
}
return p.cmd.ProcessState, nil
}
func (p *initProcess) terminate() error {
if p.cmd.Process == nil {
return nil
}
err := p.cmd.Process.Kill()
if _, werr := p.wait(); err == nil {
err = werr
}
return err
}
func (p *initProcess) startTime() (uint64, error) {
stat, err := system.Stat(p.pid())
return stat.StartTime, err
}
func (p *initProcess) sendConfig() error {
// send the config to the container's init process. We don't use JSON Encode
// here because there might be a problem with the JSON decoder in some cases, see:
// https://github.com/docker/docker/issues/14203#issuecomment-174177790
return utils.WriteJSON(p.parentPipe, p.config)
}
func (p *initProcess) createNetworkInterfaces() error {
for _, config := range p.config.Config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
n := &network{
Network: *config,
}
if err := strategy.create(n, p.pid()); err != nil {
return err
}
p.config.Networks = append(p.config.Networks, n)
}
return nil
}
func (p *initProcess) signal(sig os.Signal) error {
s, ok := sig.(syscall.Signal)
if !ok {
return errors.New("os: unsupported signal type")
}
return unix.Kill(p.pid(), s)
}
func (p *initProcess) setExternalDescriptors(newFds []string) {
p.fds = newFds
}
func getPipeFds(pid int) ([]string, error) {
fds := make([]string, 3)
dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd")
for i := 0; i < 3; i++ {
// XXX: This breaks if the path is not a valid symlink (which can
// happen in certain particularly unlucky mount namespace setups).
f := filepath.Join(dirPath, strconv.Itoa(i))
target, err := os.Readlink(f)
if err != nil {
// Ignore permission errors, for rootless containers and other
// non-dumpable processes. If we can't get the fd for a particular
// file, there's not much we can do.
if os.IsPermission(err) {
continue
}
return fds, err
}
fds[i] = target
}
return fds, nil
}
// InitializeIO creates pipes for use with the process's stdio and returns the
// opposite side for each. Do not use this if you want to have a pseudoterminal
// set up for you by libcontainer (TODO: fix that too).
// TODO: This is mostly unnecessary, and should be handled by clients.
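// An illustrative (not authoritative) call-site sketch from a client package:
//
//	p := &libcontainer.Process{Args: []string{"/bin/true"}}
//	parentIO, err := p.InitializeIO(0, 0) // rootuid, rootgid of the container's user namespace; check err
//	// on success the parent writes to parentIO.Stdin and reads from parentIO.Stdout / parentIO.Stderr,
//	// while p.Stdin, p.Stdout and p.Stderr hold the pipe ends handed to the child.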
func (p *Process) InitializeIO(rootuid, rootgid int) (i *IO, err error) {
var fds []uintptr
i = &IO{}
// cleanup in case of an error
defer func() {
if err != nil {
for _, fd := range fds {
unix.Close(int(fd))
}
}
}()
// STDIN
r, w, err := os.Pipe()
if err != nil {
return nil, err
}
fds = append(fds, r.Fd(), w.Fd())
p.Stdin, i.Stdin = r, w
// STDOUT
if r, w, err = os.Pipe(); err != nil {
return nil, err
}
fds = append(fds, r.Fd(), w.Fd())
p.Stdout, i.Stdout = w, r
// STDERR
if r, w, err = os.Pipe(); err != nil {
return nil, err
}
fds = append(fds, r.Fd(), w.Fd())
p.Stderr, i.Stderr = w, r
// change ownership of the pipes in case we are in a user namespace
for _, fd := range fds {
if err := unix.Fchown(int(fd), rootuid, rootgid); err != nil {
return nil, err
}
}
return i, nil
}
| 1 | 15,148 | This check is still correct in some cases, but I guess erroring out is acceptable if someone explicitly asked for an impossible cgroup configuration (now that we could in principle nest things). I would like to see a test for this though. | opencontainers-runc | go |
@@ -34,6 +34,7 @@ var skip = map[string]string{
"task_per_line": "join produces inconsistent/racy results when table schemas do not match (https://github.com/influxdata/flux/issues/855)",
"rowfn_with_import": "imported libraries are not visible in user-defined functions (https://github.com/influxdata/flux/issues/1000)",
"string_trim": "cannot reference a package function from within a row function",
+ "integral_columns": "aggregates changed to operate on just a single columnm.",
}
var querier = querytest.NewQuerier() | 1 | package stdlib_test
import (
"bytes"
"context"
"strings"
"testing"
"github.com/influxdata/flux"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/lang"
"github.com/influxdata/flux/querytest"
"github.com/influxdata/flux/stdlib"
)
func init() {
flux.FinalizeBuiltIns()
}
// list of end-to-end tests that are meant to be skipped and not run for various reasons
var skip = map[string]string{
"string_max": "error: invalid use of function: *functions.MaxSelector has no implementation for type string (https://github.com/influxdata/platform/issues/224)",
"null_as_value": "null not supported as value in influxql (https://github.com/influxdata/platform/issues/353)",
"string_interp": "string interpolation not working as expected in flux (https://github.com/influxdata/platform/issues/404)",
"to": "to functions are not supported in the testing framework (https://github.com/influxdata/flux/issues/77)",
"covariance_missing_column_1": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"covariance_missing_column_2": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_before_rename": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_referenced": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"drop_non_existent": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"keep_non_existent": "need to support known errors in new test framework (https://github.com/influxdata/flux/issues/536)",
"yield": "yield requires special test case (https://github.com/influxdata/flux/issues/535)",
"task_per_line": "join produces inconsistent/racy results when table schemas do not match (https://github.com/influxdata/flux/issues/855)",
"rowfn_with_import": "imported libraries are not visible in user-defined functions (https://github.com/influxdata/flux/issues/1000)",
"string_trim": "cannot reference a package function from within a row function",
}
var querier = querytest.NewQuerier()
func TestFluxEndToEnd(t *testing.T) {
runEndToEnd(t, querier, stdlib.FluxTestPackages)
}
func BenchmarkFluxEndToEnd(b *testing.B) {
benchEndToEnd(b, querier, stdlib.FluxTestPackages)
}
func runEndToEnd(t *testing.T, querier *querytest.Querier, pkgs []*ast.Package) {
for _, pkg := range pkgs {
pkg := pkg.Copy().(*ast.Package)
name := pkg.Files[0].Name
t.Run(name, func(t *testing.T) {
n := strings.TrimSuffix(name, ".flux")
if reason, ok := skip[n]; ok {
t.Skip(reason)
}
testFlux(t, querier, pkg)
})
}
}
func benchEndToEnd(b *testing.B, querier *querytest.Querier, pkgs []*ast.Package) {
for _, pkg := range pkgs {
pkg := pkg.Copy().(*ast.Package)
name := pkg.Files[0].Name
b.Run(name, func(b *testing.B) {
n := strings.TrimSuffix(name, ".flux")
if reason, ok := skip[n]; ok {
b.Skip(reason)
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
testFlux(b, querier, pkg)
}
})
}
}
func testFlux(t testing.TB, querier *querytest.Querier, pkg *ast.Package) {
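// TestingRunCalls appends a generated file that invokes testing.run for each
// test case defined in the package, turning the test package into an executable query.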
pkg.Files = append(pkg.Files, stdlib.TestingRunCalls(pkg))
c := lang.ASTCompiler{AST: pkg}
// testing.run
doTestRun(t, querier, c)
// testing.inspect
if t.Failed() {
// Rerun the test case using testing.inspect
pkg.Files[len(pkg.Files)-1] = stdlib.TestingInspectCalls(pkg)
c := lang.ASTCompiler{AST: pkg}
doTestInspect(t, querier, c)
}
}
func doTestRun(t testing.TB, querier *querytest.Querier, c flux.Compiler) {
r, err := querier.C.Query(context.Background(), c)
if err != nil {
t.Fatalf("unexpected error while executing testing.run: %v", err)
}
defer r.Done()
result, ok := <-r.Ready()
if !ok {
t.Fatalf("unexpected error retrieving testing.run result: %s", r.Err())
}
// Read all results checking for errors
for _, res := range result {
err := res.Tables().Do(func(flux.Table) error {
return nil
})
if err != nil {
t.Error(err)
}
}
}
func doTestInspect(t testing.TB, querier *querytest.Querier, c flux.Compiler) {
r, err := querier.C.Query(context.Background(), c)
if err != nil {
t.Fatalf("unexpected error while executing testing.inspect: %v", err)
}
defer r.Done()
result, ok := <-r.Ready()
if !ok {
t.Fatalf("unexpected error retrieving testing.inspect result: %s", r.Err())
}
// Read all results and format them
var out bytes.Buffer
for _, res := range result {
if err := execute.FormatResult(&out, res); err != nil {
t.Error(err)
}
}
t.Log(out.String())
}
| 1 | 10,063 | Should we update integral to operate on a single column as well? | influxdata-flux | go |
@@ -91,7 +91,9 @@ static std::atomic<int> num_uvm_allocations(0);
} // namespace
void DeepCopyCuda(void *dst, const void *src, size_t n) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemcpy(dst, src, n, cudaMemcpyDefault));
+ cudaStream_t s = cuda_get_deep_copy_stream();
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, s));
}
void DeepCopyAsyncCuda(const Cuda &instance, void *dst, const void *src, | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Macros.hpp>
#ifdef KOKKOS_ENABLE_CUDA
#include <Kokkos_Core.hpp>
#include <Kokkos_Cuda.hpp>
#include <Kokkos_CudaSpace.hpp>
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <algorithm>
#include <atomic>
//#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_MemorySpace.hpp>
#include <impl/Kokkos_Tools.hpp>
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
cudaStream_t Kokkos::Impl::cuda_get_deep_copy_stream() {
static cudaStream_t s = nullptr;
if (s == nullptr) {
cudaStreamCreate(&s);
}
return s;
}
const std::unique_ptr<Kokkos::Cuda> &Kokkos::Impl::cuda_get_deep_copy_space(
bool initialize) {
static std::unique_ptr<Cuda> space = nullptr;
if (!space && initialize)
space = std::make_unique<Cuda>(Kokkos::Impl::cuda_get_deep_copy_stream());
return space;
}
namespace Kokkos {
namespace Impl {
namespace {
static std::atomic<int> num_uvm_allocations(0);
} // namespace
void DeepCopyCuda(void *dst, const void *src, size_t n) {
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemcpy(dst, src, n, cudaMemcpyDefault));
}
void DeepCopyAsyncCuda(const Cuda &instance, void *dst, const void *src,
size_t n) {
KOKKOS_IMPL_CUDA_SAFE_CALL(
cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, instance.cuda_stream()));
}
void DeepCopyAsyncCuda(void *dst, const void *src, size_t n) {
cudaStream_t s = cuda_get_deep_copy_stream();
KOKKOS_IMPL_CUDA_SAFE_CALL(
cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, s));
Impl::cuda_stream_synchronize(
s,
Kokkos::Tools::Experimental::SpecialSynchronizationCases::
DeepCopyResourceSynchronization,
"Kokkos::Impl::DeepCopyAsyncCuda: Deep Copy Stream Sync");
}
} // namespace Impl
} // namespace Kokkos
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
namespace Kokkos {
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
KOKKOS_DEPRECATED void CudaSpace::access_error() {
const std::string msg(
"Kokkos::CudaSpace::access_error attempt to execute Cuda function from "
"non-Cuda space");
Kokkos::Impl::throw_runtime_exception(msg);
}
KOKKOS_DEPRECATED void CudaSpace::access_error(const void *const) {
const std::string msg(
"Kokkos::CudaSpace::access_error attempt to execute Cuda function from "
"non-Cuda space");
Kokkos::Impl::throw_runtime_exception(msg);
}
#endif
/*--------------------------------------------------------------------------*/
bool CudaUVMSpace::available() {
#if defined(CUDA_VERSION) && !defined(__APPLE__)
enum : bool { UVM_available = true };
#else
enum : bool { UVM_available = false };
#endif
return UVM_available;
}
/*--------------------------------------------------------------------------*/
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
int CudaUVMSpace::number_of_allocations() {
return Kokkos::Impl::num_uvm_allocations.load();
}
#endif
#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
// The purpose of the following variable is to allow a state-based choice
// for pinning UVM allocations to the CPU. For now this is considered
// an experimental debugging capability - with the potential to work around
// some CUDA issues.
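// An illustrative way to toggle it from application code (a sketch, not an
// official API), using the C-linkage helpers defined further below in this file:
//   kokkos_impl_cuda_set_pin_uvm_to_host(true);  // advise subsequent UVM allocations toward the host
//   kokkos_impl_cuda_set_pin_uvm_to_host(false); // back to the default placement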
bool CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = false;
bool CudaUVMSpace::cuda_pin_uvm_to_host() {
return CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v;
}
void CudaUVMSpace::cuda_set_pin_uvm_to_host(bool val) {
CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = val;
}
#endif
} // namespace Kokkos
#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
bool kokkos_impl_cuda_pin_uvm_to_host() {
return Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host();
}
void kokkos_impl_cuda_set_pin_uvm_to_host(bool val) {
Kokkos::CudaUVMSpace::cuda_set_pin_uvm_to_host(val);
}
#endif
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
namespace Kokkos {
CudaSpace::CudaSpace() : m_device(Kokkos::Cuda().cuda_device()) {}
CudaUVMSpace::CudaUVMSpace() : m_device(Kokkos::Cuda().cuda_device()) {}
CudaHostPinnedSpace::CudaHostPinnedSpace() {}
int memory_threshold_g = 40000; // 40 kB
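// Allocations of at least memory_threshold_g bytes take the cudaMallocAsync /
// cudaFreeAsync path below (when KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC is defined
// and CUDART_VERSION >= 11020); smaller allocations use plain cudaMalloc / cudaFree.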
//==============================================================================
// <editor-fold desc="allocate()"> {{{1
void *CudaSpace::allocate(const size_t arg_alloc_size) const {
return allocate("[unlabeled]", arg_alloc_size);
}
void *CudaSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size) const {
return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
}
void *CudaSpace::impl_allocate(
const char *arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size,
const Kokkos::Tools::SpaceHandle arg_handle) const {
void *ptr = nullptr;
#ifndef CUDART_VERSION
#error CUDART_VERSION undefined!
#elif (defined(KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC) && CUDART_VERSION >= 11020)
cudaError_t error_code;
if (arg_alloc_size >= memory_threshold_g) {
error_code = cudaMallocAsync(&ptr, arg_alloc_size, 0);
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
} else {
error_code = cudaMalloc(&ptr, arg_alloc_size);
}
#else
auto error_code = cudaMalloc(&ptr, arg_alloc_size);
#endif
if (error_code != cudaSuccess) { // TODO tag as unlikely branch
cudaGetLastError(); // This is the only way to clear the last error, which
// we should do here since we're turning it into an
// exception here
throw Experimental::CudaRawMemoryAllocationFailure(
arg_alloc_size, error_code,
Experimental::RawMemoryAllocationFailure::AllocationMechanism::
CudaMalloc);
}
if (Kokkos::Profiling::profileLibraryLoaded()) {
const size_t reported_size =
(arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
}
return ptr;
}
void *CudaUVMSpace::allocate(const size_t arg_alloc_size) const {
return allocate("[unlabeled]", arg_alloc_size);
}
void *CudaUVMSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size) const {
return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
}
void *CudaUVMSpace::impl_allocate(
const char *arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size,
const Kokkos::Tools::SpaceHandle arg_handle) const {
void *ptr = nullptr;
Cuda::impl_static_fence(
"Kokkos::CudaUVMSpace::impl_allocate: Pre UVM Allocation");
if (arg_alloc_size > 0) {
Kokkos::Impl::num_uvm_allocations++;
auto error_code =
cudaMallocManaged(&ptr, arg_alloc_size, cudaMemAttachGlobal);
#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
if (Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host())
cudaMemAdvise(ptr, arg_alloc_size, cudaMemAdviseSetPreferredLocation,
cudaCpuDeviceId);
#endif
if (error_code != cudaSuccess) { // TODO tag as unlikely branch
cudaGetLastError(); // This is the only way to clear the last error,
// which we should do here since we're turning it
// into an exception here
throw Experimental::CudaRawMemoryAllocationFailure(
arg_alloc_size, error_code,
Experimental::RawMemoryAllocationFailure::AllocationMechanism::
CudaMallocManaged);
}
}
Cuda::impl_static_fence(
"Kokkos::CudaUVMSpace::impl_allocate: Post UVM Allocation");
if (Kokkos::Profiling::profileLibraryLoaded()) {
const size_t reported_size =
(arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
}
return ptr;
}
void *CudaHostPinnedSpace::allocate(const size_t arg_alloc_size) const {
return allocate("[unlabeled]", arg_alloc_size);
}
void *CudaHostPinnedSpace::allocate(const char *arg_label,
const size_t arg_alloc_size,
const size_t arg_logical_size) const {
return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
}
void *CudaHostPinnedSpace::impl_allocate(
const char *arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size,
const Kokkos::Tools::SpaceHandle arg_handle) const {
void *ptr = nullptr;
auto error_code = cudaHostAlloc(&ptr, arg_alloc_size, cudaHostAllocDefault);
if (error_code != cudaSuccess) { // TODO tag as unlikely branch
cudaGetLastError(); // This is the only way to clear the last error, which
// we should do here since we're turning it into an
// exception here
throw Experimental::CudaRawMemoryAllocationFailure(
arg_alloc_size, error_code,
Experimental::RawMemoryAllocationFailure::AllocationMechanism::
CudaHostAlloc);
}
if (Kokkos::Profiling::profileLibraryLoaded()) {
const size_t reported_size =
(arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
}
return ptr;
}
// </editor-fold> end allocate() }}}1
//==============================================================================
void CudaSpace::deallocate(void *const arg_alloc_ptr,
const size_t arg_alloc_size) const {
deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
}
void CudaSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
const size_t arg_alloc_size,
const size_t arg_logical_size) const {
impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
}
void CudaSpace::impl_deallocate(
const char *arg_label, void *const arg_alloc_ptr,
const size_t arg_alloc_size, const size_t arg_logical_size,
const Kokkos::Tools::SpaceHandle arg_handle) const {
if (Kokkos::Profiling::profileLibraryLoaded()) {
const size_t reported_size =
(arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
reported_size);
}
try {
#ifndef CUDART_VERSION
#error CUDART_VERSION undefined!
#elif (defined(KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC) && CUDART_VERSION >= 11020)
if (arg_alloc_size >= memory_threshold_g) {
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeAsync(arg_alloc_ptr, 0));
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
} else {
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
}
#else
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
#endif
} catch (...) {
}
}
void CudaUVMSpace::deallocate(void *const arg_alloc_ptr,
const size_t arg_alloc_size) const {
deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
}
void CudaUVMSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
const size_t arg_alloc_size,
const size_t arg_logical_size) const {
impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
}
void CudaUVMSpace::impl_deallocate(
const char *arg_label, void *const arg_alloc_ptr,
const size_t arg_alloc_size
,
const size_t arg_logical_size,
const Kokkos::Tools::SpaceHandle arg_handle) const {
Cuda::impl_static_fence(
"Kokkos::CudaUVMSpace::impl_deallocate: Pre UVM Deallocation");
if (Kokkos::Profiling::profileLibraryLoaded()) {
const size_t reported_size =
(arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
reported_size);
}
try {
if (arg_alloc_ptr != nullptr) {
Kokkos::Impl::num_uvm_allocations--;
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
}
} catch (...) {
}
Cuda::impl_static_fence(
"Kokkos::CudaUVMSpace::impl_deallocate: Post UVM Deallocation");
}
void CudaHostPinnedSpace::deallocate(void *const arg_alloc_ptr,
const size_t arg_alloc_size) const {
deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
}
void CudaHostPinnedSpace::deallocate(const char *arg_label,
void *const arg_alloc_ptr,
const size_t arg_alloc_size,
const size_t arg_logical_size) const {
impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
}
void CudaHostPinnedSpace::impl_deallocate(
const char *arg_label, void *const arg_alloc_ptr,
const size_t arg_alloc_size, const size_t arg_logical_size,
const Kokkos::Tools::SpaceHandle arg_handle) const {
if (Kokkos::Profiling::profileLibraryLoaded()) {
const size_t reported_size =
(arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
reported_size);
}
try {
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeHost(arg_alloc_ptr));
} catch (...) {
}
}
} // namespace Kokkos
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
#ifdef KOKKOS_ENABLE_DEBUG
SharedAllocationRecord<void, void>
SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record;
SharedAllocationRecord<void, void>
SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record;
SharedAllocationRecord<void, void>
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::s_root_record;
#endif
::cudaTextureObject_t
SharedAllocationRecord<Kokkos::CudaSpace, void>::attach_texture_object(
const unsigned sizeof_alias, void *const alloc_ptr,
size_t const alloc_size) {
enum { TEXTURE_BOUND_1D = 1u << 27 };
if ((alloc_ptr == nullptr) ||
(sizeof_alias * TEXTURE_BOUND_1D <= alloc_size)) {
std::ostringstream msg;
msg << "Kokkos::CudaSpace ERROR: Cannot attach texture object to"
<< " alloc_ptr(" << alloc_ptr << ")"
<< " alloc_size(" << alloc_size << ")"
<< " max_size(" << (sizeof_alias * TEXTURE_BOUND_1D) << ")";
std::cerr << msg.str() << std::endl;
std::cerr.flush();
Kokkos::Impl::throw_runtime_exception(msg.str());
}
::cudaTextureObject_t tex_obj;
struct cudaResourceDesc resDesc;
struct cudaTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.desc =
(sizeof_alias == 4
? cudaCreateChannelDesc<int>()
: (sizeof_alias == 8
? cudaCreateChannelDesc< ::int2>()
:
/* sizeof_alias == 16 */ cudaCreateChannelDesc< ::int4>()));
resDesc.res.linear.sizeInBytes = alloc_size;
resDesc.res.linear.devPtr = alloc_ptr;
KOKKOS_IMPL_CUDA_SAFE_CALL(
cudaCreateTextureObject(&tex_obj, &resDesc, &texDesc, nullptr));
return tex_obj;
}
//==============================================================================
// <editor-fold desc="SharedAllocationRecord destructors"> {{{1
SharedAllocationRecord<Kokkos::CudaSpace, void>::~SharedAllocationRecord() {
const char *label = nullptr;
if (Kokkos::Profiling::profileLibraryLoaded()) {
SharedAllocationHeader header;
Kokkos::Impl::DeepCopy<Kokkos::CudaSpace, HostSpace>(
&header, RecordBase::m_alloc_ptr, sizeof(SharedAllocationHeader));
label = header.label();
}
auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
m_space.deallocate(label, SharedAllocationRecord<void, void>::m_alloc_ptr,
alloc_size, (alloc_size - sizeof(SharedAllocationHeader)));
}
SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::~SharedAllocationRecord() {
const char *label = nullptr;
if (Kokkos::Profiling::profileLibraryLoaded()) {
label = RecordBase::m_alloc_ptr->m_label;
}
m_space.deallocate(label, SharedAllocationRecord<void, void>::m_alloc_ptr,
SharedAllocationRecord<void, void>::m_alloc_size,
(SharedAllocationRecord<void, void>::m_alloc_size -
sizeof(SharedAllocationHeader)));
}
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
void>::~SharedAllocationRecord() {
m_space.deallocate(RecordBase::m_alloc_ptr->m_label,
SharedAllocationRecord<void, void>::m_alloc_ptr,
SharedAllocationRecord<void, void>::m_alloc_size,
(SharedAllocationRecord<void, void>::m_alloc_size -
sizeof(SharedAllocationHeader)));
}
// </editor-fold> end SharedAllocationRecord destructors }}}1
//==============================================================================
//==============================================================================
// <editor-fold desc="SharedAllocationRecord constructors"> {{{1
SharedAllocationRecord<Kokkos::CudaSpace, void>::SharedAllocationRecord(
const Kokkos::CudaSpace &arg_space, const std::string &arg_label,
const size_t arg_alloc_size,
const SharedAllocationRecord<void, void>::function_type arg_dealloc)
// Pass through allocated [ SharedAllocationHeader , user_memory ]
// Pass through deallocation function
: base_t(
#ifdef KOKKOS_ENABLE_DEBUG
&SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record,
#endif
Impl::checked_allocation_with_header(arg_space, arg_label,
arg_alloc_size),
sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc),
m_tex_obj(0),
m_space(arg_space) {
SharedAllocationHeader header;
this->base_t::_fill_host_accessible_header_info(header, arg_label);
// Copy to device memory
Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(RecordBase::m_alloc_ptr, &header,
sizeof(SharedAllocationHeader));
}
SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::SharedAllocationRecord(
const Kokkos::CudaUVMSpace &arg_space, const std::string &arg_label,
const size_t arg_alloc_size,
const SharedAllocationRecord<void, void>::function_type arg_dealloc)
// Pass through allocated [ SharedAllocationHeader , user_memory ]
// Pass through deallocation function
: base_t(
#ifdef KOKKOS_ENABLE_DEBUG
&SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record,
#endif
Impl::checked_allocation_with_header(arg_space, arg_label,
arg_alloc_size),
sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc),
m_tex_obj(0),
m_space(arg_space) {
this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
arg_label);
}
SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::
SharedAllocationRecord(
const Kokkos::CudaHostPinnedSpace &arg_space,
const std::string &arg_label, const size_t arg_alloc_size,
const SharedAllocationRecord<void, void>::function_type arg_dealloc)
// Pass through allocated [ SharedAllocationHeader , user_memory ]
// Pass through deallocation function
: base_t(
#ifdef KOKKOS_ENABLE_DEBUG
&SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
void>::s_root_record,
#endif
Impl::checked_allocation_with_header(arg_space, arg_label,
arg_alloc_size),
sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc),
m_space(arg_space) {
this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
arg_label);
}
// </editor-fold> end SharedAllocationRecord constructors }}}1
//==============================================================================
void cuda_prefetch_pointer(const Cuda &space, const void *ptr, size_t bytes,
bool to_device) {
if ((ptr == nullptr) || (bytes == 0)) return;
cudaPointerAttributes attr;
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaPointerGetAttributes(&attr, ptr));
// I measured this and it turns out prefetching towards the host slows
// DualView syncs down. Probably because the latency is not too bad in the
// first place for the pull down. If we want to change that provde
// cudaCpuDeviceId as the device if to_device is false
#if CUDA_VERSION < 10000
bool is_managed = attr.isManaged;
#else
bool is_managed = attr.type == cudaMemoryTypeManaged;
#endif
if (to_device && is_managed &&
space.cuda_device_prop().concurrentManagedAccess) {
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemPrefetchAsync(
ptr, bytes, space.cuda_device(), space.cuda_stream()));
}
}
} // namespace Impl
} // namespace Kokkos
//==============================================================================
// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
#include <impl/Kokkos_SharedAlloc_timpl.hpp>
namespace Kokkos {
namespace Impl {
// To avoid additional compilation cost for something that's (mostly?) not
// performance sensitive, we explicity instantiate these CRTP base classes here,
// where we have access to the associated *_timpl.hpp header files.
template class SharedAllocationRecordCommon<Kokkos::CudaSpace>;
template class HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace>;
template class SharedAllocationRecordCommon<Kokkos::CudaUVMSpace>;
template class SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace>;
} // end namespace Impl
} // end namespace Kokkos
// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
//==============================================================================
#else
void KOKKOS_CORE_SRC_CUDA_CUDASPACE_PREVENT_LINK_ERROR() {}
#endif // KOKKOS_ENABLE_CUDA
| 1 | 31,205 | Doesn't belong to this PR | kokkos-kokkos | cpp |
@@ -26,7 +26,7 @@ type ECSClient interface {
// ContainerInstanceARN if successful. Supplying a non-empty container
// instance ARN allows a container instance to update its registered
// resources.
- RegisterContainerInstance(existingContainerInstanceArn string, attributes []*ecs.Attribute) (string, error)
+ RegisterContainerInstance(existingContainerInstanceArn string, attributes []*ecs.Attribute, registrationToken string) (string, error)
// SubmitTaskStateChange sends a state change and returns an error
// indicating if it was submitted
SubmitTaskStateChange(change TaskStateChange) error | 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package api
import "github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
// ECSClient is an interface over the ECSSDK interface which abstracts away some
// details around constructing the request and reading the response down to the
// parts the agent cares about.
// For example, the ever-present 'Cluster' member is abstracted out so that it
// may be configured once and used throughout transparently.
type ECSClient interface {
// RegisterContainerInstance calculates the appropriate resources, creates
// the default cluster if necessary, and returns the registered
// ContainerInstanceARN if successful. Supplying a non-empty container
// instance ARN allows a container instance to update its registered
// resources.
RegisterContainerInstance(existingContainerInstanceArn string, attributes []*ecs.Attribute) (string, error)
// SubmitTaskStateChange sends a state change and returns an error
// indicating if it was submitted
SubmitTaskStateChange(change TaskStateChange) error
// SubmitContainerStateChange sends a state change and returns an error
// indicating if it was submitted
SubmitContainerStateChange(change ContainerStateChange) error
// DiscoverPollEndpoint takes a ContainerInstanceARN and returns the
// endpoint at which this Agent should contact ACS
DiscoverPollEndpoint(containerInstanceArn string) (string, error)
// DiscoverTelemetryEndpoint takes a ContainerInstanceARN and returns the
// endpoint at which this Agent should contact Telemetry Service
DiscoverTelemetryEndpoint(containerInstanceArn string) (string, error)
}
// ECSSDK is an interface that specifies the subset of the AWS Go SDK's ECS
// client that the Agent uses. This interface is meant to allow injecting a
// mock for testing.
type ECSSDK interface {
CreateCluster(*ecs.CreateClusterInput) (*ecs.CreateClusterOutput, error)
RegisterContainerInstance(*ecs.RegisterContainerInstanceInput) (*ecs.RegisterContainerInstanceOutput, error)
DiscoverPollEndpoint(*ecs.DiscoverPollEndpointInput) (*ecs.DiscoverPollEndpointOutput, error)
}
// ECSSubmitStateSDK is an interface with customized ecs client that
// implements the SubmitTaskStateChange and SubmitContainerStateChange
type ECSSubmitStateSDK interface {
SubmitContainerStateChange(*ecs.SubmitContainerStateChangeInput) (*ecs.SubmitContainerStateChangeOutput, error)
SubmitTaskStateChange(*ecs.SubmitTaskStateChangeInput) (*ecs.SubmitTaskStateChangeOutput, error)
}
| 1 | 20,883 | Just for my own understanding, does aws ecs API take this token as a new input attribute? Which version of the aws sdk? I did not find it in the official aws sdk doc. | aws-amazon-ecs-agent | go |
@@ -68,6 +68,7 @@ public class UserAccount {
private static final String TAG = "UserAccount";
private static final String FORWARD_SLASH = "/";
private static final String UNDERSCORE = "_";
+ private static final String SF_APP_FEATURE_CODE_USER_AUTH = "UA";
private String authToken;
private String refreshToken; | 1 | /*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.accounts;
import org.json.JSONException;
import org.json.JSONObject;
import android.os.Bundle;
import android.text.TextUtils;
import android.util.Log;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
/**
* This class represents a single user account that is currently
* logged in against a Salesforce endpoint. It encapsulates data
* that is used to uniquely identify a single user account.
*
* @author bhariharan
*/
public class UserAccount {
public static final String AUTH_TOKEN = "authToken";
public static final String REFRESH_TOKEN = "refreshToken";
public static final String LOGIN_SERVER = "loginServer";
public static final String ID_URL = "idUrl";
public static final String INSTANCE_SERVER = "instanceServer";
public static final String ORG_ID = "orgId";
public static final String USER_ID = "userId";
public static final String USERNAME = "username";
public static final String ACCOUNT_NAME = "accountName";
public static final String CLIENT_ID = "clientId";
public static final String COMMUNITY_ID = "communityId";
public static final String COMMUNITY_URL = "communityUrl";
public static final String INTERNAL_COMMUNITY_ID = "000000000000000000";
public static final String INTERNAL_COMMUNITY_PATH = "internal";
public static final String EMAIL = "email";
public static final String FIRST_NAME = "first_name";
public static final String DISPLAY_NAME = "display_name";
public static final String LAST_NAME = "last_name";
public static final String PHOTO_URL = "photoUrl";
public static final String THUMBNAIL_URL = "thumbnailUrl";
private static final String TAG = "UserAccount";
private static final String FORWARD_SLASH = "/";
private static final String UNDERSCORE = "_";
private String authToken;
private String refreshToken;
private String loginServer;
private String idUrl;
private String instanceServer;
private String orgId;
private String userId;
private String username;
private String accountName;
private String clientId;
private String communityId;
private String communityUrl;
private String firstName;
private String lastName;
private String displayName;
private String email;
private String photoUrl;
private String thumbnailUrl;
/**
* Parameterized constructor.
*
* @param authToken Auth token.
* @param refreshToken Refresh token.
* @param loginServer Login server.
* @param idUrl Identity URL.
* @param instanceServer Instance server.
* @param orgId Org ID.
* @param userId User ID.
* @param username Username.
* @param accountName Account name.
* @param clientId Client ID.
* @param communityId Community ID.
* @param communityUrl Community URL.
*/
public UserAccount(String authToken, String refreshToken,
String loginServer, String idUrl, String instanceServer,
String orgId, String userId, String username, String accountName,
String clientId, String communityId, String communityUrl) {
this(authToken, refreshToken,
loginServer, idUrl, instanceServer,
orgId, userId, username, accountName,
clientId, communityId, communityUrl,
null, null, null, null, null, null);
}
/**
* Parameterized constructor.
*
* @param authToken Auth token.
* @param refreshToken Refresh token.
* @param loginServer Login server.
* @param idUrl Identity URL.
* @param instanceServer Instance server.
* @param orgId Org ID.
* @param userId User ID.
* @param username Username.
* @param accountName Account name.
* @param clientId Client ID.
* @param communityId Community ID.
* @param communityUrl Community URL.
* @param firstName First Name.
* @param lastName Last Name.
* @param displayName Display Name.
* @param email Email.
* @param photoUrl Photo URL.
* @param thumbnailUrl Thumbnail URL.
*/
public UserAccount(String authToken, String refreshToken,
String loginServer, String idUrl, String instanceServer,
String orgId, String userId, String username, String accountName,
String clientId, String communityId, String communityUrl,
String firstName, String lastName, String displayName, String email, String photoUrl,
String thumbnailUrl ) {
this.authToken = authToken;
this.refreshToken = refreshToken;
this.loginServer = loginServer;
this.idUrl = idUrl;
this.instanceServer = instanceServer;
this.orgId = orgId;
this.userId = userId;
this.username = username;
this.accountName = accountName;
this.clientId = clientId;
this.communityId = communityId;
this.communityUrl = communityUrl;
this.firstName = firstName;
this.lastName = lastName;
this.displayName = displayName;
this.email = email;
this.photoUrl = photoUrl;
this.thumbnailUrl = thumbnailUrl;
}
/**
* Parameterized constructor.
*
* @param object JSON object.
*/
public UserAccount(JSONObject object) {
if (object != null) {
authToken = object.optString(AUTH_TOKEN, null);
refreshToken = object.optString(REFRESH_TOKEN, null);
loginServer = object.optString(LOGIN_SERVER, null);
idUrl = object.optString(ID_URL, null);
instanceServer = object.optString(INSTANCE_SERVER, null);
orgId = object.optString(ORG_ID, null);
userId = object.optString(USER_ID, null);
username = object.optString(USERNAME, null);
clientId = object.optString(CLIENT_ID, null);
if (!TextUtils.isEmpty(username) && !TextUtils.isEmpty(instanceServer)) {
accountName = String.format("%s (%s) (%s)", username, instanceServer,
SalesforceSDKManager.getInstance().getApplicationName());
}
communityId = object.optString(COMMUNITY_ID, null);
communityUrl = object.optString(COMMUNITY_URL, null);
firstName = object.optString(FIRST_NAME, null);
lastName = object.optString(LAST_NAME, null);
displayName = object.optString(DISPLAY_NAME, null);
email = object.optString(EMAIL, null);
photoUrl = object.optString(PHOTO_URL, null);
thumbnailUrl = object.optString(THUMBNAIL_URL, null);
}
}
/**
* Parameterized constructor.
*
* @param bundle Bundle.
*/
public UserAccount(Bundle bundle) {
if (bundle != null) {
authToken = bundle.getString(AUTH_TOKEN);
refreshToken = bundle.getString(REFRESH_TOKEN);
loginServer = bundle.getString(LOGIN_SERVER);
idUrl = bundle.getString(ID_URL);
instanceServer = bundle.getString(INSTANCE_SERVER);
orgId = bundle.getString(ORG_ID);
userId = bundle.getString(USER_ID);
username = bundle.getString(USERNAME);
clientId = bundle.getString(CLIENT_ID);
accountName = bundle.getString(ACCOUNT_NAME);
communityId = bundle.getString(COMMUNITY_ID);
communityUrl = bundle.getString(COMMUNITY_URL);
firstName = bundle.getString(FIRST_NAME);
lastName = bundle.getString(LAST_NAME);
displayName = bundle.getString(DISPLAY_NAME);
email = bundle.getString(EMAIL);
photoUrl = bundle.getString(PHOTO_URL);
thumbnailUrl = bundle.getString(THUMBNAIL_URL);
}
}
/**
* Returns the auth token for this user account.
*
* @return Auth token.
*/
public String getAuthToken() {
return authToken;
}
/**
* Returns the refresh token for this user account.
*
* @return Refresh token.
*/
public String getRefreshToken() {
return refreshToken;
}
/**
* Returns the login server for this user account.
*
* @return Login server.
*/
public String getLoginServer() {
return loginServer;
}
/**
* Returns the identity URL for this user account.
*
* @return Identity URL.
*/
public String getIdUrl() {
return idUrl;
}
/**
* Returns the instance server for this user account.
*
* @return Instance server.
*/
public String getInstanceServer() {
return instanceServer;
}
/**
* Returns the org ID for this user account.
*
* @return Org ID.
*/
public String getOrgId() {
return orgId;
}
/**
* Returns the user ID for this user account.
*
* @return User ID.
*/
public String getUserId() {
return userId;
}
/**
* Returns the username for this user account.
*
* @return Username.
*/
public String getUsername() {
return username;
}
/**
* Returns the account name for this user account.
*
* @return Account name.
*/
public String getAccountName() {
return accountName;
}
/**
* Returns the client ID for this user account.
*
* @return Client ID.
*/
public String getClientId() {
return clientId;
}
/**
* Returns the community ID for this user account.
*
* @return Community ID.
*/
public String getCommunityId() {
return communityId;
}
/**
* Returns the community URL for this user account.
*
* @return Community URL.
*/
public String getCommunityUrl() {
return communityUrl;
}
/**
* Returns the first name for this user account.
*
* @return First Name.
*/
public String getFirstName() {
return firstName;
}
/**
* Returns the Display name for this user account.
*
* @return Display Name.
*/
public String getDisplayName() {
return displayName;
}
/**
* Returns the last name for this user account.
*
* @return Last Name.
*/
public String getLastName() {
return lastName;
}
/**
* Returns the email for this user account.
*
* @return Email.
*/
public String getEmail() {
return email;
}
/**
* Returns the photo url for this user.
*
* @return Photo URL.
*/
public String getPhotoUrl() {
return photoUrl;
}
/**
* Returns the thumbnail for this user.
*
* @return Thumbnail.
*/
public String getThumbnailUrl() {
return thumbnailUrl;
}
/**
* Returns the org level storage path for this user account, relative to
* the higher level directory of app data. The higher level directory
* could be 'files'. The output is of the format '/{orgID}/'.
* This storage path is meant for data that can be shared
* across multiple users of the same org.
*
* @return File storage path.
*/
public String getOrgLevelStoragePath() {
final StringBuffer sb = new StringBuffer(FORWARD_SLASH);
sb.append(orgId);
sb.append(FORWARD_SLASH);
return sb.toString();
}
/**
* Returns the user level storage path for this user account, relative to
* the higher level directory of app data. The higher level directory
* could be 'files'. The output is of the format '/{orgID}/{userId}/'.
* This storage path is meant for data that is unique to a particular
* user in an org, but common across all the communities that the
* user is a member of within that org.
*
* @return File storage path.
*/
public String getUserLevelStoragePath() {
final StringBuffer sb = new StringBuffer(FORWARD_SLASH);
sb.append(orgId);
sb.append(FORWARD_SLASH);
sb.append(userId);
sb.append(FORWARD_SLASH);
return sb.toString();
}
/**
* Returns the storage path for this user account, relative to the higher
* level directory of app data. The higher level directory could be 'files'.
* The output is of the format '/{orgID}/{userID}/{communityID}/'.
* If 'communityID' is null or the internal community ID, then the output
* would be '/{orgID}/{userID}/internal/'. This storage path is meant for
* data that is unique to a particular user in a specific community.
*
* @return File storage path.
*/
public String getCommunityLevelStoragePath() {
String leafDir = INTERNAL_COMMUNITY_PATH;
if (!TextUtils.isEmpty(communityId) && !communityId.equals(INTERNAL_COMMUNITY_ID)) {
leafDir = communityId;
}
return getCommunityLevelStoragePath(leafDir);
}
/**
* Returns the storage path for this user account, relative to the higher
* level directory of app data. The higher level directory could be 'files'.
* The output is of the format '/{orgID}/{userID}/{communityID}/'.
* If 'communityID' is null or the internal community ID, then the output
* would be '/{orgID}/{userID}/internal/'. This storage path is meant for
* data that is unique to a particular user in a specific community.
*
* @param communityId Community ID. Pass 'null' for internal community.
* @return File storage path.
*/
public String getCommunityLevelStoragePath(String communityId) {
final StringBuffer sb = new StringBuffer(FORWARD_SLASH);
sb.append(orgId);
sb.append(FORWARD_SLASH);
sb.append(userId);
sb.append(FORWARD_SLASH);
String leafDir = INTERNAL_COMMUNITY_PATH;
if (!TextUtils.isEmpty(communityId) && !communityId.equals(INTERNAL_COMMUNITY_ID)) {
leafDir = communityId;
}
sb.append(leafDir);
sb.append(FORWARD_SLASH);
return sb.toString();
}
/**
* Returns a unique suffix for this user account, that can be appended
* to a file to uniquely identify this account, at an org level.
* The output is of the format '_{orgID}'. This suffix is meant
* for data that can be shared across multiple users of the same org.
*
* @return Filename suffix.
*/
public String getOrgLevelFilenameSuffix() {
final StringBuffer sb = new StringBuffer(UNDERSCORE);
sb.append(orgId);
return sb.toString();
}
/**
* Returns a unique suffix for this user account, that can be appended
* to a file to uniquely identify this account, at a user level.
* The output is of the format '_{orgID}_{userID}'. This suffix
* is meant for data that is unique to a particular user in an org,
* but common across all the communities that the user is a member
* of within that org.
*
* @return Filename suffix.
*/
public String getUserLevelFilenameSuffix() {
final StringBuffer sb = new StringBuffer(UNDERSCORE);
sb.append(orgId);
sb.append(UNDERSCORE);
sb.append(userId);
return sb.toString();
}
/**
* Returns a unique suffix for this user account, that can be appended
* to a file to uniquely identify this account, at a community level.
* The output is of the format '_{orgID}_{userID}_{communityID}'.
* If 'communityID' is null or the internal community ID, then the output
* would be '_{orgID}_{userID}_internal'. This storage path is meant for
* data that is unique to a particular user in a specific community.
*
* @return Filename suffix.
*/
public String getCommunityLevelFilenameSuffix() {
String leafDir = INTERNAL_COMMUNITY_PATH;
if (!TextUtils.isEmpty(communityId) && !communityId.equals(INTERNAL_COMMUNITY_ID)) {
leafDir = communityId;
}
return getCommunityLevelFilenameSuffix(leafDir);
}
/**
* Returns a unique suffix for this user account, that can be appended
* to a file to uniquely identify this account, at a community level.
* The output is of the format '_{orgID}_{userID}_{communityID}'.
* If 'communityID' is null or the internal community ID, then the output
* would be '_{orgID}_{userID}_internal'. This storage path is meant for
* data that is unique to a particular user in a specific community.
*
* @param communityId Community ID. Pass 'null' for internal community.
* @return Filename suffix.
*/
public String getCommunityLevelFilenameSuffix(String communityId) {
final StringBuffer sb = new StringBuffer(UNDERSCORE);
sb.append(orgId);
sb.append(UNDERSCORE);
sb.append(userId);
sb.append(UNDERSCORE);
String leafDir = INTERNAL_COMMUNITY_PATH;
if (!TextUtils.isEmpty(communityId) && !communityId.equals(INTERNAL_COMMUNITY_ID)) {
leafDir = communityId;
}
sb.append(leafDir);
return sb.toString();
}
@Override
public boolean equals(Object object) {
if (object == null || !(object instanceof UserAccount)) {
return false;
}
final UserAccount userAccount = (UserAccount) object;
if (userId == null || orgId == null || userAccount.getUserId() == null
|| userAccount.getOrgId() == null) {
return false;
}
if (userAccount.getUserId().equals(userId) && userAccount.getOrgId().equals(orgId)) {
return true;
}
return false;
}
@Override
public int hashCode() {
int result = userId.hashCode();
result ^= orgId.hashCode() + result * 37;
return result;
}
/**
* Returns a JSON representation of this instance.
*
* @return JSONObject instance.
*/
public JSONObject toJson() {
final JSONObject object = new JSONObject();
try {
object.put(AUTH_TOKEN, authToken);
object.put(REFRESH_TOKEN, refreshToken);
object.put(LOGIN_SERVER, loginServer);
object.put(ID_URL, idUrl);
object.put(INSTANCE_SERVER, instanceServer);
object.put(ORG_ID, orgId);
object.put(USER_ID, userId);
object.put(USERNAME, username);
object.put(CLIENT_ID, clientId);
object.put(COMMUNITY_ID, communityId);
object.put(COMMUNITY_URL, communityUrl);
object.put(FIRST_NAME, firstName);
object.put(LAST_NAME, lastName);
object.put(DISPLAY_NAME, displayName);
object.put(EMAIL, email);
object.put(PHOTO_URL, photoUrl);
object.put(THUMBNAIL_URL, thumbnailUrl);
} catch (JSONException e) {
Log.e(TAG, "Unable to convert to JSON");
}
return object;
}
/**
* Returns a representation of this instance in a bundle.
*
* @return Bundle instance.
*/
public Bundle toBundle() {
final Bundle object = new Bundle();
object.putString(AUTH_TOKEN, authToken);
object.putString(REFRESH_TOKEN, refreshToken);
object.putString(LOGIN_SERVER, loginServer);
object.putString(ID_URL, idUrl);
object.putString(INSTANCE_SERVER, instanceServer);
object.putString(ORG_ID, orgId);
object.putString(USER_ID, userId);
object.putString(USERNAME, username);
object.putString(CLIENT_ID, clientId);
object.putString(ACCOUNT_NAME, accountName);
object.putString(COMMUNITY_ID, communityId);
object.putString(COMMUNITY_URL, communityUrl);
object.putString(FIRST_NAME, firstName);
object.putString(LAST_NAME, lastName);
object.putString(DISPLAY_NAME, displayName);
object.putString(EMAIL, email);
object.putString(PHOTO_URL, photoUrl);
object.putString(THUMBNAIL_URL, thumbnailUrl);
return object;
}
}
| 1 | 15,653 | Could we shorten this constant to maybe `FEATURE_USER_AUTH`? | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -14,6 +14,10 @@ import (
"golang.org/x/net/context"
)
+const (
+ numRekeyWorkers = 8
+)
+
type rekeyQueueEntry struct {
id tlf.ID
ch chan error | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"sync"
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
)
type rekeyQueueEntry struct {
id tlf.ID
ch chan error
}
// RekeyQueueStandard implements the RekeyQueue interface.
type RekeyQueueStandard struct {
config Config
log logger.Logger
queueMu sync.RWMutex // protects all of the below
queue []rekeyQueueEntry
hasWorkCh chan struct{}
cancel context.CancelFunc
wg kbfssync.RepeatedWaitGroup
}
// Test that RekeyQueueStandard fully implements the RekeyQueue interface.
var _ RekeyQueue = (*RekeyQueueStandard)(nil)
// NewRekeyQueueStandard instantiates a new rekey worker.
func NewRekeyQueueStandard(config Config) *RekeyQueueStandard {
log := config.MakeLogger("RQ")
rkq := &RekeyQueueStandard{
config: config,
log: log,
}
return rkq
}
// Enqueue implements the RekeyQueue interface for RekeyQueueStandard.
func (rkq *RekeyQueueStandard) Enqueue(id tlf.ID) <-chan error {
rkq.log.Debug("Enqueueing %s for rekey", id)
c := make(chan error, 1)
rkq.wg.Add(1)
func() {
rkq.queueMu.Lock()
defer rkq.queueMu.Unlock()
if rkq.cancel == nil {
// create a new channel
rkq.hasWorkCh = make(chan struct{}, 1)
// spawn goroutine if needed
var ctx context.Context
ctx, rkq.cancel = context.WithCancel(context.Background())
go rkq.processRekeys(ctx, rkq.hasWorkCh)
}
rkq.queue = append(rkq.queue, rekeyQueueEntry{id, c})
}()
// poke the channel
select {
case rkq.hasWorkCh <- struct{}{}:
default:
}
return c
}
// IsRekeyPending implements the RekeyQueue interface for RekeyQueueStandard.
func (rkq *RekeyQueueStandard) IsRekeyPending(id tlf.ID) bool {
return rkq.GetRekeyChannel(id) != nil
}
// GetRekeyChannel implements the RekeyQueue interface for RekeyQueueStandard.
func (rkq *RekeyQueueStandard) GetRekeyChannel(id tlf.ID) <-chan error {
rkq.queueMu.RLock()
defer rkq.queueMu.RUnlock()
for _, e := range rkq.queue {
if e.id == id {
return e.ch
}
}
return nil
}
// Clear implements the RekeyQueue interface for RekeyQueueStandard.
func (rkq *RekeyQueueStandard) Clear() {
channels := func() []chan error {
rkq.queueMu.Lock()
defer rkq.queueMu.Unlock()
if rkq.cancel != nil {
// cancel
rkq.cancel()
rkq.cancel = nil
}
// collect channels and clear queue
var channels []chan error
for _, e := range rkq.queue {
channels = append(channels, e.ch)
}
rkq.queue = make([]rekeyQueueEntry, 0)
return channels
}()
for _, c := range channels {
c <- context.Canceled
close(c)
}
}
// Wait implements the RekeyQueue interface for RekeyQueueStandard.
func (rkq *RekeyQueueStandard) Wait(ctx context.Context) error {
return rkq.wg.Wait(ctx)
}
// CtxRekeyTagKey is the type used for unique context tags within an
// enqueued Rekey.
type CtxRekeyTagKey int
const (
// CtxRekeyIDKey is the type of the tag for unique operation IDs
// within an enqueued Rekey.
CtxRekeyIDKey CtxRekeyTagKey = iota
)
// CtxRekeyOpID is the display name for the unique operation
// enqueued rekey ID tag.
const CtxRekeyOpID = "REKEYID"
// Dedicated goroutine to process the rekey queue.
func (rkq *RekeyQueueStandard) processRekeys(ctx context.Context, hasWorkCh chan struct{}) {
for {
select {
case <-hasWorkCh:
for {
id := rkq.peek()
if id == tlf.NullID {
break
}
func() {
defer rkq.wg.Done()
// Assign an ID to this rekey operation so we can track it.
newCtx := ctxWithRandomIDReplayable(ctx, CtxRekeyIDKey,
CtxRekeyOpID, nil)
rkq.log.CDebugf(newCtx, "Processing rekey for %s", id)
err := rkq.config.KBFSOps().Rekey(newCtx, id)
if ch := rkq.dequeue(); ch != nil {
ch <- err
close(ch)
}
}()
if ctx.Err() != nil {
close(hasWorkCh)
return
}
}
case <-ctx.Done():
close(hasWorkCh)
return
}
}
}
func (rkq *RekeyQueueStandard) peek() tlf.ID {
rkq.queueMu.Lock()
defer rkq.queueMu.Unlock()
if len(rkq.queue) != 0 {
return rkq.queue[0].id
}
return tlf.NullID
}
func (rkq *RekeyQueueStandard) dequeue() chan<- error {
rkq.queueMu.Lock()
defer rkq.queueMu.Unlock()
if len(rkq.queue) == 0 {
return nil
}
ch := rkq.queue[0].ch
rkq.queue = rkq.queue[1:]
return ch
}
| 1 | 15,419 | Any particular reason for 8? I feel like we could probably handle more... | keybase-kbfs | go |
@@ -91,6 +91,13 @@ class CodeSetAdminTest < ActionDispatch::IntegrationTest
assert_response :redirect
end
+ it 'should update retry_count' do
+ login_as admin
+ job = create(:fetch_job, repository: create(:repository))
+ put admin_job_path(job), job: { retry_count: 3 }
+ job.reload.retry_count.must_equal 3
+ end
+
it 'should delete job' do
login_as admin
job = create(:fetch_job, repository: create(:repository)) | 1 | require 'test_helper'
class CodeSetAdminTest < ActionDispatch::IntegrationTest
let(:admin) { create(:admin, password: TEST_PASSWORD) }
it 'mark_as_failed should work' do
job = create(:sloc_job, repository: create(:repository, best_code_set: create(:code_set)))
login_as admin
get mark_as_failed_admin_job_path(job), {}, 'HTTP_REFERER' => admin_jobs_path
assert_equal job.failure_group, nil
assert_equal SlaveLog.last.job, job
assert_equal flash[:notice], "Job #{job.id} marked as failed."
end
it 'recoount should work' do
job = create(:fetch_job, repository: create(:repository))
login_as admin
get recount_admin_job_path(job)
assert_redirected_to admin_job_path(job)
assert_equal job.retry_count, 0
assert_equal job.wait_until, nil
assert_equal flash[:notice], "Job #{job.id} retry attempts counter has been reset to 0."
end
it 'should render index page' do
login_as admin
create(:fetch_job, repository: create(:repository), slave: create(:slave))
get admin_jobs_path
assert_response :success
end
it 'should render project jobs index page for newly created project' do
login_as admin
repository = create(:repository)
create(:fetch_job, repository: repository, slave: create(:slave))
project = create(:project)
create(:enlistment, repository: repository, project: project)
get admin_jobs_path, project_id: project.vanity_url
assert_response :success
end
it 'should render project index page for analses completed project' do
login_as admin
project = create(:project)
create(:fetch_job, project: project, slave: create(:slave))
get admin_jobs_path, project_id: project.vanity_url
assert_response :success
end
it 'should render jobs show page' do
login_as admin
job = create(:fetch_job, repository: create(:repository), slave: create(:slave))
get admin_job_path(job)
assert_response :success
end
it 'should allow to reschedule' do
login_as admin
job = create(:fetch_job, repository: create(:repository), slave: create(:slave))
put reschedule_admin_job_path(job), {}, 'HTTP_REFERER' => admin_jobs_path
assert_response :redirect
end
it 'should not allow to reschedule if job is running' do
login_as admin
job = create(:fetch_job, repository: create(:repository), slave: create(:slave), status: Job::STATUS_RUNNING)
put reschedule_admin_job_path(job), {}, 'HTTP_REFERER' => admin_jobs_path
assert_response :redirect
end
it 'should rebuild people' do
login_as admin
job = create(:fetch_job, repository: create(:repository), slave: create(:slave))
put rebuild_people_admin_job_path(job), {}, 'HTTP_REFERER' => admin_jobs_path
assert_response :redirect
end
it 'should index repository jobs' do
login_as admin
get admin_repository_jobs_path(create(:repository))
assert_response :success
end
it 'should update priority' do
login_as admin
job = create(:fetch_job, repository: create(:repository))
put admin_job_path(job), job: { priority: 5 }
assert_response :redirect
end
it 'should delete job' do
login_as admin
job = create(:fetch_job, repository: create(:repository))
assert_difference 'Job.count', -1 do
delete admin_job_path(job)
end
assert_response :redirect
end
it 'should manually schedule job' do
login_as admin
post manually_schedule_admin_project_jobs_path(create(:project))
assert_response :redirect
end
end
| 1 | 8,634 | Try to use factory association declaration for default values, so it could minimal the line length. You may write in fetch_job factory like association :repository, factory: :repository. | blackducksoftware-ohloh-ui | rb |
@@ -1,7 +1,7 @@
<tr class="m-shared-email_status">
<td colspan="2" padding="10">
<table id="status-container-detail" width="770" class="col-md-12">
- <tr class="status-header-container">
+ <tr class="status-header-container cancelled">
<td>
<strong>Approval Status</strong>
</td> | 1 | <tr class="m-shared-email_status">
<td colspan="2" padding="10">
<table id="status-container-detail" width="770" class="col-md-12">
<tr class="status-header-container">
<td>
<strong>Approval Status</strong>
</td>
<td class="status-header">
<%= status_icon_tag(@proposal.status) %>
<strong><%= @proposal.status %></strong>
(<%= @proposal.number_approved %> of <%= @proposal.total_approvers %> approved)
</td>
</tr>
<tr class="status-header-container-detail">
<td colspan="2">
<table class="approval_details data_container">
<%- last_status = nil %>
<%- @proposal.approvals_in_list_order.each_with_index do |approval,index| %>
<%- display_status = approval.decorate.display_status %>
<tr class="approval-row">
<td>
<%- if display_status != last_status %>
<strong><%= display_status %></strong>
<%- end %>
</td>
<td>
<%- last_approver = index == @proposal.approvals_in_list_order.count - 1 %>
<%= status_icon_tag(display_status, @proposal.linear?, last_approver) %>
<span class="approver">
<%= mail_to approval.user_email_address, approval.user_full_name %>
</span>
<% if approval.approved? %>
<span class='timestamp'>on <%= date_with_tooltip(approval.approved_at) %></span>
<% end %>
</td>
</tr>
<%- last_status = display_status %>
<%- end %>
</table>
</td>
</tr>
</table>
</td>
</tr>
| 1 | 13,380 | Should the "cancelled" class be added regardless of the proposal's state? | 18F-C2 | rb |
@@ -115,7 +115,6 @@ const validOptions = require('./operations/mongo_client_ops').validOptions;
*/
function MongoClient(url, options) {
if (!(this instanceof MongoClient)) return new MongoClient(url, options);
-
// Set up event emitter
EventEmitter.call(this);
| 1 | 'use strict';
const ChangeStream = require('./change_stream');
const Db = require('./db');
const EventEmitter = require('events').EventEmitter;
const executeOperation = require('./utils').executeOperation;
const handleCallback = require('./utils').handleCallback;
const inherits = require('util').inherits;
const MongoError = require('mongodb-core').MongoError;
// Operations
const connectOp = require('./operations/mongo_client_ops').connectOp;
const logout = require('./operations/mongo_client_ops').logout;
const validOptions = require('./operations/mongo_client_ops').validOptions;
/**
* @fileOverview The **MongoClient** class is a class that allows for making Connections to MongoDB.
*
* @example
* // Connect using a MongoClient instance
* const MongoClient = require('mongodb').MongoClient;
* const test = require('assert');
* // Connection url
* const url = 'mongodb://localhost:27017';
* // Database Name
* const dbName = 'test';
* // Connect using MongoClient
* const mongoClient = new MongoClient(url);
* mongoClient.connect(function(err, client) {
* const db = client.db(dbName);
* client.close();
* });
*
* @example
* // Connect using the MongoClient.connect static method
* const MongoClient = require('mongodb').MongoClient;
* const test = require('assert');
* // Connection url
* const url = 'mongodb://localhost:27017';
* // Database Name
* const dbName = 'test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, client) {
* const db = client.db(dbName);
* client.close();
* });
*/
/**
* Creates a new MongoClient instance
* @class
* @param {string} url The connection URI string
* @param {object} [options] Optional settings
* @param {number} [options.poolSize=5] The maximum size of the individual server pool
* @param {boolean} [options.ssl=false] Enable SSL connection.
* @param {boolean} [options.sslValidate=true] Validate mongod server certificate against Certificate Authority
* @param {buffer} [options.sslCA=undefined] SSL Certificate store binary buffer
* @param {buffer} [options.sslCert=undefined] SSL Certificate binary buffer
* @param {buffer} [options.sslKey=undefined] SSL Key file binary buffer
* @param {string} [options.sslPass=undefined] SSL Certificate pass phrase
* @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer
* @param {boolean} [options.autoReconnect=true] Enable autoReconnect for single server instances
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {number} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket
* @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting
* @param {number} [options.family=null] Version of IP stack. Can be 4, 6 or null (default).
* If null, will attempt to connect with IPv6, and will fall back to IPv4 on failure
* @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
* @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies
* @param {number} [options.haInterval=10000] The High availability period for replicaset inquiry
* @param {string} [options.replicaSet=undefined] The Replicaset set name
* @param {number} [options.secondaryAcceptableLatencyMS=15] Cutoff latency point in MS for Replicaset member selection
* @param {number} [options.acceptableLatencyMS=15] Cutoff latency point in MS for Mongos proxies selection
* @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
* @param {string} [options.authSource=undefined] Define the database to authenticate against
* @param {(number|string)} [options.w=null] The write concern
* @param {number} [options.wtimeout=null] The write concern timeout
* @param {boolean} [options.j=false] Specify a journal write concern
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers
* @param {number} [options.bufferMaxEntries=-1] Sets a cap on how many operations the driver will buffer up before giving up on getting a working connection, default is -1 which is unlimited
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST)
* @param {object} [options.pkFactory=null] A primary key factory object for generation of custom _id keys
* @param {object} [options.promiseLibrary=null] A Promise library class the application wishes to use such as Bluebird, must be ES6 compatible
* @param {object} [options.readConcern=null] Specify a read concern for the collection (only MongoDB 3.2 or higher supported)
* @param {string} [options.readConcern.level='local'] Specify a read concern level for the collection operations, one of [local|majority]. (only MongoDB 3.2 or higher supported)
* @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed)
* @param {string} [options.loggerLevel=undefined] The logging level (error/warn/info/debug)
* @param {object} [options.logger=undefined] Custom logger object
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers
* @param {boolean} [options.promoteLongs=true] Promotes long values to number if they fit inside the 53 bits resolution
* @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function
* @param {object} [options.validateOptions=false] Validate MongoClient passed in options for correctness
* @param {string} [options.appname=undefined] The name of the application that created this MongoClient instance. MongoDB 3.4 and newer will print this value in the server log upon establishing each connection. It is also recorded in the slow query log and profile collections
* @param {string} [options.auth.user=undefined] The username for auth
* @param {string} [options.auth.password=undefined] The password for auth
* @param {string} [options.authMechanism=undefined] Mechanism for authentication: MDEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1
* @param {object} [options.compression=null] Type of compression to use: snappy or zlib
* @param {boolean} [options.fsync=false] Specify a file sync write concern
* @param {array} [options.readPreferenceTags=null] Read preference tags
* @param {number} [options.numberOfRetries=5] The number of retries for a tailable cursor
* @param {boolean} [options.auto_reconnect=true] Enable auto reconnecting for single server instances
* @param {boolean} [options.monitorCommands=false] Enable command monitoring for this client
* @param {number} [options.minSize] If present, the connection pool will be initialized with minSize connections, and will never dip below minSize connections
* @param {boolean} [options.useNewUrlParser=false] Determines whether or not to use the new url parser
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {MongoClient} a MongoClient instance
*/
function MongoClient(url, options) {
if (!(this instanceof MongoClient)) return new MongoClient(url, options);
// Set up event emitter
EventEmitter.call(this);
// The internal state
this.s = {
url: url,
options: options || {},
promiseLibrary: null,
dbCache: {},
sessions: []
};
// Get the promiseLibrary
const promiseLibrary = this.s.options.promiseLibrary || Promise;
// Add the promise to the internal state
this.s.promiseLibrary = promiseLibrary;
}
/**
* @ignore
*/
inherits(MongoClient, EventEmitter);
/**
* The callback format for results
* @callback MongoClient~connectCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {MongoClient} client The connected client.
*/
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {Promise<MongoClient>} returns Promise if no callback passed
*/
MongoClient.prototype.connect = function(callback) {
// Validate options object
const err = validOptions(this.s.options);
if (typeof callback === 'string') {
throw new TypeError('`connect` only accepts a callback');
}
return executeOperation(this, connectOp, [this, err, callback], {
skipSessions: true
});
};
/**
* Logout user from server, fire off on all connections and remove all auth info
* @method
* @param {object} [options=null] Optional settings.
* @param {string} [options.dbName=null] Logout against different database than current.
* @param {Db~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
MongoClient.prototype.logout = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Establish the correct database name
const dbName = this.s.options.authSource ? this.s.options.authSource : this.s.options.dbName;
return executeOperation(this, logout, [this, dbName, callback], {
skipSessions: true
});
};
/**
* Close the db and its underlying connections
* @method
* @param {boolean} force Force close, emitting no events
* @param {Db~noResultCallback} [callback] The result callback
* @return {Promise} returns Promise if no callback passed
*/
MongoClient.prototype.close = function(force, callback) {
if (typeof force === 'function') (callback = force), (force = false);
// Close the topology connection
this.topology.close(force);
// Emit close event
this.emit('close', this);
// Fire close event on any cached db instances
for (const name in this.s.dbCache) {
this.s.dbCache[name].emit('close');
}
// Remove listeners after emit
this.removeAllListeners('close');
// Callback after next event loop tick
if (typeof callback === 'function')
return process.nextTick(() => {
handleCallback(callback, null);
});
// Return dummy promise
return new this.s.promiseLibrary(resolve => {
resolve();
});
};
/**
* Create a new Db instance sharing the current socket connections. Be aware that the new db instances are
* related in a parent-child relationship to the original instance so that events are correctly emitted on child
* db instances. Child db instances are cached so performing db('db1') twice will return the same instance.
* You can control these behaviors with the options noListener and returnNonCachedInstance.
*
* @method
* @param {string} [dbName] The name of the database we want to use. If not provided, use database name from connection string.
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.noListener=false] Do not make the db an event listener to the original connection.
* @param {boolean} [options.returnNonCachedInstance=false] Control if you want to return a cached instance or have a new one created
* @return {Db}
*/
MongoClient.prototype.db = function(dbName, options) {
options = options || {};
// Default to db from connection string if not provided
if (!dbName) {
dbName = this.s.options.dbName;
}
// Copy the options and add out internal override of the not shared flag
const finalOptions = Object.assign({}, this.s.options, options);
// Do we have the db in the cache already
if (this.s.dbCache[dbName] && finalOptions.returnNonCachedInstance !== true) {
return this.s.dbCache[dbName];
}
// Add promiseLibrary
finalOptions.promiseLibrary = this.s.promiseLibrary;
// If no topology throw an error message
if (!this.topology) {
throw new MongoError('MongoClient must be connected before calling MongoClient.prototype.db');
}
// Return the db object
const db = new Db(dbName, this.topology, finalOptions);
// Add the db to the cache
this.s.dbCache[dbName] = db;
// Return the database
return db;
};
/**
* Check if MongoClient is connected
*
* @method
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.noListener=false] Do not make the db an event listener to the original connection.
* @param {boolean} [options.returnNonCachedInstance=false] Control if you want to return a cached instance or have a new one created
* @return {boolean}
*/
MongoClient.prototype.isConnected = function(options) {
options = options || {};
if (!this.topology) return false;
return this.topology.isConnected(options);
};
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @static
* @param {string} url The connection URI string
* @param {object} [options] Optional settings
* @param {number} [options.poolSize=5] The maximum size of the individual server pool
* @param {boolean} [options.ssl=false] Enable SSL connection.
* @param {boolean} [options.sslValidate=true] Validate mongod server certificate against Certificate Authority
* @param {buffer} [options.sslCA=undefined] SSL Certificate store binary buffer
* @param {buffer} [options.sslCert=undefined] SSL Certificate binary buffer
* @param {buffer} [options.sslKey=undefined] SSL Key file binary buffer
* @param {string} [options.sslPass=undefined] SSL Certificate pass phrase
* @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer
* @param {boolean} [options.autoReconnect=true] Enable autoReconnect for single server instances
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {boolean} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket
* @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting
* @param {number} [options.family=null] Version of IP stack. Can be 4, 6 or null (default).
* If null, will attempt to connect with IPv6, and will fall back to IPv4 on failure
* @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
* @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies
* @param {number} [options.haInterval=10000] The High availability period for replicaset inquiry
* @param {string} [options.replicaSet=undefined] The Replicaset set name
* @param {number} [options.secondaryAcceptableLatencyMS=15] Cutoff latency point in MS for Replicaset member selection
* @param {number} [options.acceptableLatencyMS=15] Cutoff latency point in MS for Mongos proxies selection
* @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
* @param {string} [options.authSource=undefined] Define the database to authenticate against
* @param {(number|string)} [options.w=null] The write concern
* @param {number} [options.wtimeout=null] The write concern timeout
* @param {boolean} [options.j=false] Specify a journal write concern
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers
* @param {number} [options.bufferMaxEntries=-1] Sets a cap on how many operations the driver will buffer up before giving up on getting a working connection, default is -1 which is unlimited
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST)
* @param {object} [options.pkFactory=null] A primary key factory object for generation of custom _id keys
* @param {object} [options.promiseLibrary=null] A Promise library class the application wishes to use such as Bluebird, must be ES6 compatible
* @param {object} [options.readConcern=null] Specify a read concern for the collection (only MongoDB 3.2 or higher supported)
* @param {string} [options.readConcern.level='local'] Specify a read concern level for the collection operations, one of [local|majority]. (only MongoDB 3.2 or higher supported)
* @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed)
* @param {string} [options.loggerLevel=undefined] The logging level (error/warn/info/debug)
* @param {object} [options.logger=undefined] Custom logger object
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers
* @param {boolean} [options.promoteLongs=true] Promotes long values to number if they fit inside the 53 bits resolution
* @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function
* @param {object} [options.validateOptions=false] Validate MongoClient passed in options for correctness
* @param {string} [options.appname=undefined] The name of the application that created this MongoClient instance. MongoDB 3.4 and newer will print this value in the server log upon establishing each connection. It is also recorded in the slow query log and profile collections
* @param {string} [options.auth.user=undefined] The username for auth
* @param {string} [options.auth.password=undefined] The password for auth
* @param {string} [options.authMechanism=undefined] Mechanism for authentication: MDEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1
* @param {object} [options.compression=null] Type of compression to use: snappy or zlib
* @param {boolean} [options.fsync=false] Specify a file sync write concern
* @param {array} [options.readPreferenceTags=null] Read preference tags
* @param {number} [options.numberOfRetries=5] The number of retries for a tailable cursor
* @param {boolean} [options.auto_reconnect=true] Enable auto reconnecting for single server instances
* @param {number} [options.minSize] If present, the connection pool will be initialized with minSize connections, and will never dip below minSize connections
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {Promise<MongoClient>} returns Promise if no callback passed
*/
MongoClient.connect = function(url, options, callback) {
const args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() : null;
options = options || {};
// Create client
const mongoClient = new MongoClient(url, options);
// Execute the connect method
return mongoClient.connect(callback);
};
/**
* Starts a new session on the server
*
* @param {object} [options] optional settings for a driver session
* @param {boolean} [options.causalConsistency] Indicate that this session should be causally consistent.
* @param {boolean} [options.autoStartTransaction=false] When enabled this session automatically starts a transaction with the provided defaultTransactionOptions.
* @param {object} [options.defaultTransactionOptions] The default TransactionOptions to use for transactions started on this session.
*
* @return {ClientSession} the newly established session
*/
MongoClient.prototype.startSession = function(options) {
options = Object.assign({ explicit: true }, options);
if (!this.topology) {
throw new MongoError('Must connect to a server before calling this method');
}
if (!this.topology.hasSessionSupport()) {
throw new MongoError('Current topology does not support sessions');
}
return this.topology.startSession(options, this.s.options);
};
/**
* Runs a given operation with an implicitly created session. The lifetime of the session
* will be handled without the need for user interaction.
*
* NOTE: presently the operation MUST return a Promise (either explicit or implicity as an async function)
*
* @param {Object} [options] Optional settings to be appled to implicitly created session
* @param {Function} operation An operation to execute with an implicitly created session. The signature of this MUST be `(session) => {}`
* @return {Promise}
*/
MongoClient.prototype.withSession = function(options, operation) {
if (typeof options === 'function') (operation = options), (options = undefined);
const session = this.startSession(options);
let cleanupHandler = (err, result, opts) => {
// prevent multiple calls to cleanupHandler
cleanupHandler = () => {
throw new ReferenceError('cleanupHandler was called too many times');
};
opts = Object.assign({ throw: true }, opts);
session.endSession();
if (err) {
if (opts.throw) throw err;
return Promise.reject(err);
}
};
try {
const result = operation(session);
return Promise.resolve(result)
.then(result => cleanupHandler(null, result))
.catch(err => cleanupHandler(err, null, { throw: true }));
} catch (err) {
return cleanupHandler(err, null, { throw: false });
}
};
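/*
 * Illustrative sketch only (not part of the driver): using `withSession` above.
 * The operation passed in must return a Promise; the session is ended by the
 * cleanup handler. Database/collection names are placeholder assumptions.
 *
 *   client.withSession({ causalConsistency: true }, session =>
 *     client.db('test').collection('docs').findOne({}, { session })
 *   );
 */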
/**
* Create a new Change Stream, watching for new changes (insertions, updates, replacements, deletions, and invalidations) in this cluster. Will ignore all changes to system collections, as well as the local, admin,
* and config databases.
* @method
* @since 3.1.0
* @param {Array} [pipeline] An array of {@link https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.
* @param {object} [options] Optional settings
* @param {string} [options.fullDocument='default'] Allowed values: ‘default’, ‘updateLookup’. When set to ‘updateLookup’, the change stream will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred.
* @param {object} [options.resumeAfter] Specifies the logical starting point for the new change stream. This should be the _id field from a previously returned change stream document.
* @param {number} [options.maxAwaitTimeMS] The maximum amount of time for the server to wait on new documents to satisfy a change stream query
* @param {number} [options.batchSize] The number of documents to return per batch. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @param {object} [options.collation] Specify collation settings for operation. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @param {ReadPreference} [options.readPreference] The read preference. See {@link https://docs.mongodb.com/manual/reference/read-preference|read preference documentation}.
* @param {Timestamp} [options.startAtClusterTime] receive change events that occur after the specified timestamp
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {ChangeStream} a ChangeStream instance.
*/
MongoClient.prototype.watch = function(pipeline, options) {
pipeline = pipeline || [];
options = options || {};
// Allow optionally not specifying a pipeline
if (!Array.isArray(pipeline)) {
options = pipeline;
pipeline = [];
}
return new ChangeStream(this, pipeline, options);
};
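/*
 * Illustrative sketch only (not part of the driver): a cluster-level change
 * stream via `watch` above. The $match stage and the handler are placeholder
 * assumptions; the returned ChangeStream emits 'change' events.
 *
 *   const changeStream = client.watch([{ $match: { operationType: 'insert' } }]);
 *   changeStream.on('change', change => console.log(change));
 *   // changeStream.close() when done
 */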
module.exports = MongoClient;
| 1 | 14,541 | nit: random whitespace change, let's try to keep these out of commits unless there are other useful changes. | mongodb-node-mongodb-native | js |
@@ -624,9 +624,15 @@ exports.populate = function populate(path, select, model, match, options, subPop
var ret = [];
var paths = path.split(' ');
+ var models = undefined;
+ if (typeof model === 'string') {
+ models = model.split(' ');
+ }
options = exports.clone(options, { retainKeyOrder: true });
for (var i = 0; i < paths.length; ++i) {
- ret.push(new PopulateOptions(paths[i], select, match, options, model, subPopulate));
+ var _model = model;
+ if (models && i < models.length) _model = models[i];
+ ret.push(new PopulateOptions(paths[i], select, match, options, _model, subPopulate));
}
return ret; | 1 | /*!
* Module dependencies.
*/
var ObjectId = require('./types/objectid');
var cloneRegExp = require('regexp-clone');
var sliced = require('sliced');
var mpath = require('mpath');
var ms = require('ms');
var MongooseBuffer;
var MongooseArray;
var Document;
/*!
* Produces a collection name from model `name`.
*
* @param {String} name a model name
* @return {String} a collection name
* @api private
*/
exports.toCollectionName = function(name, options) {
options = options || {};
if (name === 'system.profile') {
return name;
}
if (name === 'system.indexes') {
return name;
}
if (options.pluralization === false) {
return name;
}
return pluralize(name.toLowerCase());
};
/**
* Pluralization rules.
*
* These rules are applied while processing the argument to `toCollectionName`.
*
* @deprecated remove in 4.x gh-1350
*/
exports.pluralization = [
[/(m)an$/gi, '$1en'],
[/(pe)rson$/gi, '$1ople'],
[/(child)$/gi, '$1ren'],
[/^(ox)$/gi, '$1en'],
[/(ax|test)is$/gi, '$1es'],
[/(octop|vir)us$/gi, '$1i'],
[/(alias|status)$/gi, '$1es'],
[/(bu)s$/gi, '$1ses'],
[/(buffal|tomat|potat)o$/gi, '$1oes'],
[/([ti])um$/gi, '$1a'],
[/sis$/gi, 'ses'],
[/(?:([^f])fe|([lr])f)$/gi, '$1$2ves'],
[/(hive)$/gi, '$1s'],
[/([^aeiouy]|qu)y$/gi, '$1ies'],
[/(x|ch|ss|sh)$/gi, '$1es'],
[/(matr|vert|ind)ix|ex$/gi, '$1ices'],
[/([m|l])ouse$/gi, '$1ice'],
[/(kn|w|l)ife$/gi, '$1ives'],
[/(quiz)$/gi, '$1zes'],
[/s$/gi, 's'],
[/([^a-z])$/, '$1'],
[/$/gi, 's']
];
var rules = exports.pluralization;
/**
* Uncountable words.
*
* These words are applied while processing the argument to `toCollectionName`.
* @api public
*/
exports.uncountables = [
'advice',
'energy',
'excretion',
'digestion',
'cooperation',
'health',
'justice',
'labour',
'machinery',
'equipment',
'information',
'pollution',
'sewage',
'paper',
'money',
'species',
'series',
'rain',
'rice',
'fish',
'sheep',
'moose',
'deer',
'news',
'expertise',
'status',
'media'
];
var uncountables = exports.uncountables;
/*!
* Pluralize function.
*
* @author TJ Holowaychuk (extracted from _ext.js_)
* @param {String} string to pluralize
* @api private
*/
function pluralize(str) {
var found;
if (!~uncountables.indexOf(str.toLowerCase())) {
found = rules.filter(function(rule) {
return str.match(rule[0]);
});
if (found[0]) {
return str.replace(found[0][0], found[0][1]);
}
}
return str;
}
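/*!
 * Illustrative only: sample behaviour of the collection-name helpers above,
 * traced against the rule and uncountable tables.
 *
 *   exports.toCollectionName('Person'); // => 'people' (lowercased, then pluralized)
 *   exports.toCollectionName('sheep'); // => 'sheep' (uncountable)
 *   exports.toCollectionName('Test', { pluralization: false }); // => 'Test'
 */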
/*!
* Determines if `a` and `b` are deep equal.
*
* Modified from node/lib/assert.js
*
* @param {any} a a value to compare to `b`
* @param {any} b a value to compare to `a`
* @return {Boolean}
* @api private
*/
exports.deepEqual = function deepEqual(a, b) {
if (a === b) {
return true;
}
if (a instanceof Date && b instanceof Date) {
return a.getTime() === b.getTime();
}
if (a instanceof ObjectId && b instanceof ObjectId) {
return a.toString() === b.toString();
}
if (a instanceof RegExp && b instanceof RegExp) {
return a.source === b.source &&
a.ignoreCase === b.ignoreCase &&
a.multiline === b.multiline &&
a.global === b.global;
}
if (typeof a !== 'object' && typeof b !== 'object') {
return a == b;
}
if (a === null || b === null || a === undefined || b === undefined) {
return false;
}
if (a.prototype !== b.prototype) {
return false;
}
// Handle MongooseNumbers
if (a instanceof Number && b instanceof Number) {
return a.valueOf() === b.valueOf();
}
if (Buffer.isBuffer(a)) {
return exports.buffer.areEqual(a, b);
}
if (isMongooseObject(a)) {
a = a.toObject();
}
if (isMongooseObject(b)) {
b = b.toObject();
}
try {
var ka = Object.keys(a),
kb = Object.keys(b),
key, i;
} catch (e) {
// happens when one is a string literal and the other isn't
return false;
}
// having the same number of owned properties (keys incorporates
// hasOwnProperty)
if (ka.length !== kb.length) {
return false;
}
// the same set of keys (although not necessarily the same order),
ka.sort();
kb.sort();
// ~~~cheap key test
for (i = ka.length - 1; i >= 0; i--) {
if (ka[i] !== kb[i]) {
return false;
}
}
// equivalent values for every corresponding key, and
// ~~~possibly expensive deep test
for (i = ka.length - 1; i >= 0; i--) {
key = ka[i];
if (!deepEqual(a[key], b[key])) {
return false;
}
}
return true;
};
/*!
* Object clone with Mongoose natives support.
*
* If options.minimize is true, creates a minimal data object. Empty objects and undefined values will not be cloned. This makes the data payload sent to MongoDB as small as possible.
*
* Functions are never cloned.
*
* @param {Object} obj the object to clone
* @param {Object} options
* @return {Object} the cloned object
* @api private
*/
exports.clone = function clone(obj, options) {
if (obj === undefined || obj === null) {
return obj;
}
if (Array.isArray(obj)) {
return cloneArray(obj, options);
}
if (isMongooseObject(obj)) {
if (options && options.json && typeof obj.toJSON === 'function') {
return obj.toJSON(options);
}
return obj.toObject(options);
}
if (obj.constructor) {
switch (exports.getFunctionName(obj.constructor)) {
case 'Object':
return cloneObject(obj, options);
case 'Date':
return new obj.constructor(+obj);
case 'RegExp':
return cloneRegExp(obj);
default:
// ignore
break;
}
}
if (obj instanceof ObjectId) {
return new ObjectId(obj.id);
}
if (!obj.constructor && exports.isObject(obj)) {
// object created with Object.create(null)
return cloneObject(obj, options);
}
if (obj.valueOf) {
return obj.valueOf();
}
};
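/*!
 * Illustrative only: effect of the `minimize` option on `clone` above —
 * undefined values and empty nested objects are dropped from the copy.
 *
 *   exports.clone({ a: 1, b: undefined, c: {} }, { minimize: true }); // => { a: 1 }
 *   exports.clone({ a: 1, b: undefined, c: {} }); // => { a: 1, b: undefined, c: {} }
 */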
var clone = exports.clone;
/*!
* ignore
*/
function cloneObject(obj, options) {
var retainKeyOrder = options && options.retainKeyOrder,
minimize = options && options.minimize,
ret = {},
hasKeys,
keys,
val,
k,
i;
if (retainKeyOrder) {
for (k in obj) {
val = clone(obj[k], options);
if (!minimize || (typeof val !== 'undefined')) {
hasKeys || (hasKeys = true);
ret[k] = val;
}
}
} else {
// faster
keys = Object.keys(obj);
i = keys.length;
while (i--) {
k = keys[i];
val = clone(obj[k], options);
if (!minimize || (typeof val !== 'undefined')) {
if (!hasKeys) {
hasKeys = true;
}
ret[k] = val;
}
}
}
return minimize
? hasKeys && ret
: ret;
}
function cloneArray(arr, options) {
var ret = [];
for (var i = 0, l = arr.length; i < l; i++) {
ret.push(clone(arr[i], options));
}
return ret;
}
/*!
* Shallow copies defaults into options.
*
* @param {Object} defaults
* @param {Object} options
* @return {Object} the merged object
* @api private
*/
exports.options = function(defaults, options) {
var keys = Object.keys(defaults),
i = keys.length,
k;
options = options || {};
while (i--) {
k = keys[i];
if (!(k in options)) {
options[k] = defaults[k];
}
}
return options;
};
/*!
* Generates a random string
*
* @api private
*/
exports.random = function() {
return Math.random().toString().substr(3);
};
/*!
* Merges `from` into `to` without overwriting existing properties.
*
* @param {Object} to
* @param {Object} from
* @api private
*/
exports.merge = function merge(to, from, options) {
options = options || {};
var keys = Object.keys(from);
var i = 0;
var len = keys.length;
var key;
if (options.retainKeyOrder) {
while (i < len) {
key = keys[i++];
if (typeof to[key] === 'undefined') {
to[key] = from[key];
} else if (exports.isObject(from[key])) {
merge(to[key], from[key]);
} else if (options.overwrite) {
to[key] = from[key];
}
}
} else {
while (len--) {
key = keys[len];
if (typeof to[key] === 'undefined') {
to[key] = from[key];
} else if (exports.isObject(from[key])) {
merge(to[key], from[key]);
} else if (options.overwrite) {
to[key] = from[key];
}
}
}
};
/*!
* toString helper
*/
var toString = Object.prototype.toString;
/*!
* Applies toObject recursively.
*
* @param {Document|Array|Object} obj
* @return {Object}
* @api private
*/
exports.toObject = function toObject(obj) {
Document || (Document = require('./document'));
var ret;
if (exports.isNullOrUndefined(obj)) {
return obj;
}
if (obj instanceof Document) {
return obj.toObject();
}
if (Array.isArray(obj)) {
ret = [];
for (var i = 0, len = obj.length; i < len; ++i) {
ret.push(toObject(obj[i]));
}
return ret;
}
if ((obj.constructor && exports.getFunctionName(obj.constructor) === 'Object') ||
(!obj.constructor && exports.isObject(obj))) {
ret = {};
for (var k in obj) {
ret[k] = toObject(obj[k]);
}
return ret;
}
return obj;
};
/*!
* Determines if `arg` is an object.
*
* @param {Object|Array|String|Function|RegExp|any} arg
* @api private
* @return {Boolean}
*/
exports.isObject = function(arg) {
if (Buffer.isBuffer(arg)) {
return true;
}
return toString.call(arg) === '[object Object]';
};
/*!
* A faster Array.prototype.slice.call(arguments) alternative
* @api private
*/
exports.args = sliced;
/*!
* process.nextTick helper.
*
* Wraps `callback` in a try/catch + nextTick.
*
* node-mongodb-native has a habit of state corruption when an error is immediately thrown from within a collection callback.
*
* @param {Function} callback
* @api private
*/
exports.tick = function tick(callback) {
if (typeof callback !== 'function') {
return;
}
return function() {
try {
callback.apply(this, arguments);
} catch (err) {
// only nextTick on err to get out of
// the event loop and avoid state corruption.
process.nextTick(function() {
throw err;
});
}
};
};
/*!
* Returns if `v` is a mongoose object that has a `toObject()` method we can use.
*
* This is for compatibility with libs like Date.js which do foolish things to Natives.
*
* @param {any} v
* @api private
*/
exports.isMongooseObject = function(v) {
Document || (Document = require('./document'));
MongooseArray || (MongooseArray = require('./types').Array);
MongooseBuffer || (MongooseBuffer = require('./types').Buffer);
return v instanceof Document ||
(v && v.isMongooseArray) ||
(v && v.isMongooseBuffer);
};
var isMongooseObject = exports.isMongooseObject;
/*!
* Converts `expires` options of index objects to `expireAfterSeconds` options for MongoDB.
*
* @param {Object} object
* @api private
*/
exports.expires = function expires(object) {
if (!(object && object.constructor.name === 'Object')) {
return;
}
if (!('expires' in object)) {
return;
}
var when;
if (typeof object.expires !== 'string') {
when = object.expires;
} else {
when = Math.round(ms(object.expires) / 1000);
}
object.expireAfterSeconds = when;
delete object.expires;
};
/*!
* Populate options constructor
*/
function PopulateOptions(path, select, match, options, model, subPopulate) {
this.path = path;
this.match = match;
this.select = select;
this.options = options;
this.model = model;
if (typeof subPopulate === 'object') {
this.populate = subPopulate;
}
this._docs = {};
}
// make it compatible with utils.clone
PopulateOptions.prototype.constructor = Object;
// expose
exports.PopulateOptions = PopulateOptions;
/*!
* populate helper
*/
exports.populate = function populate(path, select, model, match, options, subPopulate) {
// The order of select/conditions args is opposite Model.find but
// necessary to keep backward compatibility (select could be
// an array, string, or object literal).
// might have passed an object specifying all arguments
if (arguments.length === 1) {
if (path instanceof PopulateOptions) {
return [path];
}
if (Array.isArray(path)) {
return path.map(function(o) {
return exports.populate(o)[0];
});
}
if (exports.isObject(path)) {
match = path.match;
options = path.options;
select = path.select;
model = path.model;
subPopulate = path.populate;
path = path.path;
}
} else if (typeof model !== 'string' && typeof model !== 'function') {
options = match;
match = model;
model = undefined;
}
if (typeof path !== 'string') {
throw new TypeError('utils.populate: invalid path. Expected string. Got typeof `' + typeof path + '`');
}
if (typeof subPopulate === 'object') {
subPopulate = exports.populate(subPopulate);
}
var ret = [];
var paths = path.split(' ');
options = exports.clone(options, { retainKeyOrder: true });
for (var i = 0; i < paths.length; ++i) {
ret.push(new PopulateOptions(paths[i], select, match, options, model, subPopulate));
}
return ret;
};
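/*!
 * Illustrative only: `populate` above splits a space-delimited path string into
 * one PopulateOptions instance per path, all sharing the same
 * select/match/model/options arguments.
 *
 *   exports.populate('author comments');
 *   // => [ PopulateOptions { path: 'author', ... },
 *   //      PopulateOptions { path: 'comments', ... } ]
 */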
/*!
* Return the value of `obj` at the given `path`.
*
* @param {String} path
* @param {Object} obj
*/
exports.getValue = function(path, obj, map) {
return mpath.get(path, obj, '_doc', map);
};
/*!
* Sets the value of `obj` at the given `path`.
*
* @param {String} path
* @param {Anything} val
* @param {Object} obj
*/
exports.setValue = function(path, val, obj, map) {
mpath.set(path, val, obj, '_doc', map);
};
/*!
* Returns an array of values from object `o`.
*
* @param {Object} o
* @return {Array}
* @private
*/
exports.object = {};
exports.object.vals = function vals(o) {
var keys = Object.keys(o),
i = keys.length,
ret = [];
while (i--) {
ret.push(o[keys[i]]);
}
return ret;
};
/*!
* @see exports.options
*/
exports.object.shallowCopy = exports.options;
/*!
* Safer helper for hasOwnProperty checks
*
* @param {Object} obj
* @param {String} prop
*/
var hop = Object.prototype.hasOwnProperty;
exports.object.hasOwnProperty = function(obj, prop) {
return hop.call(obj, prop);
};
/*!
* Determine if `val` is null or undefined
*
* @return {Boolean}
*/
exports.isNullOrUndefined = function(val) {
return val === null || val === undefined;
};
/*!
* ignore
*/
exports.array = {};
/*!
* Flattens an array.
*
* [ 1, [ 2, 3, [4] ]] -> [1,2,3,4]
*
* @param {Array} arr
* @param {Function} [filter] If passed, will be invoked with each item in the array. If `filter` returns a falsey value, the item will not be included in the results.
* @return {Array}
* @private
*/
exports.array.flatten = function flatten(arr, filter, ret) {
ret || (ret = []);
arr.forEach(function(item) {
if (Array.isArray(item)) {
flatten(item, filter, ret);
} else {
if (!filter || filter(item)) {
ret.push(item);
}
}
});
return ret;
};
/*!
* Removes duplicate values from an array
*
* [1, 2, 3, 3, 5] => [1, 2, 3, 5]
* [ ObjectId("550988ba0c19d57f697dc45e"), ObjectId("550988ba0c19d57f697dc45e") ]
* => [ObjectId("550988ba0c19d57f697dc45e")]
*
* @param {Array} arr
* @return {Array}
* @private
*/
exports.array.unique = function(arr) {
var primitives = {};
var ids = {};
var ret = [];
var length = arr.length;
for (var i = 0; i < length; ++i) {
if (typeof arr[i] === 'number' || typeof arr[i] === 'string') {
if (primitives[arr[i]]) {
continue;
}
ret.push(arr[i]);
primitives[arr[i]] = true;
} else if (arr[i] instanceof ObjectId) {
if (ids[arr[i].toString()]) {
continue;
}
ret.push(arr[i]);
ids[arr[i].toString()] = true;
} else {
ret.push(arr[i]);
}
}
return ret;
};
/*!
* Determines if two buffers are equal.
*
* @param {Buffer} a
* @param {Object} b
*/
exports.buffer = {};
exports.buffer.areEqual = function(a, b) {
if (!Buffer.isBuffer(a)) {
return false;
}
if (!Buffer.isBuffer(b)) {
return false;
}
if (a.length !== b.length) {
return false;
}
for (var i = 0, len = a.length; i < len; ++i) {
if (a[i] !== b[i]) {
return false;
}
}
return true;
};
exports.getFunctionName = function(fn) {
if (fn.name) {
return fn.name;
}
return (fn.toString().trim().match(/^function\s*([^\s(]+)/) || [])[1];
};
exports.decorate = function(destination, source) {
for (var key in source) {
destination[key] = source[key];
}
};
/**
* Merges `to` with a copy of `fromObj`
*
* @param {Object} to
* @param {Object} fromObj
* @api private
*/
exports.mergeClone = function(to, fromObj) {
var keys = Object.keys(fromObj);
var len = keys.length;
var i = 0;
var key;
while (i < len) {
key = keys[i++];
if (typeof to[key] === 'undefined') {
// make sure to retain key order here because of a bug handling the $each
// operator in mongodb 2.4.4
to[key] = exports.clone(fromObj[key], {retainKeyOrder: 1});
} else {
if (exports.isObject(fromObj[key])) {
var obj = fromObj[key];
if (isMongooseObject(fromObj[key]) && !fromObj[key].isMongooseBuffer) {
obj = obj.toObject({ transform: false, virtuals: false });
}
if (fromObj[key].isMongooseBuffer) {
obj = new Buffer(obj);
}
exports.mergeClone(to[key], obj);
} else {
// make sure to retain key order here because of a bug handling the
// $each operator in mongodb 2.4.4
to[key] = exports.clone(fromObj[key], {retainKeyOrder: 1});
}
}
}
};
/**
* Executes a function on each element of an array (like _.each)
*
* @param {Array} arr
* @param {Function} fn
* @api private
*/
exports.each = function(arr, fn) {
for (var i = 0; i < arr.length; ++i) {
fn(arr[i]);
}
};
| 1 | 13,376 | Seems kinda dangerous - what if models length is different from paths length? | Automattic-mongoose | js |
@@ -412,7 +412,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error {
s := stage(sync, tx, nil, stages.Senders)
log.Info("Stage", "name", s.ID, "progress", s.BlockNumber)
- cfg := stagedsync.StageSendersCfg(db, chainConfig, tmpdir)
+ cfg := stagedsync.StageSendersCfg(db, chainConfig, tmpdir, prune.Mode{})
if unwind > 0 {
u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber)
err = stagedsync.UnwindSendersStage(u, tx, cfg, ctx) | 1 | package commands
import (
"context"
"fmt"
"path"
"sort"
"strings"
"github.com/c2h5oh/datasize"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/cmd/sentry/download"
"github.com/ledgerwatch/erigon/cmd/utils"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/etl"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/ethash"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/eth/fetcher"
"github.com/ledgerwatch/erigon/eth/integrity"
"github.com/ledgerwatch/erigon/eth/stagedsync"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/erigon/ethdb/privateapi"
"github.com/ledgerwatch/erigon/ethdb/prune"
"github.com/ledgerwatch/erigon/migrations"
"github.com/ledgerwatch/erigon/p2p"
"github.com/ledgerwatch/erigon/params"
stages2 "github.com/ledgerwatch/erigon/turbo/stages"
"github.com/ledgerwatch/erigon/turbo/txpool"
"github.com/ledgerwatch/log/v3"
"github.com/spf13/cobra"
)
var cmdStageBodies = &cobra.Command{
Use: "stage_bodies",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageBodies(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdStageSenders = &cobra.Command{
Use: "stage_senders",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
logger := log.New()
ctx, _ := utils.RootContext()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageSenders(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdStageExec = &cobra.Command{
Use: "stage_exec",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageExec(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdStageTrie = &cobra.Command{
Use: "stage_trie",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageTrie(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdStageHashState = &cobra.Command{
Use: "stage_hash_state",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
logger := log.New()
ctx, _ := utils.RootContext()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageHashState(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdStageHistory = &cobra.Command{
Use: "stage_history",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageHistory(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdLogIndex = &cobra.Command{
Use: "stage_log_index",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageLogIndex(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdCallTraces = &cobra.Command{
Use: "stage_call_traces",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageCallTraces(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdStageTxLookup = &cobra.Command{
Use: "stage_tx_lookup",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
if err := stageTxLookup(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdPrintStages = &cobra.Command{
Use: "print_stages",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, false)
defer db.Close()
if err := printAllStages(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdPrintMigrations = &cobra.Command{
Use: "print_migrations",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, false)
defer db.Close()
if err := printAppliedMigrations(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdRemoveMigration = &cobra.Command{
Use: "remove_migration",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, _ := utils.RootContext()
logger := log.New()
db := openDB(chaindata, logger, false)
defer db.Close()
if err := removeMigration(db, ctx); err != nil {
log.Error("Error", "err", err)
return err
}
return nil
},
}
var cmdRunMigrations = &cobra.Command{
Use: "run_migrations",
Short: "",
RunE: func(cmd *cobra.Command, args []string) error {
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
// Nothing to do, migrations will be applied automatically
return nil
},
}
var cmdSetPrune = &cobra.Command{
Use: "set_prune",
Short: "Override existing --prune flag value (if you know what you are doing)",
RunE: func(cmd *cobra.Command, args []string) error {
logger := log.New()
db := openDB(chaindata, logger, true)
defer db.Close()
return overrideStorageMode(db)
},
}
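// Illustrative only: set_prune is meant to be run from this integration tool's
// CLI with the flags registered in init() below, for example (the binary name
// and datadir path are assumptions):
//
//	integration set_prune --datadir=/path/to/chaindata --prune=hrtc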
func init() {
withDatadir(cmdPrintStages)
withChain(cmdPrintStages)
rootCmd.AddCommand(cmdPrintStages)
withReset(cmdStageSenders)
withBlock(cmdStageSenders)
withUnwind(cmdStageSenders)
withDatadir(cmdStageSenders)
withChain(cmdStageSenders)
rootCmd.AddCommand(cmdStageSenders)
withDatadir(cmdStageBodies)
withUnwind(cmdStageBodies)
withChain(cmdStageBodies)
rootCmd.AddCommand(cmdStageBodies)
withDatadir(cmdStageExec)
withReset(cmdStageExec)
withBlock(cmdStageExec)
withUnwind(cmdStageExec)
withPruneTo(cmdStageExec)
withBatchSize(cmdStageExec)
withTxTrace(cmdStageExec)
withChain(cmdStageExec)
rootCmd.AddCommand(cmdStageExec)
withDatadir(cmdStageHashState)
withReset(cmdStageHashState)
withBlock(cmdStageHashState)
withUnwind(cmdStageHashState)
withPruneTo(cmdStageHashState)
withBatchSize(cmdStageHashState)
withChain(cmdStageHashState)
rootCmd.AddCommand(cmdStageHashState)
withDatadir(cmdStageTrie)
withReset(cmdStageTrie)
withBlock(cmdStageTrie)
withUnwind(cmdStageTrie)
withPruneTo(cmdStageTrie)
withIntegrityChecks(cmdStageTrie)
withChain(cmdStageTrie)
rootCmd.AddCommand(cmdStageTrie)
withDatadir(cmdStageHistory)
withReset(cmdStageHistory)
withBlock(cmdStageHistory)
withUnwind(cmdStageHistory)
withPruneTo(cmdStageHistory)
withChain(cmdStageHistory)
rootCmd.AddCommand(cmdStageHistory)
withDatadir(cmdLogIndex)
withReset(cmdLogIndex)
withBlock(cmdLogIndex)
withUnwind(cmdLogIndex)
withPruneTo(cmdLogIndex)
withChain(cmdLogIndex)
rootCmd.AddCommand(cmdLogIndex)
withDatadir(cmdCallTraces)
withReset(cmdCallTraces)
withBlock(cmdCallTraces)
withUnwind(cmdCallTraces)
withPruneTo(cmdCallTraces)
withChain(cmdCallTraces)
rootCmd.AddCommand(cmdCallTraces)
withReset(cmdStageTxLookup)
withBlock(cmdStageTxLookup)
withUnwind(cmdStageTxLookup)
withDatadir(cmdStageTxLookup)
withPruneTo(cmdStageTxLookup)
withChain(cmdStageTxLookup)
rootCmd.AddCommand(cmdStageTxLookup)
withDatadir(cmdPrintMigrations)
rootCmd.AddCommand(cmdPrintMigrations)
withDatadir(cmdRemoveMigration)
withMigration(cmdRemoveMigration)
withChain(cmdRemoveMigration)
rootCmd.AddCommand(cmdRemoveMigration)
withDatadir(cmdRunMigrations)
withChain(cmdRunMigrations)
rootCmd.AddCommand(cmdRunMigrations)
withDatadir(cmdSetPrune)
withChain(cmdSetPrune)
cmdSetPrune.Flags().StringVar(&pruneFlag, "prune", "hrtc", "")
cmdSetPrune.Flags().Uint64Var(&pruneH, "prune.h.older", 0, "")
cmdSetPrune.Flags().Uint64Var(&pruneR, "prune.r.older", 0, "")
cmdSetPrune.Flags().Uint64Var(&pruneT, "prune.t.older", 0, "")
cmdSetPrune.Flags().Uint64Var(&pruneC, "prune.c.older", 0, "")
cmdSetPrune.Flags().StringSliceVar(&experiments, "experiments", nil, "Storage mode to override database")
rootCmd.AddCommand(cmdSetPrune)
}
func stageBodies(db kv.RwDB, ctx context.Context) error {
return db.Update(ctx, func(tx kv.RwTx) error {
if unwind > 0 {
progress, err := stages.GetStageProgress(tx, stages.Bodies)
if err != nil {
return fmt.Errorf("read Bodies progress: %w", err)
}
if unwind > progress {
return fmt.Errorf("cannot unwind past 0")
}
if err = stages.SaveStageProgress(tx, stages.Bodies, progress-unwind); err != nil {
return fmt.Errorf("saving Bodies progress failed: %w", err)
}
progress, err = stages.GetStageProgress(tx, stages.Bodies)
if err != nil {
return fmt.Errorf("re-read Bodies progress: %w", err)
}
log.Info("Progress", "bodies", progress)
return nil
}
log.Info("This command only works with --unwind option")
return nil
})
}
func stageSenders(db kv.RwDB, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)
_, _, chainConfig, _, _, sync, _, _ := newSync(ctx, db, nil)
tx, err := db.BeginRw(ctx)
if err != nil {
return err
}
defer tx.Rollback()
if reset {
err = resetSenders(tx)
if err != nil {
return err
}
return tx.Commit()
}
s := stage(sync, tx, nil, stages.Senders)
log.Info("Stage", "name", s.ID, "progress", s.BlockNumber)
cfg := stagedsync.StageSendersCfg(db, chainConfig, tmpdir)
if unwind > 0 {
u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber)
err = stagedsync.UnwindSendersStage(u, tx, cfg, ctx)
if err != nil {
return err
}
} else {
err = stagedsync.SpawnRecoverSendersStage(cfg, s, sync, tx, block, ctx)
if err != nil {
return err
}
}
return tx.Commit()
}
func stageExec(db kv.RwDB, ctx context.Context) error {
pm, engine, chainConfig, vmConfig, _, sync, _, _ := newSync(ctx, db, nil)
if reset {
genesis, _ := byChain()
if err := db.Update(ctx, func(tx kv.RwTx) error { return resetExec(tx, genesis) }); err != nil {
return err
}
return nil
}
if txtrace {
// Activate tracing and writing into json files for each transaction
vmConfig.Tracer = nil
vmConfig.Debug = true
}
var batchSize datasize.ByteSize
must(batchSize.UnmarshalText([]byte(batchSizeStr)))
s := stage(sync, nil, db, stages.Execution)
log.Info("Stage", "name", s.ID, "progress", s.BlockNumber)
if pruneTo > 0 {
pm.History = prune.Distance(s.BlockNumber - pruneTo)
pm.Receipts = prune.Distance(s.BlockNumber - pruneTo)
pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo)
pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo)
}
cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpDBPath)
if unwind > 0 {
u := sync.NewUnwindState(stages.Execution, s.BlockNumber-unwind, s.BlockNumber)
err := stagedsync.UnwindExecutionStage(u, s, nil, ctx, cfg, false)
if err != nil {
return err
}
return nil
}
if pruneTo > 0 {
p, err := sync.PruneStageState(stages.Execution, s.BlockNumber, nil, db)
if err != nil {
return err
}
err = stagedsync.PruneExecutionStage(p, nil, cfg, ctx, false)
if err != nil {
return err
}
return nil
}
err := stagedsync.SpawnExecuteBlocksStage(s, sync, nil, block, ctx, cfg, false)
if err != nil {
return err
}
return nil
}
func stageTrie(db kv.RwDB, ctx context.Context) error {
pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)
tmpdir := path.Join(datadir, etl.TmpDirName)
tx, err := db.BeginRw(ctx)
if err != nil {
return err
}
defer tx.Rollback()
if reset {
if err := stagedsync.ResetIH(tx); err != nil {
return err
}
return tx.Commit()
}
execStage := stage(sync, tx, nil, stages.Execution)
s := stage(sync, tx, nil, stages.IntermediateHashes)
if pruneTo > 0 {
pm.History = prune.Distance(s.BlockNumber - pruneTo)
pm.Receipts = prune.Distance(s.BlockNumber - pruneTo)
pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo)
pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo)
}
fmt.Printf("distance: %d\n", pm.History)
log.Info("Stage4", "progress", execStage.BlockNumber)
log.Info("Stage5", "progress", s.BlockNumber)
cfg := stagedsync.StageTrieCfg(db, true, true, tmpdir)
if unwind > 0 {
u := sync.NewUnwindState(stages.IntermediateHashes, s.BlockNumber-unwind, s.BlockNumber)
if err := stagedsync.UnwindIntermediateHashesStage(u, s, tx, cfg, ctx); err != nil {
return err
}
} else if pruneTo > 0 {
p, err := sync.PruneStageState(stages.IntermediateHashes, s.BlockNumber, tx, db)
if err != nil {
return err
}
err = stagedsync.PruneIntermediateHashesStage(p, tx, cfg, ctx)
if err != nil {
return err
}
} else {
if _, err := stagedsync.SpawnIntermediateHashesStage(s, sync /* Unwinder */, tx, cfg, ctx); err != nil {
return err
}
}
integrity.Trie(tx, integritySlow, ctx)
return tx.Commit()
}
func stageHashState(db kv.RwDB, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)
pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)
tx, err := db.BeginRw(ctx)
if err != nil {
return err
}
defer tx.Rollback()
if reset {
err = stagedsync.ResetHashState(tx)
if err != nil {
return err
}
return tx.Commit()
}
s := stage(sync, tx, nil, stages.HashState)
if pruneTo > 0 {
pm.History = prune.Distance(s.BlockNumber - pruneTo)
pm.Receipts = prune.Distance(s.BlockNumber - pruneTo)
pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo)
pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo)
}
log.Info("Stage", "name", s.ID, "progress", s.BlockNumber)
cfg := stagedsync.StageHashStateCfg(db, tmpdir)
if unwind > 0 {
u := sync.NewUnwindState(stages.HashState, s.BlockNumber-unwind, s.BlockNumber)
err = stagedsync.UnwindHashStateStage(u, s, tx, cfg, ctx)
if err != nil {
return err
}
} else if pruneTo > 0 {
p, err := sync.PruneStageState(stages.HashState, s.BlockNumber, tx, nil)
if err != nil {
return err
}
err = stagedsync.PruneHashStateStage(p, tx, cfg, ctx)
if err != nil {
return err
}
} else {
err = stagedsync.SpawnHashStateStage(s, tx, cfg, ctx)
if err != nil {
return err
}
}
return tx.Commit()
}
func stageLogIndex(db kv.RwDB, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)
pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)
tx, err := db.BeginRw(ctx)
if err != nil {
return err
}
defer tx.Rollback()
if reset {
err = resetLogIndex(tx)
if err != nil {
return err
}
return tx.Commit()
}
execAt := progress(tx, stages.Execution)
s := stage(sync, tx, nil, stages.LogIndex)
if pruneTo > 0 {
pm.History = prune.Distance(s.BlockNumber - pruneTo)
pm.Receipts = prune.Distance(s.BlockNumber - pruneTo)
pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo)
pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo)
}
log.Info("Stage exec", "progress", execAt)
log.Info("Stage", "name", s.ID, "progress", s.BlockNumber)
cfg := stagedsync.StageLogIndexCfg(db, pm, tmpdir)
if unwind > 0 {
u := sync.NewUnwindState(stages.LogIndex, s.BlockNumber-unwind, s.BlockNumber)
err = stagedsync.UnwindLogIndex(u, s, tx, cfg, ctx)
if err != nil {
return err
}
} else if pruneTo > 0 {
p, err := sync.PruneStageState(stages.LogIndex, s.BlockNumber, nil, db)
if err != nil {
return err
}
err = stagedsync.PruneLogIndex(p, tx, cfg, ctx)
if err != nil {
return err
}
} else {
if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx); err != nil {
return err
}
}
return tx.Commit()
}
func stageCallTraces(kv kv.RwDB, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)
pm, _, _, _, _, sync, _, _ := newSync(ctx, kv, nil)
tx, err := kv.BeginRw(ctx)
if err != nil {
return err
}
defer tx.Rollback()
if reset {
err = resetCallTraces(tx)
if err != nil {
return err
}
return tx.Commit()
}
var batchSize datasize.ByteSize
must(batchSize.UnmarshalText([]byte(batchSizeStr)))
execStage := progress(tx, stages.Execution)
s := stage(sync, tx, nil, stages.CallTraces)
if pruneTo > 0 {
pm.History = prune.Distance(s.BlockNumber - pruneTo)
pm.Receipts = prune.Distance(s.BlockNumber - pruneTo)
pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo)
pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo)
}
log.Info("ID exec", "progress", execStage)
if block != 0 {
s.BlockNumber = block
log.Info("Overriding initial state", "block", block)
}
log.Info("ID call traces", "progress", s.BlockNumber)
cfg := stagedsync.StageCallTracesCfg(kv, pm, block, tmpdir)
if unwind > 0 {
u := sync.NewUnwindState(stages.CallTraces, s.BlockNumber-unwind, s.BlockNumber)
err = stagedsync.UnwindCallTraces(u, s, tx, cfg, ctx)
if err != nil {
return err
}
} else if pruneTo > 0 {
p, err := sync.PruneStageState(stages.CallTraces, s.BlockNumber, tx, nil)
if err != nil {
return err
}
err = stagedsync.PruneCallTraces(p, tx, cfg, ctx)
if err != nil {
return err
}
} else {
if err := stagedsync.SpawnCallTraces(s, tx, cfg, ctx); err != nil {
return err
}
}
return tx.Commit()
}
func stageHistory(db kv.RwDB, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)
pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)
tx, err := db.BeginRw(ctx)
if err != nil {
return err
}
defer tx.Rollback()
if reset {
err = resetHistory(tx)
if err != nil {
return err
}
return tx.Commit()
}
execStage := progress(tx, stages.Execution)
stageStorage := stage(sync, tx, nil, stages.StorageHistoryIndex)
stageAcc := stage(sync, tx, nil, stages.AccountHistoryIndex)
if pruneTo > 0 {
pm.History = prune.Distance(stageAcc.BlockNumber - pruneTo)
pm.Receipts = prune.Distance(stageAcc.BlockNumber - pruneTo)
pm.CallTraces = prune.Distance(stageAcc.BlockNumber - pruneTo)
pm.TxIndex = prune.Distance(stageAcc.BlockNumber - pruneTo)
}
log.Info("ID exec", "progress", execStage)
log.Info("ID acc history", "progress", stageAcc.BlockNumber)
log.Info("ID storage history", "progress", stageStorage.BlockNumber)
cfg := stagedsync.StageHistoryCfg(db, pm, tmpdir)
if unwind > 0 { //nolint:staticcheck
u := sync.NewUnwindState(stages.StorageHistoryIndex, stageStorage.BlockNumber-unwind, stageStorage.BlockNumber)
if err := stagedsync.UnwindStorageHistoryIndex(u, stageStorage, tx, cfg, ctx); err != nil {
return err
}
u = sync.NewUnwindState(stages.AccountHistoryIndex, stageAcc.BlockNumber-unwind, stageAcc.BlockNumber)
if err := stagedsync.UnwindAccountHistoryIndex(u, stageAcc, tx, cfg, ctx); err != nil {
return err
}
} else if pruneTo > 0 {
pa, err := sync.PruneStageState(stages.AccountHistoryIndex, stageAcc.BlockNumber, tx, db)
if err != nil {
return err
}
err = stagedsync.PruneAccountHistoryIndex(pa, tx, cfg, ctx)
if err != nil {
return err
}
ps, err := sync.PruneStageState(stages.StorageHistoryIndex, stageStorage.BlockNumber, tx, db)
if err != nil {
return err
}
err = stagedsync.PruneStorageHistoryIndex(ps, tx, cfg, ctx)
if err != nil {
return err
}
} else {
if err := stagedsync.SpawnAccountHistoryIndex(stageAcc, tx, cfg, ctx); err != nil {
return err
}
if err := stagedsync.SpawnStorageHistoryIndex(stageStorage, tx, cfg, ctx); err != nil {
return err
}
}
return tx.Commit()
}
func stageTxLookup(db kv.RwDB, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)
pm, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)
tx, err := db.BeginRw(ctx)
if err != nil {
return err
}
defer tx.Rollback()
if reset {
err = resetTxLookup(tx)
if err != nil {
return err
}
return tx.Commit()
}
s := stage(sync, tx, nil, stages.TxLookup)
if pruneTo > 0 {
pm.History = prune.Distance(s.BlockNumber - pruneTo)
pm.Receipts = prune.Distance(s.BlockNumber - pruneTo)
pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo)
pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo)
}
log.Info("Stage", "name", s.ID, "progress", s.BlockNumber)
cfg := stagedsync.StageTxLookupCfg(db, pm, tmpdir)
if unwind > 0 {
u := sync.NewUnwindState(stages.TxLookup, s.BlockNumber-unwind, s.BlockNumber)
err = stagedsync.UnwindTxLookup(u, s, tx, cfg, ctx)
if err != nil {
return err
}
} else if pruneTo > 0 {
p, err := sync.PruneStageState(stages.TxLookup, s.BlockNumber, tx, nil)
if err != nil {
return err
}
err = stagedsync.PruneTxLookup(p, tx, cfg, ctx)
if err != nil {
return err
}
} else {
err = stagedsync.SpawnTxLookup(s, tx, cfg, ctx)
if err != nil {
return err
}
}
return tx.Commit()
}
func printAllStages(db kv.RoDB, ctx context.Context) error {
return db.View(ctx, func(tx kv.Tx) error { return printStages(tx) })
}
func printAppliedMigrations(db kv.RwDB, ctx context.Context) error {
return db.View(ctx, func(tx kv.Tx) error {
applied, err := migrations.AppliedMigrations(tx, false /* withPayload */)
if err != nil {
return err
}
var appliedStrs = make([]string, len(applied))
i := 0
for k := range applied {
appliedStrs[i] = k
i++
}
sort.Strings(appliedStrs)
log.Info("Applied", "migrations", strings.Join(appliedStrs, " "))
return nil
})
}
func removeMigration(db kv.RwDB, ctx context.Context) error {
return db.Update(ctx, func(tx kv.RwTx) error {
return tx.Delete(kv.Migrations, []byte(migration), nil)
})
}
func byChain() (*core.Genesis, *params.ChainConfig) {
var chainConfig *params.ChainConfig
var genesis *core.Genesis
switch chain {
case "", params.MainnetChainName:
chainConfig = params.MainnetChainConfig
genesis = core.DefaultGenesisBlock()
case params.RopstenChainName:
chainConfig = params.RopstenChainConfig
genesis = core.DefaultRopstenGenesisBlock()
case params.GoerliChainName:
chainConfig = params.GoerliChainConfig
genesis = core.DefaultGoerliGenesisBlock()
case params.RinkebyChainName:
chainConfig = params.RinkebyChainConfig
genesis = core.DefaultRinkebyGenesisBlock()
case params.SokolChainName:
chainConfig = params.SokolChainConfig
genesis = core.DefaultSokolGenesisBlock()
}
return genesis, chainConfig
}
func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) (prune.Mode, consensus.Engine, *params.ChainConfig, *vm.Config, *core.TxPool, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) {
tmpdir := path.Join(datadir, etl.TmpDirName)
snapshotDir = path.Join(datadir, "snapshot")
logger := log.New()
var pm prune.Mode
var err error
if err = db.View(context.Background(), func(tx kv.Tx) error {
pm, err = prune.Get(tx)
if err != nil {
return err
}
if err = stagedsync.UpdateMetrics(tx); err != nil {
return err
}
return nil
}); err != nil {
panic(err)
}
vmConfig := &vm.Config{}
genesis, chainConfig := byChain()
var engine consensus.Engine
engine = ethash.NewFaker()
switch chain {
case params.SokolChainName:
engine = ethconfig.CreateConsensusEngine(chainConfig, logger, ¶ms.AuRaConfig{DBPath: path.Join(datadir, "aura")}, nil, false)
}
events := privateapi.NewEvents()
txPool := core.NewTxPool(ethconfig.Defaults.TxPool, chainConfig, db)
chainConfig, genesisBlock, genesisErr := core.CommitGenesisBlock(db, genesis)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
panic(genesisErr)
}
log.Info("Initialised chain configuration", "config", chainConfig)
var batchSize datasize.ByteSize
must(batchSize.UnmarshalText([]byte(batchSizeStr)))
blockDownloaderWindow := 65536
downloadServer, err := download.NewControlServer(db, "", chainConfig, genesisBlock.Hash(), engine, 1, nil, blockDownloaderWindow)
if err != nil {
panic(err)
}
txPoolP2PServer, err := txpool.NewP2PServer(context.Background(), nil, txPool)
if err != nil {
panic(err)
}
fetchTx := func(peerID string, hashes []common.Hash) error {
txPoolP2PServer.SendTxsRequest(context.TODO(), peerID, hashes)
return nil
}
txPoolP2PServer.TxFetcher = fetcher.NewTxFetcher(txPool.Has, txPool.AddRemotes, fetchTx)
cfg := ethconfig.Defaults
cfg.Prune = pm
cfg.BatchSize = batchSize
cfg.TxPool.Disable = true
if miningConfig != nil {
cfg.Miner = *miningConfig
}
sync, err := stages2.NewStagedSync(context.Background(), logger, db, p2p.Config{}, cfg,
downloadServer,
tmpdir,
txPool,
txPoolP2PServer,
nil, nil, nil,
)
if err != nil {
panic(err)
}
miner := stagedsync.NewMiningState(&cfg.Miner)
miningSync := stagedsync.New(
stagedsync.MiningStages(ctx,
stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, txPool, tmpdir),
stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir),
stagedsync.StageHashStateCfg(db, tmpdir),
stagedsync.StageTrieCfg(db, false, true, tmpdir),
stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, ctx.Done()),
),
stagedsync.MiningUnwindOrder,
stagedsync.MiningPruneOrder,
)
return pm, engine, chainConfig, vmConfig, txPool, sync, miningSync, miner
}
func progress(tx kv.Getter, stage stages.SyncStage) uint64 {
res, err := stages.GetStageProgress(tx, stage)
if err != nil {
panic(err)
}
return res
}
func stage(st *stagedsync.Sync, tx kv.Tx, db kv.RoDB, stage stages.SyncStage) *stagedsync.StageState {
res, err := st.StageState(stage, tx, db)
if err != nil {
panic(err)
}
return res
}
func overrideStorageMode(db kv.RwDB) error {
pm, err := prune.FromCli(pruneFlag, pruneH, pruneR, pruneT, pruneC, experiments)
if err != nil {
return err
}
return db.Update(context.Background(), func(tx kv.RwTx) error {
if err = prune.Override(tx, pm); err != nil {
return err
}
pm, err = prune.Get(tx)
if err != nil {
return err
}
log.Info("Storage mode in DB", "mode", pm.String())
return nil
})
}
| 1 | 22,525 | set real one plz (get it from DB). | ledgerwatch-erigon | go |
@@ -180,8 +180,9 @@ static fpga_result send_uafu_event_request(fpga_handle handle,
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct fpga_port_info port_info = {.argsz = sizeof(port_info),
.flags = 0 };
- struct fpga_port_uafu_irq_set uafu_irq = {.argsz = sizeof(uafu_irq),
- .flags = 0};
+ uint8_t uafu_irq_buf[sizeof(struct fpga_port_uafu_irq_set)+sizeof(int32_t)];
+ struct fpga_port_uafu_irq_set *uafu_irq =
+ (struct fpga_port_uafu_irq_set *) uafu_irq_buf;
if (uafu_operation != FPGA_IRQ_ASSIGN && uafu_operation != FPGA_IRQ_DEASSIGN) {
FPGA_ERR("Invalid UAFU operation requested"); | 1 | // Copyright(c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif // HAVE_CONFIG_H
#include "common_int.h"
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/eventfd.h>
#include <errno.h>
#include "safe_string/safe_string.h"
#include "opae/access.h"
#include "opae/properties.h"
#include "types_int.h"
#include "intel-fpga.h"
#define EVENT_SOCKET_NAME "/tmp/fpga_event_socket"
#define EVENT_SOCKET_NAME_LEN 23
#define MAX_PATH_LEN 256
enum request_type {
REGISTER_EVENT = 0,
UNREGISTER_EVENT = 1
};
struct event_request {
enum request_type type;
fpga_event_type event;
char device[MAX_PATH_LEN];
};
fpga_result send_event_request(int conn_socket, int fd, struct event_request *req)
{
struct msghdr mh;
struct cmsghdr *cmh;
struct iovec iov[1];
char buf[CMSG_SPACE(sizeof(int))];
ssize_t n;
int *fd_ptr;
/* set up ancillary data message header */
iov[0].iov_base = req;
iov[0].iov_len = sizeof(*req);
memset_s(buf, sizeof(buf), 0x0);
mh.msg_name = NULL;
mh.msg_namelen = 0;
mh.msg_iov = iov;
mh.msg_iovlen = sizeof(iov) / sizeof(iov[0]);
mh.msg_control = buf;
mh.msg_controllen = CMSG_LEN(sizeof(int));
mh.msg_flags = 0;
cmh = CMSG_FIRSTHDR(&mh);
cmh->cmsg_len = CMSG_LEN(sizeof(int));
cmh->cmsg_level = SOL_SOCKET;
cmh->cmsg_type = SCM_RIGHTS;
fd_ptr = (int *)CMSG_DATA(cmh);
*fd_ptr = fd;
/* send ancillary data */
n = sendmsg(conn_socket, &mh, 0);
if (n < 0) {
FPGA_ERR("sendmsg failed: %s", strerror(errno));
return FPGA_EXCEPTION;
}
return FPGA_OK;
}
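/*
 * Illustrative only (not part of this file): a sketch of how a peer such as the
 * event daemon could receive the eventfd passed by send_event_request() above,
 * using the standard recvmsg()/SCM_RIGHTS ancillary-data pattern. The variable
 * names are assumptions, not fpgad internals.
 *
 *   struct event_request req;
 *   struct iovec iov = { .iov_base = &req, .iov_len = sizeof(req) };
 *   char cbuf[CMSG_SPACE(sizeof(int))];
 *   struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *                        .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *   int evt_fd = -1;
 *   if (recvmsg(conn_socket, &mh, 0) > 0) {
 *       struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
 *       if (c && c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_RIGHTS)
 *           evt_fd = *(int *)CMSG_DATA(c);
 *   }
 */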
static fpga_result send_fme_event_request(fpga_handle handle,
fpga_event_handle event_handle, int fme_operation)
{
int fd = FILE_DESCRIPTOR(event_handle);
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct fpga_fme_info fme_info = {.argsz = sizeof(fme_info),
.flags = 0 };
struct fpga_fme_err_irq_set fme_irq = {.argsz = sizeof(fme_irq),
.flags = 0};
if (fme_operation != FPGA_IRQ_ASSIGN && fme_operation != FPGA_IRQ_DEASSIGN) {
FPGA_ERR("Invalid FME operation requested");
return FPGA_INVALID_PARAM;
}
if (ioctl(_handle->fddev, FPGA_FME_GET_INFO, &fme_info) != 0) {
FPGA_ERR("Could not get FME info: %s", strerror(errno));
return FPGA_EXCEPTION;
}
/*capability field is set to 1 if the platform supports interrupts*/
if (fme_info.capability & FPGA_FME_CAP_ERR_IRQ) {
if (fme_operation == FPGA_IRQ_ASSIGN)
fme_irq.evtfd = fd;
else
fme_irq.evtfd = -1;
if (ioctl(_handle->fddev, FPGA_FME_ERR_SET_IRQ, &fme_irq) != 0) {
FPGA_ERR("Could not set eventfd %s", strerror(errno));
return FPGA_EXCEPTION;
}
} else {
FPGA_ERR("FME interrupts not supported in hw");
return FPGA_EXCEPTION;
}
return FPGA_OK;
}
static fpga_result send_port_event_request(fpga_handle handle,
fpga_event_handle event_handle, int port_operation)
{
int fd = FILE_DESCRIPTOR(event_handle);
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct fpga_port_info port_info = {.argsz = sizeof(port_info),
.flags = 0 };
struct fpga_port_err_irq_set port_irq = {.argsz = sizeof(port_irq),
.flags = 0};
if (port_operation != FPGA_IRQ_ASSIGN && port_operation != FPGA_IRQ_DEASSIGN) {
FPGA_ERR("Invalid PORT operation requested");
return FPGA_INVALID_PARAM;
}
if (ioctl(_handle->fddev, FPGA_PORT_GET_INFO, &port_info) != 0) {
FPGA_ERR("Could not get PORT info");
return FPGA_EXCEPTION;
}
/*capability field is set to 1 if the platform supports interrupts*/
if (port_info.capability & FPGA_PORT_CAP_ERR_IRQ) {
if (port_operation == FPGA_IRQ_ASSIGN)
port_irq.evtfd = fd;
else
port_irq.evtfd = -1;
if (ioctl(_handle->fddev, FPGA_PORT_ERR_SET_IRQ, &port_irq) != 0) {
FPGA_ERR("Could not set eventfd");
return FPGA_EXCEPTION;
}
} else {
FPGA_ERR("PORT interrupts not supported in hw");
return FPGA_EXCEPTION;
}
return FPGA_OK;
}
static fpga_result send_uafu_event_request(fpga_handle handle,
fpga_event_handle event_handle, uint32_t flags, int uafu_operation)
{
int fd = FILE_DESCRIPTOR(event_handle);
struct _fpga_event_handle *_eh = (struct _fpga_event_handle *)event_handle;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct fpga_port_info port_info = {.argsz = sizeof(port_info),
.flags = 0 };
struct fpga_port_uafu_irq_set uafu_irq = {.argsz = sizeof(uafu_irq),
.flags = 0};
if (uafu_operation != FPGA_IRQ_ASSIGN && uafu_operation != FPGA_IRQ_DEASSIGN) {
FPGA_ERR("Invalid UAFU operation requested");
return FPGA_INVALID_PARAM;
}
if (ioctl(_handle->fddev, FPGA_PORT_GET_INFO, &port_info) != 0) {
FPGA_ERR("Could not get PORT info");
return FPGA_EXCEPTION;
}
/*capability field is set to 1 if the platform supports interrupts*/
if (port_info.capability & FPGA_PORT_CAP_UAFU_IRQ) {
if (flags >= port_info.num_uafu_irqs) {
FPGA_ERR("Invalid User Interrupt vector id");
return FPGA_INVALID_PARAM;
}
if (uafu_operation == FPGA_IRQ_ASSIGN) {
uafu_irq.evtfd[0] = fd;
uafu_irq.start = flags;
_eh->flags = flags;
} else {
uafu_irq.start = _eh->flags;
uafu_irq.evtfd[0] = -1;
}
uafu_irq.count = 1;
if (ioctl(_handle->fddev, FPGA_PORT_UAFU_SET_IRQ, &uafu_irq) != 0) {
FPGA_ERR("Could not set eventfd");
return FPGA_EXCEPTION;
}
} else {
FPGA_ERR("UAFU interrupts not supported in hw");
return FPGA_EXCEPTION;
}
return FPGA_OK;
}
/*
* Uses driver ioctls to determine whether the driver supports interrupts
* on this platform. objtype is an output parameter.
*/
static fpga_result check_interrupts_supported(fpga_handle handle, fpga_objtype *objtype)
{
fpga_result res = FPGA_OK;
fpga_result destroy_res = FPGA_OK;
fpga_properties prop = NULL;
struct _fpga_handle *_handle = (struct _fpga_handle *) handle;
struct fpga_fme_info fme_info = {.argsz = sizeof(fme_info),
.flags = 0 };
struct fpga_port_info port_info = {.argsz = sizeof(port_info),
.flags = 0 };
res = fpgaGetPropertiesFromHandle(handle, &prop);
if (res != FPGA_OK) {
FPGA_MSG("Could not get FPGA properties from handle");
return res;
}
res = fpgaPropertiesGetObjectType(prop, objtype);
if (res != FPGA_OK) {
FPGA_MSG("Could not determine FPGA object type");
goto destroy_prop;
}
if (*objtype == FPGA_DEVICE) {
if (ioctl(_handle->fddev, FPGA_FME_GET_INFO, &fme_info) != 0) {
FPGA_ERR("Could not get FME info: %s", strerror(errno));
res = FPGA_EXCEPTION;
goto destroy_prop;
}
if (fme_info.capability & FPGA_FME_CAP_ERR_IRQ) {
res = FPGA_OK;
} else {
FPGA_MSG("Interrupts not supported in hw");
res = FPGA_NOT_SUPPORTED;
}
} else if (*objtype == FPGA_ACCELERATOR) {
if (ioctl(_handle->fddev, FPGA_PORT_GET_INFO, &port_info) != 0) {
FPGA_ERR("Could not get PORT info: %s", strerror(errno));
res = FPGA_EXCEPTION;
goto destroy_prop;
}
if (port_info.capability & FPGA_PORT_CAP_ERR_IRQ) {
res = FPGA_OK;
} else {
FPGA_MSG("Interrupts not supported in hw");
res = FPGA_NOT_SUPPORTED;
}
}
destroy_prop:
destroy_res = fpgaDestroyProperties(&prop);
if (destroy_res != FPGA_OK) {
FPGA_MSG("Could not destroy FPGA properties");
return destroy_res;
}
return res;
}
static fpga_result driver_register_event(fpga_handle handle,
fpga_event_type event_type,
fpga_event_handle event_handle,
uint32_t flags)
{
fpga_objtype objtype;
fpga_result res = FPGA_OK;
res = check_interrupts_supported(handle, &objtype);
if (res != FPGA_OK) {
FPGA_MSG("Could not determine whether interrupts are supported");
return FPGA_NOT_SUPPORTED;
}
switch (event_type) {
case FPGA_EVENT_ERROR:
if (objtype == FPGA_DEVICE) {
return send_fme_event_request(handle, event_handle, FPGA_IRQ_ASSIGN);
} else if (objtype == FPGA_ACCELERATOR) {
return send_port_event_request(handle, event_handle, FPGA_IRQ_ASSIGN);
}
FPGA_ERR("Invalid objtype: %d", objtype);
return FPGA_EXCEPTION;
case FPGA_EVENT_INTERRUPT:
if (objtype != FPGA_ACCELERATOR) {
FPGA_MSG("User events need an accelerator object");
return FPGA_INVALID_PARAM;
}
return send_uafu_event_request(handle, event_handle, flags, FPGA_IRQ_ASSIGN);
case FPGA_EVENT_POWER_THERMAL:
FPGA_MSG("Thermal interrupts not supported");
return FPGA_NOT_SUPPORTED;
default:
FPGA_ERR("Invalid event type");
return FPGA_EXCEPTION;
}
}
static fpga_result driver_unregister_event(fpga_handle handle,
fpga_event_type event_type, fpga_event_handle event_handle)
{
fpga_objtype objtype;
fpga_result res = FPGA_OK;
res = check_interrupts_supported(handle, &objtype);
if (res != FPGA_OK) {
FPGA_MSG("Could not determine whether interrupts are supported");
return FPGA_NOT_SUPPORTED;
}
switch (event_type) {
case FPGA_EVENT_ERROR:
if (objtype == FPGA_DEVICE) {
return send_fme_event_request(handle, event_handle, FPGA_IRQ_DEASSIGN);
} else if (objtype == FPGA_ACCELERATOR) {
return send_port_event_request(handle, event_handle, FPGA_IRQ_DEASSIGN);
}
FPGA_ERR("Invalid objtype: %d", objtype);
return FPGA_EXCEPTION;
case FPGA_EVENT_INTERRUPT:
if (objtype != FPGA_ACCELERATOR) {
FPGA_MSG("User events need an Accelerator object");
return FPGA_INVALID_PARAM;
}
return send_uafu_event_request(handle, event_handle, 0, FPGA_IRQ_DEASSIGN);
case FPGA_EVENT_POWER_THERMAL:
FPGA_MSG("Thermal interrupts not supported");
return FPGA_NOT_SUPPORTED;
default:
FPGA_ERR("Invalid event type");
return FPGA_EXCEPTION;
}
}
static fpga_result daemon_register_event(fpga_handle handle,
fpga_event_type event_type,
fpga_event_handle event_handle,
uint32_t flags)
{
int fd = FILE_DESCRIPTOR(event_handle);
fpga_result result = FPGA_OK;
struct sockaddr_un addr;
struct event_request req;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct _fpga_token *_token = (struct _fpga_token *)_handle->token;
errno_t e;
UNUSED_PARAM(flags);
if (_handle->fdfpgad < 0) {
/* connect to event socket */
_handle->fdfpgad = socket(AF_UNIX, SOCK_STREAM, 0);
if (_handle->fdfpgad < 0) {
FPGA_ERR("socket: %s", strerror(errno));
return FPGA_EXCEPTION;
}
addr.sun_family = AF_UNIX;
e = strncpy_s(addr.sun_path, sizeof(addr.sun_path),
EVENT_SOCKET_NAME, EVENT_SOCKET_NAME_LEN);
if (EOK != e) {
FPGA_ERR("strncpy_s failed");
return FPGA_EXCEPTION;
}
if (connect(_handle->fdfpgad, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
FPGA_DBG("connect: %s", strerror(errno));
result = FPGA_NO_DAEMON;
goto out_close_conn;
}
}
/* create event registration request */
req.type = REGISTER_EVENT;
req.event = event_type;
e = strncpy_s(req.device, sizeof(req.device),
_token->sysfspath, sizeof(_token->sysfspath));
if (EOK != e) {
FPGA_ERR("strncpy_s failed");
result = FPGA_EXCEPTION;
goto out_close_conn;
}
req.device[sizeof(req.device)-1] = '\0';
/* send event packet */
result = send_event_request(_handle->fdfpgad, fd, &req);
if (result != FPGA_OK) {
FPGA_ERR("send_event_request failed");
goto out_close_conn;
}
return result;
out_close_conn:
close(_handle->fdfpgad);
_handle->fdfpgad = -1;
return result;
}
static fpga_result daemon_unregister_event(fpga_handle handle,
fpga_event_type event_type)
{
fpga_result result = FPGA_OK;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct _fpga_token *_token = (struct _fpga_token *)_handle->token;
struct event_request req;
ssize_t n;
errno_t e;
if (_handle->fdfpgad < 0) {
FPGA_MSG("No fpgad connection");
return FPGA_INVALID_PARAM;
}
req.type = UNREGISTER_EVENT;
req.event = event_type;
e = strncpy_s(req.device, sizeof(req.device),
_token->sysfspath, sizeof(_token->sysfspath));
if (EOK != e) {
FPGA_ERR("strncpy_s failed");
result = FPGA_EXCEPTION;
goto out_close_conn;
}
req.device[sizeof(req.device)-1] = '\0';
n = send(_handle->fdfpgad, &req, sizeof(req), 0);
if (n < 0) {
FPGA_ERR("send : %s", strerror(errno));
result = FPGA_EXCEPTION;
goto out_close_conn;
}
return result;
out_close_conn:
close(_handle->fdfpgad);
_handle->fdfpgad = -1;
return result;
}
fpga_result __FPGA_API__ fpgaCreateEventHandle(fpga_event_handle *event_handle)
{
struct _fpga_event_handle *_eh;
fpga_result result = FPGA_OK;
pthread_mutexattr_t mattr;
int err = 0;
ASSERT_NOT_NULL(event_handle);
_eh = malloc(sizeof(struct _fpga_event_handle));
if (NULL == _eh) {
FPGA_ERR("Could not allocate memory for event handle");
return FPGA_NO_MEMORY;
}
_eh->magic = FPGA_EVENT_HANDLE_MAGIC;
/* create eventfd */
_eh->fd = eventfd(0, 0);
if (_eh->fd < 0) {
FPGA_ERR("eventfd : %s", strerror(errno));
result = FPGA_EXCEPTION;
goto out_free;
}
if (pthread_mutexattr_init(&mattr)) {
FPGA_MSG("Failed to initialized event handle mutex attributes");
result = FPGA_EXCEPTION;
goto out_free;
}
if (pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE)) {
FPGA_MSG("Failed to initialize event handle mutex attributes");
result = FPGA_EXCEPTION;
goto out_attr_destroy;
}
if (pthread_mutex_init(&_eh->lock, &mattr)) {
FPGA_MSG("Failed to initialize event handle mutex");
result = FPGA_EXCEPTION;
goto out_attr_destroy;
}
pthread_mutexattr_destroy(&mattr);
*event_handle = (fpga_event_handle)_eh;
return FPGA_OK;
out_attr_destroy:
err = pthread_mutexattr_destroy(&mattr);
if (err)
FPGA_ERR("pthread_mutexatr_destroy() failed: %s", strerror(err));
out_free:
free(_eh);
return result;
}
fpga_result __FPGA_API__ fpgaDestroyEventHandle(fpga_event_handle *event_handle)
{
struct _fpga_event_handle *_eh;
fpga_result result = FPGA_OK;
int err = 0;
//sanity check
if (!event_handle) {
return FPGA_INVALID_PARAM;
}
_eh = (struct _fpga_event_handle *) *event_handle;
result = event_handle_check_and_lock(_eh);
if (result)
return result;
if (close(_eh->fd) < 0) {
FPGA_ERR("eventfd : %s", strerror(errno));
err = pthread_mutex_unlock(&_eh->lock);
if (err)
FPGA_ERR("pthread_mutex_unlock() failed: %S", strerror(err));
if (errno == EBADF)
return FPGA_INVALID_PARAM;
else
return FPGA_EXCEPTION;
}
_eh->magic = FPGA_INVALID_MAGIC;
err = pthread_mutex_unlock(&_eh->lock);
if (err)
FPGA_ERR("pthread_mutex_unlock() failed: %S", strerror(err));
err = pthread_mutex_destroy(&_eh->lock);
if (err)
FPGA_ERR("pthread_mutex_destroy() failed: %S", strerror(err));
free(*event_handle);
*event_handle = NULL;
return FPGA_OK;
}
fpga_result __FPGA_API__ fpgaGetOSObjectFromEventHandle(const fpga_event_handle eh,
int *fd)
{
struct _fpga_event_handle *_eh = (struct _fpga_event_handle *) eh;
fpga_result result = FPGA_OK;
int err = 0;
result = event_handle_check_and_lock(_eh);
if (result)
return result;
*fd = _eh->fd;
err = pthread_mutex_unlock(&_eh->lock);
if (err)
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
return FPGA_OK;
}
fpga_result __FPGA_API__ fpgaRegisterEvent(fpga_handle handle,
fpga_event_type event_type,
fpga_event_handle event_handle,
uint32_t flags)
{
fpga_result result = FPGA_OK;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct _fpga_event_handle *_eh = (struct _fpga_event_handle *) event_handle;
struct _fpga_token *_token;
int err;
result = handle_check_and_lock(_handle);
if (result)
return result;
result = event_handle_check_and_lock(_eh);
if (result)
goto out_unlock_handle;
_token = (struct _fpga_token *)_handle->token;
if (_token->magic != FPGA_TOKEN_MAGIC) {
FPGA_MSG("Invalid token found in handle");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
switch (event_type) {
case FPGA_EVENT_INTERRUPT:
if (!strstr(_token->devpath, "port")) {
FPGA_MSG("Handle does not refer to accelerator object");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
break;
case FPGA_EVENT_ERROR: /* fall through */
case FPGA_EVENT_POWER_THERMAL:
break;
}
/* TODO: reject unknown flags */
/* try driver first */
result = driver_register_event(handle, event_type, event_handle, flags);
if (result == FPGA_NOT_SUPPORTED) {
result = daemon_register_event(handle, event_type,
event_handle, flags);
}
out_unlock:
err = pthread_mutex_unlock(&_eh->lock);
if (err)
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
out_unlock_handle:
err = pthread_mutex_unlock(&_handle->lock);
if (err)
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
return result;
}
fpga_result __FPGA_API__ fpgaUnregisterEvent(fpga_handle handle,
fpga_event_type event_type,
fpga_event_handle event_handle)
{
fpga_result result = FPGA_OK;
int err;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
struct _fpga_event_handle *_eh = (struct _fpga_event_handle *) event_handle;
struct _fpga_token *_token;
result = handle_check_and_lock(_handle);
if (result)
return result;
result = event_handle_check_and_lock(_eh);
if (result)
goto out_unlock_handle;
_token = (struct _fpga_token *)_handle->token;
if (_token->magic != FPGA_TOKEN_MAGIC) {
FPGA_MSG("Invalid token found in handle");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
switch (event_type) {
case FPGA_EVENT_INTERRUPT:
if (!strstr(_token->devpath, "port")) {
FPGA_MSG("Handle does not refer to accelerator object");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
break;
case FPGA_EVENT_ERROR: /* fall through */
case FPGA_EVENT_POWER_THERMAL:
break;
}
/* try driver first */
result = driver_unregister_event(handle, event_type, event_handle);
if (result == FPGA_NOT_SUPPORTED) {
result = daemon_unregister_event(handle, event_type);
}
out_unlock:
err = pthread_mutex_unlock(&_eh->lock);
if (err)
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
out_unlock_handle:
err = pthread_mutex_unlock(&_handle->lock);
if (err)
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
return result;
}
| 1 | 15,891 | Should this be initialized to zeroes? | OPAE-opae-sdk | c |
@@ -419,7 +419,7 @@ public abstract class FacetProcessor<FacetRequestT extends FacetRequest> {
}
count = result.size(); // don't really need this if we are skipping, but it's free.
} else {
- if (q == null) {
+ if (q == null || fcontext.base.size() == 0) {
count = fcontext.base.size();
} else {
count = fcontext.searcher.numDocs(q, fcontext.base); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.facet;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.IntFunction;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.BitDocSet;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SyntaxError;
import org.apache.solr.search.facet.SlotAcc.SlotContext;
/** Base abstraction for a class that computes facets. This is fairly internal to the module. */
public abstract class FacetProcessor<FacetRequestT extends FacetRequest> {
SimpleOrderedMap<Object> response;
FacetContext fcontext;
FacetRequestT freq;
DocSet filter; // additional filters specified by "filter" // TODO: do these need to be on the context to support recomputing during multi-select?
LinkedHashMap<String,SlotAcc> accMap;
SlotAcc[] accs;
SlotAcc.CountSlotAcc countAcc;
FacetProcessor(FacetContext fcontext, FacetRequestT freq) {
this.fcontext = fcontext;
this.freq = freq;
fcontext.processor = this;
}
public org.apache.solr.common.MapWriter getResponse() {
return response;
}
public void process() throws IOException {
handleDomainChanges();
}
private void evalFilters() throws IOException {
if (freq.domain.filters == null || freq.domain.filters.isEmpty()) return;
this.filter = fcontext.searcher.getDocSet(evalJSONFilterQueryStruct(fcontext, freq.domain.filters));
}
private static List<Query> evalJSONFilterQueryStruct(FacetContext fcontext, List<Object> filters) throws IOException {
List<Query> qlist = new ArrayList<>(filters.size());
// TODO: prevent parsing filters each time!
for (Object rawFilter : filters) {
if (rawFilter instanceof String) {
qlist.add(parserFilter((String) rawFilter, fcontext.req));
} else if (rawFilter instanceof Map) {
@SuppressWarnings({"unchecked"})
Map<String,Object> m = (Map<String, Object>) rawFilter;
String type;
Object args;
if (m.size() == 1) {
Map.Entry<String, Object> entry = m.entrySet().iterator().next();
type = entry.getKey();
args = entry.getValue();
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't convert map to query:" + rawFilter);
}
if (!"param".equals(type)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown type. Can't convert map to query:" + rawFilter);
}
String tag;
if (!(args instanceof String)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't retrieve non-string param:" + args);
}
tag = (String)args;
String[] qstrings = fcontext.req.getParams().getParams(tag);
// idea is to support multivalued parameter ie, 0 or more values
// so, when value not specified, it is ignored rather than throwing exception
if (qstrings != null) {
for (String qstring : qstrings) {
qlist.add(parserFilter(qstring, fcontext.req));
}
}
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Bad query (expected a string):" + rawFilter);
}
}
return qlist;
}
private static Query parserFilter(String rawFilter, SolrQueryRequest req) {
QParser parser = null;
try {
parser = QParser.getParser(rawFilter, req);
parser.setIsFilter(true);
Query symbolicFilter = parser.getQuery();
if (symbolicFilter == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"QParser yields null, perhaps unresolved parameter reference in: "+rawFilter);
}
return symbolicFilter;
} catch (SyntaxError syntaxError) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
}
}
private void handleDomainChanges() throws IOException {
if (freq.domain == null) return;
if (null != freq.domain.explicitQueries) {
try {
final List<Query> domainQs = evalJSONFilterQueryStruct(fcontext, freq.domain.explicitQueries);
if (domainQs.isEmpty()) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"'query' domain must not evaluate to an empty list of queries");
}
fcontext.base = fcontext.searcher.getDocSet(domainQs);
} catch (SolrException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Unable to parse domain 'query': " + freq.domain.explicitQueries +
" -- reason: " + e.getMessage(),
e);
}
} else {
// mutualy exclusive to freq.domain.explicitQueries
handleFilterExclusions();
}
// Check filters... if we do have filters they apply after domain changes.
// We still calculate them first because we can use it in a parent->child domain change.
evalFilters();
handleJoinField();
handleGraphField();
boolean appliedFilters = handleBlockJoin();
if (this.filter != null && !appliedFilters) {
fcontext.base = fcontext.base.intersection( filter );
}
}
private void handleFilterExclusions() throws IOException {
List<String> excludeTags = freq.domain.excludeTags;
if (excludeTags == null || excludeTags.size() == 0) {
return;
}
@SuppressWarnings({"rawtypes"})
Map tagMap = (Map) fcontext.req.getContext().get("tags");
if (tagMap == null) {
// no filters were tagged
return;
}
IdentityHashMap<Query,Boolean> excludeSet = new IdentityHashMap<>();
for (String excludeTag : excludeTags) {
Object olst = tagMap.get(excludeTag);
// tagMap has entries of List<String,List<QParser>>, but subject to change in the future
if (!(olst instanceof Collection)) continue;
for (Object o : (Collection<?>)olst) {
if (!(o instanceof QParser)) continue;
QParser qp = (QParser)o;
try {
excludeSet.put(qp.getQuery(), Boolean.TRUE);
} catch (SyntaxError syntaxError) {
// This should not happen since we should only be retrieving a previously parsed query
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
}
}
}
if (excludeSet.size() == 0) return;
List<Query> qlist = new ArrayList<>();
// TODO: somehow remove responsebuilder dependency
ResponseBuilder rb = SolrRequestInfo.getRequestInfo().getResponseBuilder();
// add the base query
if (!excludeSet.containsKey(rb.getQuery())) {
qlist.add(rb.getQuery());
}
// add the filters
if (rb.getFilters() != null) {
for (Query q : rb.getFilters()) {
if (!excludeSet.containsKey(q)) {
qlist.add(q);
}
}
}
// now walk back up the context tree
// TODO: we lose parent exclusions...
for (FacetContext curr = fcontext; curr != null; curr = curr.parent) {
if (curr.filter != null) {
qlist.add( curr.filter );
}
}
// recompute the base domain
fcontext.base = fcontext.searcher.getDocSet(qlist);
}
/** modifies the context base if there is a join field domain change */
private void handleJoinField() throws IOException {
if (null == freq.domain.joinField) return;
final Query domainQuery = freq.domain.joinField.createDomainQuery(fcontext);
fcontext.base = fcontext.searcher.getDocSet(domainQuery);
}
/** modifies the context base if there is a graph field domain change */
private void handleGraphField() throws IOException {
if (null == freq.domain.graphField) return;
final Query domainQuery = freq.domain.graphField.createDomainQuery(fcontext);
fcontext.base = fcontext.searcher.getDocSet(domainQuery);
}
// returns "true" if filters were applied to fcontext.base already
private boolean handleBlockJoin() throws IOException {
boolean appliedFilters = false;
if (!(freq.domain.toChildren || freq.domain.toParent)) return appliedFilters;
// TODO: avoid query parsing per-bucket somehow...
String parentStr = freq.domain.parents;
Query parentQuery;
try {
QParser parser = QParser.getParser(parentStr, fcontext.req);
parser.setIsFilter(true);
parentQuery = parser.getQuery();
} catch (SyntaxError err) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing block join parent specification: " + parentStr);
}
BitDocSet parents = fcontext.searcher.getDocSetBits(parentQuery);
DocSet input = fcontext.base;
DocSet result;
if (freq.domain.toChildren) {
// If there are filters on this facet, then use them as acceptDocs when executing toChildren.
// We need to remember to not redundantly re-apply these filters after.
DocSet acceptDocs = this.filter;
if (acceptDocs == null) {
acceptDocs = fcontext.searcher.getLiveDocSet();
} else {
appliedFilters = true;
}
result = BlockJoin.toChildren(input, parents, acceptDocs, fcontext.qcontext);
} else {
result = BlockJoin.toParents(input, parents, fcontext.qcontext);
}
fcontext.base = result;
return appliedFilters;
}
protected void processStats(SimpleOrderedMap<Object> bucket, Query bucketQ, DocSet docs, long docCount) throws IOException {
if (docCount == 0 && !freq.processEmpty || freq.getFacetStats().size() == 0) {
bucket.add("count", docCount);
return;
}
createAccs(docCount, 1);
long collected = collect(docs, 0, slotNum -> { return new SlotContext(bucketQ); });
countAcc.incrementCount(0, collected);
assert collected == docCount;
addStats(bucket, 0);
}
protected void createAccs(long docCount, int slotCount) throws IOException {
accMap = new LinkedHashMap<>();
// allow a custom count acc to be used
if (countAcc == null) {
countAcc = new SlotAcc.CountSlotArrAcc(fcontext, slotCount);
}
for (Map.Entry<String,AggValueSource> entry : freq.getFacetStats().entrySet()) {
SlotAcc acc = entry.getValue().createSlotAcc(fcontext, docCount, slotCount);
acc.key = entry.getKey();
accMap.put(acc.key, acc);
}
accs = new SlotAcc[accMap.size()];
int i=0;
for (SlotAcc acc : accMap.values()) {
accs[i++] = acc;
}
}
// note: only called by enum/stream prior to collect
void resetStats() throws IOException {
countAcc.reset();
for (SlotAcc acc : accs) {
acc.reset();
}
}
long collect(DocSet docs, int slot, IntFunction<SlotContext> slotContext) throws IOException {
long count = 0;
SolrIndexSearcher searcher = fcontext.searcher;
if (0 == docs.size()) {
// we may be in a "processEmpty" type situation where the client still cares about this bucket
// either way, we should let our accumulators know about the empty set, so they can collect &
// compute the slot (ie: let them decide if they care even when it's size==0)
if (accs != null) {
for (SlotAcc acc : accs) {
acc.collect(docs, slot, slotContext); // NOT per-seg collectors
}
}
return count;
}
final List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
final Iterator<LeafReaderContext> ctxIt = leaves.iterator();
LeafReaderContext ctx = null;
int segBase = 0;
int segMax;
int adjustedMax = 0;
for (DocIterator docsIt = docs.iterator(); docsIt.hasNext(); ) {
final int doc = docsIt.nextDoc();
if (doc >= adjustedMax) {
do {
ctx = ctxIt.next();
if (ctx == null) {
// should be impossible
throw new RuntimeException("INTERNAL FACET ERROR");
}
segBase = ctx.docBase;
segMax = ctx.reader().maxDoc();
adjustedMax = segBase + segMax;
} while (doc >= adjustedMax);
assert doc >= ctx.docBase;
setNextReader(ctx);
}
count++;
collect(doc - segBase, slot, slotContext); // per-seg collectors
}
return count;
}
void collect(int segDoc, int slot, IntFunction<SlotContext> slotContext) throws IOException {
if (accs != null) {
for (SlotAcc acc : accs) {
acc.collect(segDoc, slot, slotContext);
}
}
}
void setNextReader(LeafReaderContext ctx) throws IOException {
// countAcc.setNextReader is a no-op
for (SlotAcc acc : accs) {
acc.setNextReader(ctx);
}
}
void addStats(SimpleOrderedMap<Object> target, int slotNum) throws IOException {
long count = countAcc.getCount(slotNum);
target.add("count", count);
if (count > 0 || freq.processEmpty) {
for (SlotAcc acc : accs) {
acc.setValues(target, slotNum);
}
}
}
void fillBucket(SimpleOrderedMap<Object> bucket, Query q, DocSet result, boolean skip, Map<String,Object> facetInfo) throws IOException {
boolean needDocSet = (skip==false && freq.getFacetStats().size() > 0) || freq.getSubFacets().size() > 0;
long count;
if (result != null) {
count = result.size();
} else if (needDocSet) {
if (q == null) {
result = fcontext.base;
// result.incref(); // OFF-HEAP
} else {
result = fcontext.searcher.getDocSet(q, fcontext.base);
}
count = result.size(); // don't really need this if we are skipping, but it's free.
} else {
if (q == null) {
count = fcontext.base.size();
} else {
count = fcontext.searcher.numDocs(q, fcontext.base);
}
}
try {
if (!skip) {
processStats(bucket, q, result, count);
}
processSubs(bucket, q, result, skip, facetInfo);
} finally {
if (result != null) {
// result.decref(); // OFF-HEAP
result = null;
}
}
}
@SuppressWarnings({"unchecked"})
void processSubs(SimpleOrderedMap<Object> response, Query filter, DocSet domain, boolean skip, Map<String,Object> facetInfo) throws IOException {
boolean emptyDomain = domain == null || domain.size() == 0;
for (Map.Entry<String,FacetRequest> sub : freq.getSubFacets().entrySet()) {
FacetRequest subRequest = sub.getValue();
// This includes a static check if a sub-facet can possibly produce something from
// an empty domain. Should this be changed to a dynamic check as well? That would
// probably require actually executing the facet anyway, and dropping it at the
// end if it was unproductive.
if (emptyDomain && !freq.processEmpty && !subRequest.canProduceFromEmpty()) {
continue;
}
Map<String,Object>facetInfoSub = null;
if (facetInfo != null) {
facetInfoSub = (Map<String,Object>)facetInfo.get(sub.getKey());
}
// If we're skipping this node, then we only need to process sub-facets that have facet info specified.
if (skip && facetInfoSub == null) continue;
// make a new context for each sub-facet since they can change the domain
FacetContext subContext = fcontext.sub(filter, domain);
subContext.facetInfo = facetInfoSub;
if (!skip) subContext.flags &= ~FacetContext.SKIP_FACET; // turn off the skip flag if we're not skipping this bucket
if (fcontext.getDebugInfo() != null) { // if fcontext.debugInfo != null, it means rb.debug() == true
FacetDebugInfo fdebug = new FacetDebugInfo();
subContext.setDebugInfo(fdebug);
fcontext.getDebugInfo().addChild(fdebug);
}
Object result = subRequest.process(subContext);
response.add( sub.getKey(), result);
}
}
@SuppressWarnings("unused")
static DocSet getFieldMissing(SolrIndexSearcher searcher, DocSet docs, String fieldName) throws IOException {
SchemaField sf = searcher.getSchema().getField(fieldName);
DocSet hasVal = searcher.getDocSet(sf.getType().getRangeQuery(null, sf, null, null, false, false));
DocSet answer = docs.andNot(hasVal);
// hasVal.decref(); // OFF-HEAP
return answer;
}
static Query getFieldMissingQuery(SolrIndexSearcher searcher, String fieldName) throws IOException {
SchemaField sf = searcher.getSchema().getField(fieldName);
Query hasVal = sf.getType().getRangeQuery(null, sf, null, null, false, false);
BooleanQuery.Builder noVal = new BooleanQuery.Builder();
noVal.add(hasVal, BooleanClause.Occur.MUST_NOT);
return noVal.build();
}
}
| 1 | 38,754 | The query is already built at this point, so I don't think this particular change actually helps wrt SOLR-10732? (and the `base.size()==0` case is already trivially optimized in `SolrIndexSearcher.numDocs(Query, DocSet)`) | apache-lucene-solr | java |
@@ -1,16 +1,14 @@
-
package net.runelite.client.plugins.wildernesslocations;
+
import java.util.Arrays;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
import java.util.Objects;
-import java.util.Set;
-import java.util.function.Consumer;
import javax.inject.Inject;
+
+import lombok.Getter;
import net.runelite.api.Client;
-import net.runelite.api.Player;
import net.runelite.api.Varbits;
import net.runelite.api.coords.WorldArea;
import net.runelite.api.coords.WorldPoint; | 1 |
package net.runelite.client.plugins.wildernesslocations;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Consumer;
import javax.inject.Inject;
import net.runelite.api.Client;
import net.runelite.api.Player;
import net.runelite.api.Varbits;
import net.runelite.api.coords.WorldArea;
import net.runelite.api.coords.WorldPoint;
import net.runelite.api.events.GameTick;
import net.runelite.client.eventbus.Subscribe;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDescriptor;
import net.runelite.client.plugins.PluginType;
import net.runelite.client.plugins.wildernesslocations.WildernessLocationsOverlay;
import net.runelite.client.ui.overlay.Overlay;
import net.runelite.client.ui.overlay.OverlayManager;
import net.runelite.client.util.WildernessLocation;
@PluginDescriptor(name="PvP Wild Locations",
description="Indicates the players current location in the wild",
tags={"Wildy,", "Wilderness Location", "location", "loc", "pvp", "pklite"},
type = PluginType.PVP
)
public class WildernessLocationsPlugin extends Plugin {
@Inject
private Client client;
@Inject
OverlayManager overlayManager;
@Inject
private WildernessLocationsOverlay overlay;
private final HashMap<WorldArea, String> wildLocs;
private boolean renderLocation;
private String locationString;
private WorldPoint worldPoint;
private static int UPDATE_INTERVAL = 3;
public WildernessLocationsPlugin() {
overlay = new WildernessLocationsOverlay(client, this);
wildLocs = WildernessLocationsPlugin.getLocationMap();
locationString = "";
worldPoint = null;
}
@Override
protected void startUp() throws Exception {
overlayManager.add(overlay);
}
@Override
protected void shutDown() throws Exception {
overlayManager.add(overlay);
}
@Subscribe
public void onGameTick(GameTick event) {
if (UPDATE_INTERVAL > 0) {
--UPDATE_INTERVAL;
return;
}
boolean bl = renderLocation = client.getVar(Varbits.IN_WILDERNESS) == 1;
if (renderLocation) {
if (client.getLocalPlayer().getWorldLocation() != worldPoint) {
locationString = location();
worldPoint = client.getLocalPlayer().getWorldLocation();
}
} else {
worldPoint = null;
locationString = "";
}
UPDATE_INTERVAL = 3;
}
private String location() {
int dist = 10000;
String s = "";
WorldArea closestArea = null;
for (Map.Entry<WorldArea, String> entry : wildLocs.entrySet()) {
WorldArea worldArea = entry.getKey();
if (worldArea.toWorldPointList().contains(client.getLocalPlayer().getWorldLocation())) {
s = entry.getValue();
return s;
}
int distTo = worldArea.distanceTo(client.getLocalPlayer().getWorldLocation());
if (distTo >= dist) continue;
dist = distTo;
closestArea = worldArea;
}
if (client.getLocalPlayer().getWorldLocation().getY() > ((WorldArea)Objects.requireNonNull(closestArea)).toWorldPoint().getY() + closestArea.getHeight()) {
s = s + "N";
}
if (client.getLocalPlayer().getWorldLocation().getY() < closestArea.toWorldPoint().getY()) {
s = s + "S";
}
if (client.getLocalPlayer().getWorldLocation().getX() < closestArea.toWorldPoint().getX()) {
s = s + "W";
}
if (client.getLocalPlayer().getWorldLocation().getX() > closestArea.toWorldPoint().getX() + closestArea.getWidth()) {
s = s + "E";
}
s = s + " of ";
if ((s = s + wildLocs.get(closestArea)).startsWith(" of ")) {
s = s.substring(3);
}
return s;
}
private static HashMap<WorldArea, String> getLocationMap() {
HashMap<WorldArea, String> hashMap = new HashMap<WorldArea, String>();
Arrays.stream(WildernessLocation.values()).forEach(wildernessLocation -> hashMap.put(wildernessLocation.getWorldArea(), wildernessLocation.getName()));
return hashMap;
}
public boolean isRenderLocation() {
return renderLocation;
}
public String getLocationString() {
return locationString;
}
}
| 1 | 14,758 | re-add the type in the annotation here | open-osrs-runelite | java |
@@ -226,9 +226,11 @@ func GetLatestVersion() (string, error) {
//Download the tar from repo
versionURL := "curl -k " + latestReleaseVersionURL
cmd := exec.Command("sh", "-c", versionURL)
+ var stderr bytes.Buffer
+ cmd.Stderr = &stderr
latestReleaseData, err := cmd.Output()
if err != nil {
- return "", err
+ return "", fmt.Errorf("%v:%v", err, stderr.String())
}
latestRelease := &latestReleaseVersion{} | 1 | /*
Copyright 2019 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"sync"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
types "github.com/kubeedge/kubeedge/keadm/cmd/keadm/app/cmd/common"
)
//Constants used by installers
const (
UbuntuOSType = "ubuntu"
CentOSType = "centos"
KubeEdgeDownloadURL = "https://github.com/kubeedge/kubeedge/releases/download"
KubeEdgePath = "/etc/kubeedge/"
KubeEdgeUsrBinPath = "/usr/local/bin"
KubeEdgeConfPath = KubeEdgePath + "kubeedge/edge/conf"
KubeEdgeBinaryName = "edgecore"
KubeEdgeDefaultCertPath = KubeEdgePath + "certs/"
KubeEdgeConfigEdgeYaml = KubeEdgeConfPath + "/edge.yaml"
KubeEdgeConfigNodeJSON = KubeEdgeConfPath + "/node.json"
KubeEdgeConfigModulesYaml = KubeEdgeConfPath + "/modules.yaml"
KubeEdgeCloudCertGenPath = KubeEdgePath + "certgen.sh"
KubeEdgeEdgeCertsTarFileName = "certs.tgz"
KubeEdgeEdgeCertsTarFilePath = KubeEdgePath + "certs.tgz"
KubeEdgeCloudConfPath = KubeEdgePath + "kubeedge/cloud/conf"
KubeEdgeCloudCoreYaml = KubeEdgeCloudConfPath + "/controller.yaml"
KubeEdgeCloudCoreModulesYaml = KubeEdgeCloudConfPath + "/modules.yaml"
KubeCloudBinaryName = "cloudcore"
KubeEdgeNewConfigDir = KubeEdgePath + "config/"
KubeEdgeCloudCoreNewYaml = KubeEdgeNewConfigDir + "cloudcore.yaml"
KubeEdgeEdgeCoreNewYaml = KubeEdgeNewConfigDir + "edgecore.yaml"
KubeEdgeLogPath = "/var/log/kubeedge/"
KubeEdgeCrdPath = KubeEdgePath + "crds"
KubeEdgeCRDDownloadURL = "https://raw.githubusercontent.com/kubeedge/kubeedge/master/build/crds"
InterfaceName = "eth0"
latestReleaseVersionURL = "https://api.github.com/repos/kubeedge/kubeedge/releases/latest"
RetryTimes = 5
)
type latestReleaseVersion struct {
TagName string `json:"tag_name"`
}
//AddToolVals gets the value and default values of each flags and collects them in temporary cache
func AddToolVals(f *pflag.Flag, flagData map[string]types.FlagData) {
flagData[f.Name] = types.FlagData{Val: f.Value.String(), DefVal: f.DefValue}
}
//CheckIfAvailable checks is val of a flag is empty then return the default value
func CheckIfAvailable(val, defval string) string {
if val == "" {
return defval
}
return val
}
//Common struct contains OS and Tool version properties and also embeds OS interface
type Common struct {
types.OSTypeInstaller
OSVersion string
ToolVersion string
KubeConfig string
Master string
}
//SetOSInterface defines a method to set the implemtation of the OS interface
func (co *Common) SetOSInterface(intf types.OSTypeInstaller) {
co.OSTypeInstaller = intf
}
//Command defines commands to be executed and captures std out and std error
type Command struct {
Cmd *exec.Cmd
StdOut []byte
StdErr []byte
}
//ExecuteCommand executes the command and captures the output in stdOut
func (cm *Command) ExecuteCommand() {
var err error
cm.StdOut, err = cm.Cmd.Output()
if err != nil {
fmt.Println("Output failed: ", err)
cm.StdErr = []byte(err.Error())
}
}
//GetStdOutput gets StdOut field
func (cm Command) GetStdOutput() string {
if len(cm.StdOut) != 0 {
return strings.TrimRight(string(cm.StdOut), "\n")
}
return ""
}
//GetStdErr gets StdErr field
func (cm Command) GetStdErr() string {
if len(cm.StdErr) != 0 {
return strings.TrimRight(string(cm.StdErr), "\n")
}
return ""
}
//ExecuteCmdShowOutput captures both StdOut and StdErr after exec.cmd().
//It helps in the commands where it takes some time for execution.
func (cm Command) ExecuteCmdShowOutput() error {
var stdoutBuf, stderrBuf bytes.Buffer
stdoutIn, _ := cm.Cmd.StdoutPipe()
stderrIn, _ := cm.Cmd.StderrPipe()
var errStdout, errStderr error
stdout := io.MultiWriter(os.Stdout, &stdoutBuf)
stderr := io.MultiWriter(os.Stderr, &stderrBuf)
err := cm.Cmd.Start()
if err != nil {
return fmt.Errorf("failed to start '%s' because of error : %s", strings.Join(cm.Cmd.Args, " "), err.Error())
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
_, errStdout = io.Copy(stdout, stdoutIn)
wg.Done()
}()
_, errStderr = io.Copy(stderr, stderrIn)
wg.Wait()
err = cm.Cmd.Wait()
if err != nil {
return fmt.Errorf("failed to run '%s' because of error : %s", strings.Join(cm.Cmd.Args, " "), err.Error())
}
if errStdout != nil || errStderr != nil {
return fmt.Errorf("failed to capture stdout or stderr")
}
cm.StdOut, cm.StdErr = stdoutBuf.Bytes(), stderrBuf.Bytes()
return nil
}
//GetOSVersion gets the OS name
func GetOSVersion() string {
c := &Command{Cmd: exec.Command("sh", "-c", ". /etc/os-release && echo $ID")}
c.ExecuteCommand()
return c.GetStdOutput()
}
//GetOSInterface helps in returning OS specific object which implements OSTypeInstaller interface.
func GetOSInterface() types.OSTypeInstaller {
switch GetOSVersion() {
case UbuntuOSType:
return &UbuntuOS{}
case CentOSType:
return &CentOS{}
default:
panic("This OS version is currently un-supported by keadm")
}
}
// IsCloudCore identifies if the node is having cloudcore already running.
// If so, then return true, else it can used as edge node and initialise it.
func IsCloudCore() (types.ModuleRunning, error) {
osType := GetOSInterface()
cloudCoreRunning, err := osType.IsKubeEdgeProcessRunning(KubeCloudBinaryName)
if err != nil {
return types.NoneRunning, err
}
if cloudCoreRunning {
return types.KubeEdgeCloudRunning, nil
}
edgeCoreRunning, err := osType.IsKubeEdgeProcessRunning(KubeEdgeBinaryName)
if err != nil {
return types.NoneRunning, err
}
if false != edgeCoreRunning {
return types.KubeEdgeEdgeRunning, nil
}
return types.NoneRunning, nil
}
// GetLatestVersion return the latest non-prerelease, non-draft version of kubeedge in releases
func GetLatestVersion() (string, error) {
//Download the tar from repo
versionURL := "curl -k " + latestReleaseVersionURL
cmd := exec.Command("sh", "-c", versionURL)
latestReleaseData, err := cmd.Output()
if err != nil {
return "", err
}
latestRelease := &latestReleaseVersion{}
err = json.Unmarshal(latestReleaseData, latestRelease)
if err != nil {
return "", err
}
return latestRelease.TagName, nil
}
// runCommandWithShell executes the given command with "sh -c".
// It returns an error if the command outputs anything on the stderr.
func runCommandWithShell(command string) (string, error) {
cmd := &Command{Cmd: exec.Command("sh", "-c", command)}
err := cmd.ExecuteCmdShowOutput()
if err != nil {
return "", err
}
errout := cmd.GetStdErr()
if errout != "" {
return "", fmt.Errorf("failed to run command(%s), err:%s", command, errout)
}
return cmd.GetStdOutput(), nil
}
// runCommandWithStdout executes the given command with "sh -c".
// It returns the stdout and an error if the command outputs anything on the stderr.
func runCommandWithStdout(command string) (string, error) {
cmd := &Command{Cmd: exec.Command("sh", "-c", command)}
cmd.ExecuteCommand()
if errout := cmd.GetStdErr(); errout != "" {
return "", fmt.Errorf("failed to run command(%s), err:%s", command, errout)
}
return cmd.GetStdOutput(), nil
}
// build Config from flags
func BuildConfig(kubeConfig, master string) (conf *rest.Config, err error) {
config, err := clientcmd.BuildConfigFromFlags(master, kubeConfig)
if err != nil {
return nil, err
}
return config, nil
}
// isK8SComponentInstalled checks if said K8S version is already installed in the host
func isK8SComponentInstalled(kubeConfig, master string) error {
config, err := BuildConfig(kubeConfig, master)
if err != nil {
return fmt.Errorf("Failed to build config, err: %v", err)
}
discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
return fmt.Errorf("Failed to init discovery client, err: %v", err)
}
discoveryClient.RESTClient().Post()
serverVersion, err := discoveryClient.ServerVersion()
if err != nil {
return fmt.Errorf("Failed to get the version of K8s master, please check whether K8s was successfully installed, err: %v", err)
}
return checkKubernetesVersion(serverVersion)
}
func checkKubernetesVersion(serverVersion *version.Info) error {
reg := regexp.MustCompile(`[[:digit:]]*`)
minorVersion := reg.FindString(serverVersion.Minor)
k8sMinorVersion, err := strconv.Atoi(minorVersion)
if err != nil {
return fmt.Errorf("Could not parse the minor version of K8s, error: %s", err)
}
if k8sMinorVersion >= types.DefaultK8SMinimumVersion {
return nil
}
return fmt.Errorf("Your minor version of K8s is lower than %d, please reinstall newer version", types.DefaultK8SMinimumVersion)
}
//installKubeEdge downloads the provided version of KubeEdge.
//Untar's in the specified location /etc/kubeedge/ and then copies
//the binary to excecutables' path (eg: /usr/local/bin)
func installKubeEdge(componentType types.ComponentType, arch string, version string) error {
err := os.MkdirAll(KubeEdgePath, os.ModePerm)
if err != nil {
return fmt.Errorf("not able to create %s folder path", KubeEdgePath)
}
//Check if the same version exists, then skip the download and just untar and continue
//TODO: It is always better to have the checksum validation of the downloaded file
//and checksum available at download URL. So that both can be compared to see if
//proper download has happened and then only proceed further.
//Currently it is missing and once checksum is in place, checksum check required
//to be added here.
dirname := fmt.Sprintf("kubeedge-v%s-linux-%s", version, arch)
filename := fmt.Sprintf("kubeedge-v%s-linux-%s.tar.gz", version, arch)
checksumFilename := fmt.Sprintf("checksum_kubeedge-v%s-linux-%s.tar.gz.txt", version, arch)
filePath := fmt.Sprintf("%s%s", KubeEdgePath, filename)
if _, err = os.Stat(filePath); err == nil {
fmt.Println("Expected or Default KubeEdge version", version, "is already downloaded")
} else if !os.IsNotExist(err) {
return err
} else {
try := 0
for ; try < downloadRetryTimes; try++ {
//Download the tar from repo
dwnldURL := fmt.Sprintf("cd %s && wget -k --no-check-certificate --progress=bar:force %s/v%s/%s",
KubeEdgePath, KubeEdgeDownloadURL, version, filename)
if _, err := runCommandWithShell(dwnldURL); err != nil {
return err
}
//Verify the tar with checksum
fmt.Printf("%s checksum: \n", filename)
cmdStr := fmt.Sprintf("cd %s && sha512sum %s | awk '{split($0,a,\"[ ]\"); print a[1]}'", KubeEdgePath, filename)
desiredChecksum, err := runCommandWithStdout(cmdStr)
if err != nil {
return err
}
fmt.Printf("%s content: \n", checksumFilename)
cmdStr = fmt.Sprintf("wget -qO- %s/v%s/%s", KubeEdgeDownloadURL, version, checksumFilename)
actualChecksum, err := runCommandWithStdout(cmdStr)
if err != nil {
return err
}
if desiredChecksum == actualChecksum {
break
} else {
fmt.Printf("Failed to verify the checksum of %s, try to download it again ... \n\n", filename)
//Cleanup the downloaded files
cmdStr = fmt.Sprintf("cd %s && rm -f %s", KubeEdgePath, filename)
_, err := runCommandWithStdout(cmdStr)
if err != nil {
return err
}
}
}
if try == downloadRetryTimes {
return fmt.Errorf("failed to download %s", filename)
}
}
// Compatible with 1.0.0
var untarFileAndMoveCloudCore, untarFileAndMoveEdgeCore string
if version >= "1.1.0" {
if componentType == types.CloudCore {
untarFileAndMoveCloudCore = fmt.Sprintf("cd %s && tar -C %s -xvzf %s && cp %s/%s/cloud/cloudcore/%s %s/",
KubeEdgePath, KubeEdgePath, filename, KubeEdgePath, dirname, KubeCloudBinaryName, KubeEdgeUsrBinPath)
}
if componentType == types.EdgeCore {
untarFileAndMoveEdgeCore = fmt.Sprintf("cd %s && tar -C %s -xvzf %s && cp %s%s/edge/%s %s/",
KubeEdgePath, KubeEdgePath, filename, KubeEdgePath, dirname, KubeEdgeBinaryName, KubeEdgeUsrBinPath)
}
} else {
untarFileAndMoveEdgeCore = fmt.Sprintf("cd %s && tar -C %s -xvzf %s && cp %skubeedge/edge/%s %s/.",
KubeEdgePath, KubeEdgePath, filename, KubeEdgePath, KubeEdgeBinaryName, KubeEdgeUsrBinPath)
untarFileAndMoveEdgeCore = fmt.Sprintf("cd %s && cp %skubeedge/cloud/%s %s/.",
KubeEdgePath, KubeEdgePath, KubeCloudBinaryName, KubeEdgeUsrBinPath)
}
if componentType == types.CloudCore {
stdout, err := runCommandWithStdout(untarFileAndMoveCloudCore)
if err != nil {
return err
}
fmt.Println(stdout)
}
if componentType == types.EdgeCore {
stdout, err := runCommandWithStdout(untarFileAndMoveEdgeCore)
if err != nil {
return err
}
fmt.Println(stdout)
}
return nil
}
//runEdgeCore sets the environment variable GOARCHAIUS_CONFIG_PATH for the configuration path
//and the starts edgecore with logs being captured
func runEdgeCore(version string) error {
// create the log dir for kubeedge
err := os.MkdirAll(KubeEdgeLogPath, os.ModePerm)
if err != nil {
return fmt.Errorf("not able to create %s folder path", KubeEdgeLogPath)
}
// add +x for edgecore
command := fmt.Sprintf("chmod +x %s/%s", KubeEdgeUsrBinPath, KubeEdgeBinaryName)
if _, err := runCommandWithStdout(command); err != nil {
return err
}
var binExec string
if version >= "1.1.0" {
binExec = fmt.Sprintf("%s > %s/%s.log 2>&1 &", KubeEdgeBinaryName, KubeEdgeLogPath, KubeEdgeBinaryName)
} else {
binExec = fmt.Sprintf("%s > %skubeedge/edge/%s.log 2>&1 &", KubeEdgeBinaryName, KubeEdgePath, KubeEdgeBinaryName)
}
cmd := &Command{Cmd: exec.Command("sh", "-c", binExec)}
cmd.Cmd.Env = os.Environ()
env := fmt.Sprintf("GOARCHAIUS_CONFIG_PATH=%skubeedge/edge", KubeEdgePath)
cmd.Cmd.Env = append(cmd.Cmd.Env, env)
err = cmd.ExecuteCmdShowOutput()
errout := cmd.GetStdErr()
if err != nil || errout != "" {
return fmt.Errorf("%s", errout)
}
fmt.Println(cmd.GetStdOutput())
if version >= "1.1.0" {
fmt.Println("KubeEdge edgecore is running, For logs visit: ", KubeEdgeLogPath+KubeEdgeBinaryName+".log")
} else {
fmt.Println("KubeEdge edgecore is running, For logs visit", KubeEdgePath, "kubeedge/edge/")
}
return nil
}
// killKubeEdgeBinary will search for KubeEdge process and forcefully kill it
func killKubeEdgeBinary(proc string) error {
binExec := fmt.Sprintf("kill -9 $(ps aux | grep '[%s]%s' | awk '{print $2}')", proc[0:1], proc[1:])
if _, err := runCommandWithStdout(binExec); err != nil {
return err
}
fmt.Println("KubeEdge", proc, "is stopped, For logs visit: ", KubeEdgeLogPath+proc+".log")
return nil
}
//isKubeEdgeProcessRunning checks if the given process is running or not
func isKubeEdgeProcessRunning(proc string) (bool, error) {
procRunning := fmt.Sprintf("ps aux | grep '[%s]%s' | awk '{print $2}'", proc[0:1], proc[1:])
stdout, err := runCommandWithStdout(procRunning)
if err != nil {
return false, err
}
if stdout != "" {
return true, nil
}
return false, nil
}
| 1 | 16,411 | Could we simpify it as `cmd.Stderr = &bytes.Buffer{}`? | kubeedge-kubeedge | go |
@@ -13,6 +13,11 @@ func ShouldRotateX509(now time.Time, cert *x509.Certificate) bool {
return shouldRotate(now, cert.NotBefore, cert.NotAfter)
}
+// X509Expired returns true if the given X509 cert has expired
+func X509Expired(now time.Time, cert *x509.Certificate) bool {
+ return !now.Before(cert.NotAfter)
+}
+
// JWTSVIDExpiresSoon determines if the given JWT SVID should be rotated
// based on presented current time, the JWT's expiration.
// Also returns true if the JWT is already expired. | 1 | package rotationutil
import (
"crypto/x509"
"time"
"github.com/spiffe/spire/pkg/agent/client"
)
// ShouldRotateX509 determines if a given SVID should be rotated, based
// on presented current time, and the certificate's expiration.
func ShouldRotateX509(now time.Time, cert *x509.Certificate) bool {
return shouldRotate(now, cert.NotBefore, cert.NotAfter)
}
// JWTSVIDExpiresSoon determines if the given JWT SVID should be rotated
// based on presented current time, the JWT's expiration.
// Also returns true if the JWT is already expired.
func JWTSVIDExpiresSoon(svid *client.JWTSVID, now time.Time) bool {
if JWTSVIDExpired(svid, now) {
return true
}
// if the SVID has less than half of its lifetime left, consider it
// as expiring soon
return shouldRotate(now, svid.IssuedAt, svid.ExpiresAt)
}
// JWTSVIDExpired returns true if the given SVID is expired.
func JWTSVIDExpired(svid *client.JWTSVID, now time.Time) bool {
return !now.Before(svid.ExpiresAt)
}
func shouldRotate(now, beginTime, expiryTime time.Time) bool {
ttl := expiryTime.Sub(now)
lifetime := expiryTime.Sub(beginTime)
return ttl <= lifetime/2
}
| 1 | 13,665 | there's enough "nots" in here that while it's correct by my review, I'd like to see a small unit test (just passing in an expired and non-expired cert) | spiffe-spire | go |
@@ -21,7 +21,7 @@ import (
"github.com/ghodss/yaml"
- "istio.io/fortio/log"
+ "fortio.org/fortio/log"
"istio.io/tools/isotope/convert/pkg/graph"
"istio.io/tools/isotope/convert/pkg/graph/size"
"istio.io/tools/isotope/convert/pkg/graph/svc" | 1 | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this currentFile except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package srv
import (
"fmt"
"io/ioutil"
"math/rand"
"github.com/ghodss/yaml"
"istio.io/fortio/log"
"istio.io/tools/isotope/convert/pkg/graph"
"istio.io/tools/isotope/convert/pkg/graph/size"
"istio.io/tools/isotope/convert/pkg/graph/svc"
"istio.io/tools/isotope/convert/pkg/graph/svctype"
)
// HandlerFromServiceGraphYAML makes a handler to emulate the service with name
// serviceName in the service graph represented by the YAML file at path.
func HandlerFromServiceGraphYAML(
path string, serviceName string) (Handler, error) {
serviceGraph, err := serviceGraphFromYAMLFile(path)
if err != nil {
return Handler{}, err
}
service, err := extractService(serviceGraph, serviceName)
if err != nil {
return Handler{}, err
}
_ = logService(service)
serviceTypes := extractServiceTypes(serviceGraph)
responsePayload, err := makeRandomByteArray(service.ResponseSize)
if err != nil {
return Handler{}, err
}
return Handler{
Service: service,
ServiceTypes: serviceTypes,
responsePayload: responsePayload,
}, nil
}
func makeRandomByteArray(n size.ByteSize) ([]byte, error) {
arr := make([]byte, n)
if _, err := rand.Read(arr); err != nil {
return nil, err
}
return arr, nil
}
func logService(service svc.Service) error {
if log.Log(log.Info) {
serviceYAML, err := yaml.Marshal(service)
if err != nil {
return err
}
log.Infof("acting as service %s:\n%s", service.Name, serviceYAML)
}
return nil
}
// serviceGraphFromYAMLFile unmarshals the ServiceGraph from the YAML at path.
func serviceGraphFromYAMLFile(
path string) (serviceGraph graph.ServiceGraph, err error) {
graphYAML, err := ioutil.ReadFile(path)
if err != nil {
return
}
log.Debugf("unmarshalling\n%s", graphYAML)
err = yaml.Unmarshal(graphYAML, &serviceGraph)
if err != nil {
return
}
return
}
// extractService finds the service in serviceGraph with the specified name.
func extractService(
serviceGraph graph.ServiceGraph, name string) (
service svc.Service, err error) {
for _, svc := range serviceGraph.Services {
if svc.Name == name {
service = svc
return
}
}
err = fmt.Errorf(
"service with name %s does not exist in %v", name, serviceGraph)
return
}
// extractServiceTypes builds a map from service name to its type
// (i.e. HTTP or gRPC).
func extractServiceTypes(
serviceGraph graph.ServiceGraph) map[string]svctype.ServiceType {
types := make(map[string]svctype.ServiceType, len(serviceGraph.Services))
for _, service := range serviceGraph.Services {
types[service.Name] = service.Type
}
return types
}
| 1 | 6,767 | File is not `goimports`-ed (from `goimports`) | istio-tools | go |
@@ -73,6 +73,11 @@ class Config(param.ParameterizedFunction):
recommended that users switch this on to update any uses of
__call__ as it will be deprecated in future.""")
+ rtol = param.Number(default=10e-6, doc="""
+ The tolerance used to enforce regular sampling for gridded data
+ where regular sampling is expected. Expressed as the maximal
+ allowable sampling difference between sample locations.""")
+
def __call__(self, **params):
self.set_param(**params)
return self | 1 | import os, sys, warnings, operator
import time
import types
import numbers
import inspect
import itertools
import string, fnmatch
import unicodedata
import datetime as dt
from collections import defaultdict
from functools import partial
from contextlib import contextmanager
from distutils.version import LooseVersion
from threading import Thread, Event
import numpy as np
import param
import json
try:
from cyordereddict import OrderedDict
except:
from collections import OrderedDict
try:
import __builtin__ as builtins # noqa (compatibility)
except:
import builtins as builtins # noqa (compatibility)
datetime_types = (np.datetime64, dt.datetime, dt.date)
timedelta_types = (np.timedelta64, dt.timedelta,)
try:
import pandas as pd
if LooseVersion(pd.__version__) > '0.20.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtypeType
else:
from pandas.types.dtypes import DatetimeTZDtypeType
datetime_types = datetime_types + (pd.Timestamp, DatetimeTZDtypeType)
timedelta_types = timedelta_types + (pd.Timedelta,)
except ImportError:
pd = None
try:
import dask.dataframe as dd
except ImportError:
dd = None
class VersionError(Exception):
"Raised when there is a library version mismatch."
def __init__(self, msg, version=None, min_version=None, **kwargs):
self.version = version
self.min_version = min_version
super(VersionError, self).__init__(msg, **kwargs)
class Config(param.ParameterizedFunction):
"""
Set of boolean configuration values to change HoloViews' global
behavior. Typically used to control warnings relating to
    deprecations or to set global parameters such as style 'themes'.
"""
style_17 = param.Boolean(default=False, doc="""
Switch to the default style options used up to (and including)
the HoloViews 1.7 release.""")
warn_options_call = param.Boolean(default=False, doc="""
Whether to warn when the deprecated __call__ options syntax is
used (the opts method should now be used instead). It is
recommended that users switch this on to update any uses of
__call__ as it will be deprecated in future.""")
def __call__(self, **params):
self.set_param(**params)
return self
config = Config()
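
# Example usage (a minimal sketch; the import path is assumed to be
# ``holoviews.core.util``). Each keyword corresponds to one of the
# parameters declared on Config above and updates the shared ``config``
# instance in place:
#
#     >>> from holoviews.core.util import config
#     >>> config(style_17=True, warn_options_call=True)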
class HashableJSON(json.JSONEncoder):
"""
Extends JSONEncoder to generate a hashable string for as many types
of object as possible including nested objects and objects that are
not normally hashable. The purpose of this class is to generate
unique strings that once hashed are suitable for use in memoization
and other cases where deep equality must be tested without storing
the entire object.
By default JSONEncoder supports booleans, numbers, strings, lists,
tuples and dictionaries. In order to support other types such as
sets, datetime objects and mutable objects such as pandas Dataframes
or numpy arrays, HashableJSON has to convert these types to
datastructures that can normally be represented as JSON.
Support for other object types may need to be introduced in
future. By default, unrecognized object types are represented by
their id.
One limitation of this approach is that dictionaries with composite
    keys (e.g. tuples) are not supported due to the JSON spec.
"""
string_hashable = (dt.datetime,)
repr_hashable = ()
def default(self, obj):
if isinstance(obj, set):
return hash(frozenset(obj))
elif isinstance(obj, np.ndarray):
return obj.tolist()
if pd and isinstance(obj, (pd.Series, pd.DataFrame)):
return obj.to_csv().encode('utf-8')
elif isinstance(obj, self.string_hashable):
return str(obj)
elif isinstance(obj, self.repr_hashable):
return repr(obj)
try:
return hash(obj)
except:
return id(obj)
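
# Example usage (a minimal sketch): HashableJSON is normally used
# indirectly via deephash below, but it can also be handed to json.dumps
# directly to serialize types the stock encoder rejects:
#
#     >>> json.dumps({'xs': np.arange(3)}, cls=HashableJSON)
#     '{"xs": [0, 1, 2]}'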
class periodic(Thread):
"""
Run a callback count times with a given period without blocking.
If count is None, will run till timeout (which may be forever if None).
"""
def __init__(self, period, count, callback, timeout=None, block=False):
if isinstance(count, int):
if count < 0: raise ValueError('Count value must be positive')
        elif count is not None:
raise ValueError('Count value must be a positive integer or None')
if block is False and count is None and timeout is None:
raise ValueError('When using a non-blocking thread, please specify '
'either a count or a timeout')
super(periodic, self).__init__()
self.period = period
self.callback = callback
self.count = count
self.counter = 0
self.block = block
self.timeout = timeout
self._completed = Event()
self._start_time = None
@property
def completed(self):
return self._completed.is_set()
def start(self):
self._start_time = time.time()
if self.block is False:
super(periodic,self).start()
else:
self.run()
def stop(self):
self.timeout = None
self._completed.set()
def __repr__(self):
return 'periodic(%s, %s, %s)' % (self.period,
self.count,
callable_name(self.callback))
def __str__(self):
return repr(self)
def run(self):
while not self.completed:
if self.block:
time.sleep(self.period)
else:
self._completed.wait(self.period)
self.counter += 1
try:
self.callback(self.counter)
            except Exception:
self.stop()
if self.timeout is not None:
                elapsed = time.time() - self._start_time
                if elapsed > self.timeout:
                    self.stop()
if self.counter == self.count:
self.stop()
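
# Example usage (a minimal, hypothetical sketch): run a callback once per
# second, five times, without blocking the calling thread. The callback
# receives the current invocation count as its only argument:
#
#     >>> def report(count):
#     ...     print('tick %d' % count)
#     >>> p = periodic(period=1, count=5, callback=report, block=False)
#     >>> p.start()
#     >>> p.stop()  # may be called early to cancel any remaining ticks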
def deephash(obj):
"""
    Given an object, return a hash using HashableJSON. This hash is not
    architecture-, Python-version- or platform-independent.
"""
try:
return hash(json.dumps(obj, cls=HashableJSON, sort_keys=True))
except:
return None
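
# Example usage (a minimal sketch): deephash returns matching hashes for
# structurally equal objects, including ones that are not normally
# hashable, which makes it usable as a memoization key:
#
#     >>> deephash({'a': [1, 2, 3]}) == deephash({'a': [1, 2, 3]})
#     True
#     >>> deephash(np.arange(3)) == deephash(np.arange(3))
#     True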
# Python3 compatibility
if sys.version_info.major == 3:
basestring = str
unicode = str
long = int
generator_types = (zip, range, types.GeneratorType)
else:
basestring = basestring
unicode = unicode
from itertools import izip
generator_types = (izip, xrange, types.GeneratorType)
def argspec(callable_obj):
"""
Returns an ArgSpec object for functions, staticmethods, instance
methods, classmethods and partials.
Note that the args list for instance and class methods are those as
seen by the user. In other words, the first argument which is
conventionally called 'self' or 'cls' is omitted in these cases.
"""
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
# Parameterized function.__call__ considered function in py3 but not py2
spec = inspect.getargspec(callable_obj.__call__)
        args = spec.args[1:]
elif inspect.isfunction(callable_obj): # functions and staticmethods
return inspect.getargspec(callable_obj)
elif isinstance(callable_obj, partial): # partials
arglen = len(callable_obj.args)
spec = inspect.getargspec(callable_obj.func)
args = [arg for arg in spec.args[arglen:] if arg not in callable_obj.keywords]
elif inspect.ismethod(callable_obj): # instance and class methods
spec = inspect.getargspec(callable_obj)
args = spec.args[1:]
else: # callable objects
return argspec(callable_obj.__call__)
return inspect.ArgSpec(args = args,
varargs = spec.varargs,
keywords = spec.keywords,
defaults = spec.defaults)
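
# Example usage (a minimal sketch): argspec normalizes the signature of
# plain functions, methods and partials; for bound methods the leading
# 'self'/'cls' argument is omitted:
#
#     >>> def f(x, y, scale=2):
#     ...     return (x + y) * scale
#     >>> argspec(f).args
#     ['x', 'y', 'scale']
#     >>> argspec(partial(f, 1)).args
#     ['y', 'scale']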
def validate_dynamic_argspec(callback, kdims, streams):
"""
Utility used by DynamicMap to ensure the supplied callback has an
appropriate signature.
If validation succeeds, returns a list of strings to be zipped with
the positional arguments i.e kdim values. The zipped values can then
be merged with the stream values to pass everything to the Callable
as keywords.
If the callbacks use *args, None is returned to indicate that kdim
values must be passed to the Callable by position. In this
situation, Callable passes *args and **kwargs directly to the
callback.
If the callback doesn't use **kwargs, the accepted keywords are
validated against the stream parameter names.
"""
argspec = callback.argspec
name = callback.name
kdims = [kdim.name for kdim in kdims]
stream_params = stream_parameters(streams)
defaults = argspec.defaults if argspec.defaults else []
all_posargs = argspec.args[:-len(defaults)] if defaults else argspec.args
# Filter out any posargs for streams
posargs = [arg for arg in all_posargs if arg not in stream_params]
kwargs = argspec.args[-len(defaults):]
if argspec.keywords is None:
unassigned_streams = set(stream_params) - set(argspec.args)
if unassigned_streams:
unassigned = ','.join(unassigned_streams)
raise KeyError('Callable {name!r} missing keywords to '
'accept stream parameters: {unassigned}'.format(name=name,
unassigned=unassigned))
if len(posargs) > len(kdims) + len(stream_params):
raise KeyError('Callable {name!r} accepts more positional arguments than '
'there are kdims and stream parameters'.format(name=name))
if kdims == []: # Can be no posargs, stream kwargs already validated
return []
if set(kdims) == set(posargs): # Posargs match exactly, can all be passed as kwargs
return kdims
elif len(posargs) == len(kdims): # Posargs match kdims length, supplying names
if argspec.args[:len(kdims)] != posargs:
raise KeyError('Unmatched positional kdim arguments only allowed at '
'the start of the signature of {name!r}'.format(name=name))
return posargs
elif argspec.varargs: # Posargs missing, passed to Callable directly
return None
elif set(posargs) - set(kdims):
raise KeyError('Callable {name!r} accepts more positional arguments {posargs} '
'than there are key dimensions {kdims}'.format(name=name,
posargs=posargs,
kdims=kdims))
elif set(kdims).issubset(set(kwargs)): # Key dims can be supplied by keyword
return kdims
elif set(kdims).issubset(set(posargs+kwargs)):
return kdims
else:
raise KeyError('Callback {name!r} signature over {names} does not accommodate '
'required kdims {kdims}'.format(name=name,
names=list(set(posargs+kwargs)),
kdims=kdims))
def callable_name(callable_obj):
"""
Attempt to return a meaningful name identifying a callable or generator
"""
try:
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
return callable_obj.__name__
elif (isinstance(callable_obj, param.Parameterized)
and 'operation' in callable_obj.params()):
return callable_obj.operation.__name__
elif isinstance(callable_obj, partial):
return str(callable_obj)
elif inspect.isfunction(callable_obj): # functions and staticmethods
return callable_obj.__name__
elif inspect.ismethod(callable_obj): # instance and class methods
meth = callable_obj
if sys.version_info < (3,0):
owner = meth.im_class if meth.im_self is None else meth.im_self
else:
owner = meth.__self__
if meth.__name__ == '__call__':
return type(owner).__name__
return '.'.join([owner.__name__, meth.__name__])
elif isinstance(callable_obj, types.GeneratorType):
return callable_obj.__name__
else:
return type(callable_obj).__name__
except:
return str(callable_obj)
def process_ellipses(obj, key, vdim_selection=False):
"""
Helper function to pad a __getitem__ key with the right number of
empty slices (i.e :) when the key contains an Ellipsis (...).
If the vdim_selection flag is true, check if the end of the key
contains strings or Dimension objects in obj. If so, extra padding
will not be applied for the value dimensions (i.e the resulting key
will be exactly one longer than the number of kdims). Note: this
flag should not be used for composite types.
"""
if isinstance(key, np.ndarray) and key.dtype.kind == 'b':
return key
wrapped_key = wrap_tuple(key)
if wrapped_key.count(Ellipsis)== 0:
return key
if wrapped_key.count(Ellipsis)!=1:
raise Exception("Only one ellipsis allowed at a time.")
dim_count = len(obj.dimensions())
index = wrapped_key.index(Ellipsis)
head = wrapped_key[:index]
tail = wrapped_key[index+1:]
padlen = dim_count - (len(head) + len(tail))
if vdim_selection:
# If the end of the key (i.e the tail) is in vdims, pad to len(kdims)+1
if wrapped_key[-1] in obj.vdims:
padlen = (len(obj.kdims) +1 ) - len(head+tail)
return head + ((slice(None),) * padlen) + tail
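# Illustrative example for process_ellipses: for a hypothetical obj with three
# dimensions in total, the Ellipsis is expanded into the missing slices, e.g.
#
#   process_ellipses(obj, (0, Ellipsis))  # -> (0, slice(None), slice(None))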
def bytes_to_unicode(value):
"""
Safely casts bytestring to unicode
"""
if isinstance(value, bytes):
return unicode(value.decode('utf-8'))
return value
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail
class Aliases(object):
"""
Helper class useful for defining a set of alias tuples on a single object.
For instance, when defining a group or label with an alias, instead
of setting tuples in the constructor, you could use
``aliases.water`` if you first define:
>>> aliases = Aliases(water='H_2O', glucose='C_6H_{12}O_6')
>>> aliases.water
('water', 'H_2O')
This may be used to conveniently define aliases for groups, labels
or dimension names.
"""
def __init__(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, (k,v))
class sanitize_identifier_fn(param.ParameterizedFunction):
"""
Sanitizes group/label values for use in AttrTree attribute
access. Depending on the version parameter, either sanitization
appropriate for Python 2 (no unicode gn identifiers allowed) or
Python 3 (some unicode allowed) is used.
Note that if you are using Python 3, you can switch to version 2
for compatibility but you cannot enable relaxed sanitization if
you are using Python 2.
Special characters are sanitized using their (lowercase) unicode
name using the unicodedata module. For instance:
>>> unicodedata.name(u'$').lower()
'dollar sign'
As these names are often very long, this parameterized function
allows filtering, substitutions and transforms to help shorten these
names appropriately.
"""
version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc="""
The sanitization version. If set to 2, more aggressive
sanitization appropriate for Python 2 is applied. Otherwise,
if set to 3, more relaxed, Python 3 sanitization is used.""")
capitalize = param.Boolean(default=True, doc="""
Whether the first letter should be converted to
uppercase. Note, this will only be applied to ASCII characters
in order to make sure paths aren't confused with method
names.""")
eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit',
'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc="""
Lowercase strings to be eliminated from the unicode names in
order to shorten the sanitized name (lowercase). Redundant
strings should be removed but too much elimination could cause
two unique strings to map to the same sanitized output.""")
substitutions = param.Dict(default={'circumflex':'power',
'asterisk':'times',
'solidus':'over'}, doc="""
Lowercase substitutions of substrings in unicode names. For
instance the ^ character has the name 'circumflex accent' even
though it is more typically used for exponentiation. Note that
substitutions occur after filtering and that there should be no
ordering dependence between substitutions.""")
transforms = param.List(default=[capitalize_unicode_name], doc="""
List of string transformation functions to apply after
filtering and substitution in order to further compress the
unicode name. For instance, the default capitalize_unicode_name
function will turn the string "capital delta" into "Delta".""")
disallowed = param.List(default=['trait_names', '_ipython_display_',
'_getAttributeNames'], doc="""
An explicit list of name that should not be allowed as
attribute names on Tree objects.
By default, prevents IPython from creating an entry called
trait_names due to an inconvenient getattr check (during
tab-completion).""")
disable_leading_underscore = param.Boolean(default=False, doc="""
Whether leading underscores should be allowed to be sanitized
with the leading prefix.""")
aliases = param.Dict(default={}, doc="""
A dictionary of aliases mapping long strings to their short,
sanitized equivalents""")
prefix = 'A_'
_lookup_table = param.Dict(default={}, doc="""
Cache of previously computed sanitizations""")
@param.parameterized.bothmethod
def add_aliases(self_or_cls, **kwargs):
"""
Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string')
"""
self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
@param.parameterized.bothmethod
def remove_aliases(self_or_cls, aliases):
"""
Remove a list of aliases.
"""
for k,v in self_or_cls.aliases.items():
if v in aliases:
self_or_cls.aliases.pop(k)
@param.parameterized.bothmethod
def allowable(self_or_cls, name, disable_leading_underscore=None):
disabled_reprs = ['javascript', 'jpeg', 'json', 'latex',
'latex', 'pdf', 'png', 'svg', 'markdown']
disabled_ = (self_or_cls.disable_leading_underscore
if disable_leading_underscore is None
else disable_leading_underscore)
if disabled_ and name.startswith('_'):
return False
isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs)
return (name not in self_or_cls.disallowed) and not isrepr
@param.parameterized.bothmethod
def prefixed(self, identifier, version):
"""
Whether or not the identifier will be prefixed.
Strings that require the prefix are generally not recommended.
"""
invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc']
if identifier.startswith('_'): return True
return((identifier[0] in string.digits) if version==2
else (unicodedata.category(identifier[0]) in invalid_starting))
@param.parameterized.bothmethod
def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
if replacement != '':
chars += bytes_to_unicode(replacement)
else:
chars += c
return chars
@param.parameterized.bothmethod
def shortened_character_name(self_or_cls, c, eliminations=[], substitutions={}, transforms=[]):
"""
Given a unicode character c, return the shortened unicode name
(as a list of tokens) by applying the eliminations,
substitutions and transforms.
"""
name = unicodedata.name(c).lower()
# Filtering
for elim in eliminations:
name = name.replace(elim, '')
# Substitution
for i,o in substitutions.items():
name = name.replace(i, o)
for transform in transforms:
name = transform(name)
return ' '.join(name.strip().split()).replace(' ','_').replace('-','_')
def __call__(self, name, escape=True, version=None):
if name in [None, '']:
return name
elif name in self.aliases:
return self.aliases[name]
elif name in self._lookup_table:
return self._lookup_table[name]
name = bytes_to_unicode(name)
version = self.version if version is None else version
if not self.allowable(name):
raise AttributeError("String %r is in the disallowed list of attribute names: %r" % self.disallowed)
if version == 2:
name = self.remove_diacritics(name)
if self.capitalize and name and name[0] in string.ascii_lowercase:
name = name[0].upper()+name[1:]
sanitized = (self.sanitize_py2(name) if version==2 else self.sanitize_py3(name))
if self.prefixed(name, version):
sanitized = self.prefix + sanitized
self._lookup_table[name] = sanitized
return sanitized
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed
def sanitize_py2(self, name):
# This fix works but masks an issue in self.sanitize (py2)
prefix = '_' if name.startswith('_') else ''
valid_chars = string.ascii_letters+string.digits+'_'
return prefix + str('_'.join(self.sanitize(name, lambda c: c in valid_chars)))
def sanitize_py3(self, name):
if not name.isidentifier():
return '_'.join(self.sanitize(name, lambda c: ('_'+c).isidentifier()))
else:
return name
def sanitize(self, name, valid_fn):
"Accumulate blocks of hex and separate blocks by underscores"
invalid = {'\a':'a','\b':'b', '\v':'v','\f':'f','\r':'r'}
for cc in filter(lambda el: el in name, invalid.keys()):
raise Exception("Please use a raw string or escape control code '\%s'"
% invalid[cc])
sanitized, chars = [], ''
for split in name.split():
for c in split:
if valid_fn(c): chars += str(c) if c=='_' else c
else:
short = self.shortened_character_name(c, self.eliminations,
self.substitutions,
self.transforms)
sanitized.extend([chars] if chars else [])
if short != '':
sanitized.append(short)
chars = ''
if chars:
sanitized.extend([chars])
chars=''
return self._process_underscores(sanitized + ([chars] if chars else []))
sanitize_identifier = sanitize_identifier_fn.instance()
group_sanitizer = sanitize_identifier_fn.instance()
label_sanitizer = sanitize_identifier_fn.instance()
dimension_sanitizer = sanitize_identifier_fn.instance(capitalize=False)
def isnumeric(val):
if isinstance(val, (basestring, bool, np.bool_)):
return False
try:
float(val)
return True
except:
return False
def find_minmax(lims, olims):
"""
Takes (a1, a2) and (b1, b2) as input and returns
(np.nanmin([a1, b1]), np.nanmax([a2, b2])). Used to calculate
min and max values of a number of items.
"""
try:
limzip = zip(list(lims), list(olims), [np.nanmin, np.nanmax])
limits = tuple([float(fn([l, ol])) for l, ol, fn in limzip])
except:
limits = (np.NaN, np.NaN)
return limits
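# Example: find_minmax((1, 3), (0, 2)) returns (0.0, 3.0), i.e. the combined
# (min of the lower bounds, max of the upper bounds) of the two ranges.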
def find_range(values, soft_range=[]):
"""
Safely finds either the numerical min and max of
a set of values, falling back to the first and
the last value in the sorted list of values.
"""
try:
values = np.array(values)
values = np.squeeze(values) if len(values.shape) > 1 else values
if len(soft_range):
values = np.concatenate([values, soft_range])
if values.dtype.kind == 'M':
return values.min(), values.max()
return np.nanmin(values), np.nanmax(values)
except:
try:
values = sorted(values)
return (values[0], values[-1])
except:
return (None, None)
def max_range(ranges):
"""
Computes the maximal lower and upper bounds from a list of bounds.
"""
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
values = [r for r in ranges for v in r if v is not None]
if pd and all(isinstance(v, pd.Timestamp) for r in values for v in r):
values = [(v1.to_datetime64(), v2.to_datetime64()) for v1, v2 in values]
arr = np.array(values)
if arr.dtype.kind in 'OSU':
arr = np.sort([v for v in arr.flat if not is_nan(v)])
return arr[0], arr[-1]
if arr.dtype.kind in 'M':
return arr[:, 0].min(), arr[:, 1].max()
return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1]))
except:
return (np.NaN, np.NaN)
def dimension_range(lower, upper, dimension):
"""
Computes the range along a dimension by combining the data range
with the Dimension soft_range and range.
"""
lower, upper = max_range([(lower, upper), dimension.soft_range])
dmin, dmax = dimension.range
lower = lower if dmin is None or not np.isfinite(dmin) else dmin
upper = upper if dmax is None or not np.isfinite(dmax) else dmax
return lower, upper
def max_extents(extents, zrange=False):
"""
Computes the maximal extent in 2D and 3D space from
list of 4-tuples or 6-tuples. If zrange is enabled
all extents are converted to 6-tuples to compute
x-, y- and z-limits.
"""
if zrange:
num = 6
inds = [(0, 3), (1, 4), (2, 5)]
extents = [e if len(e) == 6 else (e[0], e[1], None,
e[2], e[3], None)
for e in extents]
else:
num = 4
inds = [(0, 2), (1, 3)]
arr = list(zip(*extents)) if extents else []
extents = [np.NaN] * num
if len(arr) == 0:
return extents
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for lidx, uidx in inds:
lower = [v for v in arr[lidx] if v is not None and not is_nan(v)]
upper = [v for v in arr[uidx] if v is not None and not is_nan(v)]
if lower and isinstance(lower[0], datetime_types):
extents[lidx] = np.min(lower)
elif any(isinstance(l, basestring) for l in lower):
extents[lidx] = np.sort(lower)[0]
elif lower:
extents[lidx] = np.nanmin(lower)
if upper and isinstance(upper[0], datetime_types):
extents[uidx] = np.max(upper)
elif any(isinstance(u, basestring) for u in upper):
extents[uidx] = np.sort(upper)[-1]
elif upper:
extents[uidx] = np.nanmax(upper)
return tuple(extents)
def int_to_alpha(n, upper=True):
"Generates alphanumeric labels of form A-Z, AA-ZZ etc."
casenum = 65 if upper else 97
label = ''
count= 0
if n == 0: return str(chr(n + casenum))
while n >= 0:
mod, div = n % 26, n
for _ in range(count):
div //= 26
div %= 26
if count == 0:
val = mod
else:
val = div
label += str(chr(val + casenum))
count += 1
n -= 26**count
return label[::-1]
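# Examples: int_to_alpha(0) == 'A', int_to_alpha(25) == 'Z' and
# int_to_alpha(26) == 'AA'; passing upper=False yields lowercase labels.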
def int_to_roman(input):
if type(input) != type(1):
raise TypeError("expected integer, got %s" % type(input))
if not 0 < input < 4000:
raise ValueError("Argument must be between 1 and 3999")
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
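# Examples: int_to_roman(4) == 'IV' and int_to_roman(1999) == 'MCMXCIX'.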
def unique_iterator(seq):
"""
Returns an iterator containing all non-duplicate elements
in the input sequence.
"""
seen = set()
for item in seq:
if item not in seen:
seen.add(item)
yield item
def unique_array(arr):
"""
Returns an array of unique values in the input order
"""
if not len(arr):
return arr
elif pd:
return pd.unique(arr)
else:
arr = np.asarray(arr)
_, uniq_inds = np.unique(arr, return_index=True)
return arr[np.sort(uniq_inds)]
def match_spec(element, specification):
"""
Matches the group.label specification of the supplied
element against the supplied specification dictionary
returning the value of the best match.
"""
match_tuple = ()
match = specification.get((), {})
for spec in [type(element).__name__,
group_sanitizer(element.group, escape=False),
label_sanitizer(element.label, escape=False)]:
match_tuple += (spec,)
if match_tuple in specification:
match = specification[match_tuple]
return match
def python2sort(x,key=None):
if len(x) == 0: return x
it = iter(x)
groups = [[next(it)]]
for item in it:
for group in groups:
try:
item_precedence = item if key is None else key(item)
group_precedence = group[0] if key is None else key(group[0])
item_precedence < group_precedence # exception if not comparable
group.append(item)
break
except TypeError:
continue
else: # did not break, make new group
groups.append([item])
return itertools.chain.from_iterable(sorted(group, key=key) for group in groups)
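# python2sort emulates Python 2 ordering for mixed types: items are split into
# groups of mutually comparable values, each group is sorted and the groups are
# chained in order of first appearance, e.g.
#
#   list(python2sort([2, 'a', 1, 'b']))  # -> [1, 2, 'a', 'b']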
def merge_dimensions(dimensions_list):
"""
Merges lists of fully or partially overlapping dimensions by
merging their values.
>>> from holoviews import Dimension
>>> dim_list = [[Dimension('A', values=[1, 2, 3]), Dimension('B')],
... [Dimension('A', values=[2, 3, 4])]]
>>> dimensions = merge_dimensions(dim_list)
>>> dimensions
[Dimension('A'), Dimension('B')]
>>> dimensions[0].values
[1, 2, 3, 4]
"""
dvalues = defaultdict(list)
dimensions = []
for dims in dimensions_list:
for d in dims:
dvalues[d.name].append(d.values)
if d not in dimensions:
dimensions.append(d)
dvalues = {k: list(unique_iterator(itertools.chain(*vals)))
for k, vals in dvalues.items()}
return [d(values=dvalues.get(d.name, [])) for d in dimensions]
def dimension_sort(odict, kdims, vdims, key_index):
"""
Sorts data by key using usual Python tuple sorting semantics
or sorts in categorical order for any categorical Dimensions.
"""
sortkws = {}
ndims = len(kdims)
dimensions = kdims+vdims
indexes = [(dimensions[i], int(i not in range(ndims)),
i if i in range(ndims) else i-ndims)
for i in key_index]
cached_values = {d.name: [None]+list(d.values) for d in dimensions}
if len(set(key_index)) != len(key_index):
raise ValueError("Cannot sort on duplicated dimensions")
else:
sortkws['key'] = lambda x: tuple(cached_values[dim.name].index(x[t][d])
if dim.values else x[t][d]
for i, (dim, t, d) in enumerate(indexes))
if sys.version_info.major == 3:
return python2sort(odict.items(), **sortkws)
else:
return sorted(odict.items(), **sortkws)
# Copied from param; the param version should be made public
def is_number(obj):
if isinstance(obj, numbers.Number): return True
# The extra check is for classes that behave like numbers, such as those
# found in numpy, gmpy, etc.
elif (hasattr(obj, '__int__') and hasattr(obj, '__add__')): return True
# This is for older versions of gmpy
elif hasattr(obj, 'qdiv'): return True
else: return False
class ProgressIndicator(param.Parameterized):
"""
Baseclass for any ProgressIndicator that indicates progress
as a completion percentage.
"""
percent_range = param.NumericTuple(default=(0.0, 100.0), doc="""
The total percentage spanned by the progress bar when called
with a value between 0% and 100%. This allows an overall
completion in percent to be broken down into smaller sub-tasks
that individually complete to 100 percent.""")
label = param.String(default='Progress', allow_None=True, doc="""
The label of the current progress bar.""")
def __call__(self, completion):
raise NotImplementedError
def sort_topologically(graph):
"""
Stackless topological sorting.
graph = {
3: [1],
5: [3],
4: [2],
6: [4],
}
sort_topologically(graph)
[[1, 2], [3, 4], [5, 6]]
"""
levels_by_name = {}
names_by_level = defaultdict(list)
def add_level_to_name(name, level):
levels_by_name[name] = level
names_by_level[level].append(name)
def walk_depth_first(name):
stack = [name]
while(stack):
name = stack.pop()
if name in levels_by_name:
continue
if name not in graph or not graph[name]:
level = 0
add_level_to_name(name, level)
continue
children = graph[name]
children_not_calculated = [child for child in children if child not in levels_by_name]
if children_not_calculated:
stack.append(name)
stack.extend(children_not_calculated)
continue
level = 1 + max(levels_by_name[lname] for lname in children)
add_level_to_name(name, level)
for name in graph:
walk_depth_first(name)
return list(itertools.takewhile(lambda x: x is not None,
(names_by_level.get(i, None)
for i in itertools.count())))
def is_cyclic(graph):
"""
Return True if the directed graph g has a cycle. The directed graph
should be represented as a dictionary mapping of edges for each node.
"""
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in graph.get(vertex, ()):
if neighbour in path or visit(neighbour):
return True
path.remove(vertex)
return False
return any(visit(v) for v in graph)
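# Examples: is_cyclic({1: [2], 2: [1]}) is True, while
# is_cyclic({1: [2], 2: []}) is False.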
def one_to_one(graph, nodes):
"""
Return True if graph contains only one to one mappings. The
directed graph should be represented as a dictionary mapping of
edges for each node. Nodes should be passed a simple list.
"""
edges = itertools.chain.from_iterable(graph.values())
return len(graph) == len(nodes) and len(set(edges)) == len(nodes)
def get_overlay_spec(o, k, v):
"""
Gets the type.group.label + key spec from an Element in an Overlay.
"""
k = wrap_tuple(k)
return ((type(v).__name__, v.group, v.label) + k if len(o.kdims) else
(type(v).__name__,) + k)
def layer_sort(hmap):
"""
Find a global ordering for layers in a HoloMap of CompositeOverlay
types.
"""
orderings = {}
for o in hmap:
okeys = [get_overlay_spec(o, k, v) for k, v in o.data.items()]
if len(okeys) == 1 and not okeys[0] in orderings:
orderings[okeys[0]] = []
else:
orderings.update({k: [] if k == v else [v] for k, v in zip(okeys[1:], okeys)})
return [i for g in sort_topologically(orderings) for i in sorted(g)]
def layer_groups(ordering, length=2):
"""
Splits a global ordering of Layers into groups based on a slice of
the spec. The grouping behavior can be modified by changing the
length of spec the entries are grouped by.
"""
group_orderings = defaultdict(list)
for el in ordering:
group_orderings[el[:length]].append(el)
return group_orderings
def group_select(selects, length=None, depth=None):
"""
Given a list of key tuples to select, groups them into sensible
chunks to avoid duplicating indexing operations.
"""
if length == None and depth == None:
length = depth = len(selects[0])
getter = operator.itemgetter(depth-length)
if length > 1:
selects = sorted(selects, key=getter)
grouped_selects = defaultdict(dict)
for k, v in itertools.groupby(selects, getter):
grouped_selects[k] = group_select(list(v), length-1, depth)
return grouped_selects
else:
return list(selects)
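# Example: group_select([('a', 1), ('a', 2), ('b', 1)]) groups the key tuples
# by their first element, returning {'a': [('a', 1), ('a', 2)], 'b': [('b', 1)]}
# (as a defaultdict), so each top-level selection only has to be made once.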
def iterative_select(obj, dimensions, selects, depth=None):
"""
Takes the output of group_select selecting subgroups iteratively,
avoiding duplicating select operations.
"""
ndims = len(dimensions)
depth = depth if depth is not None else ndims
items = []
if isinstance(selects, dict):
for k, v in selects.items():
items += iterative_select(obj.select(**{dimensions[ndims-depth]: k}),
dimensions, v, depth-1)
else:
for s in selects:
items.append((s, obj.select(**{dimensions[-1]: s[-1]})))
return items
def get_spec(obj):
"""
Gets the spec from any labeled data object.
"""
return (obj.__class__.__name__,
obj.group, obj.label)
def find_file(folder, filename):
"""
Find a file given folder and filename. If the filename can be
resolved directly it is returned, otherwise the supplied folder is walked.
"""
matches = []
if os.path.isabs(filename) and os.path.isfile(filename):
return filename
for root, _, filenames in os.walk(folder):
for fn in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, fn))
if not matches:
raise IOError('File %s could not be found' % filename)
return matches[-1]
def is_dataframe(data):
"""
Checks whether the supplied data is of DataFrame type.
"""
return((pd is not None and isinstance(data, pd.DataFrame)) or
(dd is not None and isinstance(data, dd.DataFrame)))
def is_series(data):
"""
Checks whether the supplied data is of Series type.
"""
return((pd is not None and isinstance(data, pd.Series)) or
(dd is not None and isinstance(data, dd.Series)))
def get_param_values(data):
params = dict(kdims=data.kdims, vdims=data.vdims,
label=data.label)
if (data.group != data.params()['group'].default and not
isinstance(type(data).group, property)):
params['group'] = data.group
return params
@contextmanager
def disable_constant(parameterized):
"""
Temporarily set parameters on Parameterized object to
constant=False.
"""
params = parameterized.params().values()
constants = [p.constant for p in params]
for p in params:
p.constant = False
try:
yield
except:
raise
finally:
for (p, const) in zip(params, constants):
p.constant = const
def get_ndmapping_label(ndmapping, attr):
"""
Function to get the first non-auxiliary object
label attribute from an NdMapping.
"""
label = None
els = itervalues(ndmapping.data)
while label is None:
try:
el = next(els)
except StopIteration:
return None
if not el._auxiliary_component:
label = getattr(el, attr)
if attr == 'group':
tp = type(el).__name__
if tp == label:
return None
return label
def wrap_tuple(unwrapped):
""" Wraps any non-tuple types in a tuple """
return (unwrapped if isinstance(unwrapped, tuple) else (unwrapped,))
def stream_name_mapping(stream, exclude_params=['name'], reverse=False):
"""
Return a complete dictionary mapping between stream parameter names
to their applicable renames, excluding parameters listed in
exclude_params.
If reverse is True, the mapping is from the renamed strings to the
original stream parameter names.
"""
filtered = [k for k in stream.params().keys() if k not in exclude_params]
mapping = {k:stream._rename.get(k,k) for k in filtered}
if reverse:
return {v:k for k,v in mapping.items()}
else:
return mapping
def rename_stream_kwargs(stream, kwargs, reverse=False):
"""
Given a stream and a kwargs dictionary of parameter values, map to
the corresponding dictionary where the keys are substituted with the
appropriately renamed string.
If reverse, the output will be a dictionary using the original
parameter names given a dictionary using the renamed equivalents.
"""
mapped_kwargs = {}
mapping = stream_name_mapping(stream, reverse=reverse)
for k,v in kwargs.items():
if k not in mapping:
msg = 'Could not map key {key} {direction} renamed equivalent'
direction = 'from' if reverse else 'to'
raise KeyError(msg.format(key=repr(k), direction=direction))
mapped_kwargs[mapping[k]] = v
return mapped_kwargs
def stream_parameters(streams, no_duplicates=True, exclude=['name']):
"""
Given a list of streams, return a flat list of parameter names,
excluding those listed in the exclude list.
If no_duplicates is enabled, a KeyError will be raised if there are
parameter name clashes across the streams.
"""
param_groups = [s.contents.keys() for s in streams]
names = [name for group in param_groups for name in group]
if no_duplicates:
clashes = sorted(set([n for n in names if names.count(n) > 1]))
clash_streams = [s for s in streams for c in clashes if c in s.contents]
if clashes:
clashing = ', '.join([repr(c) for c in clash_streams[:-1]])
raise Exception('The supplied stream objects %s and %s '
'clash on the following parameters: %r'
% (clashing, clash_streams[-1], clashes))
return [name for name in names if name not in exclude]
def dimensionless_contents(streams, kdims, no_duplicates=True):
"""
Return a list of stream parameters that have not been associated
with any of the key dimensions.
"""
names = stream_parameters(streams, no_duplicates)
return [name for name in names if name not in kdims]
def unbound_dimensions(streams, kdims, no_duplicates=True):
"""
Return a list of dimensions that have not been associated with
any streams.
"""
params = stream_parameters(streams, no_duplicates)
return [d for d in kdims if d not in params]
def wrap_tuple_streams(unwrapped, kdims, streams):
"""
Fills in tuple keys with dimensioned stream values as appropriate.
"""
param_groups = [(s.contents.keys(), s) for s in streams]
pairs = [(name,s) for (group, s) in param_groups for name in group]
substituted = []
for pos,el in enumerate(wrap_tuple(unwrapped)):
if el is None and pos < len(kdims):
matches = [(name,s) for (name,s) in pairs if name==kdims[pos].name]
if len(matches) == 1:
(name, stream) = matches[0]
el = stream.contents[name]
substituted.append(el)
return tuple(substituted)
def drop_streams(streams, kdims, keys):
"""
Drop any dimensioned streams from the keys and kdims.
"""
stream_params = stream_parameters(streams)
inds, dims = zip(*[(ind, kdim) for ind, kdim in enumerate(kdims)
if kdim not in stream_params])
return dims, [tuple(wrap_tuple(key)[ind] for ind in inds) for key in keys]
def itervalues(obj):
"Get value iterator from dictionary for Python 2 and 3"
return iter(obj.values()) if sys.version_info.major == 3 else obj.itervalues()
def iterkeys(obj):
"Get key iterator from dictionary for Python 2 and 3"
return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys()
def get_unique_keys(ndmapping, dimensions):
inds = [ndmapping.get_dimension_index(dim) for dim in dimensions]
getter = operator.itemgetter(*inds)
return unique_iterator(getter(key) if len(inds) > 1 else (key[inds[0]],)
for key in ndmapping.data.keys())
def unpack_group(group, getter):
for k, v in group.iterrows():
obj = v.values[0]
key = getter(k)
if hasattr(obj, 'kdims'):
yield (key, obj)
else:
obj = tuple(v)
yield (wrap_tuple(key), obj)
def capitalize(string):
"""
Capitalizes the first letter of a string.
"""
return string[0].upper() + string[1:]
def get_path(item):
"""
Gets a path from a Labelled object or from a tuple of an existing
path and a labelled object. The path strings are sanitized and
capitalized.
"""
sanitizers = [group_sanitizer, label_sanitizer]
if isinstance(item, tuple):
path, item = item
if item.label:
if len(path) > 1 and item.label == path[1]:
path = path[:2]
else:
path = path[:1] + (item.label,)
else:
path = path[:1]
else:
path = (item.group, item.label) if item.label else (item.group,)
return tuple(capitalize(fn(p)) for (p, fn) in zip(path, sanitizers))
def make_path_unique(path, counts, new):
"""
Given a path, a dictionary of counts for existing paths and a flag
indicating whether the path is new, returns the path made unique by
appending a Roman numeral where it clashes with an existing path.
"""
added = False
while any(path == c[:i] for c in counts for i in range(1, len(c)+1)):
count = counts[path]
counts[path] += 1
if (not new and len(path) > 1) or added:
path = path[:-1]
else:
added = True
path = path + (int_to_roman(count),)
if len(path) == 1:
path = path + (int_to_roman(counts.get(path, 1)),)
if path not in counts:
counts[path] = 1
return path
class ndmapping_groupby(param.ParameterizedFunction):
"""
Apply a groupby operation to an NdMapping, using pandas to improve
performance (if available).
"""
def __call__(self, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
try:
import pandas # noqa (optional import)
groupby = self.groupby_pandas
except:
groupby = self.groupby_python
return groupby(ndmapping, dimensions, container_type,
group_type, sort=sort, **kwargs)
@param.parameterized.bothmethod
def groupby_pandas(self_or_cls, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
if 'kdims' in kwargs:
idims = [ndmapping.get_dimension(d) for d in kwargs['kdims']]
else:
idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
all_dims = [d.name for d in ndmapping.kdims]
inds = [ndmapping.get_dimension_index(dim) for dim in idims]
getter = operator.itemgetter(*inds) if inds else lambda x: tuple()
multi_index = pd.MultiIndex.from_tuples(ndmapping.keys(), names=all_dims)
df = pd.DataFrame(list(map(wrap_tuple, ndmapping.values())), index=multi_index)
kwargs = dict(dict(get_param_values(ndmapping), kdims=idims), **kwargs)
groups = ((wrap_tuple(k), group_type(OrderedDict(unpack_group(group, getter)), **kwargs))
for k, group in df.groupby(level=[d.name for d in dimensions]))
if sort:
selects = list(get_unique_keys(ndmapping, dimensions))
groups = sorted(groups, key=lambda x: selects.index(x[0]))
return container_type(groups, kdims=dimensions)
@param.parameterized.bothmethod
def groupby_python(self_or_cls, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
dim_names = [dim.name for dim in dimensions]
selects = get_unique_keys(ndmapping, dimensions)
selects = group_select(list(selects))
groups = [(k, group_type((v.reindex(idims) if hasattr(v, 'kdims')
else [((), (v,))]), **kwargs))
for k, v in iterative_select(ndmapping, dim_names, selects)]
return container_type(groups, kdims=dimensions)
def cartesian_product(arrays, flat=True, copy=False):
"""
Efficient cartesian product of a list of 1D arrays returning the
expanded array views for each dimension. By default arrays are
flattened, which may be controlled with the flat flag. The array
views can be turned into regular arrays with the copy flag.
"""
arrays = np.broadcast_arrays(*np.ix_(*arrays))
if flat:
return tuple(arr.flatten() if copy else arr.flat for arr in arrays)
return tuple(arr.copy() if copy else arr for arr in arrays)
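# Example: cartesian_product([np.array([1, 2]), np.array([3, 4])], copy=True)
# returns (array([1, 1, 2, 2]), array([3, 4, 3, 4])), i.e. the flattened
# coordinate arrays of the 2x2 grid spanned by the two inputs.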
def arglexsort(arrays):
"""
Returns the indices of the lexicographical sorting
order of the supplied arrays.
"""
dtypes = ','.join(array.dtype.str for array in arrays)
recarray = np.empty(len(arrays[0]), dtype=dtypes)
for i, array in enumerate(arrays):
recarray['f%s' % i] = array
return recarray.argsort()
def dimensioned_streams(dmap):
"""
Given a DynamicMap return all streams that have any dimensioned
parameters i.e parameters also listed in the key dimensions.
"""
dimensioned = []
for stream in dmap.streams:
stream_params = stream_parameters([stream])
if set([str(k) for k in dmap.kdims]) & set(stream_params):
dimensioned.append(stream)
return dimensioned
def expand_grid_coords(dataset, dim):
"""
Expand the coordinates along a dimension of the gridded
dataset into an ND-array matching the dimensionality of
the dataset.
"""
arrays = [dataset.interface.coords(dataset, d.name, True)
for d in dataset.kdims]
idx = dataset.get_dimension_index(dim)
return cartesian_product(arrays, flat=False)[idx]
def dt64_to_dt(dt64):
"""
Safely converts NumPy datetime64 to a datetime object.
"""
ts = (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
return dt.datetime.utcfromtimestamp(ts)
def is_nan(x):
"""
Checks whether value is NaN on arbitrary types
"""
try:
return np.isnan(x)
except:
return False
def bound_range(vals, density, time_unit='us'):
"""
Computes a bounding range and density from a number of samples
assumed to be evenly spaced. Density is rounded to machine precision
using significant digits reported by sys.float_info.dig.
"""
low, high = vals.min(), vals.max()
invert = False
if len(vals) > 1 and vals[0] > vals[1]:
invert = True
if not density:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered in double_scalars')
full_precision_density = compute_density(low, high, len(vals)-1)
density = round(full_precision_density, sys.float_info.dig)
if density == 0:
density = full_precision_density
if density == 0:
raise ValueError('Could not determine Image density, ensure it has a non-zero range.')
halfd = 0.5/density
if isinstance(low, datetime_types):
halfd = np.timedelta64(int(round(halfd)), time_unit)
return low-halfd, high+halfd, density, invert
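# Example: for evenly spaced sample positions vals = np.array([0., 1., 2.]),
# bound_range(vals, None) returns (-0.5, 2.5, 1.0, False): the bounds are the
# sample extremes padded by half a sample spacing, the density is one sample
# per unit and the samples are not inverted.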
def compute_density(start, end, length, time_unit='us'):
"""
Computes a grid density given the edges and number of samples.
Handles datetime grids correctly by computing timedeltas and
computing a density for the given time_unit.
"""
if isinstance(start, int): start = float(start)
if isinstance(end, int): end = float(end)
diff = end-start
if isinstance(diff, timedelta_types):
if isinstance(diff, np.timedelta64):
diff = np.timedelta64(diff, time_unit).tolist()
tscale = 1./np.timedelta64(1, time_unit).tolist().total_seconds()
return (length/(diff.total_seconds()*tscale))
else:
return length/diff
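# Example: compute_density(0, 10, 5) returns 0.5, i.e. 5 samples spread over a
# range of length 10 correspond to half a sample per unit.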
def date_range(start, end, length, time_unit='us'):
"""
Computes a date range given a start date, end date and the number
of samples.
"""
step = (1./compute_density(start, end, length, time_unit))
if pd and isinstance(start, pd.Timestamp):
start = start.to_datetime64()
step = np.timedelta64(int(round(step)), time_unit)
return start+step/2.+np.arange(length)*step
def dt_to_int(value, time_unit='us'):
"""
Converts a datetime type to an integer with the supplied time unit.
"""
if time_unit == 'ns':
tscale = 1./np.timedelta64(1, time_unit).tolist()
else:
tscale = 1./np.timedelta64(1, time_unit).tolist().total_seconds()
if pd and isinstance(value, pd.Timestamp):
value = value.to_pydatetime()
elif isinstance(value, np.datetime64):
value = value.tolist()
if isinstance(value, (int, long)):
# Handle special case of nanosecond precision which cannot be
# represented by python datetime
return value * 10**-(np.log10(tscale)-3)
try:
# Handle python3
return int(value.timestamp() * tscale)
except:
# Handle python2
return (time.mktime(value.timetuple()) + value.microsecond / 1e6) * tscale
def search_indices(values, source):
"""
Given a set of values returns the indices of each of those values
in the source array.
"""
orig_indices = source.argsort()
return orig_indices[np.searchsorted(source[orig_indices], values)]
def compute_edges(edges):
"""
Computes bin edges as the midpoints between the supplied bin centers.
The first and last edges are obtained by mirroring the innermost
midpoints about the first and last centers respectively.
"""
edges = np.asarray(edges)
if edges.dtype.kind == 'i':
edges = edges.astype('f')
midpoints = (edges[:-1] + edges[1:])/2.0
boundaries = (2*edges[0] - midpoints[0], 2*edges[-1] - midpoints[-1])
return np.concatenate([boundaries[:1], midpoints, boundaries[-1:]])
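# Example: compute_edges([1, 2, 3]) returns array([0.5, 1.5, 2.5, 3.5]),
# turning three bin centers into the four surrounding bin edges.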
| 1 | 20,050 | The config option should probably have a more specific name. Also it's not for all gridded data but specifically for Images (and its subclasses). | holoviz-holoviews | py |
@@ -80,7 +80,7 @@ public class PreferencesTest {
}
clickPreference(R.string.user_interface_label);
clickPreference(R.string.pref_set_theme_title);
- onView(withText(otherTheme)).perform(click());
+ clickPreference(otherTheme);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getTheme() != theme);
} | 1 | package de.test.antennapod.ui;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.res.Resources;
import android.preference.PreferenceManager;
import androidx.annotation.StringRes;
import androidx.test.filters.LargeTest;
import androidx.test.rule.ActivityTestRule;
import com.google.android.material.snackbar.Snackbar;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.activity.PreferenceActivity;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences.EnqueueLocation;
import de.danoeh.antennapod.core.storage.APCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APNullCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APQueueCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.EpisodeCleanupAlgorithm;
import de.danoeh.antennapod.fragment.EpisodesFragment;
import de.danoeh.antennapod.fragment.QueueFragment;
import de.danoeh.antennapod.fragment.SubscriptionFragment;
import de.test.antennapod.EspressoTestUtils;
import org.awaitility.Awaitility;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import static androidx.test.espresso.Espresso.onView;
import static androidx.test.espresso.action.ViewActions.click;
import static androidx.test.espresso.action.ViewActions.replaceText;
import static androidx.test.espresso.action.ViewActions.scrollTo;
import static androidx.test.espresso.action.ViewActions.swipeDown;
import static androidx.test.espresso.action.ViewActions.swipeUp;
import static androidx.test.espresso.assertion.ViewAssertions.doesNotExist;
import static androidx.test.espresso.assertion.ViewAssertions.matches;
import static androidx.test.espresso.matcher.ViewMatchers.isChecked;
import static androidx.test.espresso.matcher.ViewMatchers.isDisplayed;
import static androidx.test.espresso.matcher.ViewMatchers.isRoot;
import static androidx.test.espresso.matcher.ViewMatchers.withClassName;
import static androidx.test.espresso.matcher.ViewMatchers.withId;
import static androidx.test.espresso.matcher.ViewMatchers.withText;
import static de.test.antennapod.EspressoTestUtils.clickPreference;
import static de.test.antennapod.EspressoTestUtils.waitForView;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static junit.framework.TestCase.assertTrue;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.not;
@LargeTest
public class PreferencesTest {
private Resources res;
@Rule
public ActivityTestRule<PreferenceActivity> mActivityRule = new ActivityTestRule<>(PreferenceActivity.class, false, false);
@Before
public void setUp() {
EspressoTestUtils.clearDatabase();
EspressoTestUtils.clearPreferences();
mActivityRule.launchActivity(new Intent());
SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(mActivityRule.getActivity());
prefs.edit().putBoolean(UserPreferences.PREF_ENABLE_AUTODL, true).commit();
res = mActivityRule.getActivity().getResources();
UserPreferences.init(mActivityRule.getActivity());
}
@Test
public void testSwitchTheme() {
final int theme = UserPreferences.getTheme();
int otherTheme;
if (theme == de.danoeh.antennapod.core.R.style.Theme_AntennaPod_Light) {
otherTheme = R.string.pref_theme_title_dark;
} else {
otherTheme = R.string.pref_theme_title_light;
}
clickPreference(R.string.user_interface_label);
clickPreference(R.string.pref_set_theme_title);
onView(withText(otherTheme)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getTheme() != theme);
}
@Test
public void testSwitchThemeBack() {
final int theme = UserPreferences.getTheme();
int otherTheme;
if (theme == de.danoeh.antennapod.core.R.style.Theme_AntennaPod_Light) {
otherTheme = R.string.pref_theme_title_dark;
} else {
otherTheme = R.string.pref_theme_title_light;
}
clickPreference(R.string.user_interface_label);
clickPreference(R.string.pref_set_theme_title);
onView(withText(otherTheme)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getTheme() != theme);
}
@Test
public void testEnablePersistentPlaybackControls() {
final boolean persistNotify = UserPreferences.isPersistNotify();
clickPreference(R.string.user_interface_label);
clickPreference(R.string.pref_persistNotify_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> persistNotify != UserPreferences.isPersistNotify());
clickPreference(R.string.pref_persistNotify_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> persistNotify == UserPreferences.isPersistNotify());
}
@Test
public void testSetLockscreenButtons() {
clickPreference(R.string.user_interface_label);
String[] buttons = res.getStringArray(R.array.compact_notification_buttons_options);
clickPreference(R.string.pref_compact_notification_buttons_title);
// First uncheck checkbox
onView(withText(buttons[2])).perform(click());
// Now try to check all checkboxes
onView(withText(buttons[0])).perform(click());
onView(withText(buttons[1])).perform(click());
onView(withText(buttons[2])).perform(click());
// Make sure that the third checkbox is unchecked
onView(withText(buttons[2])).check(matches(not(isChecked())));
String snackBarText = String.format(res.getString(
R.string.pref_compact_notification_buttons_dialog_error), 2);
Awaitility.await().ignoreExceptions().atMost(4000, MILLISECONDS)
.until(() -> {
onView(withText(snackBarText)).check(doesNotExist());
return true;
});
onView(withText(R.string.confirm_label)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(UserPreferences::showRewindOnCompactNotification);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(UserPreferences::showFastForwardOnCompactNotification);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> !UserPreferences.showSkipOnCompactNotification());
}
@Test
public void testEnqueueLocation() {
clickPreference(R.string.playback_pref);
doTestEnqueueLocation(R.string.enqueue_location_after_current, EnqueueLocation.AFTER_CURRENTLY_PLAYING);
doTestEnqueueLocation(R.string.enqueue_location_front, EnqueueLocation.FRONT);
doTestEnqueueLocation(R.string.enqueue_location_back, EnqueueLocation.BACK);
}
private void doTestEnqueueLocation(@StringRes int optionResId, EnqueueLocation expected) {
clickPreference(R.string.pref_enqueue_location_title);
onView(withText(optionResId)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> expected == UserPreferences.getEnqueueLocation());
}
@Test
public void testHeadPhonesDisconnect() {
onView(withText(R.string.playback_pref)).perform(click());
final boolean pauseOnHeadsetDisconnect = UserPreferences.isPauseOnHeadsetDisconnect();
onView(withText(R.string.pref_pauseOnHeadsetDisconnect_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> pauseOnHeadsetDisconnect != UserPreferences.isPauseOnHeadsetDisconnect());
onView(withText(R.string.pref_pauseOnHeadsetDisconnect_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> pauseOnHeadsetDisconnect == UserPreferences.isPauseOnHeadsetDisconnect());
}
@Test
public void testHeadPhonesReconnect() {
onView(withText(R.string.playback_pref)).perform(click());
if (!UserPreferences.isPauseOnHeadsetDisconnect()) {
onView(withText(R.string.pref_pauseOnHeadsetDisconnect_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(UserPreferences::isPauseOnHeadsetDisconnect);
}
final boolean unpauseOnHeadsetReconnect = UserPreferences.isUnpauseOnHeadsetReconnect();
onView(withText(R.string.pref_unpauseOnHeadsetReconnect_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> unpauseOnHeadsetReconnect != UserPreferences.isUnpauseOnHeadsetReconnect());
onView(withText(R.string.pref_unpauseOnHeadsetReconnect_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> unpauseOnHeadsetReconnect == UserPreferences.isUnpauseOnHeadsetReconnect());
}
@Test
public void testBluetoothReconnect() {
onView(withText(R.string.playback_pref)).perform(click());
if (!UserPreferences.isPauseOnHeadsetDisconnect()) {
onView(withText(R.string.pref_pauseOnHeadsetDisconnect_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(UserPreferences::isPauseOnHeadsetDisconnect);
}
final boolean unpauseOnBluetoothReconnect = UserPreferences.isUnpauseOnBluetoothReconnect();
onView(withText(R.string.pref_unpauseOnBluetoothReconnect_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> unpauseOnBluetoothReconnect != UserPreferences.isUnpauseOnBluetoothReconnect());
onView(withText(R.string.pref_unpauseOnBluetoothReconnect_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> unpauseOnBluetoothReconnect == UserPreferences.isUnpauseOnBluetoothReconnect());
}
@Test
public void testContinuousPlayback() {
clickPreference(R.string.playback_pref);
final boolean continuousPlayback = UserPreferences.isFollowQueue();
clickPreference(R.string.pref_followQueue_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> continuousPlayback != UserPreferences.isFollowQueue());
clickPreference(R.string.pref_followQueue_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> continuousPlayback == UserPreferences.isFollowQueue());
}
@Test
public void testAutoDelete() {
onView(withText(R.string.storage_pref)).perform(click());
final boolean autoDelete = UserPreferences.isAutoDelete();
onView(withText(R.string.pref_auto_delete_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> autoDelete != UserPreferences.isAutoDelete());
onView(withText(R.string.pref_auto_delete_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> autoDelete == UserPreferences.isAutoDelete());
}
@Test
public void testPlaybackSpeeds() {
clickPreference(R.string.playback_pref);
clickPreference(R.string.media_player);
onView(withText(R.string.media_player_exoplayer)).perform(click());
clickPreference(R.string.pref_playback_speed_title);
onView(isRoot()).perform(waitForView(withText("0.50"), 1000));
onView(withText("0.50")).check(matches(isDisplayed()));
onView(withText(R.string.cancel_label)).perform(click());
}
@Test
public void testPauseForInterruptions() {
onView(withText(R.string.playback_pref)).perform(click());
final boolean pauseForFocusLoss = UserPreferences.shouldPauseForFocusLoss();
clickPreference(R.string.pref_pausePlaybackForFocusLoss_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> pauseForFocusLoss != UserPreferences.shouldPauseForFocusLoss());
clickPreference(R.string.pref_pausePlaybackForFocusLoss_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> pauseForFocusLoss == UserPreferences.shouldPauseForFocusLoss());
}
@Test
public void testDisableUpdateInterval() {
onView(withText(R.string.network_pref)).perform(click());
onView(withText(R.string.pref_autoUpdateIntervallOrTime_title)).perform(click());
onView(withText(R.string.pref_autoUpdateIntervallOrTime_Disable)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getUpdateInterval() == 0);
}
@Test
public void testSetUpdateInterval() {
clickPreference(R.string.network_pref);
clickPreference(R.string.pref_autoUpdateIntervallOrTime_title);
onView(withText(R.string.pref_autoUpdateIntervallOrTime_Interval)).perform(click());
String search = "12 " + res.getString(R.string.pref_update_interval_hours_plural);
onView(withText(search)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getUpdateInterval() == TimeUnit.HOURS.toMillis(12));
}
@Test
public void testSetSequentialDownload() {
clickPreference(R.string.network_pref);
clickPreference(R.string.pref_parallel_downloads_title);
onView(isRoot()).perform(waitForView(withClassName(endsWith("EditText")), 1000));
onView(withClassName(endsWith("EditText"))).perform(replaceText("1"));
onView(withText(android.R.string.ok)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getParallelDownloads() == 1);
}
@Test
public void testSetParallelDownloads() {
clickPreference(R.string.network_pref);
clickPreference(R.string.pref_parallel_downloads_title);
onView(isRoot()).perform(waitForView(withClassName(endsWith("EditText")), 1000));
onView(withClassName(endsWith("EditText"))).perform(replaceText("10"));
onView(withText(android.R.string.ok)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getParallelDownloads() == 10);
}
@Test
public void testSetParallelDownloadsInvalidInput() {
clickPreference(R.string.network_pref);
clickPreference(R.string.pref_parallel_downloads_title);
onView(isRoot()).perform(waitForView(withClassName(endsWith("EditText")), 1000));
onView(withClassName(endsWith("EditText"))).perform(replaceText("0"));
onView(withClassName(endsWith("EditText"))).check(matches(withText("")));
onView(withClassName(endsWith("EditText"))).perform(replaceText("100"));
onView(withClassName(endsWith("EditText"))).check(matches(withText("")));
}
@Test
public void testSetEpisodeCache() {
String[] entries = res.getStringArray(R.array.episode_cache_size_entries);
String[] values = res.getStringArray(R.array.episode_cache_size_values);
String entry = entries[entries.length / 2];
final int value = Integer.parseInt(values[values.length / 2]);
clickPreference(R.string.network_pref);
clickPreference(R.string.pref_automatic_download_title);
clickPreference(R.string.pref_episode_cache_title);
onView(isRoot()).perform(waitForView(withText(entry), 1000));
onView(withText(entry)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getEpisodeCacheSize() == value);
}
@Test
public void testSetEpisodeCacheMin() {
String[] entries = res.getStringArray(R.array.episode_cache_size_entries);
String[] values = res.getStringArray(R.array.episode_cache_size_values);
String minEntry = entries[0];
final int minValue = Integer.parseInt(values[0]);
clickPreference(R.string.network_pref);
clickPreference(R.string.pref_automatic_download_title);
clickPreference(R.string.pref_episode_cache_title);
onView(withId(R.id.select_dialog_listview)).perform(swipeDown());
onView(withText(minEntry)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getEpisodeCacheSize() == minValue);
}
@Test
public void testSetEpisodeCacheMax() {
String[] entries = res.getStringArray(R.array.episode_cache_size_entries);
String[] values = res.getStringArray(R.array.episode_cache_size_values);
String maxEntry = entries[entries.length - 1];
final int maxValue = Integer.parseInt(values[values.length - 1]);
onView(withText(R.string.network_pref)).perform(click());
onView(withText(R.string.pref_automatic_download_title)).perform(click());
onView(withText(R.string.pref_episode_cache_title)).perform(click());
onView(withId(R.id.select_dialog_listview)).perform(swipeUp());
onView(withText(maxEntry)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getEpisodeCacheSize() == maxValue);
}
@Test
public void testAutomaticDownload() {
final boolean automaticDownload = UserPreferences.isEnableAutodownload();
clickPreference(R.string.network_pref);
clickPreference(R.string.pref_automatic_download_title);
clickPreference(R.string.pref_automatic_download_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> automaticDownload != UserPreferences.isEnableAutodownload());
if (!UserPreferences.isEnableAutodownload()) {
clickPreference(R.string.pref_automatic_download_title);
}
Awaitility.await().atMost(1000, MILLISECONDS)
.until(UserPreferences::isEnableAutodownload);
final boolean enableAutodownloadOnBattery = UserPreferences.isEnableAutodownloadOnBattery();
clickPreference(R.string.pref_automatic_download_on_battery_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> enableAutodownloadOnBattery != UserPreferences.isEnableAutodownloadOnBattery());
clickPreference(R.string.pref_automatic_download_on_battery_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> enableAutodownloadOnBattery == UserPreferences.isEnableAutodownloadOnBattery());
final boolean enableWifiFilter = UserPreferences.isEnableAutodownloadWifiFilter();
clickPreference(R.string.pref_autodl_wifi_filter_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> enableWifiFilter != UserPreferences.isEnableAutodownloadWifiFilter());
clickPreference(R.string.pref_autodl_wifi_filter_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> enableWifiFilter == UserPreferences.isEnableAutodownloadWifiFilter());
}
@Test
public void testEpisodeCleanupQueueOnly() {
onView(withText(R.string.network_pref)).perform(click());
onView(withText(R.string.pref_automatic_download_title)).perform(click());
onView(withText(R.string.pref_episode_cleanup_title)).perform(click());
onView(isRoot()).perform(waitForView(withText(R.string.episode_cleanup_queue_removal), 1000));
onView(withText(R.string.episode_cleanup_queue_removal)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getEpisodeCleanupAlgorithm() instanceof APQueueCleanupAlgorithm);
}
@Test
public void testEpisodeCleanupNeverAlg() {
onView(withText(R.string.network_pref)).perform(click());
onView(withText(R.string.pref_automatic_download_title)).perform(click());
onView(withText(R.string.pref_episode_cleanup_title)).perform(click());
onView(withId(R.id.select_dialog_listview)).perform(swipeUp());
onView(withText(R.string.episode_cleanup_never)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getEpisodeCleanupAlgorithm() instanceof APNullCleanupAlgorithm);
}
@Test
public void testEpisodeCleanupClassic() {
onView(withText(R.string.network_pref)).perform(click());
onView(withText(R.string.pref_automatic_download_title)).perform(click());
onView(withText(R.string.pref_episode_cleanup_title)).perform(click());
onView(isRoot()).perform(waitForView(withText(R.string.episode_cleanup_after_listening), 1000));
onView(withText(R.string.episode_cleanup_after_listening)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> {
EpisodeCleanupAlgorithm alg = UserPreferences.getEpisodeCleanupAlgorithm();
if (alg instanceof APCleanupAlgorithm) {
APCleanupAlgorithm cleanupAlg = (APCleanupAlgorithm) alg;
return cleanupAlg.getNumberOfHoursAfterPlayback() == 0;
}
return false;
});
}
@Test
public void testEpisodeCleanupNumDays() {
clickPreference(R.string.network_pref);
clickPreference(R.string.pref_automatic_download_title);
clickPreference(R.string.pref_episode_cleanup_title);
String search = res.getQuantityString(R.plurals.episode_cleanup_days_after_listening, 3, 3);
onView(isRoot()).perform(waitForView(withText(search), 1000));
onView(withText(search)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> {
EpisodeCleanupAlgorithm alg = UserPreferences.getEpisodeCleanupAlgorithm();
if (alg instanceof APCleanupAlgorithm) {
APCleanupAlgorithm cleanupAlg = (APCleanupAlgorithm) alg;
                    return cleanupAlg.getNumberOfHoursAfterPlayback() == 72; // 3 days
}
return false;
});
}
@Test
public void testRewindChange() {
int seconds = UserPreferences.getRewindSecs();
int[] deltas = res.getIntArray(R.array.seek_delta_values);
clickPreference(R.string.playback_pref);
clickPreference(R.string.pref_rewind);
int currentIndex = Arrays.binarySearch(deltas, seconds);
assertTrue(currentIndex >= 0 && currentIndex < deltas.length); // found?
        // Find the next value (wrapping around to the first)
int newIndex = (currentIndex + 1) % deltas.length;
onView(withText(deltas[newIndex] + " seconds")).perform(click());
onView(withText("Confirm")).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getRewindSecs() == deltas[newIndex]);
}
@Test
public void testFastForwardChange() {
clickPreference(R.string.playback_pref);
        for (int i = 2; i > 0; i--) { // repeat twice to catch any error where fast-forward is tracking rewind
int seconds = UserPreferences.getFastForwardSecs();
int[] deltas = res.getIntArray(R.array.seek_delta_values);
clickPreference(R.string.pref_fast_forward);
int currentIndex = Arrays.binarySearch(deltas, seconds);
assertTrue(currentIndex >= 0 && currentIndex < deltas.length); // found?
            // Find the next value (wrapping around to the first)
int newIndex = (currentIndex + 1) % deltas.length;
onView(withText(deltas[newIndex] + " seconds")).perform(click());
onView(withText("Confirm")).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getFastForwardSecs() == deltas[newIndex]);
}
}
@Test
public void testBackButtonBehaviorGoToPageSelector() {
clickPreference(R.string.user_interface_label);
clickPreference(R.string.pref_back_button_behavior_title);
onView(withText(R.string.back_button_go_to_page)).perform(click());
onView(withText(R.string.queue_label)).perform(click());
onView(withText(R.string.confirm_label)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getBackButtonBehavior() == UserPreferences.BackButtonBehavior.GO_TO_PAGE);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getBackButtonGoToPage().equals(QueueFragment.TAG));
clickPreference(R.string.pref_back_button_behavior_title);
onView(withText(R.string.back_button_go_to_page)).perform(click());
onView(withText(R.string.episodes_label)).perform(click());
onView(withText(R.string.confirm_label)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getBackButtonBehavior() == UserPreferences.BackButtonBehavior.GO_TO_PAGE);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getBackButtonGoToPage().equals(EpisodesFragment.TAG));
clickPreference(R.string.pref_back_button_behavior_title);
onView(withText(R.string.back_button_go_to_page)).perform(click());
onView(withText(R.string.subscriptions_label)).perform(click());
onView(withText(R.string.confirm_label)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getBackButtonBehavior() == UserPreferences.BackButtonBehavior.GO_TO_PAGE);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> UserPreferences.getBackButtonGoToPage().equals(SubscriptionFragment.TAG));
}
@Test
public void testDeleteRemovesFromQueue() {
clickPreference(R.string.storage_pref);
if (!UserPreferences.shouldDeleteRemoveFromQueue()) {
clickPreference(R.string.pref_delete_removes_from_queue_title);
Awaitility.await().atMost(1000, MILLISECONDS)
.until(UserPreferences::shouldDeleteRemoveFromQueue);
}
final boolean deleteRemovesFromQueue = UserPreferences.shouldDeleteRemoveFromQueue();
onView(withText(R.string.pref_delete_removes_from_queue_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> deleteRemovesFromQueue != UserPreferences.shouldDeleteRemoveFromQueue());
onView(withText(R.string.pref_delete_removes_from_queue_title)).perform(click());
Awaitility.await().atMost(1000, MILLISECONDS)
.until(() -> deleteRemovesFromQueue == UserPreferences.shouldDeleteRemoveFromQueue());
}
}
| 1 | 15,614 | Be careful. This is not a preference but an option in a dialog. | AntennaPod-AntennaPod | java |
@@ -30,8 +30,11 @@ func (s *svc) DescribeDeployment(ctx context.Context, clientset, cluster, namesp
}
func ProtoForDeployment(cluster string, deployment *appsv1.Deployment) *k8sapiv1.Deployment {
+ if deployment.ClusterName == "" {
+ deployment.ClusterName = cluster
+ }
return &k8sapiv1.Deployment{
- Cluster: cluster,
+ Cluster: deployment.ClusterName,
Namespace: deployment.Namespace,
Name: deployment.Name,
Labels: deployment.Labels, | 1 | package k8s
import (
"context"
"encoding/json"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/util/retry"
appsv1 "k8s.io/api/apps/v1"
k8sapiv1 "github.com/lyft/clutch/backend/api/k8s/v1"
)
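// DescribeDeployment fetches a single deployment by name from the given clientset,
// cluster, and namespace and returns its API representation.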
func (s *svc) DescribeDeployment(ctx context.Context, clientset, cluster, namespace, name string) (*k8sapiv1.Deployment, error) {
cs, err := s.manager.GetK8sClientset(clientset, cluster, namespace)
if err != nil {
return nil, err
}
getOpts := metav1.GetOptions{}
deployment, err := cs.AppsV1().Deployments(cs.Namespace()).Get(name, getOpts)
if err != nil {
return nil, err
}
return ProtoForDeployment(cs.Cluster(), deployment), nil
}
func ProtoForDeployment(cluster string, deployment *appsv1.Deployment) *k8sapiv1.Deployment {
return &k8sapiv1.Deployment{
Cluster: cluster,
Namespace: deployment.Namespace,
Name: deployment.Name,
Labels: deployment.Labels,
Annotations: deployment.Annotations,
}
}
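// UpdateDeployment merges the requested label and annotation changes into the named
// deployment and applies them with a strategic merge patch, retrying on conflict.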
func (s *svc) UpdateDeployment(ctx context.Context, clientset, cluster, namespace, name string, fields *k8sapiv1.UpdateDeploymentRequest_Fields) error {
cs, err := s.manager.GetK8sClientset(clientset, cluster, namespace)
if err != nil {
return err
}
getOpts := metav1.GetOptions{}
oldDeployment, err := cs.AppsV1().Deployments(cs.Namespace()).Get(name, getOpts)
if err != nil {
return err
}
newDeployment := oldDeployment.DeepCopy()
mergeLabelsAndAnnotations(newDeployment, fields)
patchBytes, err := generateDeploymentStrategicPatch(oldDeployment, newDeployment)
if err != nil {
return err
}
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
_, err := cs.AppsV1().Deployments(cs.Namespace()).Patch(oldDeployment.Name, types.StrategicMergePatchType, patchBytes)
return err
})
return retryErr
}
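// mergeLabelsAndAnnotations copies the requested labels and annotations onto both the
// deployment's metadata and its pod template metadata.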
func mergeLabelsAndAnnotations(deployment *appsv1.Deployment, fields *k8sapiv1.UpdateDeploymentRequest_Fields) {
if len(fields.Labels) > 0 {
for k, v := range fields.Labels {
deployment.Labels[k] = v
if deployment.Spec.Template.ObjectMeta.Labels == nil {
deployment.Spec.Template.ObjectMeta.Labels = make(map[string]string)
}
deployment.Spec.Template.ObjectMeta.Labels[k] = v
}
}
if len(fields.Annotations) > 0 {
for k, v := range fields.Annotations {
deployment.Annotations[k] = v
if deployment.Spec.Template.ObjectMeta.Annotations == nil {
deployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
deployment.Spec.Template.ObjectMeta.Annotations[k] = v
}
}
}
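// generateDeploymentStrategicPatch marshals the old and new deployments to JSON and
// returns a two-way strategic merge patch describing the differences between them.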
func generateDeploymentStrategicPatch(oldDeployment, newDeployment *appsv1.Deployment) ([]byte, error) {
old, err := json.Marshal(oldDeployment)
if err != nil {
return nil, err
}
new, err := json.Marshal(newDeployment)
if err != nil {
return nil, err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(old, new, appsv1.Deployment{})
if err != nil {
return nil, err
}
return patchBytes, nil
}
| 1 | 8,299 | this will modify the incoming object, which may not be desirable in some cases. i think we should stick with the local var, override it with deployment.ClusterName if deployment.ClusterName not empty | lyft-clutch | go |
@@ -156,7 +156,6 @@ test.suite(
await driver.get(fileServer.Pages.basicAuth)
let source = await driver.getPageSource()
assert.strictEqual(source.includes('Access granted!'), true)
- await server.stop()
})
})
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
'use strict'
const assert = require('assert')
const fs = require('fs')
const path = require('path')
const chrome = require('../../chrome')
const error = require('../../lib/error')
const fileServer = require('../../lib/test/fileserver')
const io = require('../../io')
const test = require('../../lib/test')
const until = require('../../lib/until')
test.suite(
function (env) {
let driver
before(async function () {
driver = await env
.builder()
.setChromeOptions(new chrome.Options().headless())
.build()
})
after(() => driver.quit())
it('can send commands to devtools', async function () {
await driver.get(test.Pages.ajaxyPage)
assert.strictEqual(await driver.getCurrentUrl(), test.Pages.ajaxyPage)
await driver.sendDevToolsCommand('Page.navigate', {
url: test.Pages.echoPage,
})
assert.strictEqual(await driver.getCurrentUrl(), test.Pages.echoPage)
})
it('can send commands to devtools and get return', async function () {
await driver.get(test.Pages.ajaxyPage)
assert.strictEqual(await driver.getCurrentUrl(), test.Pages.ajaxyPage)
await driver.get(test.Pages.echoPage)
assert.strictEqual(await driver.getCurrentUrl(), test.Pages.echoPage)
let history = await driver.sendAndGetDevToolsCommand(
'Page.getNavigationHistory'
)
assert(history)
assert(history.currentIndex >= 2)
assert.strictEqual(
history.entries[history.currentIndex].url,
test.Pages.echoPage
)
assert.strictEqual(
history.entries[history.currentIndex - 1].url,
test.Pages.ajaxyPage
)
})
it('sends Page.enable command using devtools', async function () {
const cdpConnection = await driver.createCDPConnection('page')
cdpConnection.execute('Page.enable', 1, {}, function (_res, err) {
assert(!err)
})
})
it('sends Network and Page command using devtools', async function () {
const cdpConnection = await driver.createCDPConnection('page')
cdpConnection.execute('Network.enable', 1, {}, function (_res, err) {
assert(!err)
})
cdpConnection.execute(
'Page.navigate',
1,
{ url: 'chrome://newtab/' },
function (_res, err) {
assert(!err)
}
)
})
describe('JS CDP events', function () {
it('calls the event listener for console.log', async function () {
const cdpConnection = await driver.createCDPConnection('page')
await driver.onLogEvent(cdpConnection, function (event) {
assert.strictEqual(event['args'][0]['value'], 'here')
})
await driver.executeScript('console.log("here")')
})
it('calls the event listener for js exceptions', async function () {
const cdpConnection = await driver.createCDPConnection('page')
await driver.onLogException(cdpConnection, function (event) {
assert.strictEqual(
event['exceptionDetails']['stackTrace']['callFrames'][0][
'functionName'
],
'onmouseover'
)
})
await driver.get(test.Pages.javascriptPage)
let element = driver.findElement({ id: 'throwing-mouseover' })
await element.click()
})
})
describe('JS DOM events', function () {
it('calls the event listener on dom mutations', async function () {
const cdpConnection = await driver.createCDPConnection('page')
await driver.logMutationEvents(cdpConnection, function (event) {
assert.strictEqual(event['attribute_name'], 'style')
assert.strictEqual(event['current_value'], '')
assert.strictEqual(event['old_value'], 'display:none;')
})
await driver.get(fileServer.Pages.dynamicPage)
let element = driver.findElement({ id: 'reveal' })
await element.click()
let revealed = driver.findElement({ id: 'revealed' })
await driver.wait(until.elementIsVisible(revealed), 5000)
})
})
describe('Basic Auth Injection', function () {
it('denies entry if username and password do not match', async function () {
const pageCdpConnection = await driver.createCDPConnection('page')
await driver.register('random', 'random', pageCdpConnection)
await driver.get(fileServer.Pages.basicAuth)
let source = await driver.getPageSource()
assert.strictEqual(source.includes('Access granted!'), false)
})
it('grants access if username and password are a match', async function () {
const pageCdpConnection = await driver.createCDPConnection('page')
await driver.register('genie', 'bottle', pageCdpConnection)
await driver.get(fileServer.Pages.basicAuth)
let source = await driver.getPageSource()
assert.strictEqual(source.includes('Access granted!'), true)
await server.stop()
})
})
describe('setDownloadPath', function () {
it('can enable downloads in headless mode', async function () {
const dir = await io.tmpDir()
await driver.setDownloadPath(dir)
const url = fileServer.whereIs('/data/firefox/webextension.xpi')
await driver.get(`data:text/html,<!DOCTYPE html>
<div><a download="" href="${url}">Go!</a></div>`)
await driver.findElement({ css: 'a' }).click()
const downloadPath = path.join(dir, 'webextension.xpi')
await driver.wait(() => io.exists(downloadPath), 5000)
const goldenPath = path.join(
__dirname,
'../../lib/test/data/firefox/webextension.xpi'
)
assert.strictEqual(
fs.readFileSync(downloadPath, 'binary'),
fs.readFileSync(goldenPath, 'binary')
)
})
it('throws if path is not a directory', async function () {
await assertInvalidArgumentError(() => driver.setDownloadPath())
await assertInvalidArgumentError(() => driver.setDownloadPath(null))
await assertInvalidArgumentError(() => driver.setDownloadPath(''))
await assertInvalidArgumentError(() => driver.setDownloadPath(1234))
const file = await io.tmpFile()
await assertInvalidArgumentError(() => driver.setDownloadPath(file))
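      // Helper: expects fn() to reject with an InvalidArgumentError.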
async function assertInvalidArgumentError(fn) {
try {
await fn()
return Promise.reject(Error('should have failed'))
} catch (err) {
if (err instanceof error.InvalidArgumentError) {
return
}
throw err
}
}
})
})
},
{ browsers: ['chrome'] }
)
| 1 | 18,850 | Is this not required? | SeleniumHQ-selenium | rb |
@@ -121,8 +121,13 @@ Blockly.BlockSvg.INLINE = -1;
*/
Blockly.BlockSvg.prototype.initSvg = function() {
goog.asserts.assert(this.workspace.rendered, 'Workspace is headless.');
+ // "Input shapes" for each input. Used to draw "holes" for unoccupied value inputs.
+ this.inputShapes_ = {};
for (var i = 0, input; input = this.inputList[i]; i++) {
input.init();
+ if (input.type === Blockly.INPUT_VALUE) {
+ this.initInputShape(input);
+ }
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) { | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Methods for graphically rendering a block as SVG.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.BlockSvg');
goog.require('Blockly.Block');
goog.require('Blockly.ContextMenu');
goog.require('Blockly.RenderedConnection');
goog.require('goog.Timer');
goog.require('goog.asserts');
goog.require('goog.dom');
goog.require('goog.math.Coordinate');
goog.require('goog.userAgent');
/**
* Class for a block's SVG representation.
* Not normally called directly, workspace.newBlock() is preferred.
* @param {!Blockly.Workspace} workspace The block's workspace.
* @param {?string} prototypeName Name of the language object containing
* type-specific functions for this block.
 * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise
* create a new id.
* @extends {Blockly.Block}
* @constructor
*/
Blockly.BlockSvg = function(workspace, prototypeName, opt_id) {
// Create core elements for the block.
/**
* @type {SVGElement}
* @private
*/
this.svgGroup_ = Blockly.createSvgElement('g', {}, null);
/** @type {SVGElement} */
this.svgPath_ = Blockly.createSvgElement('path', {'class': 'blocklyPath'},
this.svgGroup_);
this.svgPath_.tooltip = this;
/** @type {boolean} */
this.rendered = false;
Blockly.Tooltip.bindMouseEvents(this.svgPath_);
Blockly.BlockSvg.superClass_.constructor.call(this,
workspace, prototypeName, opt_id);
};
goog.inherits(Blockly.BlockSvg, Blockly.Block);
/**
* Height of this block, not including any statement blocks above or below.
* @type {number}
*/
Blockly.BlockSvg.prototype.height = 0;
/**
* Width of this block, including any connected value blocks.
* @type {number}
*/
Blockly.BlockSvg.prototype.width = 0;
/**
* Opacity of this block between 0 and 1.
* @type {number}
* @private
*/
Blockly.BlockSvg.prototype.opacity_ = 1;
/**
* Original location of block being dragged.
* @type {goog.math.Coordinate}
* @private
*/
Blockly.BlockSvg.prototype.dragStartXY_ = null;
/**
* Whether the block glows as if running.
* @type {boolean}
* @private
*/
Blockly.BlockSvg.prototype.isGlowingBlock_ = false;
/**
* Whether the block's whole stack glows as if running.
* @type {boolean}
* @private
*/
Blockly.BlockSvg.prototype.isGlowingStack_ = false;
/**
* Constant for identifying rows that are to be rendered inline.
* Don't collide with Blockly.INPUT_VALUE and friends.
* @const
*/
Blockly.BlockSvg.INLINE = -1;
/**
* Create and initialize the SVG representation of the block.
* May be called more than once.
*/
Blockly.BlockSvg.prototype.initSvg = function() {
goog.asserts.assert(this.workspace.rendered, 'Workspace is headless.');
for (var i = 0, input; input = this.inputList[i]; i++) {
input.init();
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].createIcon();
}
this.updateColour();
this.updateMovable();
if (!this.workspace.options.readOnly && !this.eventsInit_) {
Blockly.bindEvent_(this.getSvgRoot(), 'mousedown', this,
this.onMouseDown_);
var thisBlock = this;
Blockly.bindEvent_(this.getSvgRoot(), 'touchstart', null,
function(e) {Blockly.longStart_(e, thisBlock);});
}
this.eventsInit_ = true;
if (!this.getSvgRoot().parentNode) {
this.workspace.getCanvas().appendChild(this.getSvgRoot());
}
};
/**
* Select this block. Highlight it visually.
*/
Blockly.BlockSvg.prototype.select = function() {
if (this.isShadow() && this.getParent()) {
// Shadow blocks should not be selected.
this.getParent().select();
return;
}
if (Blockly.selected == this) {
return;
}
var oldId = null;
if (Blockly.selected) {
oldId = Blockly.selected.id;
// Unselect any previously selected block.
Blockly.Events.disable();
Blockly.selected.unselect();
Blockly.Events.enable();
}
var event = new Blockly.Events.Ui(null, 'selected', oldId, this.id);
event.workspaceId = this.workspace.id;
Blockly.Events.fire(event);
Blockly.selected = this;
this.addSelect();
};
/**
* Unselect this block. Remove its highlighting.
*/
Blockly.BlockSvg.prototype.unselect = function() {
if (Blockly.selected != this) {
return;
}
var event = new Blockly.Events.Ui(null, 'selected', this.id, null);
event.workspaceId = this.workspace.id;
Blockly.Events.fire(event);
Blockly.selected = null;
this.removeSelect();
};
/**
* Glow only this particular block, to highlight it visually as if it's running.
* @param {boolean} isGlowingBlock Whether the block should glow.
*/
Blockly.BlockSvg.prototype.setGlowBlock = function(isGlowingBlock) {
this.isGlowingBlock_ = isGlowingBlock;
this.updateColour();
};
/**
* Glow the stack starting with this block, to highlight it visually as if it's running.
* @param {boolean} isGlowingStack Whether the stack starting with this block should glow.
*/
Blockly.BlockSvg.prototype.setGlowStack = function(isGlowingStack) {
this.isGlowingStack_ = isGlowingStack;
// Update the applied SVG filter if the property has changed
var svg = this.getSvgRoot();
if (this.isGlowingStack_ && !svg.hasAttribute('filter')) {
svg.setAttribute('filter', 'url(#blocklyStackGlowFilter)');
} else if (!this.isGlowingStack_ && svg.hasAttribute('filter')) {
svg.removeAttribute('filter');
}
};
/**
* Block's mutator icon (if any).
* @type {Blockly.Mutator}
*/
Blockly.BlockSvg.prototype.mutator = null;
/**
* Block's comment icon (if any).
* @type {Blockly.Comment}
*/
Blockly.BlockSvg.prototype.comment = null;
/**
* Block's warning icon (if any).
* @type {Blockly.Warning}
*/
Blockly.BlockSvg.prototype.warning = null;
/**
* Returns a list of mutator, comment, and warning icons.
* @return {!Array} List of icons.
*/
Blockly.BlockSvg.prototype.getIcons = function() {
var icons = [];
if (this.mutator) {
icons.push(this.mutator);
}
if (this.comment) {
icons.push(this.comment);
}
if (this.warning) {
icons.push(this.warning);
}
return icons;
};
/**
* Wrapper function called when a mouseUp occurs during a drag operation.
* @type {Array.<!Array>}
* @private
*/
Blockly.BlockSvg.onMouseUpWrapper_ = null;
/**
* Wrapper function called when a mouseMove occurs during a drag operation.
* @type {Array.<!Array>}
* @private
*/
Blockly.BlockSvg.onMouseMoveWrapper_ = null;
/**
* Stop binding to the global mouseup and mousemove events.
* @private
*/
Blockly.BlockSvg.terminateDrag_ = function() {
if (Blockly.BlockSvg.onMouseUpWrapper_) {
Blockly.unbindEvent_(Blockly.BlockSvg.onMouseUpWrapper_);
Blockly.BlockSvg.onMouseUpWrapper_ = null;
}
if (Blockly.BlockSvg.onMouseMoveWrapper_) {
Blockly.unbindEvent_(Blockly.BlockSvg.onMouseMoveWrapper_);
Blockly.BlockSvg.onMouseMoveWrapper_ = null;
}
var selected = Blockly.selected;
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
// Terminate a drag operation.
if (selected) {
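      // Remove any insertion marker (the grey preview block) created during the drag.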
if (Blockly.insertionMarker_) {
Blockly.Events.disable();
if (Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
}
Blockly.insertionMarker_.dispose();
Blockly.insertionMarker_ = null;
Blockly.Events.enable();
}
// Update the connection locations.
var xy = selected.getRelativeToSurfaceXY();
var dxy = goog.math.Coordinate.difference(xy, selected.dragStartXY_);
var event = new Blockly.Events.Move(selected);
event.oldCoordinate = selected.dragStartXY_;
event.recordNew();
Blockly.Events.fire(event);
selected.moveConnections_(dxy.x, dxy.y);
delete selected.draggedBubbles_;
selected.setDragging_(false);
selected.moveOffDragSurface_();
selected.render();
      // Ensure that any snap and bump are part of this move's event group.
var group = Blockly.Events.getGroup();
setTimeout(function() {
Blockly.Events.setGroup(group);
selected.snapToGrid();
Blockly.Events.setGroup(false);
}, Blockly.BUMP_DELAY / 2);
setTimeout(function() {
Blockly.Events.setGroup(group);
selected.bumpNeighbours_();
Blockly.Events.setGroup(false);
}, Blockly.BUMP_DELAY);
// Fire an event to allow scrollbars to resize.
Blockly.asyncSvgResize(this.workspace);
}
}
Blockly.dragMode_ = Blockly.DRAG_NONE;
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
};
/**
* Set parent of this block to be a new block or null.
* @param {Blockly.BlockSvg} newParent New parent block.
*/
Blockly.BlockSvg.prototype.setParent = function(newParent) {
if (newParent == this.parentBlock_) {
return;
}
var svgRoot = this.getSvgRoot();
if (this.parentBlock_ && svgRoot) {
// Move this block up the DOM. Keep track of x/y translations.
var xy = this.getRelativeToSurfaceXY();
// Avoid moving a block up the DOM if it's currently selected/dragging,
// so as to avoid taking things off the drag surface.
if (Blockly.selected != this) {
this.workspace.getCanvas().appendChild(svgRoot);
this.translate(xy.x, xy.y);
}
}
Blockly.Field.startCache();
Blockly.BlockSvg.superClass_.setParent.call(this, newParent);
Blockly.Field.stopCache();
if (newParent) {
var oldXY = this.getRelativeToSurfaceXY();
newParent.getSvgRoot().appendChild(svgRoot);
var newXY = this.getRelativeToSurfaceXY();
// Move the connections to match the child's new position.
this.moveConnections_(newXY.x - oldXY.x, newXY.y - oldXY.y);
// If we are a shadow block, inherit tertiary colour.
if (this.isShadow()) {
this.setColour(this.getColour(), this.getColourSecondary(),
newParent.getColourTertiary());
}
}
};
/**
* Return the coordinates of the top-left corner of this block relative to the
* drawing surface's origin (0,0).
* @return {!goog.math.Coordinate} Object with .x and .y properties.
*/
Blockly.BlockSvg.prototype.getRelativeToSurfaceXY = function() {
// The drawing surface is relative to either the workspace canvas
// or to the drag surface group.
var x = 0;
var y = 0;
var dragSurfaceGroup = (this.workspace.dragSurface) ?
this.workspace.dragSurface.getGroup() : null;
var element = this.getSvgRoot();
if (element) {
do {
// Loop through this block and every parent.
var xy = Blockly.getRelativeXY_(element);
x += xy.x;
y += xy.y;
// If this element is the current element on the drag surface, include
// the translation of the drag surface itself.
if (this.workspace.dragSurface &&
this.workspace.dragSurface.getCurrentBlock() == element) {
var surfaceTranslation = this.workspace.dragSurface.getSurfaceTranslation();
x += surfaceTranslation.x;
y += surfaceTranslation.y;
}
element = element.parentNode;
} while (element && element != this.workspace.getCanvas() &&
element != dragSurfaceGroup);
}
return new goog.math.Coordinate(x, y);
};
/**
* Move a block by a relative offset.
* @param {number} dx Horizontal offset.
* @param {number} dy Vertical offset.
*/
Blockly.BlockSvg.prototype.moveBy = function(dx, dy) {
goog.asserts.assert(!this.parentBlock_, 'Block has parent.');
var eventsEnabled = Blockly.Events.isEnabled();
if (eventsEnabled) {
var event = new Blockly.Events.Move(this);
}
var xy = this.getRelativeToSurfaceXY();
this.translate(xy.x + dx, xy.y + dy);
this.moveConnections_(dx, dy);
if (eventsEnabled) {
event.recordNew();
Blockly.Events.fire(event);
}
};
/**
* Set this block to an absolute translation.
* @param {number} x Horizontal translation.
* @param {number} y Vertical translation.
* @param {boolean=} opt_use3d If set, use 3d translation.
*/
Blockly.BlockSvg.prototype.translate = function(x, y, opt_use3d) {
if (opt_use3d) {
this.getSvgRoot().setAttribute('style', 'transform: translate3d(' + x + 'px,' + y + 'px, 0px)');
} else {
this.getSvgRoot().setAttribute('transform', 'translate(' + x + ',' + y + ')');
}
};
/**
* Snap this block to the nearest grid point.
*/
Blockly.BlockSvg.prototype.snapToGrid = function() {
if (!this.workspace) {
return; // Deleted block.
}
if (Blockly.dragMode_ != Blockly.DRAG_NONE) {
return; // Don't bump blocks during a drag.
}
if (this.getParent()) {
return; // Only snap top-level blocks.
}
if (this.isInFlyout) {
return; // Don't move blocks around in a flyout.
}
if (!this.workspace.options.gridOptions ||
!this.workspace.options.gridOptions['snap']) {
return; // Config says no snapping.
}
var spacing = this.workspace.options.gridOptions['spacing'];
var half = spacing / 2;
var xy = this.getRelativeToSurfaceXY();
var dx = Math.round((xy.x - half) / spacing) * spacing + half - xy.x;
var dy = Math.round((xy.y - half) / spacing) * spacing + half - xy.y;
dx = Math.round(dx);
dy = Math.round(dy);
if (dx != 0 || dy != 0) {
this.moveBy(dx, dy);
}
};
/**
* Returns the coordinates of a bounding box describing the dimensions of this
* block and any blocks stacked below it.
* @return {!{topLeft: goog.math.Coordinate, bottomRight: goog.math.Coordinate}}
* Object with top left and bottom right coordinates of the bounding box.
*/
Blockly.BlockSvg.prototype.getBoundingRectangle = function() {
  var blockXY = this.getRelativeToSurfaceXY();
var blockBounds = this.getHeightWidth();
var topLeft;
var bottomRight;
if (this.RTL) {
topLeft = new goog.math.Coordinate(blockXY.x - blockBounds.width,
blockXY.y);
bottomRight = new goog.math.Coordinate(blockXY.x,
blockXY.y + blockBounds.height);
} else {
topLeft = new goog.math.Coordinate(blockXY.x, blockXY.y);
bottomRight = new goog.math.Coordinate(blockXY.x + blockBounds.width,
blockXY.y + blockBounds.height);
}
return {topLeft: topLeft, bottomRight: bottomRight};
};
/**
* Set block opacity for SVG rendering.
 * @param {number} opacity Intended opacity, between 0 and 1
*/
Blockly.BlockSvg.prototype.setOpacity = function(opacity) {
this.opacity_ = opacity;
if (this.rendered) {
this.updateColour();
}
};
/**
* Get block opacity for SVG rendering.
* @return {number} Intended opacity, betweeen 0 and 1
*/
Blockly.BlockSvg.prototype.getOpacity = function() {
return this.opacity_;
};
/**
* Set whether the block is collapsed or not.
* @param {boolean} collapsed True if collapsed.
*/
Blockly.BlockSvg.prototype.setCollapsed = function(collapsed) {
if (this.collapsed_ == collapsed) {
return;
}
var renderList = [];
// Show/hide the inputs.
for (var i = 0, input; input = this.inputList[i]; i++) {
renderList.push.apply(renderList, input.setVisible(!collapsed));
}
var COLLAPSED_INPUT_NAME = '_TEMP_COLLAPSED_INPUT';
if (collapsed) {
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].setVisible(false);
}
var text = this.toString(Blockly.COLLAPSE_CHARS);
this.appendDummyInput(COLLAPSED_INPUT_NAME).appendField(text).init();
} else {
this.removeInput(COLLAPSED_INPUT_NAME);
// Clear any warnings inherited from enclosed blocks.
this.setWarningText(null);
}
Blockly.BlockSvg.superClass_.setCollapsed.call(this, collapsed);
if (!renderList.length) {
// No child blocks, just render this block.
renderList[0] = this;
}
if (this.rendered) {
for (var i = 0, block; block = renderList[i]; i++) {
block.render();
}
// Don't bump neighbours.
// Although bumping neighbours would make sense, users often collapse
// all their functions and store them next to each other. Expanding and
// bumping causes all their definitions to go out of alignment.
}
};
/**
* Open the next (or previous) FieldTextInput.
* @param {Blockly.Field|Blockly.Block} start Current location.
* @param {boolean} forward If true go forward, otherwise backward.
*/
Blockly.BlockSvg.prototype.tab = function(start, forward) {
// This function need not be efficient since it runs once on a keypress.
// Create an ordered list of all text fields and connected inputs.
var list = [];
for (var i = 0, input; input = this.inputList[i]; i++) {
for (var j = 0, field; field = input.fieldRow[j]; j++) {
if (field instanceof Blockly.FieldTextInput) {
// TODO: Also support dropdown fields.
list.push(field);
}
}
if (input.connection) {
var block = input.connection.targetBlock();
if (block) {
list.push(block);
}
}
}
i = list.indexOf(start);
if (i == -1) {
// No start location, start at the beginning or end.
i = forward ? -1 : list.length;
}
var target = list[forward ? i + 1 : i - 1];
if (!target) {
// Ran off of list.
var parent = this.getParent();
if (parent) {
parent.tab(this, forward);
}
} else if (target instanceof Blockly.Field) {
target.showEditor_();
} else {
target.tab(null, forward);
}
};
/**
* Handle a mouse-down on an SVG block.
* @param {!Event} e Mouse down event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseDown_ = function(e) {
if (this.workspace.options.readOnly) {
return;
}
if (this.isInFlyout) {
e.stopPropagation();
return;
}
this.workspace.markFocused();
// Update Blockly's knowledge of its own location.
Blockly.svgResize(this.workspace);
Blockly.terminateDrag_();
this.select();
Blockly.hideChaff();
this.workspace.recordDeleteAreas();
if (Blockly.isRightButton(e)) {
// Right-click.
this.showContextMenu_(e);
} else if (!this.isMovable()) {
    // Allow immovable blocks to be selected and to show a context menu, but
    // not dragged. Let this event bubble up to the document, so the workspace
    // may be dragged instead.
return;
} else {
if (!Blockly.Events.getGroup()) {
Blockly.Events.setGroup(true);
}
// Left-click (or middle click)
Blockly.Css.setCursor(Blockly.Css.Cursor.CLOSED);
this.dragStartXY_ = this.getRelativeToSurfaceXY();
this.workspace.startDrag(e, this.dragStartXY_);
Blockly.dragMode_ = Blockly.DRAG_STICKY;
Blockly.BlockSvg.onMouseUpWrapper_ = Blockly.bindEvent_(document,
'mouseup', this, this.onMouseUp_);
Blockly.BlockSvg.onMouseMoveWrapper_ = Blockly.bindEvent_(document,
'mousemove', this, this.onMouseMove_);
// Build a list of bubbles that need to be moved and where they started.
this.draggedBubbles_ = [];
var descendants = this.getDescendants();
for (var i = 0, descendant; descendant = descendants[i]; i++) {
var icons = descendant.getIcons();
for (var j = 0; j < icons.length; j++) {
var data = icons[j].getIconLocation();
data.bubble = icons[j];
this.draggedBubbles_.push(data);
}
}
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
e.preventDefault();
};
/**
* Handle a mouse-up anywhere in the SVG pane. Is only registered when a
* block is clicked. We can't use mouseUp on the block since a fast-moving
* cursor can briefly escape the block before it catches up.
* @param {!Event} e Mouse up event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseUp_ = function(e) {
var isNotShadowBlock = this.ioClickHackIsNotShadow_(e);
if (Blockly.dragMode_ != Blockly.DRAG_FREE && !Blockly.WidgetDiv.isVisible() && isNotShadowBlock) {
Blockly.Events.fire(
new Blockly.Events.Ui(this, 'click', undefined, undefined));
// Scratch-specific: also fire a "stack click" event for this stack.
// This is used to toggle the stack when any block in the stack is clicked.
var rootBlock = this.workspace.getBlockById(this.id).getRootBlock();
Blockly.Events.fire(
new Blockly.Events.Ui(rootBlock, 'stackclick', undefined, undefined));
}
Blockly.terminateDrag_();
if (Blockly.selected && Blockly.highlightedConnection_) {
this.positionNewBlock(Blockly.selected,
Blockly.localConnection_, Blockly.highlightedConnection_);
// Connect two blocks together.
Blockly.localConnection_.connect(Blockly.highlightedConnection_);
if (this.rendered) {
// Trigger a connection animation.
// Determine which connection is inferior (lower in the source stack).
var inferiorConnection = Blockly.localConnection_.isSuperior() ?
Blockly.highlightedConnection_ : Blockly.localConnection_;
inferiorConnection.getSourceBlock().connectionUiEffect();
}
if (this.workspace.trashcan) {
// Don't throw an object in the trash can if it just got connected.
this.workspace.trashcan.close();
}
} else if (!this.getParent() && Blockly.selected.isDeletable() &&
this.workspace.isDeleteArea(e)) {
var trashcan = this.workspace.trashcan;
if (trashcan) {
goog.Timer.callOnce(trashcan.close, 100, trashcan);
}
Blockly.selected.dispose(false, true);
// Dropping a block on the trash can will usually cause the workspace to
// resize to contain the newly positioned block. Force a second resize
// now that the block has been deleted.
Blockly.asyncSvgResize(this.workspace);
}
if (Blockly.highlightedConnection_) {
Blockly.highlightedConnection_ = null;
}
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
if (!Blockly.WidgetDiv.isVisible()) {
Blockly.Events.setGroup(false);
}
};
/**
* XXX: Hack to fix drop-down clicking issue for Google I/O.
* We cannot just check isShadow, since `this` is the parent block.
* See: https://github.com/google/blockly/issues/336
* @param {!Event} e Mouse up event.
* @return {boolean} True if the block is not the drop-down shadow.
*/
Blockly.BlockSvg.prototype.ioClickHackIsNotShadow_ = function(e) {
// True if click target is a non-shadow block path.
if (e.target === this.svgPath_ &&
e.target.parentNode === this.getSvgRoot()) {
return true;
}
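  // Also treat clicks on this block's image fields (such as drop-down arrows) as
  // non-shadow clicks.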
for (var i = 0, input; input = this.inputList[i]; i++) {
for (var j = 0, field; field = input.fieldRow[j]; j++) {
if (field.imageElement_ && field.imageElement_ === e.target) {
return true;
}
}
}
return false;
};
/**
* Load the block's help page in a new window.
* @private
*/
Blockly.BlockSvg.prototype.showHelp_ = function() {
var url = goog.isFunction(this.helpUrl) ? this.helpUrl() : this.helpUrl;
if (url) {
// @todo rewrite
alert(url);
}
};
/**
* Show the context menu for this block.
* @param {!Event} e Mouse event.
* @private
*/
Blockly.BlockSvg.prototype.showContextMenu_ = function(e) {
if (this.workspace.options.readOnly || !this.contextMenu) {
return;
}
// Save the current block in a variable for use in closures.
var block = this;
var menuOptions = [];
if (this.isDeletable() && this.isMovable() && !block.isInFlyout) {
// Option to duplicate this block.
var duplicateOption = {
text: Blockly.Msg.DUPLICATE_BLOCK,
enabled: true,
callback: function() {
Blockly.duplicate_(block);
}
};
if (this.getDescendants().length > this.workspace.remainingCapacity()) {
duplicateOption.enabled = false;
}
menuOptions.push(duplicateOption);
if (this.isEditable() && this.workspace.options.comments) {
// Option to add/remove a comment.
var commentOption = {enabled: !goog.userAgent.IE};
if (this.comment) {
commentOption.text = Blockly.Msg.REMOVE_COMMENT;
commentOption.callback = function() {
block.setCommentText(null);
};
} else {
commentOption.text = Blockly.Msg.ADD_COMMENT;
commentOption.callback = function() {
block.setCommentText('');
};
}
menuOptions.push(commentOption);
}
// Option to delete this block.
// Count the number of blocks that are nested in this block.
var descendantCount = this.getDescendants(true).length;
var nextBlock = this.getNextBlock();
if (nextBlock) {
// Blocks in the current stack would survive this block's deletion.
descendantCount -= nextBlock.getDescendants(true).length;
}
var deleteOption = {
text: descendantCount == 1 ? Blockly.Msg.DELETE_BLOCK :
Blockly.Msg.DELETE_X_BLOCKS.replace('%1', String(descendantCount)),
enabled: true,
callback: function() {
Blockly.Events.setGroup(true);
block.dispose(true, true);
Blockly.Events.setGroup(false);
}
};
menuOptions.push(deleteOption);
}
// Option to get help.
var url = goog.isFunction(this.helpUrl) ? this.helpUrl() : this.helpUrl;
var helpOption = {enabled: !!url};
helpOption.text = Blockly.Msg.HELP;
helpOption.callback = function() {
block.showHelp_();
};
menuOptions.push(helpOption);
// Allow the block to add or modify menuOptions.
if (this.customContextMenu && !block.isInFlyout) {
this.customContextMenu(menuOptions);
}
Blockly.ContextMenu.show(e, menuOptions, this.RTL);
Blockly.ContextMenu.currentBlock = this;
};
/**
* Move the connections for this block and all blocks attached under it.
* Also update any attached bubbles.
* @param {number} dx Horizontal offset from current location.
* @param {number} dy Vertical offset from current location.
* @private
*/
Blockly.BlockSvg.prototype.moveConnections_ = function(dx, dy) {
if (!this.rendered) {
// Rendering is required to lay out the blocks.
// This is probably an invisible block attached to a collapsed block.
return;
}
var myConnections = this.getConnections_(false);
for (var i = 0; i < myConnections.length; i++) {
myConnections[i].moveBy(dx, dy);
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].computeIconLocation();
}
// Recurse through all blocks attached under this one.
for (i = 0; i < this.childBlocks_.length; i++) {
this.childBlocks_[i].moveConnections_(dx, dy);
}
};
/**
* Recursively adds or removes the dragging class to this node and its children.
* @param {boolean} adding True if adding, false if removing.
* @private
*/
Blockly.BlockSvg.prototype.setDragging_ = function(adding) {
if (adding) {
this.addDragging();
Blockly.draggingConnections_ =
Blockly.draggingConnections_.concat(this.getConnections_(true));
} else {
this.removeDragging();
Blockly.draggingConnections_ = [];
}
// Recurse through all blocks attached under this one.
for (var i = 0; i < this.childBlocks_.length; i++) {
this.childBlocks_[i].setDragging_(adding);
}
};
/**
* Move this block to its workspace's drag surface, accounting for positioning.
* Generally should be called at the same time as setDragging_(true).
* @private
*/
Blockly.BlockSvg.prototype.moveToDragSurface_ = function() {
// The translation for drag surface blocks,
// is equal to the current relative-to-surface position,
// to keep the position in sync as it move on/off the surface.
var xy = this.getRelativeToSurfaceXY();
this.clearTransformAttributes_();
this.workspace.dragSurface.translateSurface(xy.x, xy.y);
// Execute the move on the top-level SVG component
this.workspace.dragSurface.setBlocksAndShow(this.getSvgRoot());
};
/**
* Move this block back to the workspace block canvas.
* Generally should be called at the same time as setDragging_(false).
* @private
*/
Blockly.BlockSvg.prototype.moveOffDragSurface_ = function() {
// Translate to current position, turning off 3d.
var xy = this.getRelativeToSurfaceXY();
this.clearTransformAttributes_();
this.translate(xy.x, xy.y, false);
this.workspace.dragSurface.clearAndHide(this.workspace.getCanvas());
};
/**
* Clear the block of style="..." and transform="..." attributes.
* Used when the block is switching from 3d to 2d transform or vice versa.
* @private
*/
Blockly.BlockSvg.prototype.clearTransformAttributes_ = function() {
if (this.getSvgRoot().hasAttribute('transform')) {
this.getSvgRoot().removeAttribute('transform');
}
if (this.getSvgRoot().hasAttribute('style')) {
this.getSvgRoot().removeAttribute('style');
}
};
/**
* Drag this block to follow the mouse.
* @param {!Event} e Mouse move event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseMove_ = function(e) {
if (e.type == 'mousemove' && e.clientX <= 1 && e.clientY == 0 &&
e.button == 0) {
/* HACK:
Safari Mobile 6.0 and Chrome for Android 18.0 fire rogue mousemove
events on certain touch actions. Ignore events with these signatures.
This may result in a one-pixel blind spot in other browsers,
but this shouldn't be noticeable. */
e.stopPropagation();
return;
}
var oldXY = this.getRelativeToSurfaceXY();
var newXY = this.workspace.moveDrag(e);
if (Blockly.dragMode_ == Blockly.DRAG_STICKY) {
// Still dragging within the sticky DRAG_RADIUS.
var dr = goog.math.Coordinate.distance(oldXY, newXY) * this.workspace.scale;
if (dr > Blockly.DRAG_RADIUS) {
// Switch to unrestricted dragging.
Blockly.dragMode_ = Blockly.DRAG_FREE;
Blockly.longStop_();
// Must move to drag surface before unplug(),
// or else connections will calculate the wrong relative to surface XY
// in tighten_(). Then blocks connected to this block move around on the
// drag surface. By moving to the drag surface before unplug, connection
// positions will be calculated correctly.
this.moveToDragSurface_();
// Clear WidgetDiv/DropDownDiv without animating, in case blocks are moved
// around
Blockly.WidgetDiv.hide(true);
Blockly.DropDownDiv.hideWithoutAnimation();
if (this.parentBlock_) {
// Push this block to the very top of the stack.
this.unplug();
}
this.setDragging_(true);
}
}
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
this.handleDragFree_(oldXY, newXY, e);
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
e.preventDefault();
};
/**
* Handle a mouse movement when a block is already freely dragging.
* @param {!goog.math.Coordinate} oldXY The position of the block on screen
* before the most recent mouse movement.
* @param {!goog.math.Coordinate} newXY The new location after applying the
* mouse movement.
* @param {!Event} e Mouse move event.
* @private
*/
Blockly.BlockSvg.prototype.handleDragFree_ = function(oldXY, newXY, e) {
var dxy = goog.math.Coordinate.difference(oldXY, this.dragStartXY_);
this.workspace.dragSurface.translateSurface(newXY.x, newXY.y);
// Drag all the nested bubbles.
for (var i = 0; i < this.draggedBubbles_.length; i++) {
var commentData = this.draggedBubbles_[i];
commentData.bubble.setIconLocation(
goog.math.Coordinate.sum(commentData, dxy));
}
// Check to see if any of this block's connections are within range of
// another block's connection.
var myConnections = this.getConnections_(false);
// Also check the last connection on this stack
var lastOnStack = this.lastConnectionInStack();
if (lastOnStack && lastOnStack != this.nextConnection) {
myConnections.push(lastOnStack);
}
var closestConnection = null;
var localConnection = null;
var radiusConnection = Blockly.SNAP_RADIUS;
for (i = 0; i < myConnections.length; i++) {
var myConnection = myConnections[i];
var neighbour = myConnection.closest(radiusConnection, dxy);
if (neighbour.connection) {
closestConnection = neighbour.connection;
localConnection = myConnection;
radiusConnection = neighbour.radius;
}
}
var updatePreviews = true;
if (Blockly.localConnection_ && Blockly.highlightedConnection_) {
var xDiff = Blockly.localConnection_.x_ + dxy.x -
Blockly.highlightedConnection_.x_;
var yDiff = Blockly.localConnection_.y_ + dxy.y -
Blockly.highlightedConnection_.y_;
var curDistance = Math.sqrt(xDiff * xDiff + yDiff * yDiff);
// Slightly prefer the existing preview over a new preview.
if (closestConnection && radiusConnection > curDistance -
Blockly.CURRENT_CONNECTION_PREFERENCE) {
updatePreviews = false;
}
}
if (updatePreviews) {
var candidateIsLast = (localConnection == lastOnStack);
this.updatePreviews(closestConnection, localConnection, radiusConnection,
e, newXY.x - this.dragStartXY_.x, newXY.y - this.dragStartXY_.y,
candidateIsLast);
}
};
/**
* Preview the results of the drag if the mouse is released immediately.
* @param {Blockly.Connection} closestConnection The closest connection found
* during the search
* @param {Blockly.Connection} localConnection The connection on the moving
* block.
* @param {number} radiusConnection The distance between closestConnection and
* localConnection.
* @param {!Event} e Mouse move event.
* @param {number} dx The x distance the block has moved onscreen up to this
* point in the drag.
* @param {number} dy The y distance the block has moved onscreen up to this
* point in the drag.
* @param {boolean} candidateIsLast True if the dragging stack is more than one
* block long and localConnection is the last connection on the stack.
*/
Blockly.BlockSvg.prototype.updatePreviews = function(closestConnection,
localConnection, radiusConnection, e, dx, dy, candidateIsLast) {
// Don't fire events for insertion marker creation or movement.
Blockly.Events.disable();
// Remove an insertion marker if needed. For Scratch-Blockly we are using
// grayed-out blocks instead of highlighting the connection; for compatibility
// with Web Blockly the name "highlightedConnection" will still be used.
if (Blockly.highlightedConnection_ &&
Blockly.highlightedConnection_ != closestConnection) {
if (Blockly.insertionMarker_ && Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
}
// If there's already an insertion marker but it's representing the wrong
// block, delete it so we can create the correct one.
if (Blockly.insertionMarker_ &&
((candidateIsLast && Blockly.localConnection_.sourceBlock_ == this) ||
(!candidateIsLast && Blockly.localConnection_.sourceBlock_ != this))) {
Blockly.insertionMarker_.dispose();
Blockly.insertionMarker_ = null;
}
Blockly.highlightedConnection_ = null;
Blockly.localConnection_ = null;
}
// Add an insertion marker if needed.
if (closestConnection &&
closestConnection != Blockly.highlightedConnection_ &&
!closestConnection.sourceBlock_.isInsertionMarker()) {
Blockly.highlightedConnection_ = closestConnection;
Blockly.localConnection_ = localConnection;
if (!Blockly.insertionMarker_) {
Blockly.insertionMarker_ =
this.workspace.newBlock(Blockly.localConnection_.sourceBlock_.type);
Blockly.insertionMarker_.setInsertionMarker(true);
Blockly.insertionMarker_.initSvg();
}
var insertionMarker = Blockly.insertionMarker_;
var insertionMarkerConnection = insertionMarker.getMatchingConnection(
localConnection.sourceBlock_, localConnection);
if (insertionMarkerConnection != Blockly.insertionMarkerConnection_) {
insertionMarker.rendered = true;
// Render disconnected from everything else so that we have a valid
// connection location.
insertionMarker.render();
insertionMarker.getSvgRoot().setAttribute('visibility', 'visible');
this.positionNewBlock(insertionMarker,
insertionMarkerConnection, closestConnection);
if (insertionMarkerConnection.type == Blockly.PREVIOUS_STATEMENT &&
!insertionMarker.nextConnection) {
Blockly.bumpedConnection_ = closestConnection.targetConnection;
}
// Renders insertion marker.
insertionMarkerConnection.connect(closestConnection);
Blockly.insertionMarkerConnection_ = insertionMarkerConnection;
}
}
// Reenable events.
Blockly.Events.enable();
// Provide visual indication of whether the block will be deleted if
// dropped here.
if (this.isDeletable()) {
this.workspace.isDeleteArea(e);
}
};
/**
* Disconnect the current insertion marker from the stack, and heal the stack to
* its previous state.
*/
Blockly.BlockSvg.disconnectInsertionMarker = function() {
// The insertion marker is the first block in a stack, either because it
// doesn't have a previous connection or because the previous connection is
// not connected. Unplug won't do anything in that case. Instead, unplug the
// following block.
if (Blockly.insertionMarkerConnection_ ==
Blockly.insertionMarker_.nextConnection &&
(!Blockly.insertionMarker_.previousConnection ||
!Blockly.insertionMarker_.previousConnection.targetConnection)) {
Blockly.insertionMarkerConnection_.targetBlock().unplug(false);
}
// Inside of a C-block, first statement connection.
else if (Blockly.insertionMarkerConnection_.type == Blockly.NEXT_STATEMENT &&
Blockly.insertionMarkerConnection_ !=
Blockly.insertionMarker_.nextConnection) {
var innerConnection = Blockly.insertionMarkerConnection_.targetConnection;
innerConnection.sourceBlock_.unplug(false);
var previousBlockNextConnection =
Blockly.insertionMarker_.previousConnection.targetConnection;
Blockly.insertionMarker_.unplug(true);
if (previousBlockNextConnection) {
previousBlockNextConnection.connect(innerConnection);
}
}
else {
Blockly.insertionMarker_.unplug(true /* healStack */);
}
if (Blockly.insertionMarkerConnection_.targetConnection) {
throw 'insertionMarkerConnection still connected at the end of disconnectInsertionMarker';
}
Blockly.insertionMarkerConnection_ = null;
Blockly.insertionMarker_.getSvgRoot().setAttribute('visibility', 'hidden');
};
/**
* Add or remove the UI indicating if this block is movable or not.
*/
Blockly.BlockSvg.prototype.updateMovable = function() {
if (this.isMovable()) {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDraggable');
} else {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDraggable');
}
};
/**
* Set whether this block is movable or not.
* @param {boolean} movable True if movable.
*/
Blockly.BlockSvg.prototype.setMovable = function(movable) {
Blockly.BlockSvg.superClass_.setMovable.call(this, movable);
this.updateMovable();
};
/**
* Set whether this block is editable or not.
* @param {boolean} editable True if editable.
*/
Blockly.BlockSvg.prototype.setEditable = function(editable) {
Blockly.BlockSvg.superClass_.setEditable.call(this, editable);
if (this.rendered) {
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
icons[i].updateEditable();
}
}
};
/**
* Set whether this block is a shadow block or not.
* @param {boolean} shadow True if a shadow.
*/
Blockly.BlockSvg.prototype.setShadow = function(shadow) {
Blockly.BlockSvg.superClass_.setShadow.call(this, shadow);
this.updateColour();
};
/**
* Set whether this block is an insertion marker block or not.
* @param {boolean} insertionMarker True if an insertion marker.
*/
Blockly.BlockSvg.prototype.setInsertionMarker = function(insertionMarker) {
Blockly.BlockSvg.superClass_.setInsertionMarker.call(this, insertionMarker);
this.updateColour();
};
/**
* Return the root node of the SVG or null if none exists.
* @return {Element} The root SVG node (probably a group).
*/
Blockly.BlockSvg.prototype.getSvgRoot = function() {
return this.svgGroup_;
};
/**
* Dispose of this block.
* @param {boolean} healStack If true, then try to heal any gap by connecting
* the next statement with the previous statement. Otherwise, dispose of
* all children of this block.
* @param {boolean} animate If true, show a disposal animation and sound.
*/
Blockly.BlockSvg.prototype.dispose = function(healStack, animate) {
Blockly.Tooltip.hide();
Blockly.Field.startCache();
// If this block is being dragged, unlink the mouse events.
if (Blockly.selected == this) {
this.unselect();
Blockly.terminateDrag_();
}
// If this block has a context menu open, close it.
if (Blockly.ContextMenu.currentBlock == this) {
Blockly.ContextMenu.hide();
}
if (animate && this.rendered) {
this.unplug(healStack);
this.disposeUiEffect();
}
// Stop rerendering.
this.rendered = false;
Blockly.Events.disable();
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
icons[i].dispose();
}
Blockly.Events.enable();
Blockly.BlockSvg.superClass_.dispose.call(this, healStack);
goog.dom.removeNode(this.svgGroup_);
// Sever JavaScript to DOM connections.
this.svgGroup_ = null;
this.svgPath_ = null;
Blockly.Field.stopCache();
};
/**
* Play some UI effects (sound, animation) when disposing of a block.
*/
Blockly.BlockSvg.prototype.disposeUiEffect = function() {
this.workspace.playAudio('delete');
var xy = Blockly.getSvgXY_(/** @type {!Element} */ (this.svgGroup_),
this.workspace);
// Deeply clone the current block.
var clone = this.svgGroup_.cloneNode(true);
clone.translateX_ = xy.x;
clone.translateY_ = xy.y;
clone.setAttribute('transform',
'translate(' + clone.translateX_ + ',' + clone.translateY_ + ')');
this.workspace.getParentSvg().appendChild(clone);
clone.bBox_ = clone.getBBox();
// Start the animation.
Blockly.BlockSvg.disposeUiStep_(clone, this.RTL, new Date(),
this.workspace.scale);
};
/**
* Play some UI effects (sound) after a connection has been established.
*/
Blockly.BlockSvg.prototype.connectionUiEffect = function() {
this.workspace.playAudio('click');
};
/**
* Animate a cloned block and eventually dispose of it.
 * This is a class method, not an instance method since the original block has
* been destroyed and is no longer accessible.
* @param {!Element} clone SVG element to animate and dispose of.
* @param {boolean} rtl True if RTL, false if LTR.
* @param {!Date} start Date of animation's start.
* @param {number} workspaceScale Scale of workspace.
* @private
*/
Blockly.BlockSvg.disposeUiStep_ = function(clone, rtl, start, workspaceScale) {
var ms = (new Date()) - start;
var percent = ms / 150;
if (percent > 1) {
goog.dom.removeNode(clone);
} else {
var x = clone.translateX_ +
(rtl ? -1 : 1) * clone.bBox_.width * workspaceScale / 2 * percent;
var y = clone.translateY_ + clone.bBox_.height * workspaceScale * percent;
var scale = (1 - percent) * workspaceScale;
clone.setAttribute('transform', 'translate(' + x + ',' + y + ')' +
' scale(' + scale + ')');
var closure = function() {
Blockly.BlockSvg.disposeUiStep_(clone, rtl, start, workspaceScale);
};
setTimeout(closure, 10);
}
};
/**
* Enable or disable a block.
*/
Blockly.BlockSvg.prototype.updateDisabled = function() {
// not supported
};
/**
* Returns the comment on this block (or '' if none).
* @return {string} Block's comment.
*/
Blockly.BlockSvg.prototype.getCommentText = function() {
if (this.comment) {
var comment = this.comment.getText();
// Trim off trailing whitespace.
return comment.replace(/\s+$/, '').replace(/ +\n/g, '\n');
}
return '';
};
/**
* Set this block's comment text.
* @param {?string} text The text, or null to delete.
*/
Blockly.BlockSvg.prototype.setCommentText = function(text) {
var changedState = false;
if (goog.isString(text)) {
if (!this.comment) {
this.comment = new Blockly.Comment(this);
changedState = true;
}
this.comment.setText(/** @type {string} */ (text));
} else {
if (this.comment) {
this.comment.dispose();
changedState = true;
}
}
if (changedState && this.rendered) {
this.render();
// Adding or removing a comment icon will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Set this block's warning text.
* @param {?string} text The text, or null to delete.
* @param {string=} opt_id An optional ID for the warning text to be able to
* maintain multiple warnings.
*/
Blockly.BlockSvg.prototype.setWarningText = function(text, opt_id) {
if (!this.setWarningText.pid_) {
// Create a database of warning PIDs.
// Only runs once per block (and only those with warnings).
this.setWarningText.pid_ = Object.create(null);
}
var id = opt_id || '';
if (!id) {
    // Kill all previous pending processes; this edit supersedes them all.
for (var n in this.setWarningText.pid_) {
clearTimeout(this.setWarningText.pid_[n]);
delete this.setWarningText.pid_[n];
}
} else if (this.setWarningText.pid_[id]) {
// Only queue up the latest change. Kill any earlier pending process.
clearTimeout(this.setWarningText.pid_[id]);
delete this.setWarningText.pid_[id];
}
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
// Don't change the warning text during a drag.
// Wait until the drag finishes.
var thisBlock = this;
this.setWarningText.pid_[id] = setTimeout(function() {
if (thisBlock.workspace) { // Check block wasn't deleted.
delete thisBlock.setWarningText.pid_[id];
thisBlock.setWarningText(text, id);
}
}, 100);
return;
}
if (this.isInFlyout) {
text = null;
}
var changedState = false;
if (goog.isString(text)) {
if (!this.warning) {
this.warning = new Blockly.Warning(this);
changedState = true;
}
this.warning.setText(/** @type {string} */ (text), id);
} else {
// Dispose all warnings if no id is given.
if (this.warning && !id) {
this.warning.dispose();
changedState = true;
} else if (this.warning) {
var oldText = this.warning.getText();
this.warning.setText('', id);
var newText = this.warning.getText();
if (!newText) {
this.warning.dispose();
}
changedState = oldText == newText;
}
}
if (changedState && this.rendered) {
this.render();
// Adding or removing a warning icon will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Give this block a mutator dialog.
* @param {Blockly.Mutator} mutator A mutator dialog instance or null to remove.
*/
Blockly.BlockSvg.prototype.setMutator = function(mutator) {
if (this.mutator && this.mutator !== mutator) {
this.mutator.dispose();
}
if (mutator) {
mutator.block_ = this;
this.mutator = mutator;
mutator.createIcon();
}
};
/**
* Select this block. Highlight it visually.
*/
Blockly.BlockSvg.prototype.addSelect = function() {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklySelected');
// Move the selected block to the top of the stack.
this.svgGroup_.parentNode.appendChild(this.svgGroup_);
};
/**
* Unselect this block. Remove its highlighting.
*/
Blockly.BlockSvg.prototype.removeSelect = function() {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklySelected');
};
/**
* Adds the dragging class to this block.
*/
Blockly.BlockSvg.prototype.addDragging = function() {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDragging');
};
/**
* Removes the dragging class from this block.
*/
Blockly.BlockSvg.prototype.removeDragging = function() {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDragging');
};
// Overrides of functions on Blockly.Block that take into account whether the
// block has been rendered.
/**
* Change the colour of a block.
* @param {number|string} colour HSV hue value, or #RRGGBB string.
* @param {number|string} colourSecondary Secondary HSV hue value, or #RRGGBB
* string.
* @param {number|string} colourTertiary Tertiary HSV hue value, or #RRGGBB
* string.
*/
Blockly.BlockSvg.prototype.setColour = function(colour, colourSecondary,
colourTertiary) {
Blockly.BlockSvg.superClass_.setColour.call(this, colour, colourSecondary,
colourTertiary);
if (this.rendered) {
this.updateColour();
}
};
/**
* Set whether this block can chain onto the bottom of another block.
* @param {boolean} newBoolean True if there can be a previous statement.
* @param {string|Array.<string>|null|undefined} opt_check Statement type or
* list of statement types. Null/undefined if any type could be connected.
*/
Blockly.BlockSvg.prototype.setPreviousStatement =
function(newBoolean, opt_check) {
/* eslint-disable indent */
Blockly.BlockSvg.superClass_.setPreviousStatement.call(this, newBoolean,
opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
}; /* eslint-enable indent */
/**
* Set whether another block can chain onto the bottom of this block.
* @param {boolean} newBoolean True if there can be a next statement.
* @param {string|Array.<string>|null|undefined} opt_check Statement type or
* list of statement types. Null/undefined if any type could be connected.
*/
Blockly.BlockSvg.prototype.setNextStatement = function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setNextStatement.call(this, newBoolean,
opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether this block returns a value.
* @param {boolean} newBoolean True if there is an output.
* @param {string|Array.<string>|null|undefined} opt_check Returned type or list
* of returned types. Null or undefined if any type could be returned
* (e.g. variable get).
*/
Blockly.BlockSvg.prototype.setOutput = function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setOutput.call(this, newBoolean, opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether value inputs are arranged horizontally or vertically.
* @param {boolean} newBoolean True if inputs are horizontal.
*/
Blockly.BlockSvg.prototype.setInputsInline = function(newBoolean) {
Blockly.BlockSvg.superClass_.setInputsInline.call(this, newBoolean);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Remove an input from this block.
* @param {string} name The name of the input.
* @param {boolean=} opt_quiet True to prevent error if input is not present.
* @throws {goog.asserts.AssertionError} if the input is not present and
* opt_quiet is not true.
*/
Blockly.BlockSvg.prototype.removeInput = function(name, opt_quiet) {
Blockly.BlockSvg.superClass_.removeInput.call(this, name, opt_quiet);
if (this.rendered) {
this.render();
// Removing an input will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Move a numbered input to a different location on this block.
* @param {number} inputIndex Index of the input to move.
* @param {number} refIndex Index of input that should be after the moved input.
*/
Blockly.BlockSvg.prototype.moveNumberedInputBefore = function(
inputIndex, refIndex) {
Blockly.BlockSvg.superClass_.moveNumberedInputBefore.call(this, inputIndex,
refIndex);
if (this.rendered) {
this.render();
// Moving an input will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Add a value input, statement input or local variable to this block.
* @param {number} type Either Blockly.INPUT_VALUE or Blockly.NEXT_STATEMENT or
* Blockly.DUMMY_INPUT.
 * @param {string} name Language-neutral identifier which may be used to find this
* input again. Should be unique to this block.
* @return {!Blockly.Input} The input object created.
* @private
*/
Blockly.BlockSvg.prototype.appendInput_ = function(type, name) {
var input = Blockly.BlockSvg.superClass_.appendInput_.call(this, type, name);
if (this.rendered) {
this.render();
// Adding an input will cause the block to change shape.
this.bumpNeighbours_();
}
return input;
};
/**
* Returns connections originating from this block.
* @param {boolean} all If true, return all connections even hidden ones.
* Otherwise, for a non-rendered block return an empty list, and for a
 *     collapsed block don't return input connections.
* @return {!Array.<!Blockly.Connection>} Array of connections.
* @private
*/
Blockly.BlockSvg.prototype.getConnections_ = function(all) {
var myConnections = [];
if (all || this.rendered) {
if (this.outputConnection) {
myConnections.push(this.outputConnection);
}
if (this.previousConnection) {
myConnections.push(this.previousConnection);
}
if (this.nextConnection) {
myConnections.push(this.nextConnection);
}
if (all || !this.collapsed_) {
for (var i = 0, input; input = this.inputList[i]; i++) {
if (input.connection) {
myConnections.push(input.connection);
}
}
}
}
return myConnections;
};
/**
* Create a connection of the specified type.
* @param {number} type The type of the connection to create.
* @return {!Blockly.RenderedConnection} A new connection of the specified type.
* @private
*/
Blockly.BlockSvg.prototype.makeConnection_ = function(type) {
return new Blockly.RenderedConnection(this, type);
};
| 1 | 7,820 | You're using scare quotes on these terms instead of defining them. | LLK-scratch-blocks | js |
@@ -13,6 +13,9 @@ var DdevVersion = "v0.3.0-dev" // Note that this is overridden by make
// for examples defining version constraints.
var DockerVersionConstraint = ">= 17.05.0-ce"
+// DockerComposeVersionConstraint is the current minimum version of docker-compose required for ddev.
+var DockerComposeVersionConstraint = ">= 1.10.0"
+
// WebImg defines the default web image used for applications.
var WebImg = "drud/nginx-php-fpm-local" // Note that this is overridden by make
| 1 | package version
// VERSION is supplied with the git committish this is built from
var VERSION = ""
// IMPORTANT: These versions are overridden by version ldflags specifications VERSION_VARIABLES in the Makefile
// DdevVersion is the current version of ddev, by default the git committish (should be current git tag)
var DdevVersion = "v0.3.0-dev" // Note that this is overridden by make
// DockerVersionConstraint is the current minimum version of docker required for ddev.
// See https://godoc.org/github.com/Masterminds/semver#hdr-Checking_Version_Constraints
// for examples defining version constraints.
var DockerVersionConstraint = ">= 17.05.0-ce"
// WebImg defines the default web image used for applications.
var WebImg = "drud/nginx-php-fpm-local" // Note that this is overridden by make
// WebTag defines the default web image tag for drud dev
var WebTag = "v0.8.0" // Note that this is overridden by make
// DBImg defines the default db image used for applications.
var DBImg = "drud/mysql-local-57" // Note that this is overridden by make
// DBTag defines the default db image tag for drud dev
var DBTag = "v0.6.3" // Note that this is overridden by make
// DBAImg defines the default phpmyadmin image tag used for applications.
var DBAImg = "drud/phpmyadmin"
// DBATag defines the default phpmyadmin image tag used for applications.
var DBATag = "v0.2.0"
// RouterImage defines the image used for the router.
var RouterImage = "drud/ddev-router" // Note that this is overridden by make
// RouterTag defines the tag used for the router.
var RouterTag = "v0.4.3" // Note that this is overridden by make
// COMMIT is the actual committish, supplied by make
var COMMIT = "COMMIT should be overridden"
// BUILDINFO is information with date and context, supplied by make
var BUILDINFO = "BUILDINFO should have new info"
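As a hedged sketch of the const form suggested in the review note below (not the actual ddev source): the two version-constraint strings are never rewritten at build time, so they could be Go constants, while the values injected by make via -ldflags have to stay as var.

```go
// Sketch only: constraint strings as constants; ldflags-injected values stay as var.
package version

const (
	// DockerVersionConstraint is the minimum docker version required for ddev.
	DockerVersionConstraint = ">= 17.05.0-ce"
	// DockerComposeVersionConstraint is the minimum docker-compose version required for ddev.
	DockerComposeVersionConstraint = ">= 1.10.0"
)

// DdevVersion is overridden by make via -ldflags -X, so it must remain a variable.
var DdevVersion = "v0.3.0-dev"
```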
| 1 | 11,996 | These should both be const, not var right? | drud-ddev | go |
@@ -78,9 +78,7 @@ public class SettingsManager {
public void save() {
Settings settings = load();
- try {
- File file = new File(settingsFileName);
- OutputStream outputStream = new FileOutputStream(file);
+ try (OutputStream outputStream = new FileOutputStream(new File(settingsFileName))) {
DefaultPropertiesPersister persister = new DefaultPropertiesPersister();
persister.store(settings.getProperties(), outputStream, "Phoenicis User Settings");
} catch (Exception e) { | 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.settings;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.util.DefaultPropertiesPersister;
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;
public class SettingsManager {
@Value("${application.theme}")
private String theme;
@Value("${application.scale}")
private double scale;
@Value("${application.viewsource}")
private boolean viewScriptSource;
@Value("${application.repository.configuration}")
private String repository;
private String settingsFileName = "config.properties";
public SettingsManager(String settingsFileName) {
this.settingsFileName = settingsFileName;
}
public String getTheme() {
return theme;
}
public void setTheme(String theme) {
this.theme = theme;
}
public double getScale() {
return scale;
}
public void setScale(double scale) {
this.scale = scale;
}
public boolean isViewScriptSource() {
return viewScriptSource;
}
public void setViewScriptSource(boolean viewScriptSource) {
this.viewScriptSource = viewScriptSource;
}
public String getRepository() {
return repository;
}
public void setRepository(String repository) {
this.repository = repository;
}
public void save() {
Settings settings = load();
try {
File file = new File(settingsFileName);
OutputStream outputStream = new FileOutputStream(file);
DefaultPropertiesPersister persister = new DefaultPropertiesPersister();
persister.store(settings.getProperties(), outputStream, "Phoenicis User Settings");
} catch (Exception e) {
e.printStackTrace();
}
}
private Settings load() {
Settings settings = new Settings();
settings.set(Setting.THEME, theme);
settings.set(Setting.SCALE, scale);
settings.set(Setting.VIEW_SOURCE, String.valueOf(viewScriptSource));
settings.set(Setting.REPOSITORY, repository);
return settings;
}
}
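A hedged sketch of the narrower exception handling the review note below asks about (not Phoenicis code; plain java.util.Properties stands in for Spring's DefaultPropertiesPersister so the example stays self-contained):

```java
// Sketch only: try-with-resources plus a specific IOException catch for the save path.
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Properties;

class SettingsSaveSketch {
    void save(Properties properties, String settingsFileName) {
        try (OutputStream outputStream = new FileOutputStream(settingsFileName)) {
            properties.store(outputStream, "Phoenicis User Settings");
        } catch (IOException e) {
            // Only I/O failures are expected here; avoid swallowing unrelated runtime errors.
            e.printStackTrace();
        }
    }
}
```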
| 1 | 10,008 | Can you catch a more specific exception here? Thanks :-) | PhoenicisOrg-phoenicis | java |
@@ -80,6 +80,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
r.Log.Error(err, "failed to recover chaos")
return ctrl.Result{Requeue: true}, nil
}
+ return ctrl.Result{}, nil
}
// Start failure action | 1 | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"context"
"time"
"github.com/go-logr/logr"
"github.com/pingcap/chaos-mesh/api/v1alpha1"
"github.com/pingcap/chaos-mesh/pkg/apiinterface"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// InnerCommonObject used in common chaos reconcile
type InnerCommonObject interface {
IsDeleted() bool
apiinterface.StatefulObject
}
// InnerCommonReconcile used in common chaos reconcile
type InnerCommonReconcile interface {
Apply(ctx context.Context, req ctrl.Request, chaos InnerCommonObject) error
Recover(ctx context.Context, req ctrl.Request, chaos InnerCommonObject) error
Object() InnerCommonObject
}
// Reconciler for common chaos
type Reconciler struct {
InnerCommonReconcile
client.Client
Log logr.Logger
}
// NewReconciler would create Reconciler for common chaos
func NewReconciler(reconcile InnerCommonReconcile, c client.Client, log logr.Logger) *Reconciler {
return &Reconciler{
InnerCommonReconcile: reconcile,
Client: c,
Log: log,
}
}
// Reconcile the common chaos
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
var err error
r.Log.Info("reconciling a common chaos", "name", req.Name, "namespace", req.Namespace)
ctx := context.Background()
chaos := r.Object()
if err = r.Get(ctx, req.NamespacedName, chaos); err != nil {
r.Log.Error(err, "unable to get chaos")
return ctrl.Result{}, nil
}
if chaos.IsDeleted() {
// This chaos was deleted
r.Log.Info("Removing self")
err = r.Recover(ctx, req, chaos)
if err != nil {
r.Log.Error(err, "failed to recover chaos")
return ctrl.Result{Requeue: true}, nil
}
}
// Start failure action
r.Log.Info("Performing Action")
status := chaos.GetStatus()
err = r.Apply(ctx, req, chaos)
if err != nil {
r.Log.Error(err, "failed to apply chaos action")
updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error {
return r.Update(ctx, chaos)
})
if updateError != nil {
r.Log.Error(updateError, "unable to update chaos finalizers")
}
return ctrl.Result{Requeue: true}, nil
}
status.Experiment.StartTime = &metav1.Time{
Time: time.Now(),
}
status.Experiment.Phase = v1alpha1.ExperimentPhaseRunning
if err := r.Update(ctx, chaos); err != nil {
r.Log.Error(err, "unable to update chaosctl status")
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
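To make the control-flow point behind this row's diff and the review note below concrete, a stripped-down, hypothetical sketch (names are illustrative, not the chaos-mesh API): without an early return after the recover branch, a deleted object would fall through and be re-applied.

```go
// Sketch only: the recover branch needs its own return.
package main

import "fmt"

func reconcile(deleted bool) {
	if deleted {
		fmt.Println("recovering chaos")
		// Without this return, execution would fall through and
		// re-apply the action that was just recovered.
		return
	}
	fmt.Println("applying chaos action")
}

func main() {
	reconcile(true)  // prints "recovering chaos" only
	reconcile(false) // prints "applying chaos action"
}
```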
| 1 | 13,076 | This is a bug during handling the recover logic in `common chaos` controller, I fixed in this request when I found it. | chaos-mesh-chaos-mesh | go |
@@ -11,7 +11,11 @@ from mmcv.runner import get_dist_info
from mmdet.core import tensor2imgs
-def single_gpu_test(model, data_loader, show=False, out_dir=None):
+def single_gpu_test(model,
+ data_loader,
+ show=False,
+ out_dir=None,
+ score_thr=0.3):
model.eval()
results = []
dataset = data_loader.dataset | 1 | import os.path as osp
import pickle
import shutil
import tempfile
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
from mmdet.core import tensor2imgs
def single_gpu_test(model, data_loader, show=False, out_dir=None):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if show or out_dir:
img_tensor = data['img'][0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['filename'])
else:
out_file = None
model.module.show_result(
img_show, result, show=show, out_file=out_file)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = (
len(data['img_meta']._data)
if 'img_meta' in data else data['img'][0].size(0))
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| 1 | 19,399 | During testing, we adopt the score threshold specified in the config file. Here the threshold is only used for visualization, and the variable name `score_thr` can be misleading. Renaming it to `show_score_thr` would be better. | open-mmlab-mmdetection | py |
@@ -1,7 +1,7 @@
-<header class="banner" role="banner">
+<header class="banner">
<div class="container">
<a class="brand" href="<?= esc_url(home_url('/')); ?>"><?php bloginfo('name'); ?></a>
- <nav role="navigation">
+ <nav class="site-nav">
<?php
if (has_nav_menu('primary_navigation')) :
wp_nav_menu(['theme_location' => 'primary_navigation', 'menu_class' => 'nav']); | 1 | <header class="banner" role="banner">
<div class="container">
<a class="brand" href="<?= esc_url(home_url('/')); ?>"><?php bloginfo('name'); ?></a>
<nav role="navigation">
<?php
if (has_nav_menu('primary_navigation')) :
wp_nav_menu(['theme_location' => 'primary_navigation', 'menu_class' => 'nav']);
endif;
?>
</nav>
</div>
</header>
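A hedged sketch of the class naming the review note below asks for (illustrative markup, not the final sage template):

```php
<nav class="nav-primary">
  <?php wp_nav_menu(['theme_location' => 'primary_navigation', 'menu_class' => 'nav']); ?>
</nav>
```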
| 1 | 9,174 | can you make this `nav-primary` please? i'd like to roll with this since the `<ul>` class is `nav`, and primary is the name of the navigation menu | roots-sage | php |
@@ -49,7 +49,16 @@ app.factory('CalendarListItem', function($rootScope, $window, Calendar, WebCal,
},
publicSharingURL: {
get: () => {
- return $rootScope.root + 'p/' + context.calendar.publicToken;
+ let displayname = context.calendar.displayname
+ .replace(/\s+/g, '-').replace(/[^\w\-]+/g, '')
+ .replace(/\-\-+/g, '-').replace(/^-+/, '')
+ .replace(/-+$/, '');
+
+ if (displayname === '') {
+ return $rootScope.root + 'p/' + context.calendar.publicToken;
+ } else {
+ return $rootScope.root + 'p/' + context.calendar.publicToken + '/' + displayname;
+ }
}
},
publicEmbedURL: { | 1 | /**
* Calendar App
*
* @author Raghu Nayyar
* @author Georg Ehrke
* @copyright 2016 Raghu Nayyar <[email protected]>
* @copyright 2016 Georg Ehrke <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
* License as published by the Free Software Foundation; either
* version 3 of the License, or any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU AFFERO GENERAL PUBLIC LICENSE for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
*/
app.factory('CalendarListItem', function($rootScope, $window, Calendar, WebCal, isSharingAPI) {
'use strict';
function CalendarListItem(calendar) {
const context = {
calendar: calendar,
isEditingShares: false,
isEditingProperties: false,
isDisplayingCalDAVUrl: false,
isDisplayingWebCalUrl: false,
isSendingMail: false
};
const iface = {
_isACalendarListItemObject: true
};
if (!Calendar.isCalendar(calendar)) {
return null;
}
Object.defineProperties(iface, {
calendar: {
get: function() {
return context.calendar;
}
},
publicSharingURL: {
get: () => {
return $rootScope.root + 'p/' + context.calendar.publicToken;
}
},
publicEmbedURL: {
get: () => {
return $rootScope.root + 'embed/' + context.calendar.publicToken;
}
}
});
iface.displayCalDAVUrl = function() {
return context.isDisplayingCalDAVUrl;
};
iface.showCalDAVUrl = function() {
context.isDisplayingCalDAVUrl = true;
};
iface.displayWebCalUrl = function() {
return context.isDisplayingWebCalUrl;
};
iface.hideCalDAVUrl = function() {
context.isDisplayingCalDAVUrl = false;
};
iface.showWebCalUrl = function() {
context.isDisplayingWebCalUrl = true;
};
iface.hideWebCalUrl = function() {
context.isDisplayingWebCalUrl = false;
};
iface.showSharingIcon = function() {
const isCalendarShareable = context.calendar.isShareable();
const isCalendarShared = context.calendar.isShared();
const isCalendarPublishable = context.calendar.isPublishable();
// Publishing does not depend on sharing API
// always show sharing icon in this case
if (isCalendarPublishable) {
return true;
}
// if the sharing API was disabled, but the calendar was
// previously shared, allow users to edit or remove
// existing shares
if (!isSharingAPI && isCalendarShared && isCalendarShareable) {
return true;
}
return (isSharingAPI && isCalendarShareable);
};
iface.isEditingShares = function() {
return context.isEditingShares;
};
iface.isSendingMail = function() {
return context.isSendingMail;
};
iface.toggleEditingShares = function() {
context.isEditingShares = !context.isEditingShares;
};
iface.toggleSendingMail = function() {
context.isSendingMail = !context.isSendingMail;
};
iface.isEditing = function() {
return context.isEditingProperties;
};
iface.displayActions = function() {
return !iface.isEditing();
};
iface.displayColorIndicator = function() {
return (!iface.isEditing() && !context.calendar.isRendering());
};
iface.displaySpinner = function() {
return (!iface.isEditing() && context.calendar.isRendering());
};
iface.openEditor = function() {
iface.color = context.calendar.color;
iface.displayname = context.calendar.displayname;
context.isEditingProperties = true;
};
iface.cancelEditor = function() {
iface.color = '';
iface.displayname = '';
context.isEditingProperties = false;
};
iface.saveEditor = function() {
context.calendar.color = iface.color;
context.calendar.displayname = iface.displayname;
iface.color = '';
iface.displayname = '';
context.isEditingProperties = false;
};
iface.isWebCal = function() {
return WebCal.isWebCal(context.calendar);
};
iface.getOwnerName = function() {
return context.calendar.ownerDisplayname || context.calendar.owner;
};
iface.getPublicDisplayname = function() {
const searchFor = '(' + context.calendar.owner + ')';
const lastIndexOf = context.calendar.displayname.lastIndexOf(searchFor);
return context.calendar.displayname.substr(0, lastIndexOf - 1);
};
//Properties for ng-model of calendar editor
iface.color = '';
iface.displayname = '';
iface.order = 0;
iface.selectedSharee = '';
return iface;
}
CalendarListItem.isCalendarListItem = function(obj) {
return (typeof obj === 'object' && obj !== null && obj._isACalendarListItemObject === true);
};
return CalendarListItem;
});
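On the slug-generation chain added in this row's diff and queried in the review note below: a hedged sketch of how the dash clean-up passes could be merged with alternation groups (the `slugify` helper name is illustrative, not part of the app).

```js
// Sketch only: trim leading and trailing dashes in a single pass with an alternation group.
function slugify(displayname) {
  return displayname
    .replace(/\s+/g, '-')        // whitespace to dashes
    .replace(/[^\w\-]+/g, '')    // drop anything that is not a word character or dash
    .replace(/-{2,}/g, '-')      // squeeze repeated dashes
    .replace(/^-+|-+$/g, '');    // leading and trailing trim combined
}
// e.g. slugify('  My Calendar -- 2017  ') === 'My-Calendar-2017'
```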
| 1 | 5,998 | @georgehrke Just out of curiosity. Couldn't you combine at least the combine the regex for '-' and '' with groups? | nextcloud-calendar | js |
@@ -292,7 +292,7 @@ HashedCollectionConfig::SwiftObjectAtAddress(
if (error.Fail())
return nullptr;
- CompilerType anyObject_type = ast_ctx->FindQualifiedType("Swift.AnyObject");
+ CompilerType anyObject_type = ast_ctx->GetAnyObjectType();
if (!anyObject_type)
return nullptr;
| 1 | //===-- SwiftHashedContainer.cpp --------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "SwiftHashedContainer.h"
#include "lldb/Core/ValueObjectConstResult.h"
#include "lldb/DataFormatters/FormattersHelpers.h"
#include "lldb/Symbol/ClangASTContext.h"
#include "lldb/Symbol/SwiftASTContext.h"
#include "lldb/Target/ObjCLanguageRuntime.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/SwiftLanguageRuntime.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "Plugins/Language/ObjC/NSDictionary.h"
#include "swift/AST/ASTContext.h"
#include "llvm/ADT/StringRef.h"
using namespace lldb;
using namespace lldb_private;
using namespace lldb_private::formatters;
using namespace lldb_private::formatters::swift;
namespace lldb_private {
namespace formatters {
namespace swift {
class EmptyHashedStorageHandler: public HashedStorageHandler {
public:
EmptyHashedStorageHandler(CompilerType elem_type)
: m_elem_type(elem_type) {}
virtual size_t GetCount() override { return 0; }
virtual CompilerType GetElementType() override { return m_elem_type; }
virtual ValueObjectSP GetElementAtIndex(size_t) override {
return ValueObjectSP();
}
virtual bool IsValid() override { return true; }
virtual ~EmptyHashedStorageHandler() {}
private:
CompilerType m_elem_type;
};
class NativeHashedStorageHandler: public HashedStorageHandler {
public:
NativeHashedStorageHandler(ValueObjectSP storage_sp,
CompilerType key_type,
CompilerType value_type);
virtual size_t GetCount() override { return m_count; }
virtual CompilerType GetElementType() override { return m_element_type; }
virtual ValueObjectSP GetElementAtIndex(size_t) override;
virtual bool IsValid() override;
virtual ~NativeHashedStorageHandler() override {}
protected:
typedef uint64_t Index;
typedef uint64_t Cell;
bool ReadBitmaskAtIndex(Index, Status &error);
lldb::addr_t GetLocationOfKeyAtCell(Cell i) {
return m_keys_ptr + (i * m_key_stride);
}
lldb::addr_t GetLocationOfValueAtCell(Cell i) {
return m_value_stride
? m_values_ptr + (i * m_value_stride)
: LLDB_INVALID_ADDRESS;
}
  // These are sharp tools that assume that the Cell contains valid data and
  // the destination buffer has enough room to store the data to - use with
  // caution.
bool GetDataForKeyAtCell(Cell i, void *data_ptr) {
if (!data_ptr)
return false;
lldb::addr_t addr = GetLocationOfKeyAtCell(i);
Status error;
m_process->ReadMemory(addr, data_ptr, m_key_stride, error);
if (error.Fail())
return false;
return true;
}
bool GetDataForValueAtCell(Cell i, void *data_ptr) {
if (!data_ptr || !m_value_stride)
return false;
lldb::addr_t addr = GetLocationOfValueAtCell(i);
Status error;
m_process->ReadMemory(addr, data_ptr, m_value_stride, error);
if (error.Fail())
return false;
return true;
}
private:
ValueObject *m_nativeStorage;
Process *m_process;
uint32_t m_ptr_size;
uint64_t m_count;
uint64_t m_bucketCount;
lldb::addr_t m_bitmask_ptr;
lldb::addr_t m_keys_ptr;
lldb::addr_t m_values_ptr;
CompilerType m_element_type;
uint64_t m_key_stride;
uint64_t m_value_stride;
uint64_t m_key_stride_padded;
std::map<lldb::addr_t, uint64_t> m_bitmask_cache;
};
class CocoaHashedStorageHandler: public HashedStorageHandler {
public:
CocoaHashedStorageHandler(
ValueObjectSP cocoaObject_sp,
SyntheticChildrenFrontEnd *frontend)
: m_cocoaObject_sp(cocoaObject_sp), m_frontend(frontend) {}
virtual size_t GetCount() override {
return m_frontend->CalculateNumChildren();
}
virtual CompilerType GetElementType() override {
// this doesn't make sense here - the synthetic children know best
return CompilerType();
}
virtual ValueObjectSP GetElementAtIndex(size_t idx) override {
return m_frontend->GetChildAtIndex(idx);
}
virtual bool IsValid() override {
return m_frontend.get() != nullptr;
}
virtual ~CocoaHashedStorageHandler() {}
private:
// reader beware: this entails you must only pass self-rooted
// valueobjects to this class
ValueObjectSP m_cocoaObject_sp;
std::unique_ptr<SyntheticChildrenFrontEnd> m_frontend;
};
}
}
}
void
HashedCollectionConfig::RegisterSummaryProviders(
lldb::TypeCategoryImplSP swift_category_sp,
TypeSummaryImpl::Flags flags
) const {
#ifndef LLDB_DISABLE_PYTHON
using lldb_private::formatters::AddCXXSummary;
auto summaryProvider = GetSummaryProvider();
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_collection_demangledRegex, flags, true);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_nativeStorage_demangledRegex, flags, true);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_emptyStorage_demangled, flags, false);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_deferredBridgedStorage_demangledRegex, flags, true);
flags.SetSkipPointers(false);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_nativeStorage_mangledRegex_ObjC, flags, true);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_emptyStorage_mangled_ObjC, flags, false);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_deferredBridgedStorage_mangledRegex_ObjC, flags, true);
#endif // LLDB_DISABLE_PYTHON
}
void
HashedCollectionConfig::RegisterSyntheticChildrenCreators(
lldb::TypeCategoryImplSP swift_category_sp,
SyntheticChildren::Flags flags
) const {
#ifndef LLDB_DISABLE_PYTHON
using lldb_private::formatters::AddCXXSynthetic;
auto creator = GetSyntheticChildrenCreator();
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_collection_demangledRegex, flags, true);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_nativeStorage_demangledRegex, flags, true);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_emptyStorage_demangled, flags, false);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_deferredBridgedStorage_demangledRegex, flags, true);
flags.SetSkipPointers(false);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_nativeStorage_mangledRegex_ObjC, flags, true);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_emptyStorage_mangled_ObjC, flags, false);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_deferredBridgedStorage_mangledRegex_ObjC, flags, true);
#endif // LLDB_DISABLE_PYTHON
}
bool
HashedCollectionConfig::IsNativeStorageName(ConstString name) const {
assert(m_nativeStorage_demangledPrefix);
auto n = name.GetStringRef();
return n.startswith(m_nativeStorage_demangledPrefix.GetStringRef());
}
bool
HashedCollectionConfig::IsEmptyStorageName(ConstString name) const {
assert(m_emptyStorage_demangled);
return name == m_emptyStorage_demangled;
}
bool
HashedCollectionConfig::IsDeferredBridgedStorageName(ConstString name) const {
assert(m_deferredBridgedStorage_demangledPrefix);
auto n = name.GetStringRef();
return n.startswith(m_deferredBridgedStorage_demangledPrefix.GetStringRef());
}
HashedStorageHandlerUP
HashedCollectionConfig::CreateEmptyHandler(CompilerType elem_type) const {
return HashedStorageHandlerUP(new EmptyHashedStorageHandler(elem_type));
}
ValueObjectSP
HashedCollectionConfig::SwiftObjectAtAddress(
const ExecutionContext &exe_ctx,
lldb::addr_t address) const {
if (address == LLDB_INVALID_ADDRESS)
return nullptr;
ProcessSP process_sp = exe_ctx.GetProcessSP();
if (!process_sp)
return nullptr;
// Create a ValueObject with a Swift AnyObject type referencing the
// same address.
Status error;
ExecutionContextScope *exe_scope = exe_ctx.GetBestExecutionContextScope();
auto reader =
process_sp->GetTarget().GetScratchSwiftASTContext(error, *exe_scope);
SwiftASTContext *ast_ctx = reader.get();
if (!ast_ctx)
return nullptr;
if (error.Fail())
return nullptr;
CompilerType anyObject_type = ast_ctx->FindQualifiedType("Swift.AnyObject");
if (!anyObject_type)
return nullptr;
lldb::DataBufferSP buffer(
new lldb_private::DataBufferHeap(&address, sizeof(lldb::addr_t)));
return ValueObjectConstResult::Create(
exe_scope, anyObject_type, ConstString("swift"),
buffer, exe_ctx.GetByteOrder(), exe_ctx.GetAddressByteSize());
}
ValueObjectSP
HashedCollectionConfig::CocoaObjectAtAddress(
const ExecutionContext &exe_ctx,
lldb::addr_t address) const {
if (address == LLDB_INVALID_ADDRESS)
return nullptr;
ProcessSP process_sp = exe_ctx.GetProcessSP();
if (!process_sp)
return nullptr;
CompilerType id = exe_ctx.GetTargetSP()
->GetScratchClangASTContext()
->GetBasicType(lldb::eBasicTypeObjCID);
InferiorSizedWord isw(address, *process_sp);
return ValueObject::CreateValueObjectFromData(
"cocoa", isw.GetAsData(process_sp->GetByteOrder()), exe_ctx, id);
}
HashedStorageHandlerUP
HashedCollectionConfig::CreateNativeHandler(
ValueObjectSP value_sp,
ValueObjectSP storage_sp) const {
if (!storage_sp)
return nullptr;
// FIXME: To prevent reading uninitialized data, get the runtime
// class of storage_sp and verify that it's the type we expect
// (m_nativeStorage_mangledPrefix). Also, get the correct key_type
// and value_type directly from its generic arguments instead of
// using value_sp.
CompilerType type(value_sp->GetCompilerType());
CompilerType key_type = type.GetGenericArgumentType(0);
CompilerType value_type = type.GetGenericArgumentType(1);
auto handler = HashedStorageHandlerUP(
new NativeHashedStorageHandler(storage_sp, key_type, value_type));
if (!handler->IsValid())
return nullptr;
return handler;
}
HashedStorageHandlerUP
HashedCollectionConfig::CreateCocoaHandler(ValueObjectSP storage_sp) const {
auto cocoaChildrenCreator = GetCocoaSyntheticChildrenCreator();
auto frontend = cocoaChildrenCreator(nullptr, storage_sp);
if (!frontend) {
return nullptr;
}
// Cocoa frontends must be updated before use
frontend->Update();
auto handler = HashedStorageHandlerUP(
new CocoaHashedStorageHandler(storage_sp, frontend));
if (!handler->IsValid())
return nullptr;
return handler;
}
ValueObjectSP
NativeHashedStorageHandler::GetElementAtIndex(size_t idx) {
ValueObjectSP null_valobj_sp;
if (idx >= m_count)
return null_valobj_sp;
if (!IsValid())
return null_valobj_sp;
int64_t found_idx = -1;
Status error;
for (Cell cell_idx = 0; cell_idx < m_bucketCount; cell_idx++) {
const bool used = ReadBitmaskAtIndex(cell_idx, error);
if (error.Fail()) {
Status bitmask_error;
bitmask_error.SetErrorStringWithFormat(
"Failed to read bit-mask index from Dictionary: %s",
error.AsCString());
return ValueObjectConstResult::Create(m_process, bitmask_error);
}
if (!used)
continue;
if (++found_idx == idx) {
// you found it!!!
DataBufferSP full_buffer_sp(
new DataBufferHeap(m_key_stride_padded + m_value_stride, 0));
uint8_t *key_buffer_ptr = full_buffer_sp->GetBytes();
uint8_t *value_buffer_ptr =
m_value_stride ? (key_buffer_ptr + m_key_stride_padded) : nullptr;
if (GetDataForKeyAtCell(cell_idx, key_buffer_ptr) &&
(value_buffer_ptr == nullptr ||
GetDataForValueAtCell(cell_idx, value_buffer_ptr))) {
DataExtractor full_data;
full_data.SetData(full_buffer_sp);
StreamString name;
name.Printf("[%zu]", idx);
return ValueObjectConstResult::Create(
m_process, m_element_type, ConstString(name.GetData()), full_data);
}
}
}
return null_valobj_sp;
}
bool NativeHashedStorageHandler::ReadBitmaskAtIndex(Index i, Status &error) {
if (i >= m_bucketCount)
return false;
const size_t word = i / (8 * m_ptr_size);
const size_t offset = i % (8 * m_ptr_size);
const lldb::addr_t effective_ptr = m_bitmask_ptr + (word * m_ptr_size);
uint64_t data = 0;
auto cached = m_bitmask_cache.find(effective_ptr);
if (cached != m_bitmask_cache.end()) {
data = cached->second;
} else {
data = m_process->ReadUnsignedIntegerFromMemory(effective_ptr, m_ptr_size,
0, error);
if (error.Fail())
return false;
m_bitmask_cache[effective_ptr] = data;
}
const uint64_t mask = static_cast<uint64_t>(1UL << offset);
const uint64_t value = (data & mask);
return (0 != value);
}
NativeHashedStorageHandler::NativeHashedStorageHandler(
ValueObjectSP nativeStorage_sp,
CompilerType key_type,
CompilerType value_type
) : m_nativeStorage(nativeStorage_sp.get()), m_process(nullptr),
m_ptr_size(0), m_count(0), m_bucketCount(0),
m_bitmask_ptr(LLDB_INVALID_ADDRESS), m_keys_ptr(LLDB_INVALID_ADDRESS),
m_values_ptr(LLDB_INVALID_ADDRESS), m_element_type(),
m_key_stride(key_type.GetByteStride()), m_value_stride(0),
m_key_stride_padded(m_key_stride), m_bitmask_cache() {
static ConstString g_initializedEntries("initializedEntries");
static ConstString g_values("values");
static ConstString g__rawValue("_rawValue");
static ConstString g_keys("keys");
static ConstString g_key("key");
static ConstString g_value("value");
static ConstString g__value("_value");
static ConstString g_capacity("capacity");
static ConstString g_bucketCount("bucketCount");
static ConstString g_count("count");
if (!m_nativeStorage)
return;
if (!key_type)
return;
if (value_type) {
m_value_stride = value_type.GetByteStride();
if (SwiftASTContext *swift_ast =
llvm::dyn_cast_or_null<SwiftASTContext>(key_type.GetTypeSystem())) {
std::vector<SwiftASTContext::TupleElement> tuple_elements{
{g_key, key_type}, {g_value, value_type}};
m_element_type = swift_ast->CreateTupleType(tuple_elements);
m_key_stride_padded = m_element_type.GetByteStride() - m_value_stride;
}
} else {
m_element_type = key_type;
}
if (!m_element_type)
return;
m_process = m_nativeStorage->GetProcessSP().get();
if (!m_process)
return;
m_ptr_size = m_process->GetAddressByteSize();
auto bucketCount_sp =
m_nativeStorage->GetChildAtNamePath({g_bucketCount, g__value});
if (!bucketCount_sp) // <4.1: bucketCount was called capacity.
bucketCount_sp = m_nativeStorage->GetChildAtNamePath({g_capacity, g__value});
if (!bucketCount_sp)
return;
m_bucketCount = bucketCount_sp->GetValueAsUnsigned(0);
auto count_sp = m_nativeStorage->GetChildAtNamePath({g_count, g__value});
if (!count_sp)
return;
m_count = count_sp->GetValueAsUnsigned(0);
m_nativeStorage = nativeStorage_sp.get();
m_bitmask_ptr =
m_nativeStorage
->GetChildAtNamePath({g_initializedEntries, g_values, g__rawValue})
->GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
if (ValueObjectSP value_child_sp =
m_nativeStorage->GetChildAtNamePath({g_values, g__rawValue})) {
// it is fine not to pass a value_type, but if the value child exists, then
// you have to pass one
if (!value_type)
return;
m_values_ptr = value_child_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
}
m_keys_ptr = m_nativeStorage->GetChildAtNamePath({g_keys, g__rawValue})
->GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
  // Make sure we can read the bitmask at the count index,
  // as this will keep us from trying to reconstruct many
  // bajillions of invalid children.
// Don't bother if the native buffer handler is invalid already, however.
if (IsValid())
{
Status error;
ReadBitmaskAtIndex(m_bucketCount - 1, error);
if (error.Fail())
{
m_bitmask_ptr = LLDB_INVALID_ADDRESS;
}
}
}
bool NativeHashedStorageHandler::IsValid() {
return (m_nativeStorage != nullptr) && (m_process != nullptr) &&
m_element_type.IsValid() && m_bitmask_ptr != LLDB_INVALID_ADDRESS &&
m_keys_ptr != LLDB_INVALID_ADDRESS &&
/*m_values_ptr != LLDB_INVALID_ADDRESS && you can't check values
because some containers have only keys*/
// The bucket count must be a power of two.
m_bucketCount >= 1 && (m_bucketCount & (m_bucketCount - 1)) == 0 &&
m_bucketCount >= m_count;
}
HashedStorageHandlerUP
HashedCollectionConfig::CreateHandler(ValueObject &valobj) const {
static ConstString g__variant("_variant"); // Swift 5
static ConstString g__variantBuffer("_variantBuffer"); // Swift 4
static ConstString g_native("native");
static ConstString g_cocoa("cocoa");
static ConstString g_nativeBuffer("nativeBuffer"); // Swift 4
static ConstString g__storage("_storage");
Status error;
ValueObjectSP valobj_sp = valobj.GetSP();
if (valobj_sp->GetObjectRuntimeLanguage() != eLanguageTypeSwift &&
valobj_sp->IsPointerType()) {
valobj_sp = SwiftObjectAtAddress(valobj_sp->GetExecutionContextRef(),
valobj_sp->GetPointerValue());
}
valobj_sp = valobj_sp->GetQualifiedRepresentationIfAvailable(
lldb::eDynamicCanRunTarget, false);
ConstString type_name_cs(valobj_sp->GetTypeName());
if (IsNativeStorageName(type_name_cs)) {
return CreateNativeHandler(valobj_sp, valobj_sp);
}
if (IsEmptyStorageName(type_name_cs)) {
return CreateEmptyHandler();
}
if (IsDeferredBridgedStorageName(type_name_cs)) {
auto storage_sp = valobj_sp->GetChildAtNamePath({g_native, g__storage});
if (!storage_sp) // try Swift 4 name
storage_sp = valobj_sp->GetChildAtNamePath({g_nativeBuffer, g__storage});
return CreateNativeHandler(valobj_sp, storage_sp);
}
ValueObjectSP variant_sp =
valobj_sp->GetChildMemberWithName(g__variant, true);
if (!variant_sp) // try Swift 4 name
variant_sp = valobj_sp->GetChildMemberWithName(g__variantBuffer, true);
if (!variant_sp)
return nullptr;
ConstString variant_cs(variant_sp->GetValueAsCString());
if (!variant_cs)
return nullptr;
if (g_cocoa == variant_cs) {
// it's an NSDictionary/NSSet in disguise
static ConstString g_object("object"); // Swift 5
static ConstString g_cocoaDictionary("cocoaDictionary"); // Swift 4
static ConstString g_cocoaSet("cocoaSet"); // Swift 4
ValueObjectSP child_sp =
variant_sp->GetChildAtNamePath({g_cocoa, g_object});
if (!child_sp) // try Swift 4 name for dictionaries
child_sp = variant_sp->GetChildAtNamePath({g_cocoa, g_cocoaDictionary});
if (!child_sp) // try Swift 4 name for sets
child_sp = variant_sp->GetChildAtNamePath({g_cocoa, g_cocoaSet});
if (!child_sp)
return nullptr;
// child_sp is the _NSDictionary/_NSSet reference.
ValueObjectSP ref_sp = child_sp->GetChildAtIndex(0, true); // instance
if (!ref_sp)
return nullptr;
uint64_t cocoa_ptr = ref_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
if (cocoa_ptr == LLDB_INVALID_ADDRESS)
return nullptr;
// FIXME: for some reason I need to zero out the MSB; figure out why
cocoa_ptr &= 0x00FFFFFFFFFFFFFF;
auto cocoa_sp = CocoaObjectAtAddress(valobj_sp->GetExecutionContextRef(),
cocoa_ptr);
if (!cocoa_sp)
return nullptr;
return CreateCocoaHandler(cocoa_sp);
}
if (g_native == variant_cs) {
auto storage_sp = variant_sp->GetChildAtNamePath({g_native, g__storage});
return CreateNativeHandler(valobj_sp, storage_sp);
}
return nullptr;
}
HashedSyntheticChildrenFrontEnd::HashedSyntheticChildrenFrontEnd(
const HashedCollectionConfig &config,
ValueObjectSP valobj_sp
) : SyntheticChildrenFrontEnd(*valobj_sp.get()),
m_config(config),
m_buffer()
{}
size_t
HashedSyntheticChildrenFrontEnd::CalculateNumChildren() {
return m_buffer ? m_buffer->GetCount() : 0;
}
ValueObjectSP
HashedSyntheticChildrenFrontEnd::GetChildAtIndex(size_t idx) {
if (!m_buffer)
return ValueObjectSP();
ValueObjectSP child_sp = m_buffer->GetElementAtIndex(idx);
if (child_sp)
child_sp->SetSyntheticChildrenGenerated(true);
return child_sp;
}
bool
HashedSyntheticChildrenFrontEnd::Update() {
m_buffer = m_config.CreateHandler(m_backend);
return false;
}
bool
HashedSyntheticChildrenFrontEnd::MightHaveChildren() {
return true;
}
size_t
HashedSyntheticChildrenFrontEnd::GetIndexOfChildWithName(
const ConstString &name
) {
if (!m_buffer)
return UINT32_MAX;
const char *item_name = name.GetCString();
uint32_t idx = ExtractIndexFromString(item_name);
if (idx < UINT32_MAX && idx >= CalculateNumChildren())
return UINT32_MAX;
return idx;
}
| 1 | 16,996 | I think this is objectively better than looking up the object by name. As a follow-up, I'm going to see whether we do this name-based lookup somewhere else and switch to your method. | apple-swift-lldb | cpp |
@@ -81,7 +81,8 @@ Blockly.FlyoutExtensionCategoryHeader.prototype.createDom = function() {
var marginX = 15;
var marginY = 10;
- var statusButtonX = (this.flyoutWidth_ - statusButtonWidth - marginX) / this.workspace_.scale;
+ var statusButtonX = this.workspace_.RTL ? (marginX - this.flyoutWidth_ + statusButtonWidth) :
+ (this.flyoutWidth_ - statusButtonWidth - marginX) / this.workspace_.scale;
if (this.imageSrc_) {
/** @type {SVGElement} */ | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2018 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Class for a category header in the flyout for Scratch
* extensions which can display a textual label and a status button.
* @author [email protected] (Eric Rosenbaum)
*/
'use strict';
goog.provide('Blockly.FlyoutExtensionCategoryHeader');
goog.require('Blockly.FlyoutButton');
/**
* Class for a category header in the flyout for Scratch extensions which can
* display a textual label and a status button.
* @param {!Blockly.WorkspaceSvg} workspace The workspace in which to place this
* header.
* @param {!Blockly.WorkspaceSvg} targetWorkspace The flyout's target workspace.
* @param {!Element} xml The XML specifying the header.
* @extends {Blockly.FlyoutButton}
* @constructor
*/
Blockly.FlyoutExtensionCategoryHeader = function(workspace, targetWorkspace, xml) {
this.init(workspace, targetWorkspace, xml, false);
/**
* @type {number}
* @private
*/
this.flyoutWidth_ = this.targetWorkspace_.getFlyout().getWidth();
/**
* @type {string}
*/
this.extensionId = xml.getAttribute('id');
/**
* Whether this is a label at the top of a category.
* @type {boolean}
* @private
*/
this.isCategoryLabel_ = true;
};
goog.inherits(Blockly.FlyoutExtensionCategoryHeader, Blockly.FlyoutButton);
/**
* Create the label and button elements.
* @return {!Element} The SVG group.
*/
Blockly.FlyoutExtensionCategoryHeader.prototype.createDom = function() {
var cssClass = 'blocklyFlyoutLabel';
this.svgGroup_ = Blockly.utils.createSvgElement('g', {'class': cssClass},
this.workspace_.getCanvas());
this.addTextSvg(true);
this.refreshStatus();
var statusButtonWidth = 25;
var marginX = 15;
var marginY = 10;
var statusButtonX = (this.flyoutWidth_ - statusButtonWidth - marginX) / this.workspace_.scale;
if (this.imageSrc_) {
/** @type {SVGElement} */
this.imageElement_ = Blockly.utils.createSvgElement(
'image',
{
'class': 'blocklyFlyoutButton',
'height': statusButtonWidth + 'px',
'width': statusButtonWidth + 'px',
'x': statusButtonX + 'px',
'y': marginY + 'px'
},
this.svgGroup_);
this.setImageSrc(this.imageSrc_);
}
this.callback_ = Blockly.statusButtonCallback.bind(this, this.extensionId);
this.mouseUpWrapper_ = Blockly.bindEventWithChecks_(this.imageElement_, 'mouseup',
this, this.onMouseUp_);
return this.svgGroup_;
};
/**
* Set the image on the status button using a status string.
*/
Blockly.FlyoutExtensionCategoryHeader.prototype.refreshStatus = function() {
var status = Blockly.FlyoutExtensionCategoryHeader.getExtensionState(this.extensionId);
var basePath = Blockly.mainWorkspace.options.pathToMedia;
if (status == Blockly.StatusButtonState.READY) {
this.setImageSrc(basePath + 'status-ready.svg');
}
if (status == Blockly.StatusButtonState.NOT_READY) {
this.setImageSrc(basePath + 'status-not-ready.svg');
}
};
/**
* Set the source URL of the image for the button.
* @param {?string} src New source.
* @package
*/
Blockly.FlyoutExtensionCategoryHeader.prototype.setImageSrc = function(src) {
if (src === null) {
// No change if null.
return;
}
this.imageSrc_ = src;
if (this.imageElement_) {
this.imageElement_.setAttributeNS('http://www.w3.org/1999/xlink',
'xlink:href', this.imageSrc_ || '');
}
};
/**
* Gets the extension state. Overridden externally.
* @param {string} extensionId The ID of the extension in question.
* @return {Blockly.StatusButtonState} The state of the extension.
* @public
*/
Blockly.FlyoutExtensionCategoryHeader.getExtensionState = function(/* extensionId */) {
return Blockly.StatusButtonState.NOT_READY;
};
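Relating to the review question below: a hedged sketch of an x-position computation in which both the RTL and LTR branches are divided by the workspace scale (mirrors the names used in this file; not the upstream fix).

```js
// Sketch only: apply the scale divisor to whichever branch is taken.
var statusButtonX = (this.workspace_.RTL ?
    (marginX - this.flyoutWidth_ + statusButtonWidth) :
    (this.flyoutWidth_ - statusButtonWidth - marginX)) / this.workspace_.scale;
```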
| 1 | 9,698 | Why are you dividing by scale in the LTR case but not the RTL case? | LLK-scratch-blocks | js |
@@ -368,6 +368,8 @@ class RemoteConnection(object):
('POST', '/session/$sessionId/window/rect'),
Command.GET_WINDOW_RECT:
('GET', '/session/$sessionId/window/rect'),
+ Command.W3C_MINIMIZE_WINDOW:
+ ('POST', '/session/$sessionId/window/minimize'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.W3C_MAXIMIZE_WINDOW: | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import socket
import string
import base64
try:
import http.client as httplib
from urllib import request as url_request
from urllib import parse
except ImportError: # above is available in py3+, below is py2.7
import httplib as httplib
import urllib2 as url_request
import urlparse as parse
from selenium.webdriver.common import utils as common_utils
from .command import Command
from .errorhandler import ErrorCode
from . import utils
LOGGER = logging.getLogger(__name__)
class Request(url_request.Request):
"""
Extends the url_request.Request to support all HTTP request types.
"""
def __init__(self, url, data=None, method=None):
"""
Initialise a new HTTP request.
:Args:
- url - String for the URL to send the request to.
- data - Data to send with the request.
"""
if method is None:
method = data is not None and 'POST' or 'GET'
elif method != 'POST' and method != 'PUT':
data = None
self._method = method
url_request.Request.__init__(self, url, data=data)
def get_method(self):
"""
Returns the HTTP method used by this request.
"""
return self._method
class Response(object):
"""
Represents an HTTP response.
"""
def __init__(self, fp, code, headers, url):
"""
Initialise a new Response.
:Args:
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- headers - A dictionary of headers returned by the server.
- url - URL of the retrieved resource represented by this Response.
"""
self.fp = fp
self.read = fp.read
self.code = code
self.headers = headers
self.url = url
def close(self):
"""
Close the response body file object.
"""
self.read = None
self.fp = None
def info(self):
"""
Returns the response headers.
"""
return self.headers
def geturl(self):
"""
Returns the URL for the resource returned in this response.
"""
return self.url
class HttpErrorHandler(url_request.HTTPDefaultErrorHandler):
"""
A custom HTTP error handler.
Used to return Response objects instead of raising an HTTPError exception.
"""
def http_error_default(self, req, fp, code, msg, headers):
"""
Default HTTP error handler.
:Args:
- req - The original Request object.
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- msg - The HTTP status message returned by the server.
- headers - The response headers.
:Returns:
A new Response object.
"""
return Response(fp, code, headers, req.get_full_url())
class RemoteConnection(object):
"""A connection with the Remote WebDriver server.
Communicates with the server using the WebDriver wire protocol:
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol"""
_timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_timeout(cls):
"""
:Returns:
Timeout value in seconds for all http requests made to the Remote Connection
"""
return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout
@classmethod
def set_timeout(cls, timeout):
"""
Override the default timeout
:Args:
- timeout - timeout value for http requests in seconds
"""
cls._timeout = timeout
@classmethod
def reset_timeout(cls):
"""
Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT
"""
cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_remote_connection_headers(cls, parsed_url, keep_alive=False):
"""
Get headers for remote request.
:Args:
- parsed_url - The parsed url
- keep_alive (Boolean) - Is this a keep-alive connection (default: False)
"""
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': 'Python http auth'
}
if parsed_url.username:
base64string = base64.b64encode('{0.username}:{0.password}'.format(parsed_url).encode())
headers.update({
'Authorization': 'Basic {}'.format(base64string.decode())
})
if keep_alive:
headers.update({
'Connection': 'keep-alive'
})
return headers
def __init__(self, remote_server_addr, keep_alive=False, resolve_ip=True):
# Attempt to resolve the hostname and get an IP address.
self.keep_alive = keep_alive
parsed_url = parse.urlparse(remote_server_addr)
addr = parsed_url.hostname
if parsed_url.hostname and resolve_ip:
port = parsed_url.port or None
if parsed_url.scheme == "https":
ip = parsed_url.hostname
else:
ip = common_utils.find_connectable_ip(parsed_url.hostname,
port=port)
if ip:
netloc = ip
addr = netloc
if parsed_url.port:
netloc = common_utils.join_host_port(netloc,
parsed_url.port)
if parsed_url.username:
auth = parsed_url.username
if parsed_url.password:
auth += ':%s' % parsed_url.password
netloc = '%s@%s' % (auth, netloc)
remote_server_addr = parse.urlunparse(
(parsed_url.scheme, netloc, parsed_url.path,
parsed_url.params, parsed_url.query, parsed_url.fragment))
else:
LOGGER.info('Could not get IP address for host: %s' %
parsed_url.hostname)
self._url = remote_server_addr
if keep_alive:
self._conn = httplib.HTTPConnection(
str(addr), str(parsed_url.port), timeout=self._timeout)
self._commands = {
Command.STATUS: ('GET', '/status'),
Command.NEW_SESSION: ('POST', '/session'),
Command.GET_ALL_SESSIONS: ('GET', '/sessions'),
Command.QUIT: ('DELETE', '/session/$sessionId'),
Command.GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window_handle'),
Command.W3C_GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window'),
Command.GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window_handles'),
Command.W3C_GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window/handles'),
Command.GET: ('POST', '/session/$sessionId/url'),
Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
Command.GO_BACK: ('POST', '/session/$sessionId/back'),
Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
Command.W3C_EXECUTE_SCRIPT:
('POST', '/session/$sessionId/execute/sync'),
Command.W3C_EXECUTE_SCRIPT_ASYNC:
('POST', '/session/$sessionId/execute/async'),
Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/element/$id/screenshot'),
Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
Command.W3C_GET_ACTIVE_ELEMENT: ('GET', '/session/$sessionId/element/active'),
Command.GET_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/element/active'),
Command.FIND_CHILD_ELEMENT:
('POST', '/session/$sessionId/element/$id/element'),
Command.FIND_CHILD_ELEMENTS:
('POST', '/session/$sessionId/element/$id/elements'),
Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'),
Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'),
Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'),
Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'),
Command.SEND_KEYS_TO_ELEMENT:
('POST', '/session/$sessionId/element/$id/value'),
Command.SEND_KEYS_TO_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/keys'),
Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"),
Command.GET_ELEMENT_VALUE:
('GET', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_TAG_NAME:
('GET', '/session/$sessionId/element/$id/name'),
Command.IS_ELEMENT_SELECTED:
('GET', '/session/$sessionId/element/$id/selected'),
Command.SET_ELEMENT_SELECTED:
('POST', '/session/$sessionId/element/$id/selected'),
Command.IS_ELEMENT_ENABLED:
('GET', '/session/$sessionId/element/$id/enabled'),
Command.IS_ELEMENT_DISPLAYED:
('GET', '/session/$sessionId/element/$id/displayed'),
Command.GET_ELEMENT_LOCATION:
('GET', '/session/$sessionId/element/$id/location'),
Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
('GET', '/session/$sessionId/element/$id/location_in_view'),
Command.GET_ELEMENT_SIZE:
('GET', '/session/$sessionId/element/$id/size'),
Command.GET_ELEMENT_RECT:
('GET', '/session/$sessionId/element/$id/rect'),
Command.GET_ELEMENT_ATTRIBUTE:
('GET', '/session/$sessionId/element/$id/attribute/$name'),
Command.GET_ELEMENT_PROPERTY:
('GET', '/session/$sessionId/element/$id/property/$name'),
Command.ELEMENT_EQUALS:
('GET', '/session/$sessionId/element/$id/equals/$other'),
Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
Command.DELETE_ALL_COOKIES:
('DELETE', '/session/$sessionId/cookie'),
Command.DELETE_COOKIE:
('DELETE', '/session/$sessionId/cookie/$name'),
Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
Command.SWITCH_TO_PARENT_FRAME: ('POST', '/session/$sessionId/frame/parent'),
Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
('GET', '/session/$sessionId/element/$id/css/$propertyName'),
Command.IMPLICIT_WAIT:
('POST', '/session/$sessionId/timeouts/implicit_wait'),
Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'),
Command.SET_SCRIPT_TIMEOUT:
('POST', '/session/$sessionId/timeouts/async_script'),
Command.SET_TIMEOUTS:
('POST', '/session/$sessionId/timeouts'),
Command.DISMISS_ALERT:
('POST', '/session/$sessionId/dismiss_alert'),
Command.W3C_DISMISS_ALERT:
('POST', '/session/$sessionId/alert/dismiss'),
Command.ACCEPT_ALERT:
('POST', '/session/$sessionId/accept_alert'),
Command.W3C_ACCEPT_ALERT:
('POST', '/session/$sessionId/alert/accept'),
Command.SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert_text'),
Command.W3C_SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert/text'),
Command.GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert_text'),
Command.W3C_GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert/text'),
Command.SET_ALERT_CREDENTIALS:
('POST', '/session/$sessionId/alert/credentials'),
Command.CLICK:
('POST', '/session/$sessionId/click'),
Command.W3C_ACTIONS:
('POST', '/session/$sessionId/actions'),
Command.W3C_CLEAR_ACTIONS:
('DELETE', '/session/$sessionId/actions'),
Command.DOUBLE_CLICK:
('POST', '/session/$sessionId/doubleclick'),
Command.MOUSE_DOWN:
('POST', '/session/$sessionId/buttondown'),
Command.MOUSE_UP:
('POST', '/session/$sessionId/buttonup'),
Command.MOVE_TO:
('POST', '/session/$sessionId/moveto'),
Command.GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/size'),
Command.SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/size'),
Command.GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/$windowHandle/position'),
Command.SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/$windowHandle/position'),
Command.W3C_GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/position'),
Command.W3C_SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/position'),
Command.SET_WINDOW_RECT:
('POST', '/session/$sessionId/window/rect'),
Command.GET_WINDOW_RECT:
('GET', '/session/$sessionId/window/rect'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.W3C_MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/maximize'),
Command.SET_SCREEN_ORIENTATION:
('POST', '/session/$sessionId/orientation'),
Command.GET_SCREEN_ORIENTATION:
('GET', '/session/$sessionId/orientation'),
Command.SINGLE_TAP:
('POST', '/session/$sessionId/touch/click'),
Command.TOUCH_DOWN:
('POST', '/session/$sessionId/touch/down'),
Command.TOUCH_UP:
('POST', '/session/$sessionId/touch/up'),
Command.TOUCH_MOVE:
('POST', '/session/$sessionId/touch/move'),
Command.TOUCH_SCROLL:
('POST', '/session/$sessionId/touch/scroll'),
Command.DOUBLE_TAP:
('POST', '/session/$sessionId/touch/doubleclick'),
Command.LONG_PRESS:
('POST', '/session/$sessionId/touch/longclick'),
Command.FLICK:
('POST', '/session/$sessionId/touch/flick'),
Command.EXECUTE_SQL:
('POST', '/session/$sessionId/execute_sql'),
Command.GET_LOCATION:
('GET', '/session/$sessionId/location'),
Command.SET_LOCATION:
('POST', '/session/$sessionId/location'),
Command.GET_APP_CACHE:
('GET', '/session/$sessionId/application_cache'),
Command.GET_APP_CACHE_STATUS:
('GET', '/session/$sessionId/application_cache/status'),
Command.CLEAR_APP_CACHE:
('DELETE', '/session/$sessionId/application_cache/clear'),
Command.GET_NETWORK_CONNECTION:
('GET', '/session/$sessionId/network_connection'),
Command.SET_NETWORK_CONNECTION:
('POST', '/session/$sessionId/network_connection'),
Command.GET_LOCAL_STORAGE_ITEM:
('GET', '/session/$sessionId/local_storage/key/$key'),
Command.REMOVE_LOCAL_STORAGE_ITEM:
('DELETE', '/session/$sessionId/local_storage/key/$key'),
Command.GET_LOCAL_STORAGE_KEYS:
('GET', '/session/$sessionId/local_storage'),
Command.SET_LOCAL_STORAGE_ITEM:
('POST', '/session/$sessionId/local_storage'),
Command.CLEAR_LOCAL_STORAGE:
('DELETE', '/session/$sessionId/local_storage'),
Command.GET_LOCAL_STORAGE_SIZE:
('GET', '/session/$sessionId/local_storage/size'),
Command.GET_SESSION_STORAGE_ITEM:
('GET', '/session/$sessionId/session_storage/key/$key'),
Command.REMOVE_SESSION_STORAGE_ITEM:
('DELETE', '/session/$sessionId/session_storage/key/$key'),
Command.GET_SESSION_STORAGE_KEYS:
('GET', '/session/$sessionId/session_storage'),
Command.SET_SESSION_STORAGE_ITEM:
('POST', '/session/$sessionId/session_storage'),
Command.CLEAR_SESSION_STORAGE:
('DELETE', '/session/$sessionId/session_storage'),
Command.GET_SESSION_STORAGE_SIZE:
('GET', '/session/$sessionId/session_storage/size'),
Command.GET_LOG:
('POST', '/session/$sessionId/log'),
Command.GET_AVAILABLE_LOG_TYPES:
('GET', '/session/$sessionId/log/types'),
Command.CURRENT_CONTEXT_HANDLE:
('GET', '/session/$sessionId/context'),
Command.CONTEXT_HANDLES:
('GET', '/session/$sessionId/contexts'),
Command.SWITCH_TO_CONTEXT:
('POST', '/session/$sessionId/context'),
}
def execute(self, command, params):
"""
Send a command to the remote server.
        Any path substitutions required for the URL mapped to the command should be
included in the command parameters.
:Args:
- command - A string specifying the command to execute.
- params - A dictionary of named parameters to send with the command as
its JSON payload.
"""
command_info = self._commands[command]
assert command_info is not None, 'Unrecognised command %s' % command
data = utils.dump_json(params)
path = string.Template(command_info[1]).substitute(params)
url = '%s%s' % (self._url, path)
return self._request(command_info[0], url, body=data)
def _request(self, method, url, body=None):
"""
Send an HTTP request to the remote server.
:Args:
- method - A string for the HTTP method to send the request with.
- url - A string for the URL to send the request to.
- body - A string for request body. Ignored unless method is POST or PUT.
:Returns:
A dictionary with the server's parsed JSON response.
"""
LOGGER.debug('%s %s %s' % (method, url, body))
parsed_url = parse.urlparse(url)
headers = self.get_remote_connection_headers(parsed_url, self.keep_alive)
if self.keep_alive:
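            # Keep-alive mode: reuse the persistent HTTP connection opened in __init__.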
if body and method != 'POST' and method != 'PUT':
body = None
try:
self._conn.request(method, parsed_url.path, body, headers)
resp = self._conn.getresponse()
except (httplib.HTTPException, socket.error):
self._conn.close()
raise
statuscode = resp.status
else:
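            # One-off request: build a urllib opener, adding basic auth when the URL carries credentials.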
password_manager = None
if parsed_url.username:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%s" % parsed_url.port
cleaned_url = parse.urlunparse((
parsed_url.scheme,
netloc,
parsed_url.path,
parsed_url.params,
parsed_url.query,
parsed_url.fragment))
password_manager = url_request.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None,
"%s://%s" % (parsed_url.scheme, netloc),
parsed_url.username,
parsed_url.password)
request = Request(cleaned_url, data=body.encode('utf-8'), method=method)
else:
request = Request(url, data=body.encode('utf-8'), method=method)
for key, val in headers.items():
request.add_header(key, val)
if password_manager:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler(),
url_request.HTTPBasicAuthHandler(password_manager))
else:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler())
resp = opener.open(request, timeout=self._timeout)
statuscode = resp.code
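            # Normalise header access across the httplib and urllib response objects.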
if not hasattr(resp, 'getheader'):
if hasattr(resp.headers, 'getheader'):
resp.getheader = lambda x: resp.headers.getheader(x)
elif hasattr(resp.headers, 'get'):
resp.getheader = lambda x: resp.headers.get(x)
data = resp.read()
try:
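            # Follow 3xx redirects by re-issuing a GET against the Location header.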
if 300 <= statuscode < 304:
return self._request('GET', resp.getheader('location'))
body = data.decode('utf-8').replace('\x00', '').strip()
if 399 < statuscode <= 500:
return {'status': statuscode, 'value': body}
content_type = []
if resp.getheader('Content-Type') is not None:
content_type = resp.getheader('Content-Type').split(';')
if not any([x.startswith('image/png') for x in content_type]):
try:
data = utils.load_json(body.strip())
except ValueError:
if 199 < statuscode < 300:
status = ErrorCode.SUCCESS
else:
status = ErrorCode.UNKNOWN_ERROR
return {'status': status, 'value': body.strip()}
assert type(data) is dict, (
'Invalid server response body: %s' % body)
# Some of the drivers incorrectly return a response
# with no 'value' field when they should return null.
if 'value' not in data:
data['value'] = None
return data
else:
data = {'status': 0, 'value': body.strip()}
return data
finally:
LOGGER.debug("Finished Request")
resp.close()
| 1 | 14,688 | Update after command rename | SeleniumHQ-selenium | py |
@@ -61,8 +61,8 @@ export default function createLegacySettingsWrapper( moduleSlug, moduleComponent
useEffect( () => {
removeAllFilters( `googlesitekit.ModuleSettingsDetails-${ moduleSlug }` );
addFilter(
- 'googlesitekit.ModuleSettingsDetails-tagmanager',
- 'googlesitekit.TagManagerModuleSettings',
+ `googlesitekit.ModuleSettingsDetails-${ moduleSlug }`,
+ 'googlesitekit.SettingsLegacy',
fillFilterWithComponent( moduleComponent )
);
return () => removeAllFilters( `googlesitekit.ModuleSettingsDetails-${ moduleSlug }` ); | 1 | /**
* Legacy Settings Storybook component wrapper.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { useEffect, WPElement } from '@wordpress/element';
import { removeAllFilters, addFilter } from '@wordpress/hooks';
/**
* Internal dependencies
*/
import SettingsModule from '../../assets/js/components/settings/settings-module';
import { fillFilterWithComponent } from '../../assets/js/util';
import { WithTestRegistry } from '../../tests/js/utils';
/**
* Creates a legacy settings wrapper component for the given module.
*
* @since 1.12.0
*
* @param {string} moduleSlug The module's slug.
* @param {WPElement} moduleComponent Module settings component to filter in.
* @return {Function} Legacy settings component.
*/
export default function createLegacySettingsWrapper( moduleSlug, moduleComponent ) {
return function SettingsLegacy( props ) {
const {
registry,
callback,
module = {
...global._googlesitekitLegacyData.modules[ moduleSlug ],
active: true,
setupComplete: true,
},
isEditing = false,
isOpen = true,
isSaving = false,
error = false,
handleAccordion = global.console.log.bind( null, 'handleAccordion' ),
handleDialog = global.console.log.bind( null, 'handleDialog' ),
updateModulesList = global.console.log.bind( null, 'updateModulesList' ),
handleButtonAction = global.console.log.bind( null, 'handleButtonAction' ),
} = props;
useEffect( () => {
removeAllFilters( `googlesitekit.ModuleSettingsDetails-${ moduleSlug }` );
addFilter(
'googlesitekit.ModuleSettingsDetails-tagmanager',
'googlesitekit.TagManagerModuleSettings',
fillFilterWithComponent( moduleComponent )
);
return () => removeAllFilters( `googlesitekit.ModuleSettingsDetails-${ moduleSlug }` );
} );
const moduleKey = `${ moduleSlug }-module`;
return (
<WithTestRegistry registry={ registry } callback={ callback }>
<div style={ { background: 'white' } }>
<SettingsModule
key={ moduleKey }
slug={ moduleSlug }
name={ module.name }
description={ module.description }
homepage={ module.homepage }
learnmore={ module.learnMore }
active={ module.active }
setupComplete={ module.setupComplete }
hasSettings={ true }
autoActivate={ module.autoActivate }
updateModulesList={ updateModulesList }
handleEdit={ handleButtonAction }
handleConfirm
isEditing={ isEditing ? { [ moduleKey ]: true } : {} }
isOpen={ isOpen }
handleAccordion={ handleAccordion }
handleDialog={ handleDialog }
provides={ module.provides }
isSaving={ isSaving }
screenID={ module.screenID }
error={ error }
/>
</div>
</WithTestRegistry>
);
};
}
| 1 | 30,644 | I added this change to fix a bug with the legacy settings wrapper which was preventing it from working properly with the other modules | google-site-kit-wp | js |
@@ -33,12 +33,15 @@ package azkaban;
public class Constants {
// Azkaban Flow Versions
- public static final String AZKABAN_FLOW_VERSION_2_0 = "2.0";
+ public static final double VERSION_2_0 = 2.0;
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
+ // Flow 2.0 node type
+ public static final String FLOW_NODE_TYPE = "flow";
+
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*/
public class Constants {
// Azkaban Flow Versions
public static final String AZKABAN_FLOW_VERSION_2_0 = "2.0";
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_EXECUTOR_PORT_FILE = "executor.portfile";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
public static class ConfigurationKeys {
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
    // When these parameters are set, they are used to generate email links.
    // If they are not set, jetty.hostname and jetty.port (or jetty.ssl.port when SSL is configured) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// The property is used for the web server to get the host name of the executor when running in SOLO mode.
public static final String EXECUTOR_HOST = "executor.host";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
public static final String AZKABAN_STORAGE_HDFS_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
* a) azkaban.storage.artifact.max.retention=all
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
// enable Quartz Scheduler if true.
    public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
/*
     * This parameter replaces EXTRA_HCAT_LOCATION, which could fail when one of the URIs is not available.
* EXTRA_HCAT_CLUSTERS has the following format:
* other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port"
* Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster.
* The uris(hcat servers) in a "cluster" ensures HA is provided.
**/
public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters";
/*
     * The settings, defined by the user, indicate whether there are hcat locations other than the
     * default one from which the system should pre-fetch hcat tokens. Note: multiple thrift URIs are
     * supported; use commas to separate the values. Values are case insensitive.
**/
// Use EXTRA_HCAT_CLUSTERS instead
@Deprecated
public static final String EXTRA_HCAT_LOCATION = "other_hcat_location";
// Job properties that indicate maximum memory size
public static final String JOB_MAX_XMS = "job.max.Xms";
public static final String MAX_XMS_DEFAULT = "1G";
public static final String JOB_MAX_XMX = "job.max.Xmx";
public static final String MAX_XMX_DEFAULT = "2G";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
}
| 1 | 15,175 | isn't AZKABAN_FLOW_VERSION_2_0 more explicit? | azkaban-azkaban | java |
@@ -308,9 +308,7 @@ public class AccountActivity extends ThemedActivity implements AccountContract.V
/*case ONEDRIVE:
signInOneDrive();
break;*/
-
- default:
- SnackBarHandler.show(coordinatorLayout, R.string.feature_not_present);
+ default://do nothing
}
} else {
AlertDialog alertDialog = new AlertDialog.Builder(this) | 1 | package org.fossasia.phimpme.accounts;
import android.content.ComponentName;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.os.Bundle;
import android.support.design.widget.BottomNavigationView;
import android.support.design.widget.CoordinatorLayout;
import android.support.design.widget.Snackbar;
import android.support.v7.app.AlertDialog;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.SwitchCompat;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.FrameLayout;
import android.widget.RelativeLayout;
import com.box.androidsdk.content.BoxConfig;
import com.box.androidsdk.content.auth.BoxAuthentication;
import com.box.androidsdk.content.models.BoxSession;
import com.cloudrail.si.CloudRail;
import com.dropbox.client2.DropboxAPI;
import com.dropbox.client2.android.AndroidAuthSession;
import com.pinterest.android.pdk.PDKCallback;
import com.pinterest.android.pdk.PDKClient;
import com.pinterest.android.pdk.PDKException;
import com.pinterest.android.pdk.PDKResponse;
import com.twitter.sdk.android.core.identity.TwitterAuthClient;
import org.fossasia.phimpme.R;
import org.fossasia.phimpme.base.PhimpmeProgressBarHandler;
import org.fossasia.phimpme.base.RecyclerItemClickListner;
import org.fossasia.phimpme.base.ThemedActivity;
import org.fossasia.phimpme.data.local.AccountDatabase;
import org.fossasia.phimpme.data.local.DatabaseHelper;
import org.fossasia.phimpme.gallery.activities.LFMainActivity;
import org.fossasia.phimpme.gallery.activities.SettingsActivity;
import org.fossasia.phimpme.gallery.util.AlertDialogsHelper;
import org.fossasia.phimpme.gallery.util.ThemeHelper;
import org.fossasia.phimpme.share.flickr.FlickrActivity;
import org.fossasia.phimpme.share.imgur.ImgurAuthActivity;
import org.fossasia.phimpme.share.nextcloud.NextCloudAuth;
import org.fossasia.phimpme.share.owncloud.OwnCloudActivity;
import org.fossasia.phimpme.share.twitter.LoginActivity;
import org.fossasia.phimpme.utilities.ActivitySwitchHelper;
import org.fossasia.phimpme.utilities.BasicCallBack;
import org.fossasia.phimpme.utilities.Constants;
import org.fossasia.phimpme.utilities.SnackBarHandler;
import org.jetbrains.annotations.NotNull;
import java.util.ArrayList;
import butterknife.BindView;
import butterknife.ButterKnife;
import io.realm.Realm;
import io.realm.RealmQuery;
import static com.pinterest.android.pdk.PDKClient.setDebugMode;
import static org.fossasia.phimpme.R.string.no_account_signed_in;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.BOX;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.DROPBOX;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.IMGUR;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.NEXTCLOUD;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.OWNCLOUD;
import static org.fossasia.phimpme.data.local.AccountDatabase.AccountName.PINTEREST;
import static org.fossasia.phimpme.utilities.Constants.BOX_CLIENT_ID;
import static org.fossasia.phimpme.utilities.Constants.BOX_CLIENT_SECRET;
import static org.fossasia.phimpme.utilities.Constants.PINTEREST_APP_ID;
import static org.fossasia.phimpme.utilities.Constants.SUCCESS;
import static org.fossasia.phimpme.utilities.Utils.checkNetwork;
/**
* Created by pa1pal on 13/6/17.
*/
public class AccountActivity extends ThemedActivity implements AccountContract.View,
RecyclerItemClickListner.OnItemClickListener{
private static final int NEXTCLOUD_REQUEST_CODE = 3;
private static final int OWNCLOUD_REQUEST_CODE = 9;
private static final int RESULT_OK = 1;
private static final int RC_SIGN_IN = 9001;
public static final String BROWSABLE = "android.intent.category.BROWSABLE";
public final static String CLOUDRAIL_APP_KEY = Constants.CLOUDRAIL_LICENSE_KEY;//CloudRail_App-Key
@BindView(R.id.accounts_parent)
RelativeLayout parentLayout;
@BindView(R.id.accounts_recycler_view)
RecyclerView accountsRecyclerView;
@BindView(R.id.toolbar)
Toolbar toolbar;
@BindView(R.id.bottombar)
BottomNavigationView bottomNavigationView;
@BindView(R.id.accounts)
CoordinatorLayout coordinatorLayout;
private AccountAdapter accountAdapter;
private AccountPresenter accountPresenter;
private Realm realm = Realm.getDefaultInstance();
private RealmQuery<AccountDatabase> realmResult;
private PhimpmeProgressBarHandler phimpmeProgressBarHandler;
private TwitterAuthClient client;
private AccountDatabase account;
private DatabaseHelper databaseHelper;
private Context context;
private CloudRailServices cloudRailServices;
private PDKClient pdkClient;
// private GoogleApiClient mGoogleApiClient;
private DropboxAPI<AndroidAuthSession> mDBApi;
private BoxSession sessionBox;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
ButterKnife.bind(this);
ActivitySwitchHelper.setContext(this);
parentLayout.setBackgroundColor(getBackgroundColor());
overridePendingTransition(R.anim.right_to_left,
R.anim.left_to_right);
parentLayout.setBackgroundColor(getBackgroundColor());
accountAdapter = new AccountAdapter();
accountPresenter = new AccountPresenter(realm);
phimpmeProgressBarHandler = new PhimpmeProgressBarHandler(this);
accountPresenter.attachView(this);
databaseHelper = new DatabaseHelper(realm);
client = new TwitterAuthClient();
setSupportActionBar(toolbar);
ThemeHelper themeHelper = new ThemeHelper(getContext());
toolbar.setPopupTheme(getPopupToolbarStyle());
toolbar.setBackgroundColor(themeHelper.getPrimaryColor());
bottomNavigationView.setBackgroundColor(themeHelper.getPrimaryColor());
setUpRecyclerView();
accountPresenter.loadFromDatabase(); // Calling presenter function to load data from database
getSupportActionBar().setTitle(R.string.title_account);
phimpmeProgressBarHandler.show();
cloudRailServices=CloudRailServices.getInstance();
pdkClient = PDKClient.configureInstance(this, PINTEREST_APP_ID);
pdkClient.onConnect(this);
setDebugMode(true);
// googleApiClient();
configureBoxClient();
}
/* private void googleApiClient(){
// Configure sign-in to request the user's ID, email address, and basic
// profile. ID and basic profile are included in DEFAULT_SIGN_IN.
GoogleSignInOptions gso = new GoogleSignInOptions.Builder(GoogleSignInOptions.DEFAULT_SIGN_IN)
.requestEmail()
.build();
// Build a GoogleApiClient with access to the Google Sign-In API and the
// options specified by gso.
mGoogleApiClient = new GoogleApiClient.Builder(this)
.enableAutoManage(this, AccountActivity.this)
.addApi(Auth.GOOGLE_SIGN_IN_API, gso)
.build();
}*/
private void configureBoxClient() {
BoxConfig.CLIENT_ID = BOX_CLIENT_ID;
BoxConfig.CLIENT_SECRET = BOX_CLIENT_SECRET;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.menu_accounts_activity, menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId())
{
case R.id.action_account_settings:
startActivity(new Intent(AccountActivity.this, SettingsActivity.class));
return true;
}
return super.onOptionsItemSelected(item);
}
@Override
public void setUpRecyclerView() {
RecyclerView.LayoutManager layoutManager = new LinearLayoutManager(this);
accountsRecyclerView.setLayoutManager(layoutManager);
accountsRecyclerView.setAdapter(accountAdapter);
accountsRecyclerView.addOnItemTouchListener(new RecyclerItemClickListner(this, this));
}
@Override
public void setUpAdapter(@NotNull RealmQuery<AccountDatabase> accountDetails) {
this.realmResult = accountDetails;
accountAdapter.setResults(realmResult);
}
@Override
public void showError() {
SnackBarHandler.show(coordinatorLayout, getString(no_account_signed_in));
}
@Override
public void showComplete() {
phimpmeProgressBarHandler.hide();
}
@Override
public int getContentViewId() {
return R.layout.activity_accounts;
}
@Override
public int getNavigationMenuItemId() {
return R.id.navigation_accounts;
}
@Override
public void onItemClick(final View childView, final int position) {
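        // If there is no active network, show a snackbar whose action opens the system data usage settings.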
ConnectivityManager cm=(ConnectivityManager)context.getSystemService(Context.CONNECTIVITY_SERVICE);
NetworkInfo ni = cm.getActiveNetworkInfo();
if (ni == null) {
View rootView = AccountActivity.this.getWindow().getDecorView().findViewById(android.R.id.content);
Snackbar snackbar = Snackbar
.make(rootView, R.string.internet_is_off, Snackbar.LENGTH_SHORT)
.setAction("Settings", new View.OnClickListener() {
@Override
public void onClick(View view) {
Intent intent = new Intent();
intent.setComponent(new ComponentName("com.android.settings","com.android.settings.Settings$DataUsageSummaryActivity"));
startActivity(intent);
}
})
.setActionTextColor(getAccentColor());
View sbView = snackbar.getView();
final FrameLayout.LayoutParams params = (FrameLayout.LayoutParams) sbView.getLayoutParams();
params.setMargins(params.leftMargin,
params.topMargin,
params.rightMargin,
params.bottomMargin + navigationView.getHeight());
sbView.setLayoutParams(params);
snackbar.show();
}
final SwitchCompat signInSignOut = childView.findViewById(R.id.sign_in_sign_out_switch);
final String name = AccountDatabase.AccountName.values()[position].toString();
if (!signInSignOut.isChecked()) {
if (!checkNetwork(this, parentLayout)) return;
switch (AccountDatabase.AccountName.values()[position]) {
case TWITTER:
signInTwitter();
break;
/*case DRUPAL:
Intent drupalShare = new Intent(getContext(), DrupalLogin.class);
startActivity(drupalShare);
break;*/
case NEXTCLOUD:
Intent nextCloudShare = new Intent(getContext(), NextCloudAuth.class);
startActivityForResult(nextCloudShare, NEXTCLOUD_REQUEST_CODE);
break;
/*case WORDPRESS:
Intent WordpressShare = new Intent(this, WordpressLoginActivity.class);
startActivity(WordpressShare);
break;*/
/* case GOOGLEDRIVE:
signInGoogleDrive();
break;*/
case PINTEREST:
signInPinterest();
break;
case FLICKR:
signInFlickr();
break;
case IMGUR:
signInImgur();
break;
case DROPBOX:
if(CLOUDRAIL_APP_KEY==null || CLOUDRAIL_APP_KEY.equals(""))
{
Snackbar.make(findViewById(android.R.id.content),R.string.Cloudrail_License_key,Snackbar.LENGTH_SHORT).show();
}
else
signInDropbox();
break;
case OWNCLOUD:
Intent ownCloudShare = new Intent(getContext(), OwnCloudActivity.class);
startActivityForResult(ownCloudShare, OWNCLOUD_REQUEST_CODE);
break;
case BOX:
sessionBox = new BoxSession(AccountActivity.this);
sessionBox.authenticate();
break;
case TUMBLR:
//signInTumblr();
break;
/*case ONEDRIVE:
signInOneDrive();
break;*/
default:
SnackBarHandler.show(coordinatorLayout, R.string.feature_not_present);
}
} else {
AlertDialog alertDialog = new AlertDialog.Builder(this)
.setMessage(name)
.setTitle(getString(R.string.sign_out_dialog_title))
.setPositiveButton(R.string.yes_action,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
databaseHelper
.deleteSignedOutAccount(name);
accountAdapter.notifyDataSetChanged();
accountPresenter.loadFromDatabase();
signInSignOut.setChecked(false);
BoxAuthentication.getInstance().logoutAllUsers(AccountActivity.this);
}
})
.setNegativeButton(R.string.no_action,
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
//TODO: Implement negative button action
}
})
.create();
alertDialog.show();
AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), alertDialog);
}
}
private void signInFlickr() {
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if (status == SUCCESS)
SnackBarHandler.show(coordinatorLayout, getString(R.string.logged_in_flickr));
}
};
Intent intent = new Intent(this, FlickrActivity.class);
FlickrActivity.setBasicCallBack(basicCallBack);
startActivity(intent);
}
/* private void signInTumblr() {
LoginListener loginListener = new LoginListener() {
@Override
public void onLoginSuccessful(com.tumblr.loglr.LoginResult loginResult) {
SnackBarHandler.show(coordinatorLayout, getString(R.string.logged_in_tumblr));
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class,
TUMBLR.toString());
account.setToken(loginResult.getOAuthToken());
account.setSecret(loginResult.getOAuthTokenSecret());
account.setUsername(TUMBLR.toString());
realm.commitTransaction();
TumblrClient tumblrClient = new TumblrClient();
realm.beginTransaction();
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
account.setUsername(data.toString());
realm.commitTransaction();
}
};
tumblrClient.getName(basicCallBack);
}
};
ExceptionHandler exceptionHandler = new ExceptionHandler() {
@Override
public void onLoginFailed(RuntimeException e) {
SnackBarHandler.show(coordinatorLayout, R.string.error_volly);
}
};
Loglr.getInstance()
.setConsumerKey(Constants.TUMBLR_CONSUMER_KEY)
.setConsumerSecretKey(Constants.TUMBLR_CONSUMER_SECRET)
.setLoginListener(loginListener)
.setExceptionHandler(exceptionHandler)
.enable2FA(true)
.setUrlCallBack(Constants.CALL_BACK_TUMBLR)
.initiateInActivity(AccountActivity.this);
}*/
private void signInDropbox() {
if (accountPresenter.checkAlreadyExist(DROPBOX))
SnackBarHandler.show(coordinatorLayout, R.string.already_signed_in);
else
cloudRailServices.prepare(this);
cloudRailServices.login();
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if(status == 1)
{
dropboxAuthentication(data.toString());
}
}
};
CloudRailServices.setCallBack(basicCallBack);
}
/*
Catching the intent of the external browser login and getting that data
*/
@Override
protected void onNewIntent(Intent intent) {
try{
if(intent.getCategories().contains(BROWSABLE)){
CloudRail.setAuthenticationResponse(intent);
}
}catch (Exception e)
{
//Nothing is to be done when the BROWSABLE Intent is null
}
super.onNewIntent(intent);
}
/* private void signInGoogleDrive() {
if(accountPresenter.checkAlreadyExist(GOOGLEDRIVE))
SnackBarHandler.show(coordinatorLayout,"Already Signed In");
else
cloudRailServices.prepare(this);
cloudRailServices.googleDriveLogin();
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if(status == 2){
Log.e("TAG", "callBack: GOOGLE DRIVE"+data.toString() );
googleDriveAuthentication(data.toString());
}
}
};
CloudRailServices.setCallBack(basicCallBack);
}*/
/* private void signInOneDrive(){
if(accountPresenter.checkAlreadyExist(ONEDRIVE))
SnackBarHandler.show(coordinatorLayout,"Already Signed In");
else
cloudRailServices.prepare(this);
cloudRailServices.oneDriveLogin();
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if(status==3){
oneDriveAuthentication(data.toString());
}
}
};
CloudRailServices.setCallBack(basicCallBack);
}*/
private void signInImgur() {
BasicCallBack basicCallBack = new BasicCallBack() {
@Override
public void callBack(int status, Object data) {
if (status == SUCCESS) {
SnackBarHandler.show(coordinatorLayout, R.string.account_logged);
if (data instanceof Bundle) {
Bundle bundle = (Bundle) data;
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, IMGUR.toString());
account.setUsername(bundle.getString(getString(R.string.auth_username)));
account.setToken(bundle.getString(getString(R.string.auth_token)));
realm.commitTransaction();
}
}
}
};
Intent i = new Intent(AccountActivity.this, ImgurAuthActivity.class);
ImgurAuthActivity.setBasicCallBack(basicCallBack);
startActivity(i);
}
private void signInPinterest() {
ArrayList<String> scopes = new ArrayList<String>();
scopes.add(PDKClient.PDKCLIENT_PERMISSION_READ_PUBLIC);
scopes.add(PDKClient.PDKCLIENT_PERMISSION_WRITE_PUBLIC);
scopes.add(PDKClient.PDKCLIENT_PERMISSION_READ_RELATIONSHIPS);
scopes.add(PDKClient.PDKCLIENT_PERMISSION_WRITE_RELATIONSHIPS);
pdkClient.login(this, scopes, new PDKCallback() {
@Override
public void onSuccess(PDKResponse response) {
Log.d(getClass().getName(), response.getData().toString());
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, PINTEREST.toString());
account.setAccountname(PINTEREST);
account.setUsername(response.getUser().getFirstName() + " " + response.getUser().getLastName());
realm.commitTransaction();
finish();
startActivity(getIntent());
SnackBarHandler.show(coordinatorLayout, getString(R.string.account_logged_pinterest));
}
@Override
public void onFailure(PDKException exception) {
Log.e(getClass().getName(), exception.getDetailMessage());
SnackBarHandler.show(coordinatorLayout, R.string.pinterest_signIn_fail);
}
});
}
@Override
public void onItemLongPress(View childView, int position) {
        // TODO: long press to be implemented
}
/**
* Create twitter login and session
*/
public void signInTwitter() {
Intent i = new Intent(AccountActivity.this, LoginActivity.class);
startActivity(i);
}
/**
* Create Facebook login and session
*/
/* public void signInFacebook() {
List<String> permissionNeeds = Arrays.asList("publish_actions");
loginManager = LoginManager.getInstance();
loginManager.logInWithPublishPermissions(this, permissionNeeds);
//loginManager.logInWithReadPermissions(this, Arrays.asList("email", "public_profile"));
loginManager.registerCallback(callbackManager,
new FacebookCallback<LoginResult>() {
@Override
public void onSuccess(LoginResult loginResult) {
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, FACEBOOK.toString());
account.setUsername(loginResult.getAccessToken().getUserId());
GraphRequest request = GraphRequest.newMeRequest(
loginResult.getAccessToken(),
new GraphRequest.GraphJSONObjectCallback() {
@Override
public void onCompleted(@NonNls JSONObject jsonObject, GraphResponse graphResponse) {
Log.v("LoginActivity", graphResponse.toString());
try {
account.setUsername(jsonObject.getString("name"));
realm.commitTransaction();
SnackBarHandler.show(coordinatorLayout, getString(R.string.logged_in_facebook));
} catch (JSONException e) {
Log.e("LoginAct", e.toString());
}
}
});
Bundle parameters = new Bundle();
parameters.putString("fields", "id,name");
request.setParameters(parameters);
request.executeAsync();
}
@Override
public void onCancel() {
SnackBarHandler.show(coordinatorLayout, getString(R.string.facebook_login_cancel));
}
@Override
public void onError(FacebookException e) {
SnackBarHandler.show(coordinatorLayout, getString(R.string.facebook_login_error));
Log.d("error", e.toString());
}
});
}*/
@Override
public Context getContext() {
this.context = this;
return context;
}
@Override
public void onResume() {
super.onResume();
ActivitySwitchHelper.setContext(this);
setNavigationBarColor(ThemeHelper.getPrimaryColor(this));
toolbar.setBackgroundColor(ThemeHelper.getPrimaryColor(this));
//dropboxAuthentication();
boxAuthentication();
setStatusBarColor();
setNavBarColor();
accountPresenter.loadFromDatabase();
accountAdapter.updateTheme();
accountAdapter.notifyDataSetChanged();
}
@Override
public void onBackPressed() {
Intent intent = new Intent(this, LFMainActivity.class);
startActivity(intent);
finish();
overridePendingTransition(R.anim.left_to_right,
R.anim.right_to_left);
}
private void boxAuthentication() {
if (sessionBox != null && sessionBox.getUser() != null) {
String accessToken = sessionBox.getAuthInfo().accessToken();
realm.beginTransaction();
// Creating Realm object for AccountDatabase Class
account = realm.createObject(AccountDatabase.class,
BOX.toString());
// Writing values in Realm database
account.setUsername(sessionBox.getUser().getName());
account.setToken(String.valueOf(accessToken));
// Finally committing the whole data
realm.commitTransaction();
accountPresenter.loadFromDatabase();
}
}
private void dropboxAuthentication(String tokens) {
try{
String result = cloudRailServices.db.saveAsString();
Log.d("AccountsActivity", "dropboxAuthentication: "+tokens +" "+result);
String accessToken = cloudRailServices.getToken();
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, DROPBOX.toString());
account.setUsername(DROPBOX.toString());
account.setToken(String.valueOf(accessToken));
realm.commitTransaction();
}catch (Exception e )
{
            // Exception does not need any handling here.
}
accountPresenter.loadFromDatabase();
}
/* private void oneDriveAuthentication(String tokens){
try {
String result = cloudRailServices.oneDrive.saveAsString();
Log.d("AccountsActivity", "oneDriveAuthentication: "+tokens+" "+result );
String accessToken = cloudRailServices.getOneDriveToken();
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class,ONEDRIVE.toString());
account.setUsername(ONEDRIVE.toString());
account.setToken(String.valueOf(accessToken));
realm.commitTransaction();
}
catch (Exception e){
//No need of handling it
}
accountPresenter.loadFromDatabase();
}*/
/* private void googleDriveAuthentication(String tokens) {
try{
String token = cloudRailServices.googleDrive.saveAsString();
Log.e("AccountsActivity", "googleDriveAuthentication: "+token + "Matching Token "+tokens);
String accessToken = cloudRailServices.getGoogleDriveToken();
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class,GOOGLEDRIVE.toString());
account.setUsername(GOOGLEDRIVE.toString());
account.setToken(String.valueOf(accessToken));
realm.commitTransaction();
}catch (Exception e)
{
//No need for handling
}
accountPresenter.loadFromDatabase();
}*/
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
client.onActivityResult(requestCode, resultCode, data);
// callbackManager.onActivityResult(requestCode, resultCode, data);
pdkClient.onOauthResponse(requestCode, resultCode, data);
if ((requestCode == OWNCLOUD_REQUEST_CODE && resultCode == RESULT_OK) || (requestCode == NEXTCLOUD_REQUEST_CODE && resultCode == RESULT_OK)) {
realm.beginTransaction();
if (requestCode == NEXTCLOUD_REQUEST_CODE) {
account = realm.createObject(AccountDatabase.class, NEXTCLOUD.toString());
} else {
account = realm.createObject(AccountDatabase.class, OWNCLOUD.toString());
}
account.setServerUrl(data.getStringExtra(getString(R.string.server_url)));
account.setUsername(data.getStringExtra(getString(R.string.auth_username)));
account.setPassword(data.getStringExtra(getString(R.string.auth_password)));
realm.commitTransaction();
}
/* if (requestCode == RC_SIGN_IN) {
GoogleSignInResult result = Auth.GoogleSignInApi.getSignInResultFromIntent(data);
handleSignInResult(result);
}*/
}
/*private void handleSignInResult(GoogleSignInResult result) {
if (result.isSuccess()) {
GoogleSignInAccount acct = result.getSignInAccount();//acct.getDisplayName()
SnackBarHandler.show(parentLayout,R.string.success);
realm.beginTransaction();
account = realm.createObject(AccountDatabase.class, GOOGLEPLUS.name());account.setUsername(acct.getDisplayName());
account.setUserId(acct.getId());
realm.commitTransaction();
} else {
SnackBarHandler.show(parentLayout,R.string.google_auth_fail);
}
}*/
}
| 1 | 12,869 | Please don't leave an empty default | fossasia-phimpme-android | java |
@@ -15,6 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
/**
* External dependencies
*/ | 1 | /**
* Utility functions.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import {
map,
isEqual,
isFinite,
get,
unescape,
} from 'lodash';
import React from 'react';
/**
* WordPress dependencies
*/
import apiFetch from '@wordpress/api-fetch';
import {
addFilter,
applyFilters,
} from '@wordpress/hooks';
import {
_n,
__,
sprintf,
} from '@wordpress/i18n';
import { addQueryArgs, getQueryString } from '@wordpress/url';
/**
* Internal dependencies
*/
import SvgIcon from './svg-icon';
import { tagMatchers as setupTagMatchers } from '../components/setup/compatibility-checks';
import { default as adsenseTagMatchers } from '../modules/adsense/util/tagMatchers';
import { default as analyticsTagMatchers } from '../modules/analytics/util/tagMatchers';
import { tagMatchers as tagmanagerTagMatchers } from '../modules/tagmanager/util';
import { trackEvent } from './tracking';
import data, { TYPE_CORE } from '../components/data';
export { trackEvent };
export * from './sanitize';
export * from './stringify';
export * from './standalone';
export * from './storage';
export * from './i18n';
/**
* Remove a parameter from a URL string.
*
* Fallback for when URL is unable to handle parsedURL.searchParams.delete.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*
* @return {string} URL without the deleted parameter.
*
*/
const removeURLFallBack = ( url, parameter ) => {
const urlparts = url.split( '?' );
if ( 2 <= urlparts.length ) {
const prefix = encodeURIComponent( parameter ) + '=';
const pars = urlparts[ 1 ].split( /[&;]/g );
// Keep only the parameters that do not match the one being removed.
const newPars = pars.filter( ( param ) => {
return -1 === param.lastIndexOf( prefix, 0 );
} );
url = urlparts[ 0 ] + '/' + ( 0 < newPars.length ? '?' + newPars.join( '&' ) : '' );
return url;
}
return url;
};
/**
* Remove a parameter from a URL string.
*
* Leverages the URL object internally.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*
* @return {string} URL without the deleted parameter.
*/
export const removeURLParameter = ( url, parameter ) => {
const parsedURL = new URL( url );
// If the URL implementation doesn't support parsedURL.searchParams, use the fallback handler.
if ( ! parsedURL.searchParams || ! parsedURL.searchParams.delete ) {
return removeURLFallBack( url, parameter );
}
parsedURL.searchParams.delete( parameter );
return parsedURL.href;
};
/**
* Prepares a number to be used in readableLargeNumber.
*
* @param {number} number The large number to prepare.
*
* @return {number} The prepared number
*/
export const prepareForReadableLargeNumber = ( number ) => {
if ( 1000000 <= number ) {
return Math.round( number / 100000 ) / 10;
}
if ( 10000 <= number ) {
return Math.round( number / 1000 );
}
if ( 1000 <= number ) {
return Math.round( number / 100 ) / 10;
}
return number;
};
/**
* Format a large number for shortened display.
*
* @param {number} number The large number to format.
* @param {(string|boolean)} currencyCode Optional currency code to format as amount.
*
* @return {string} The formatted number.
*/
export const readableLargeNumber = ( number, currencyCode = false ) => {
// Cast parseable values to numeric types.
number = isFinite( number ) ? number : Number( number );
if ( ! isFinite( number ) ) {
// eslint-disable-next-line no-console
console.warn( 'Invalid number', number, typeof number );
number = 0;
}
if ( currencyCode ) {
return numberFormat( number, { style: 'currency', currency: currencyCode } );
}
const withSingleDecimal = {
minimumFractionDigits: 1,
maximumFractionDigits: 1,
};
// Numbers over 1,000,000 round normally and display a single decimal unless the decimal is 0.
if ( 1000000 <= number ) {
return sprintf(
// translators: %s: an abbreviated number in millions.
__( '%sM', 'google-site-kit' ),
numberFormat( prepareForReadableLargeNumber( number ), number % 10 === 0 ? {} : withSingleDecimal )
);
}
// Numbers between 10,000 and 1,000,000 round normally and have no decimals
if ( 10000 <= number ) {
return sprintf(
// translators: %s: an abbreviated number in thousands.
__( '%sK', 'google-site-kit' ),
numberFormat( prepareForReadableLargeNumber( number ) )
);
}
// Numbers between 1,000 and 10,000 round normally and display a single decimal unless the decimal is 0.
if ( 1000 <= number ) {
return sprintf(
// translators: %s: an abbreviated number in thousands.
__( '%sK', 'google-site-kit' ),
numberFormat( prepareForReadableLargeNumber( number ), number % 10 === 0 ? {} : withSingleDecimal )
);
}
return number.toString();
};
/**
* Internationalization Number Format.
*
* @param {number} number The number to format.
* @param {Object} [options] Formatting options.
* @param {string} [options.locale] Locale to use for formatting. Defaults to current locale used by Site Kit.
* @see {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/NumberFormat/NumberFormat|`options` parameter}
* For all available formatting options.
*
* @return {string} The formatted number.
*/
export const numberFormat = ( number, options = {} ) => {
const { locale = getLocale(), ...formatOptions } = options;
return new Intl.NumberFormat( locale, formatOptions ).format( number );
};
/**
* Gets the current locale for use with browser APIs.
*
* @param {Object} _global The global window object.
*
* @return {string} Current Site Kit locale if set, otherwise the current language set by the browser.
* E.g. `en-US` or `de-DE`
*/
export const getLocale = ( _global = global ) => {
const siteKitLocale = get( _global, [ 'googlesitekit', 'locale', '', 'lang' ] );
if ( siteKitLocale ) {
const matches = siteKitLocale.match( /^(\w{2})?(_)?(\w{2})/ );
if ( matches && matches[ 0 ] ) {
return matches[ 0 ].replace( /_/g, '-' );
}
}
return _global.navigator.language;
};
/**
* Transform a period string into a number of seconds.
*
* @param {string} period The period to transform.
*
* @return {number} The number of seconds
*/
export const getTimeInSeconds = ( period ) => {
const minute = 60;
const hour = minute * 60;
const day = hour * 24;
const week = day * 7;
const month = day * 30;
const year = day * 365;
switch ( period ) {
case 'minute':
return minute;
case 'hour':
return hour;
case 'day':
return day;
case 'week':
return week;
case 'month':
return month;
case 'year':
return year;
}
};
/**
* Converts seconds to a display ready string indicating
* the number of hours, minutes and seconds that have elapsed.
*
* For example, passing 65 returns '1m 5s'.
*
* @param {number} seconds The number of seconds.
*
* @return {string} Human readable string indicating time elapsed.
*
*/
export const prepareSecondsForDisplay = ( seconds ) => {
seconds = parseInt( seconds, 10 );
if ( isNaN( seconds ) || 0 === seconds ) {
return '0.0s';
}
const results = {};
results.hours = Math.floor( seconds / 60 / 60 );
results.minutes = Math.floor( ( seconds / 60 ) % 60 );
results.seconds = Math.floor( seconds % 60 );
const returnString =
( results.hours ? results.hours + 'h ' : '' ) +
( results.minutes ? results.minutes + 'm ' : '' ) +
( results.seconds ? results.seconds + 's ' : '' );
return returnString.trim();
};
/**
* Retrieve number of days between 2 dates.
*
* @param {Date} dateStart Start date instance.
* @param {Date} dateEnd End date instance.
*
* @return {number} The number of days.
*/
export const getDaysBetweenDates = ( dateStart, dateEnd ) => {
const dayMs = 1000 * getTimeInSeconds( 'day' );
const dateStartMs = dateStart.getTime();
const dateEndMs = dateEnd.getTime();
return Math.round( Math.abs( dateStartMs - dateEndMs ) / dayMs );
};
/**
* Calculate the percent change between two values.
*
* @param {number} previous The previous value.
* @param {number} current The current value.
*
* @return {(number|string)} The percent change.
*/
export const changeToPercent = ( previous, current ) => {
// Prevent divide by zero errors.
if ( '0' === previous || 0 === previous || isNaN( previous ) ) {
return '';
}
const change = ( ( current - previous ) / previous * 100 ).toFixed( 1 );
// Avoid NaN at all costs.
if ( isNaN( change ) || 'Infinity' === change ) {
return '';
}
return change;
};
/**
* Extract a single column of data for a sparkline from a dataset prepared for Google charts.
*
* @param {Array} rowData An array of Google charts row data.
* @param {number} column The column to extract for the sparkline.
*
* @return {Array} Extracted column of dataset prepared for Google charts.
*
*/
export const extractForSparkline = ( rowData, column ) => {
return map( rowData, ( row, i ) => {
return [
row[ 0 ], // row[0] always contains the x axis value (typically date).
row[ column ] || ( 0 === i ? '' : 0 ), // the data for the sparkline.
];
} );
};
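/**
 * Refreshes the stored authentication state from the REST API.
 *
 * Updates the global `googlesitekit.setup` object with the current
 * authentication status, the required/granted scopes, and whether
 * re-authentication is needed.
 */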
export const refreshAuthentication = async () => {
try {
const response = await data.get( TYPE_CORE, 'user', 'authentication' );
const requiredAndGrantedScopes = response.grantedScopes.filter( ( scope ) => {
return -1 !== response.requiredScopes.indexOf( scope );
} );
// We should really be using state management. This is terrible.
global.googlesitekit.setup = global.googlesitekit.setup || {};
global.googlesitekit.setup.isAuthenticated = response.isAuthenticated;
global.googlesitekit.setup.requiredScopes = response.requiredScopes;
global.googlesitekit.setup.grantedScopes = response.grantedScopes;
global.googlesitekit.setup.needReauthenticate = requiredAndGrantedScopes.length < response.requiredScopes.length;
} catch ( e ) { // eslint-disable-line no-empty
}
};
/**
* Gets data for all modules.
*
* Because googlesitekit.modules contains both module information (legacy) and
* API functions (new), we should be using this function and never access
* googlesitekit.modules directly to access module data.
*
* This function should be removed once this object is no longer used to store
* legacy module data.
*
* @since 1.7.0
*
* @param {Object} _googlesitekit Optional. googlesitekit global; can be replaced for testing.
* @return {Object} Object with module data, with each module keyed by its slug.
*/
export const getModulesData = ( _googlesitekit = global.googlesitekit ) => {
const modulesObj = _googlesitekit.modules;
if ( ! modulesObj ) {
return {};
}
return Object.keys( modulesObj ).reduce( ( acc, slug ) => {
if ( 'object' !== typeof modulesObj[ slug ] ) {
return acc;
}
if (
'undefined' === typeof modulesObj[ slug ].slug ||
'undefined' === typeof modulesObj[ slug ].name ||
modulesObj[ slug ].slug !== slug
) {
return acc;
}
return { ...acc, [ slug ]: modulesObj[ slug ] };
}, {} );
};
/**
* Get the URL needed to initiate a reAuth flow.
*
 * @param {string} slug The module slug. If included, the redirect URL will include page={`googlesitekit-${slug}`}.
* @param {boolean} status The module activation status.
* @param {Object} _googlesitekit googlesitekit global; can be replaced for testing.
* @return {string} Authentication URL
*/
export const getReAuthURL = ( slug, status, _googlesitekit = global.googlesitekit ) => {
const {
connectURL,
adminRoot,
} = _googlesitekit.admin;
const { needReauthenticate } = _googlesitekit.setup;
const { screenID } = getModulesData( _googlesitekit )[ slug ];
// Special case handling for PageSpeed Insights.
// TODO: Refactor this out.
const pageSpeedQueryArgs = 'pagespeed-insights' === slug ? {
notification: 'authentication_success',
reAuth: undefined,
} : {};
let redirect = addQueryArgs(
adminRoot, {
// If the module has a submenu page, and is being activated, redirect back to the module page.
page: ( slug && status && screenID ) ? screenID : 'googlesitekit-dashboard',
slug,
reAuth: status,
...pageSpeedQueryArgs,
}
);
if ( ! needReauthenticate ) {
return redirect;
}
	// Encode the query string so the redirect URL does not interfere with the main URL.
const queryString = encodeURIComponent( getQueryString( redirect ) );
// Rebuild the redirect url.
redirect = adminRoot + '?' + queryString;
return addQueryArgs(
connectURL, {
redirect,
status,
}
);
};
/**
* Replace a filtered component with the passed component and merge their props.
*
* Components wrapped in the 'withFilters' higher order component have a filter applied to them (wp.hooks.applyFilters).
* This helper is used to replace (or "Fill") a filtered component with a passed component. To use, pass as the third
* argument to an addFilter call, eg:
*
* addFilter( `googlesitekit.ModuleSettingsDetails-${slug}`,
* 'googlesitekit.AdSenseModuleSettingsDetails',
* fillFilterWithComponent( AdSenseSettings, {
* onSettingsPage: true,
* } ) );
*
* @param {WPElement} NewComponent The component to render in place of the filtered component.
* @param {Object} newProps The props to pass down to the new component.
*
* @return {WPElement} React Component after overriding filtered component with NewComponent.
*/
export const fillFilterWithComponent = ( NewComponent, newProps ) => {
return ( OriginalComponent ) => {
return function InnerComponent( props ) {
return (
<NewComponent { ...props } { ...newProps } OriginalComponent={ OriginalComponent } />
);
};
};
};
/**
* Get Site Kit Admin URL Helper
*
* @param {string} page The page slug. Optional. Default is 'googlesitekit-dashboard'.
* @param {Object} args Optional. Object of arguments to add to the URL.
*
* @return {string} Admin URL with appended query params.
*/
export const getSiteKitAdminURL = ( page, args ) => {
const { adminRoot } = global.googlesitekit.admin;
if ( ! page ) {
page = 'googlesitekit-dashboard';
}
args = { page, ...args };
return addQueryArgs( adminRoot, args );
};
/**
* Verifies whether JSON is valid.
*
* @param {string} stringToValidate The string to validate.
*
* @return {boolean} Indicates JSON is valid.
*/
export const validateJSON = ( stringToValidate ) => {
try {
return ( JSON.parse( stringToValidate ) && !! stringToValidate );
} catch ( e ) {
return false;
}
};
/**
 * Verifies an Optimize ID.
 *
 * @param {string} stringToValidate The string to validate.
 *
 * @return {boolean} Indicates whether the Optimize ID is valid.
*/
export const validateOptimizeID = ( stringToValidate ) => {
return ( stringToValidate.match( /^GTM-[a-zA-Z\d]{7}$/ ) );
};
/**
 * Looks for an existing tag by requesting the front end HTML, used when no existing tag
 * was found on the server side while requesting the list of accounts.
*
* @param {string} module Module slug.
*
* @return {(string|null)} The tag id if found, otherwise null.
*/
export const getExistingTag = async ( module ) => {
const { homeURL, ampMode } = global.googlesitekit.admin;
const tagFetchQueryArgs = {
// Indicates a tag checking request. This lets Site Kit know not to output its own tags.
tagverify: 1,
// Add a timestamp for cache-busting.
timestamp: Date.now(),
};
// Always check the homepage regardless of AMP mode.
let tagFound = await scrapeTag( addQueryArgs( homeURL, tagFetchQueryArgs ), module );
if ( ! tagFound && 'secondary' === ampMode ) {
tagFound = await apiFetch( { path: '/wp/v2/posts?per_page=1' } ).then(
// Scrape the first post in AMP mode, if there is one.
( posts ) => posts.slice( 0, 1 ).map( async ( post ) => {
return await scrapeTag( addQueryArgs( post.link, { ...tagFetchQueryArgs, amp: 1 } ), module );
} ).pop()
);
}
return Promise.resolve( tagFound || null );
};
/**
* Scrapes a module tag from the given URL.
*
 * @param {string} url URL to request and parse the tag from.
* @param {string} module The module to parse tag for.
*
* @return {(string|null)} The tag id if found, otherwise null.
*/
export const scrapeTag = async ( url, module ) => {
try {
const html = await fetch( url, { credentials: 'omit' } ).then( ( res ) => res.text() );
return extractTag( html, module ) || null;
} catch ( error ) {
return null;
}
};
/**
* Extracts a tag related to a module from the given string.
*
* @param {string} string The string from where to find the tag.
 * @param {string} module The module to search a tag for, one of 'adsense', 'analytics', 'tagmanager' or 'setup'.
*
* @return {(string|boolean)} The tag id if found, otherwise false.
*/
export const extractTag = ( string, module ) => {
const matchers = {
adsense: adsenseTagMatchers,
analytics: analyticsTagMatchers,
tagmanager: tagmanagerTagMatchers,
setup: setupTagMatchers,
}[ module ] || [];
const matchingPattern = matchers.find( ( pattern ) => pattern.test( string ) );
if ( matchingPattern ) {
return matchingPattern.exec( string )[ 1 ];
}
return false;
};
/**
* Activate or Deactivate a Module.
*
 * @param {Object} restApiClient Rest API client from the data module; this is needed so we don't have to import the data module in this helper.
* @param {string} moduleSlug Module slug to activate or deactivate.
* @param {boolean} status True if module should be activated, false if it should be deactivated.
* @return {Promise} A promise for activating/deactivating a module.
*/
export const activateOrDeactivateModule = ( restApiClient, moduleSlug, status ) => {
return restApiClient.setModuleActive( moduleSlug, status ).then( ( responseData ) => {
const modulesData = getModulesData();
// We should really be using state management. This is terrible.
if ( modulesData[ moduleSlug ] ) {
modulesData[ moduleSlug ].active = responseData.active;
}
trackEvent(
`${ moduleSlug }_setup`,
! responseData.active ? 'module_deactivate' : 'module_activate',
moduleSlug,
);
return new Promise( ( resolve ) => {
resolve( responseData );
} );
} );
};
/**
 * Helper to enable/disable the confirm changes button
 * depending on whether the module settings have changed.
*
* @param {string} moduleSlug The module slug being edited.
* @param {Object} settingsMapping The mapping between form settings names and saved settings.
* @param {Object} settingsState The changed settings component state to compare with.
 * @param {Object} skipDOM Skip DOM checks/modifications, used for testing.
* @param {Object} _googlesitekit googlesitekit global; can be replaced for testing.
* @return {(void|boolean)} True if a module has been toggled.
*/
export const toggleConfirmModuleSettings = ( moduleSlug, settingsMapping, settingsState, skipDOM = false, _googlesitekit = global.googlesitekit ) => {
const { settings, setupComplete } = getModulesData( _googlesitekit )[ moduleSlug ];
const confirm = skipDOM || document.getElementById( `confirm-changes-${ moduleSlug }` );
if ( ! setupComplete || ! confirm ) {
return;
}
// Check if any of the mapped settings differ from the current/saved settings.
const changed = !! Object.keys( settingsMapping ).find( ( stateKey ) => {
const settingsKey = settingsMapping[ stateKey ];
return ! isEqual( settingsState[ stateKey ], settings[ settingsKey ] );
} );
if ( ! skipDOM ) {
confirm.disabled = ! changed;
}
return changed;
};
/**
* Trigger error notification on top of the page.
*
* @param {WPElement} ErrorComponent The error component to render in place.
* @param {Object} props The props to pass down to the error component. Optional.
*/
export const showErrorNotification = ( ErrorComponent, props = {} ) => {
addFilter( 'googlesitekit.ErrorNotification',
'googlesitekit.ErrorNotification',
fillFilterWithComponent( ErrorComponent, props ), 1 );
};
/**
 * Decodes HTML entities in a string.
 *
 * _.unescape doesn't seem to decode some entities for admin bar titles,
 * so this helper adds a combination of replacements as a workaround.
*
* @param {string} str The string to decode.
*
* @return {string} Decoded HTML entity.
*/
export const decodeHtmlEntity = ( str ) => {
if ( ! str ) {
return '';
}
const decoded = str.replace( /&#(\d+);/g, function( match, dec ) {
return String.fromCharCode( dec );
} ).replace( /(\\)/g, '' );
return unescape( decoded );
};
/**
* Gets the current dateRange string.
*
* @return {string} the date range string.
*/
export function getCurrentDateRange() {
/**
* Filter the date range used for queries.
*
	 * @param String The selected date range slug. Default 'last-28-days'.
*/
const dateRange = applyFilters( 'googlesitekit.dateRange', 'last-28-days' );
const daysMatch = dateRange.match( /last-(\d+)-days/ );
if ( daysMatch && daysMatch[ 1 ] ) {
return sprintf(
_n( '%s day', '%s days', parseInt( daysMatch[ 1 ], 10 ), 'google-site-kit' ),
daysMatch[ 1 ]
);
}
throw new Error( 'Unrecognized date range slug used in `googlesitekit.dateRange`.' );
}
/**
* Gets the current dateRange slug.
*
* @return {string} the date range slug.
*/
export function getCurrentDateRangeSlug() {
return applyFilters( 'googlesitekit.dateRange', 'last-28-days' );
}
/**
* Get the icon for a module.
*
* @param {string} module The module slug.
* @param {boolean} blockedByParentModule Whether the module is blocked by a parent module.
* @param {string} width The icon width.
* @param {string} height The icon height.
* @param {string} useClass Class string to use for icon.
*
* @return {HTMLImageElement} <img> tag with module icon.
*/
export function moduleIcon( module, blockedByParentModule, width = '33', height = '33', useClass = '' ) {
if ( ! global.googlesitekit ) {
return;
}
/* Set module icons. Page Speed Insights is a special case because only a .png is available. */
let iconComponent = <SvgIcon id={ module } width={ width } height={ height } className={ useClass } />;
if ( blockedByParentModule ) {
iconComponent = <SvgIcon id={ `${ module }-disabled` } width={ width } height={ height } className={ useClass } />;
} else if ( 'pagespeed-insights' === module ) {
iconComponent = <img src={ global.googlesitekit.admin.assetsRoot + 'images/icon-pagespeed.png' } width={ width } alt="" className={ useClass } />;
}
return iconComponent;
}
/**
* Gets the meta key for the given user option.
*
* @param {string} userOptionName User option name.
* @param {Object} _googlesitekitBaseData Site Kit base data (used for testing).
* @return {string} meta key name.
*/
export function getMetaKeyForUserOption( userOptionName, _googlesitekitBaseData = global._googlesitekitBaseData ) {
const { blogPrefix, isNetworkMode } = _googlesitekitBaseData;
if ( ! isNetworkMode ) {
return blogPrefix + userOptionName;
}
return userOptionName;
}
| 1 | 27,478 | This isn't directly related, but the `import React from 'react';` below should be removed. We never import this directly and any modules from it we need are imported through `@wordpress/element`. This was probably added automatically at some point, but we also provide this automatically via `ProvidePlugin`. | google-site-kit-wp | js |
@@ -84,7 +84,9 @@ module Travis
sh.cmd 'sudo chmod 2777 /usr/local/lib/R /usr/local/lib/R/site-library'
when 'osx'
- sh.cmd 'brew update', retry: true
+ # We want to update, but we don't need the 800+ lines of
+ # output.
+ sh.cmd 'brew update >/dev/null', retry: true
# Install from latest CRAN binary build for OS X
sh.cmd "wget #{config[:cran]}/bin/macosx/R-latest.pkg " + | 1 | module Travis
module Build
class Script
class R < Script
DEFAULTS = {
# Basic config options
cran: 'http://cran.rstudio.com',
warnings_are_errors: false,
# Dependencies (installed in this order)
apt_packages: [],
brew_packages: [],
r_binary_packages: [],
r_packages: [],
bioc_packages: [],
r_github_packages: [],
# Build/test options
r_build_args: '',
r_check_args: '--as-cran',
r_check_revdep: false,
# Heavy dependencies
pandoc: true,
pandoc_version: '1.13.1',
# Bioconductor
bioc: 'http://bioconductor.org/biocLite.R',
bioc_required: false,
bioc_use_devel: false,
}
def initialize(data)
# TODO(craigcitro): Is there a way to avoid explicitly
# naming arguments here?
super
@devtools_installed = false
@bioc_installed = false
end
def export
super
sh.export 'TRAVIS_R_VERSION', 'release', echo: false
end
def setup
super
# TODO(craigcitro): Confirm that these do, in fact, print as
# green. (They're yellow under vagrant.)
sh.echo 'R for Travis-CI is not officially supported, ' +
'but is community maintained.', ansi: :green
sh.echo 'Please file any issues using the following link',
ansi: :green
sh.echo ' https://github.com/travis-ci/travis-ci/issues' +
'/new?labels=community:r', ansi: :green
sh.echo 'and mention @craigcitro, @eddelbuettel and ' +
'@hadley in the issue', ansi: :green
# TODO(craigcitro): python-software-properties?
sh.echo 'Installing R'
case config[:os]
when 'linux'
# Set up our CRAN mirror.
sh.cmd 'sudo add-apt-repository ' +
"\"deb #{config[:cran]}/bin/linux/ubuntu " +
"$(lsb_release -cs)/\""
sh.cmd 'sudo apt-key adv --keyserver keyserver.ubuntu.com ' +
'--recv-keys E084DAB9'
# Add marutter's c2d4u repository.
sh.cmd 'sudo add-apt-repository -y "ppa:marutter/rrutter"'
sh.cmd 'sudo add-apt-repository -y "ppa:marutter/c2d4u"'
# Update after adding all repositories. Retry several
# times to work around flaky connection to Launchpad PPAs.
sh.cmd 'sudo apt-get update -qq', retry: true
# Install an R development environment. qpdf is also needed for
# --as-cran checks:
# https://stat.ethz.ch/pipermail/r-help//2012-September/335676.html
sh.cmd 'sudo apt-get install --no-install-recommends r-base-dev ' +
'r-recommended qpdf', retry: true
# Change permissions for /usr/local/lib/R/site-library
# This should really be via 'sudo adduser travis staff'
# but that may affect only the next shell
sh.cmd 'sudo chmod 2777 /usr/local/lib/R /usr/local/lib/R/site-library'
when 'osx'
sh.cmd 'brew update', retry: true
# Install from latest CRAN binary build for OS X
sh.cmd "wget #{config[:cran]}/bin/macosx/R-latest.pkg " +
'-O /tmp/R-latest.pkg'
sh.echo 'Installing OS X binary package for R'
sh.cmd 'sudo installer -pkg "/tmp/R-latest.pkg" -target /'
sh.rm '/tmp/R-latest.pkg'
else
sh.failure "Operating system not supported: #{config[:os]}"
end
setup_latex
setup_bioc if needs_bioc?
setup_pandoc if config[:pandoc]
end
def announce
super
sh.cmd 'Rscript -e \'sessionInfo()\''
sh.echo ''
end
def install
super
# Install any declared packages
apt_install config[:apt_packages]
brew_install config[:brew_packages]
r_binary_install config[:r_binary_packages]
r_install config[:r_packages]
bioc_install config[:bioc_packages]
r_github_install config[:r_github_packages]
# Install dependencies for the package we're testing.
install_deps
end
def script
# Build the package
sh.echo "Building with: R CMD build ${R_BUILD_ARGS}"
sh.cmd "R CMD build #{config[:r_build_args]} ."
tarball_script = [
'pkg <- devtools::as.package(".")',
'cat(paste0(pkg$package, "_", pkg$version, ".tar.gz"))',
].join('; ')
sh.export 'PKG_TARBALL', "$(Rscript -e '#{tarball_script}')"
# Test the package
sh.echo 'Testing with: R CMD check "${PKG_TARBALL}" ' +
"#{config[:r_check_args]}"
sh.cmd "R CMD check \"${PKG_TARBALL}\" #{config[:r_check_args]}"
# Turn warnings into errors, if requested.
if config[:warnings_are_errors]
export_rcheck_dir
sh.cmd 'grep -q -R "WARNING" "${RCHECK_DIR}/00check.log"; ' +
'RETVAL=$?'
sh.if '${RETVAL} -eq 0' do
sh.failure "Found warnings, treating as errors (as requested)."
end
end
# Check revdeps, if requested.
if config[:r_check_revdep]
sh.echo "Checking reverse dependencies"
revdep_script = [
'library("devtools");',
'res <- revdep_check();',
'if (length(res) > 0) {',
' revdep_check_summary(res);',
' revdep_check_save_logs(res);',
' q(status = 1, save = "no");',
'}',
].join(' ')
sh.cmd "Rscript -e '#{revdep_script}'"
end
end
def after_failure
dump_logs
super
end
private
def needs_bioc?
config[:bioc_required] || !config[:bioc_packages].empty?
end
def packages_as_arg(packages)
quoted_pkgs = packages.collect{|p| "\"#{p}\""}
"c(#{quoted_pkgs.join(', ')})"
end
def as_r_boolean(bool)
bool ? "TRUE" : "FALSE"
end
def r_install(packages)
return if packages.empty?
sh.echo "Installing R packages: #{packages.join(', ')}"
pkg_arg = packages_as_arg(packages)
sh.cmd "Rscript -e 'install.packages(#{pkg_arg}, " +
"repos=\"#{config[:cran]}\")'"
end
def r_github_install(packages)
return if packages.empty?
setup_devtools
sh.echo "Installing R packages from github: #{packages.join(', ')}"
pkg_arg = packages_as_arg(packages)
install_script = [
"options(repos = c(CRAN = \"#{config[:cran]}\"))",
"devtools::install_github(#{pkg_arg}, build_vignettes = FALSE)",
].join('; ')
sh.cmd "Rscript -e '#{install_script}'"
end
def r_binary_install(packages)
return if packages.empty?
case config[:os]
when 'linux'
sh.echo "Installing *binary* R packages: #{packages.join(', ')}"
apt_install packages.collect{|p| "r-cran-#{p.downcase}"}
else
sh.echo "R binary packages not supported on #{config[:os]}, " +
'falling back to source install'
r_install packages
end
end
def apt_install(packages)
return if packages.empty?
return unless (config[:os] == 'linux')
pkg_arg = packages.join(' ')
sh.echo "Installing apt packages: #{packages.join(', ')}"
sh.cmd "sudo apt-get install #{pkg_arg}", retry: true
end
def brew_install(packages)
return if packages.empty?
return unless (config[:os] == 'osx')
pkg_arg = packages.join(' ')
sh.echo "Installing brew packages: #{packages.join(', ')}"
sh.cmd "brew install #{pkg_arg}", retry: true
end
def bioc_install(packages)
return if packages.empty?
return unless needs_bioc?
setup_bioc
sh.echo "Installing bioc packages: #{packages.join(', ')}"
pkg_arg = packages_as_arg(packages)
install_script = [
"source(\"#{config[:bioc]}\")",
'options(repos=biocinstallRepos())',
"biocLite(#{pkg_arg})",
].join('; ')
sh.cmd "Rscript -e '#{install_script}'"
end
def install_deps
setup_devtools
if not needs_bioc?
install_script = [
"options(repos = c(CRAN = \"#{config[:cran]}\"))",
'devtools::install_deps(dependencies = TRUE)',
].join('; ')
else
install_script = [
'options(repos = BiocInstaller::biocinstallRepos())',
'devtools::install_deps(dependencies = TRUE)',
].join('; ')
end
sh.cmd "Rscript -e '#{install_script}'"
end
def export_rcheck_dir
pkg_script = (
'cat(paste0(devtools::as.package(".")$package, ".Rcheck"))'
)
sh.export 'RCHECK_DIR', "$(Rscript -e '#{pkg_script}')"
end
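# Print the contents of any *.out, *.log and *.fail files from the
# R CMD check directory, to aid debugging after a failure.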
def dump_logs
export_rcheck_dir
['out', 'log', 'fail'].each do |ext|
cmd = [
'for name in',
"$(find \"${RCHECK_DIR}\" -type f -name \"*#{ext}\");",
'do',
'echo ">>> Filename: ${name} <<<";',
'cat ${name};',
'done',
].join(' ')
sh.cmd cmd
end
end
def setup_bioc
unless @bioc_installed
sh.echo 'Installing BioConductor'
bioc_install_script = [
"source(\"#{config[:bioc]}\");",
'tryCatch(',
" useDevel(#{as_r_boolean(config[:bioc_use_devel])}),",
' error=function(e) {if (!grepl("already in use", e$message)) {e}}',
');',
].join(' ')
sh.cmd "Rscript -e '#{bioc_install_script}'", retry: true
end
@bioc_installed = true
end
def setup_devtools
unless @devtools_installed
case config[:os]
when 'linux'
r_binary_install ['devtools']
else
devtools_check = '!requireNamespace("devtools", quietly = TRUE)'
devtools_install = 'install.packages(c("devtools"), ' +
"repos=\"#{config[:cran]}\")"
sh.cmd "Rscript -e 'if (#{devtools_check}) #{devtools_install}'",
retry: true
end
end
@devtools_installed = true
end
def setup_latex
case config[:os]
when 'linux'
# We add a backports PPA for more recent TeX packages.
sh.cmd 'sudo add-apt-repository -y "ppa:texlive-backports/ppa"'
latex_packages = %w[
lmodern texinfo texlive-base texlive-extra-utils
texlive-fonts-extra texlive-fonts-recommended
texlive-generic-recommended texlive-latex-base
texlive-latex-extra texlive-latex-recommended
]
sh.cmd 'sudo apt-get install --no-install-recommends ' +
"#{latex_packages.join(' ')}",
retry: true
when 'mac'
# We use mactex-basic due to disk space constraints.
mactex = 'mactex-basic.pkg'
# TODO(craigcitro): Confirm that this will route us to the
# nearest mirror.
sh.cmd 'wget http://mirror.ctan.org/systems/mac/mactex/' +
"#{mactex} -O \"/tmp/#{mactex}\""
sh.echo 'Installing OS X binary package for MacTeX'
sh.cmd "sudo installer -pkg \"/tmp/#{mactex}\" -target /"
sh.rm "/tmp/#{mactex}"
sh.cmd 'sudo tlmgr update --self'
sh.cmd 'sudo tlmgr install inconsolata upquote courier ' +
'courier-scaled helvetic'
sh.export 'PATH', '$PATH:/usr/texbin'
end
end
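# Download the pandoc and pandoc-citeproc binaries from the RStudio
# buildtools S3 bucket and put them on the PATH.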
def setup_pandoc
case config[:os]
when 'linux'
os_path = 'linux/debian/x86_64'
when 'mac'
os_path = 'mac'
end
pandoc_url = 'https://s3.amazonaws.com/rstudio-buildtools/pandoc-' +
"#{config[:pandoc_version]}.zip"
pandoc_srcdir = "pandoc-#{config[:pandoc_version]}/#{os_path}"
pandoc_destdir = '${HOME}/opt/pandoc'
pandoc_tmpfile = "/tmp/pandoc-#{config[:pandoc_version]}.zip"
sh.mkdir pandoc_destdir, recursive: true
sh.cmd "curl -o #{pandoc_tmpfile} #{pandoc_url}"
['pandoc', 'pandoc-citeproc'].each do |filename|
binary_srcpath = File.join(pandoc_srcdir, filename)
sh.cmd "unzip -j #{pandoc_tmpfile} #{binary_srcpath} " +
"-d #{pandoc_destdir}"
sh.chmod '+x', "#{File.join(pandoc_destdir, filename)}"
end
sh.export 'PATH', "$PATH:#{pandoc_destdir}"
end
end
end
end
end
| 1 | 12,955 | You can also use `echo: false` instead. Either is fine; I'm just pointing it out. | travis-ci-travis-build | rb |
@@ -115,12 +115,13 @@ namespace NLog.Layouts
/// </summary>
protected override void InitializeLayout()
{
- base.InitializeLayout();
if (!WithHeader)
{
Header = null;
}
+ base.InitializeLayout();
+
switch (Delimiter)
{
case CsvColumnDelimiterMode.Auto: | 1 | //
// Copyright (c) 2004-2018 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Layouts
{
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Globalization;
using System.Text;
using NLog.Config;
/// <summary>
/// A specialized layout that renders CSV-formatted events.
/// </summary>
/// <remarks>If <see cref="LayoutWithHeaderAndFooter.Header"/> is set, then the header generation with columnnames will be disabled.</remarks>
[Layout("CsvLayout")]
[ThreadAgnostic]
[ThreadSafe]
[AppDomainFixedOutput]
public class CsvLayout : LayoutWithHeaderAndFooter
{
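// Values resolved in InitializeLayout() from the configured delimiter and quote character.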
private string _actualColumnDelimiter;
private string _doubleQuoteChar;
private char[] _quotableCharacters;
/// <summary>
/// Initializes a new instance of the <see cref="CsvLayout"/> class.
/// </summary>
public CsvLayout()
{
Columns = new List<CsvColumn>();
WithHeader = true;
Delimiter = CsvColumnDelimiterMode.Auto;
Quoting = CsvQuotingMode.Auto;
QuoteChar = "\"";
Layout = this;
Header = new CsvHeaderLayout(this);
Footer = null;
}
/// <summary>
/// Gets the array of parameters to be passed.
/// </summary>
/// <docgen category='CSV Options' order='10' />
[ArrayParameter(typeof(CsvColumn), "column")]
public IList<CsvColumn> Columns { get; private set; }
/// <summary>
/// Gets or sets a value indicating whether CSV should include header.
/// </summary>
/// <value>A value of <c>true</c> if CSV should include header; otherwise, <c>false</c>.</value>
/// <docgen category='CSV Options' order='10' />
public bool WithHeader { get; set; }
/// <summary>
/// Gets or sets the column delimiter.
/// </summary>
/// <docgen category='CSV Options' order='10' />
[DefaultValue("Auto")]
public CsvColumnDelimiterMode Delimiter { get; set; }
/// <summary>
/// Gets or sets the quoting mode.
/// </summary>
/// <docgen category='CSV Options' order='10' />
[DefaultValue("Auto")]
public CsvQuotingMode Quoting { get; set; }
/// <summary>
/// Gets or sets the quote Character.
/// </summary>
/// <docgen category='CSV Options' order='10' />
[DefaultValue("\"")]
public string QuoteChar { get; set; }
/// <summary>
/// Gets or sets the custom column delimiter value (valid when ColumnDelimiter is set to 'Custom').
/// </summary>
/// <docgen category='CSV Options' order='10' />
public string CustomColumnDelimiter { get; set; }
/// <summary>
/// Initializes the layout.
/// </summary>
protected override void InitializeLayout()
{
base.InitializeLayout();
if (!WithHeader)
{
Header = null;
}
switch (Delimiter)
{
case CsvColumnDelimiterMode.Auto:
_actualColumnDelimiter = CultureInfo.CurrentCulture.TextInfo.ListSeparator;
break;
case CsvColumnDelimiterMode.Comma:
_actualColumnDelimiter = ",";
break;
case CsvColumnDelimiterMode.Semicolon:
_actualColumnDelimiter = ";";
break;
case CsvColumnDelimiterMode.Pipe:
_actualColumnDelimiter = "|";
break;
case CsvColumnDelimiterMode.Tab:
_actualColumnDelimiter = "\t";
break;
case CsvColumnDelimiterMode.Space:
_actualColumnDelimiter = " ";
break;
case CsvColumnDelimiterMode.Custom:
_actualColumnDelimiter = CustomColumnDelimiter;
break;
}
_quotableCharacters = (QuoteChar + "\r\n" + _actualColumnDelimiter).ToCharArray();
_doubleQuoteChar = QuoteChar + QuoteChar;
}
internal override void PrecalculateBuilder(LogEventInfo logEvent, StringBuilder target)
{
PrecalculateBuilderInternal(logEvent, target);
}
/// <summary>
/// Formats the log event for write.
/// </summary>
/// <param name="logEvent">The log event to be formatted.</param>
/// <returns>A string representation of the log event.</returns>
protected override string GetFormattedMessage(LogEventInfo logEvent)
{
return RenderAllocateBuilder(logEvent);
}
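/// <summary>
/// Renders all configured columns for the given log event into <paramref name="sb"/>.
/// </summary>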
private void RenderAllColumns(LogEventInfo logEvent, StringBuilder sb)
{
//Memory profiling pointed out that using a foreach-loop was allocating
//an Enumerator. Switching to a for-loop avoids the memory allocation.
for (int i = 0; i < Columns.Count; i++)
{
CsvColumn col = Columns[i];
string text = col.Layout.Render(logEvent);
RenderCol(sb, i, text);
}
}
/// <summary>
/// Formats the log event for write.
/// </summary>
/// <param name="logEvent">The logging event.</param>
/// <param name="target"><see cref="StringBuilder"/> for the result</param>
protected override void RenderFormattedMessage(LogEventInfo logEvent, StringBuilder target)
{
RenderAllColumns(logEvent, target);
}
/// <summary>
/// Renders the header with the column names into <paramref name="sb"/>.
/// </summary>
/// <param name="sb">The target <see cref="StringBuilder"/>.</param>
private void RenderHeader(StringBuilder sb)
{
//Memory profiling pointed out that using a foreach-loop was allocating
//an Enumerator. Switching to a for-loop avoids the memory allocation.
for (int i = 0; i < Columns.Count; i++)
{
CsvColumn col = Columns[i];
string text = col.Name;
RenderCol(sb, i, text);
}
}
/// <summary>
/// Renders one column value (text or header) to <paramref name="sb"/>.
/// </summary>
/// <param name="sb">The target <see cref="StringBuilder"/>.</param>
/// <param name="columnIndex">Index of the current column.</param>
/// <param name="columnValue">The column text.</param>
private void RenderCol(StringBuilder sb, int columnIndex, string columnValue)
{
if (columnIndex != 0)
{
sb.Append(_actualColumnDelimiter);
}
bool useQuoting;
switch (Quoting)
{
case CsvQuotingMode.Nothing:
useQuoting = false;
break;
case CsvQuotingMode.All:
useQuoting = true;
break;
default:
case CsvQuotingMode.Auto:
if (columnValue.IndexOfAny(_quotableCharacters) >= 0)
{
useQuoting = true;
}
else
{
useQuoting = false;
}
break;
}
if (useQuoting)
{
sb.Append(QuoteChar);
}
if (useQuoting)
{
sb.Append(columnValue.Replace(QuoteChar, _doubleQuoteChar));
}
else
{
sb.Append(columnValue);
}
if (useQuoting)
{
sb.Append(QuoteChar);
}
}
/// <summary>
/// Header with column names for CSV layout.
/// </summary>
[ThreadAgnostic]
private class CsvHeaderLayout : Layout
{
private readonly CsvLayout _parent;
/// <summary>
/// Initializes a new instance of the <see cref="CsvHeaderLayout"/> class.
/// </summary>
/// <param name="parent">The parent.</param>
public CsvHeaderLayout(CsvLayout parent)
{
_parent = parent;
}
internal override void PrecalculateBuilder(LogEventInfo logEvent, StringBuilder target)
{
PrecalculateBuilderInternal(logEvent, target);
}
/// <summary>
/// Renders the layout for the specified logging event by invoking layout renderers.
/// </summary>
/// <param name="logEvent">The logging event.</param>
/// <returns>The rendered layout.</returns>
protected override string GetFormattedMessage(LogEventInfo logEvent)
{
return RenderAllocateBuilder(logEvent);
}
/// <summary>
/// Renders the layout for the specified logging event by invoking layout renderers.
/// </summary>
/// <param name="logEvent">The logging event.</param>
/// <param name="target"><see cref="StringBuilder"/> for the result</param>
protected override void RenderFormattedMessage(LogEventInfo logEvent, StringBuilder target)
{
_parent.RenderHeader(target);
}
}
/// <summary>
/// Generate description of CSV Layout
/// </summary>
/// <returns>CSV Layout String Description</returns>
public override string ToString()
{
return ToStringWithNestedItems(Columns, c => c.Name);
}
}
}
| 1 | 17,693 | what's the reason for this moved line? | NLog-NLog | .cs |
@@ -986,6 +986,10 @@ import 'programStyles';
lines = [];
}
+ if (overlayText && showTitle) {
+ lines = [item.Name];
+ }
+
const addRightTextMargin = isOuterFooter && options.cardLayout && !options.centerText && options.cardFooterAside !== 'none' && layoutManager.mobile;
html += getCardTextLines(lines, cssClass, !options.overlayText, isOuterFooter, options.cardLayout, addRightTextMargin, options.lines); | 1 | /* eslint-disable indent */
/**
* Module for building cards from item data.
* @module components/cardBuilder/cardBuilder
*/
import datetime from 'datetime';
import imageLoader from 'imageLoader';
import connectionManager from 'connectionManager';
import itemHelper from 'itemHelper';
import focusManager from 'focusManager';
import indicators from 'indicators';
import globalize from 'globalize';
import layoutManager from 'layoutManager';
import dom from 'dom';
import browser from 'browser';
import playbackManager from 'playbackManager';
import itemShortcuts from 'itemShortcuts';
import imageHelper from 'scripts/imagehelper';
import 'css!./card';
import 'paper-icon-button-light';
import 'programStyles';
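// Only animate the card focus transform on browsers that handle it well (not slow browsers or Edge).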
const enableFocusTransform = !browser.slow && !browser.edge;
/**
* Generate the HTML markup for cards for a set of items.
* @param items - The items used to generate cards.
* @param options - The options of the cards.
* @returns {string} The HTML markup for the cards.
*/
export function getCardsHtml(items, options) {
if (arguments.length === 1) {
options = arguments[0];
items = options.items;
}
return buildCardsHtmlInternal(items, options);
}
/**
* Computes the number of posters per row.
* @param {string} shape - Shape of the cards.
* @param {number} screenWidth - Width of the screen.
* @param {boolean} isOrientationLandscape - Flag for the orientation of the screen.
* @returns {number} Number of cards per row for an itemsContainer.
*/
function getPostersPerRow(shape, screenWidth, isOrientationLandscape) {
switch (shape) {
case 'portrait':
if (layoutManager.tv) {
return 100 / 16.66666667;
}
if (screenWidth >= 2200) {
return 100 / 10;
}
if (screenWidth >= 1920) {
return 100 / 11.1111111111;
}
if (screenWidth >= 1600) {
return 100 / 12.5;
}
if (screenWidth >= 1400) {
return 100 / 14.28571428571;
}
if (screenWidth >= 1200) {
return 100 / 16.66666667;
}
if (screenWidth >= 800) {
return 5;
}
if (screenWidth >= 700) {
return 4;
}
if (screenWidth >= 500) {
return 100 / 33.33333333;
}
return 100 / 33.33333333;
case 'square':
if (layoutManager.tv) {
return 100 / 16.66666667;
}
if (screenWidth >= 2200) {
return 100 / 10;
}
if (screenWidth >= 1920) {
return 100 / 11.1111111111;
}
if (screenWidth >= 1600) {
return 100 / 12.5;
}
if (screenWidth >= 1400) {
return 100 / 14.28571428571;
}
if (screenWidth >= 1200) {
return 100 / 16.66666667;
}
if (screenWidth >= 800) {
return 5;
}
if (screenWidth >= 700) {
return 4;
}
if (screenWidth >= 500) {
return 100 / 33.33333333;
}
return 2;
case 'banner':
if (screenWidth >= 2200) {
return 100 / 25;
}
if (screenWidth >= 1200) {
return 100 / 33.33333333;
}
if (screenWidth >= 800) {
return 2;
}
return 1;
case 'backdrop':
if (layoutManager.tv) {
return 100 / 25;
}
if (screenWidth >= 2500) {
return 6;
}
if (screenWidth >= 1600) {
return 5;
}
if (screenWidth >= 1200) {
return 4;
}
if (screenWidth >= 770) {
return 3;
}
if (screenWidth >= 420) {
return 2;
}
return 1;
case 'smallBackdrop':
if (screenWidth >= 1600) {
return 100 / 12.5;
}
if (screenWidth >= 1400) {
return 100 / 14.2857142857;
}
if (screenWidth >= 1200) {
return 100 / 16.666666666666666666;
}
if (screenWidth >= 1000) {
return 5;
}
if (screenWidth >= 800) {
return 4;
}
if (screenWidth >= 500) {
return 100 / 33.33333333;
}
return 2;
case 'overflowSmallBackdrop':
if (layoutManager.tv) {
return 100 / 18.9;
}
if (isOrientationLandscape) {
if (screenWidth >= 800) {
return 100 / 15.5;
}
return 100 / 23.3;
} else {
if (screenWidth >= 540) {
return 100 / 30;
}
return 100 / 72;
}
case 'overflowPortrait':
if (layoutManager.tv) {
return 100 / 15.5;
}
if (isOrientationLandscape) {
if (screenWidth >= 1700) {
return 100 / 11.6;
}
return 100 / 15.5;
} else {
if (screenWidth >= 1400) {
return 100 / 15;
}
if (screenWidth >= 1200) {
return 100 / 18;
}
if (screenWidth >= 760) {
return 100 / 23;
}
if (screenWidth >= 400) {
return 100 / 31.5;
}
return 100 / 42;
}
case 'overflowSquare':
if (layoutManager.tv) {
return 100 / 15.5;
}
if (isOrientationLandscape) {
if (screenWidth >= 1700) {
return 100 / 11.6;
}
return 100 / 15.5;
} else {
if (screenWidth >= 1400) {
return 100 / 15;
}
if (screenWidth >= 1200) {
return 100 / 18;
}
if (screenWidth >= 760) {
return 100 / 23;
}
if (screenWidth >= 540) {
return 100 / 31.5;
}
return 100 / 42;
}
case 'overflowBackdrop':
if (layoutManager.tv) {
return 100 / 23.3;
}
if (isOrientationLandscape) {
if (screenWidth >= 1700) {
return 100 / 18.5;
}
return 100 / 23.3;
} else {
if (screenWidth >= 1800) {
return 100 / 23.5;
}
if (screenWidth >= 1400) {
return 100 / 30;
}
if (screenWidth >= 760) {
return 100 / 40;
}
if (screenWidth >= 640) {
return 100 / 56;
}
return 100 / 72;
}
default:
return 4;
}
}
/**
* Checks if the window is resizable.
* @param {number} windowWidth - Width of the device's screen.
* @returns {boolean} - Result of the check.
*/
function isResizable(windowWidth) {
const screen = window.screen;
if (screen) {
const screenWidth = screen.availWidth;
if ((screenWidth - windowWidth) > 20) {
return true;
}
}
return false;
}
/**
* Gets the width of a card's image according to the shape and amount of cards per row.
* @param {string} shape - Shape of the card.
* @param {number} screenWidth - Width of the screen.
* @param {boolean} isOrientationLandscape - Flag for the orientation of the screen.
* @returns {number} Width of the image for a card.
*/
function getImageWidth(shape, screenWidth, isOrientationLandscape) {
const imagesPerRow = getPostersPerRow(shape, screenWidth, isOrientationLandscape);
return Math.round(screenWidth / imagesPerRow);
}
/**
* Normalizes the options for a card.
* @param {Object} items - A set of items.
* @param {Object} options - Options for handling the items.
*/
function setCardData(items, options) {
options.shape = options.shape || 'auto';
const primaryImageAspectRatio = imageLoader.getPrimaryImageAspectRatio(items);
if (['auto', 'autohome', 'autooverflow', 'autoVertical'].includes(options.shape)) {
const requestedShape = options.shape;
options.shape = null;
if (primaryImageAspectRatio) {
if (primaryImageAspectRatio >= 3) {
options.shape = 'banner';
options.coverImage = true;
} else if (primaryImageAspectRatio >= 1.33) {
options.shape = requestedShape === 'autooverflow' ? 'overflowBackdrop' : 'backdrop';
} else if (primaryImageAspectRatio > 0.71) {
options.shape = requestedShape === 'autooverflow' ? 'overflowSquare' : 'square';
} else {
options.shape = requestedShape === 'autooverflow' ? 'overflowPortrait' : 'portrait';
}
}
if (!options.shape) {
options.shape = options.defaultShape || (requestedShape === 'autooverflow' ? 'overflowSquare' : 'square');
}
}
if (options.preferThumb === 'auto') {
options.preferThumb = options.shape === 'backdrop' || options.shape === 'overflowBackdrop';
}
options.uiAspect = getDesiredAspect(options.shape);
options.primaryImageAspectRatio = primaryImageAspectRatio;
if (!options.width && options.widths) {
options.width = options.widths[options.shape];
}
if (options.rows && typeof (options.rows) !== 'number') {
options.rows = options.rows[options.shape];
}
if (!options.width) {
let screenWidth = dom.getWindowSize().innerWidth;
const screenHeight = dom.getWindowSize().innerHeight;
if (isResizable(screenWidth)) {
const roundScreenTo = 100;
screenWidth = Math.floor(screenWidth / roundScreenTo) * roundScreenTo;
}
options.width = getImageWidth(options.shape, screenWidth, screenWidth > (screenHeight * 1.3));
}
}
/**
* Generates the internal HTML markup for cards.
* @param {Object} items - Items for which to generate the markup.
* @param {Object} options - Options for generating the markup.
* @returns {string} The internal HTML markup of the cards.
*/
function buildCardsHtmlInternal(items, options) {
let isVertical = false;
if (options.shape === 'autoVertical') {
isVertical = true;
}
setCardData(items, options);
let html = '';
let itemsInRow = 0;
let currentIndexValue;
let hasOpenRow;
let hasOpenSection;
const sectionTitleTagName = options.sectionTitleTagName || 'div';
let apiClient;
let lastServerId;
for (const [i, item] of items.entries()) {
const serverId = item.ServerId || options.serverId;
if (serverId !== lastServerId) {
lastServerId = serverId;
apiClient = connectionManager.getApiClient(lastServerId);
}
if (options.indexBy) {
let newIndexValue = '';
if (options.indexBy === 'PremiereDate') {
if (item.PremiereDate) {
try {
newIndexValue = datetime.toLocaleDateString(datetime.parseISO8601Date(item.PremiereDate), { weekday: 'long', month: 'long', day: 'numeric' });
} catch (error) {
console.error('error parsing timestamp for premiere date', error);
}
}
} else if (options.indexBy === 'ProductionYear') {
newIndexValue = item.ProductionYear;
} else if (options.indexBy === 'CommunityRating') {
newIndexValue = item.CommunityRating ? (Math.floor(item.CommunityRating) + (item.CommunityRating % 1 >= 0.5 ? 0.5 : 0)) + '+' : null;
}
if (newIndexValue !== currentIndexValue) {
if (hasOpenRow) {
html += '</div>';
hasOpenRow = false;
itemsInRow = 0;
}
if (hasOpenSection) {
html += '</div>';
if (isVertical) {
html += '</div>';
}
hasOpenSection = false;
}
if (isVertical) {
html += '<div class="verticalSection">';
} else {
html += '<div class="horizontalSection">';
}
html += '<' + sectionTitleTagName + ' class="sectionTitle">' + newIndexValue + '</' + sectionTitleTagName + '>';
if (isVertical) {
html += '<div class="itemsContainer vertical-wrap">';
}
currentIndexValue = newIndexValue;
hasOpenSection = true;
}
}
if (options.rows && itemsInRow === 0) {
if (hasOpenRow) {
html += '</div>';
hasOpenRow = false;
}
html += '<div class="cardColumn">';
hasOpenRow = true;
}
html += buildCard(i, item, apiClient, options);
itemsInRow++;
if (options.rows && itemsInRow >= options.rows) {
html += '</div>';
hasOpenRow = false;
itemsInRow = 0;
}
}
if (hasOpenRow) {
html += '</div>';
}
if (hasOpenSection) {
html += '</div>';
if (isVertical) {
html += '</div>';
}
}
return html;
}
/**
* Computes the aspect ratio for a card given its shape.
* @param {string} shape - Shape for which to get the aspect ratio.
* @returns {null|number} Ratio of the shape.
*/
function getDesiredAspect(shape) {
if (shape) {
shape = shape.toLowerCase();
if (shape.indexOf('portrait') !== -1) {
return (2 / 3);
}
if (shape.indexOf('backdrop') !== -1) {
return (16 / 9);
}
if (shape.indexOf('square') !== -1) {
return 1;
}
if (shape.indexOf('banner') !== -1) {
return (1000 / 185);
}
}
return null;
}
/**
 * Get the URL of the card's image.
* @param {Object} item - Item for which to generate a card.
* @param {Object} apiClient - API client object.
* @param {Object} options - Options of the card.
* @param {string} shape - Shape of the desired image.
* @returns {Object} Object representing the URL of the card's image.
*/
function getCardImageUrl(item, apiClient, options, shape) {
item = item.ProgramInfo || item;
const width = options.width;
let height = null;
const primaryImageAspectRatio = item.PrimaryImageAspectRatio;
let forceName = false;
let imgUrl = null;
let imgTag = null;
let coverImage = false;
let uiAspect = null;
let imgType = null;
let itemId = null;
if (options.preferThumb && item.ImageTags && item.ImageTags.Thumb) {
imgType = 'Thumb';
imgTag = item.ImageTags.Thumb;
} else if ((options.preferBanner || shape === 'banner') && item.ImageTags && item.ImageTags.Banner) {
imgType = 'Banner';
imgTag = item.ImageTags.Banner;
} else if (options.preferDisc && item.ImageTags && item.ImageTags.Disc) {
imgType = 'Disc';
imgTag = item.ImageTags.Disc;
} else if (options.preferLogo && item.ImageTags && item.ImageTags.Logo) {
imgType = 'Logo';
imgTag = item.ImageTags.Logo;
} else if (options.preferLogo && item.ParentLogoImageTag && item.ParentLogoItemId) {
imgType = 'Logo';
imgTag = item.ParentLogoImageTag;
itemId = item.ParentLogoItemId;
} else if (options.preferThumb && item.SeriesThumbImageTag && options.inheritThumb !== false) {
imgType = 'Thumb';
imgTag = item.SeriesThumbImageTag;
itemId = item.SeriesId;
} else if (options.preferThumb && item.ParentThumbItemId && options.inheritThumb !== false && item.MediaType !== 'Photo') {
imgType = 'Thumb';
imgTag = item.ParentThumbImageTag;
itemId = item.ParentThumbItemId;
} else if (options.preferThumb && item.BackdropImageTags && item.BackdropImageTags.length) {
imgType = 'Backdrop';
imgTag = item.BackdropImageTags[0];
forceName = true;
} else if (options.preferThumb && item.ParentBackdropImageTags && item.ParentBackdropImageTags.length && options.inheritThumb !== false && item.Type === 'Episode') {
imgType = 'Backdrop';
imgTag = item.ParentBackdropImageTags[0];
itemId = item.ParentBackdropItemId;
} else if (item.ImageTags && item.ImageTags.Primary && (item.Type !== 'Episode' || item.ChildCount !== 0)) {
imgType = 'Primary';
imgTag = item.ImageTags.Primary;
height = width && primaryImageAspectRatio ? Math.round(width / primaryImageAspectRatio) : null;
if (options.preferThumb && options.showTitle !== false) {
forceName = true;
}
if (primaryImageAspectRatio) {
uiAspect = getDesiredAspect(shape);
if (uiAspect) {
coverImage = (Math.abs(primaryImageAspectRatio - uiAspect) / uiAspect) <= 0.2;
}
}
} else if (item.SeriesPrimaryImageTag) {
imgType = 'Primary';
imgTag = item.SeriesPrimaryImageTag;
itemId = item.SeriesId;
} else if (item.PrimaryImageTag) {
imgType = 'Primary';
imgTag = item.PrimaryImageTag;
itemId = item.PrimaryImageItemId;
height = width && primaryImageAspectRatio ? Math.round(width / primaryImageAspectRatio) : null;
if (options.preferThumb && options.showTitle !== false) {
forceName = true;
}
if (primaryImageAspectRatio) {
uiAspect = getDesiredAspect(shape);
if (uiAspect) {
coverImage = (Math.abs(primaryImageAspectRatio - uiAspect) / uiAspect) <= 0.2;
}
}
} else if (item.ParentPrimaryImageTag) {
imgType = 'Primary';
imgTag = item.ParentPrimaryImageTag;
itemId = item.ParentPrimaryImageItemId;
} else if (item.AlbumId && item.AlbumPrimaryImageTag) {
imgType = 'Primary';
imgTag = item.AlbumPrimaryImageTag;
itemId = item.AlbumId;
height = width && primaryImageAspectRatio ? Math.round(width / primaryImageAspectRatio) : null;
if (primaryImageAspectRatio) {
uiAspect = getDesiredAspect(shape);
if (uiAspect) {
coverImage = (Math.abs(primaryImageAspectRatio - uiAspect) / uiAspect) <= 0.2;
}
}
} else if (item.Type === 'Season' && item.ImageTags && item.ImageTags.Thumb) {
imgType = 'Thumb';
imgTag = item.ImageTags.Thumb;
} else if (item.BackdropImageTags && item.BackdropImageTags.length) {
imgType = 'Backdrop';
imgTag = item.BackdropImageTags[0];
} else if (item.ImageTags && item.ImageTags.Thumb) {
imgType = 'Thumb';
imgTag = item.ImageTags.Thumb;
} else if (item.SeriesThumbImageTag && options.inheritThumb !== false) {
imgType = 'Thumb';
imgTag = item.SeriesThumbImageTag;
itemId = item.SeriesId;
} else if (item.ParentThumbItemId && options.inheritThumb !== false) {
imgType = 'Thumb';
imgTag = item.ParentThumbImageTag;
itemId = item.ParentThumbItemId;
} else if (item.ParentBackdropImageTags && item.ParentBackdropImageTags.length && options.inheritThumb !== false) {
imgType = 'Backdrop';
imgTag = item.ParentBackdropImageTags[0];
itemId = item.ParentBackdropItemId;
}
if (!itemId) {
itemId = item.Id;
}
if (imgTag && imgType) {
imgUrl = apiClient.getScaledImageUrl(itemId, {
type: imgType,
maxHeight: height,
maxWidth: width,
tag: imgTag
});
}
const blurHashes = options.imageBlurhashes || item.ImageBlurHashes || {};
return {
imgUrl: imgUrl,
blurhash: (blurHashes[imgType] || {})[imgTag],
forceName: forceName,
coverImage: coverImage
};
}
/**
* Generates a random integer in a given range.
* @param {number} min - Minimum of the range.
* @param {number} max - Maximum of the range.
* @returns {number} Randomly generated number.
*/
function getRandomInt(min, max) {
return Math.floor(Math.random() * (max - min + 1)) + min;
}
/**
* Generates an index used to select the default color of a card based on a string.
* @param {string} str - String to use for generating the index.
* @returns {number} Index of the color.
*/
function getDefaultColorIndex(str) {
const numRandomColors = 5;
if (str) {
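        // Take the char code of the middle character, sum its decimal digits, and use the last digit of that sum (mod numRandomColors) as a 1-based color index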
const charIndex = Math.floor(str.length / 2);
const character = String(str.substr(charIndex, 1).charCodeAt());
let sum = 0;
for (let i = 0; i < character.length; i++) {
sum += parseInt(character.charAt(i));
}
const index = String(sum).substr(-1);
return (index % numRandomColors) + 1;
} else {
return getRandomInt(1, numRandomColors);
}
}
/**
* Generates the HTML markup for a card's text.
* @param {Array} lines - Array containing the text lines.
* @param {string} cssClass - Base CSS class to use for the lines.
* @param {boolean} forceLines - Flag to force the rendering of all lines.
* @param {boolean} isOuterFooter - Flag to mark the text lines as outer footer.
* @param {string} cardLayout - DEPRECATED
* @param {boolean} addRightMargin - Flag to add a right margin to the text.
* @param {number} maxLines - Maximum number of lines to render.
* @returns {string} HTML markup for the card's text.
*/
function getCardTextLines(lines, cssClass, forceLines, isOuterFooter, cardLayout, addRightMargin, maxLines) {
let html = '';
let valid = 0;
for (let i = 0; i < lines.length; i++) {
let currentCssClass = cssClass;
const text = lines[i];
if (valid > 0 && isOuterFooter) {
currentCssClass += ' cardText-secondary';
} else if (valid === 0 && isOuterFooter) {
currentCssClass += ' cardText-first';
}
if (addRightMargin) {
currentCssClass += ' cardText-rightmargin';
}
if (text) {
html += "<div class='" + currentCssClass + "'>";
html += text;
html += '</div>';
valid++;
if (maxLines && valid >= maxLines) {
break;
}
}
}
if (forceLines) {
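        // Pad with blank lines so the card always renders the expected number of text rows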
const linesLength = maxLines || Math.min(lines.length, maxLines || lines.length);
while (valid < linesLength) {
html += "<div class='" + cssClass + "'> </div>";
valid++;
}
}
return html;
}
/**
* Determines if the item is live TV.
* @param {Object} item - Item to use for the check.
* @returns {boolean} Flag showing if the item is live TV.
*/
function isUsingLiveTvNaming(item) {
return item.Type === 'Program' || item.Type === 'Timer' || item.Type === 'Recording';
}
/**
* Returns the air time text for the item based on the given times.
* @param {object} item - Item used to generate the air time text.
* @param {string} showAirDateTime - ISO8601 date for the start of the show.
* @param {string} showAirEndTime - ISO8601 date for the end of the show.
* @returns {string} The air time text for the item based on the given dates.
*/
function getAirTimeText(item, showAirDateTime, showAirEndTime) {
let airTimeText = '';
if (item.StartDate) {
try {
let date = datetime.parseISO8601Date(item.StartDate);
if (showAirDateTime) {
airTimeText += datetime.toLocaleDateString(date, { weekday: 'short', month: 'short', day: 'numeric' }) + ' ';
}
airTimeText += datetime.getDisplayTime(date);
if (item.EndDate && showAirEndTime) {
date = datetime.parseISO8601Date(item.EndDate);
airTimeText += ' - ' + datetime.getDisplayTime(date);
}
} catch (e) {
console.error('error parsing date: ' + item.StartDate);
}
}
return airTimeText;
}
/**
* Generates the HTML markup for the card's footer text.
* @param {Object} item - Item used to generate the footer text.
* @param {Object} apiClient - API client instance.
* @param {Object} options - Options used to generate the footer text.
* @param {string} showTitle - Flag to show the title in the footer.
* @param {boolean} forceName - Flag to force showing the name of the item.
* @param {boolean} overlayText - Flag to show overlay text.
* @param {Object} imgUrl - Object representing the card's image URL.
* @param {string} footerClass - CSS classes of the footer element.
* @param {string} progressHtml - HTML markup of the progress bar element.
* @param {string} logoUrl - URL of the logo for the item.
* @param {boolean} isOuterFooter - Flag to mark the text as outer footer.
* @returns {string} HTML markup of the card's footer text element.
*/
function getCardFooterText(item, apiClient, options, showTitle, forceName, overlayText, imgUrl, footerClass, progressHtml, logoUrl, isOuterFooter) {
let html = '';
if (logoUrl) {
html += '<div class="lazy cardFooterLogo" data-src="' + logoUrl + '"></div>';
}
const showOtherText = isOuterFooter ? !overlayText : overlayText;
if (isOuterFooter && options.cardLayout && layoutManager.mobile) {
if (options.cardFooterAside !== 'none') {
html += '<button is="paper-icon-button-light" class="itemAction btnCardOptions cardText-secondary" data-action="menu"><span class="material-icons more_vert"></span></button>';
}
}
const cssClass = options.centerText ? 'cardText cardTextCentered' : 'cardText';
const serverId = item.ServerId || options.serverId;
let lines = [];
const parentTitleUnderneath = item.Type === 'MusicAlbum' || item.Type === 'Audio' || item.Type === 'MusicVideo';
let titleAdded;
if (showOtherText) {
if ((options.showParentTitle || options.showParentTitleOrTitle) && !parentTitleUnderneath) {
if (isOuterFooter && item.Type === 'Episode' && item.SeriesName) {
if (item.SeriesId) {
lines.push(getTextActionButton({
Id: item.SeriesId,
ServerId: serverId,
Name: item.SeriesName,
Type: 'Series',
IsFolder: true
}));
} else {
lines.push(item.SeriesName);
}
} else {
if (isUsingLiveTvNaming(item)) {
lines.push(item.Name);
if (!item.EpisodeTitle) {
titleAdded = true;
}
} else {
const parentTitle = item.SeriesName || item.Series || item.Album || item.AlbumArtist || '';
if (parentTitle || showTitle) {
lines.push(parentTitle);
}
}
}
}
}
let showMediaTitle = (showTitle && !titleAdded) || (options.showParentTitleOrTitle && !lines.length);
if (!showMediaTitle && !titleAdded && (showTitle || forceName)) {
showMediaTitle = true;
}
if (showMediaTitle) {
const name = options.showTitle === 'auto' && !item.IsFolder && item.MediaType === 'Photo' ? '' : itemHelper.getDisplayName(item, {
includeParentInfo: options.includeParentInfoInTitle
});
lines.push(getTextActionButton({
Id: item.Id,
ServerId: serverId,
Name: name,
Type: item.Type,
CollectionType: item.CollectionType,
IsFolder: item.IsFolder
}));
}
if (showOtherText) {
if (options.showParentTitle && parentTitleUnderneath) {
if (isOuterFooter && item.AlbumArtists && item.AlbumArtists.length) {
item.AlbumArtists[0].Type = 'MusicArtist';
item.AlbumArtists[0].IsFolder = true;
lines.push(getTextActionButton(item.AlbumArtists[0], null, serverId));
} else {
lines.push(isUsingLiveTvNaming(item) ? item.Name : (item.SeriesName || item.Series || item.Album || item.AlbumArtist || ''));
}
}
if (options.showItemCounts) {
lines.push(getItemCountsHtml(options, item));
}
if (options.textLines) {
const additionalLines = options.textLines(item);
for (let i = 0; i < additionalLines.length; i++) {
lines.push(additionalLines[i]);
}
}
if (options.showSongCount) {
let songLine = '';
if (item.SongCount) {
songLine = item.SongCount === 1 ?
globalize.translate('ValueOneSong') :
globalize.translate('ValueSongCount', item.SongCount);
}
lines.push(songLine);
}
if (options.showPremiereDate) {
if (item.PremiereDate) {
try {
lines.push(datetime.toLocaleDateString(
datetime.parseISO8601Date(item.PremiereDate),
{ weekday: 'long', month: 'long', day: 'numeric' }
));
} catch (err) {
lines.push('');
}
} else {
lines.push('');
}
}
if (options.showYear || options.showSeriesYear) {
if (item.Type === 'Series') {
if (item.Status === 'Continuing') {
lines.push(globalize.translate('SeriesYearToPresent', item.ProductionYear || ''));
} else {
if (item.EndDate && item.ProductionYear) {
const endYear = datetime.parseISO8601Date(item.EndDate).getFullYear();
lines.push(item.ProductionYear + ((endYear === item.ProductionYear) ? '' : (' - ' + endYear)));
} else {
lines.push(item.ProductionYear || '');
}
}
} else {
lines.push(item.ProductionYear || '');
}
}
if (options.showRuntime) {
if (item.RunTimeTicks) {
lines.push(datetime.getDisplayRunningTime(item.RunTimeTicks));
} else {
lines.push('');
}
}
if (options.showAirTime) {
lines.push(getAirTimeText(item, options.showAirDateTime, options.showAirEndTime) || '');
}
if (options.showChannelName) {
if (item.ChannelId) {
lines.push(getTextActionButton({
Id: item.ChannelId,
ServerId: serverId,
Name: item.ChannelName,
Type: 'TvChannel',
MediaType: item.MediaType,
IsFolder: false
}, item.ChannelName));
} else {
lines.push(item.ChannelName || ' ');
}
}
if (options.showCurrentProgram && item.Type === 'TvChannel') {
if (item.CurrentProgram) {
lines.push(item.CurrentProgram.Name);
} else {
lines.push('');
}
}
if (options.showCurrentProgramTime && item.Type === 'TvChannel') {
if (item.CurrentProgram) {
lines.push(getAirTimeText(item.CurrentProgram, false, true) || '');
} else {
lines.push('');
}
}
if (options.showSeriesTimerTime) {
if (item.RecordAnyTime) {
lines.push(globalize.translate('Anytime'));
} else {
lines.push(datetime.getDisplayTime(item.StartDate));
}
}
if (options.showSeriesTimerChannel) {
if (item.RecordAnyChannel) {
lines.push(globalize.translate('AllChannels'));
} else {
lines.push(item.ChannelName || globalize.translate('OneChannel'));
}
}
if (options.showPersonRoleOrType) {
if (item.Role) {
lines.push(globalize.translate('PersonRole', item.Role));
}
}
}
if ((showTitle || !imgUrl) && forceName && overlayText && lines.length === 1) {
lines = [];
}
const addRightTextMargin = isOuterFooter && options.cardLayout && !options.centerText && options.cardFooterAside !== 'none' && layoutManager.mobile;
html += getCardTextLines(lines, cssClass, !options.overlayText, isOuterFooter, options.cardLayout, addRightTextMargin, options.lines);
if (progressHtml) {
html += progressHtml;
}
if (html) {
if (!isOuterFooter || logoUrl || options.cardLayout) {
html = '<div class="' + footerClass + '">' + html;
//cardFooter
html += '</div>';
}
}
return html;
}
/**
* Generates the HTML markup for the action button.
* @param {Object} item - Item used to generate the action button.
* @param {string} text - Text of the action button.
* @param {string} serverId - ID of the server.
* @returns {string} HTML markup of the action button.
*/
function getTextActionButton(item, text, serverId) {
if (!text) {
text = itemHelper.getDisplayName(item);
}
if (layoutManager.tv) {
return text;
}
let html = '<button ' + itemShortcuts.getShortcutAttributesHtml(item, serverId) + ' type="button" class="itemAction textActionButton" title="' + text + '" data-action="link">';
html += text;
html += '</button>';
return html;
}
/**
* Generates HTML markup for the item count indicator.
* @param {Object} options - Options used to generate the item count.
* @param {Object} item - Item used to generate the item count.
* @returns {string} HTML markup for the item count indicator.
*/
function getItemCountsHtml(options, item) {
const counts = [];
let childText;
if (item.Type === 'Playlist') {
childText = '';
if (item.RunTimeTicks) {
let minutes = item.RunTimeTicks / 600000000;
minutes = minutes || 1;
childText += globalize.translate('ValueMinutes', Math.round(minutes));
} else {
childText += globalize.translate('ValueMinutes', 0);
}
counts.push(childText);
} else if (item.Type === 'Genre' || item.Type === 'Studio') {
if (item.MovieCount) {
childText = item.MovieCount === 1 ?
globalize.translate('ValueOneMovie') :
globalize.translate('ValueMovieCount', item.MovieCount);
counts.push(childText);
}
if (item.SeriesCount) {
childText = item.SeriesCount === 1 ?
globalize.translate('ValueOneSeries') :
globalize.translate('ValueSeriesCount', item.SeriesCount);
counts.push(childText);
}
if (item.EpisodeCount) {
childText = item.EpisodeCount === 1 ?
globalize.translate('ValueOneEpisode') :
globalize.translate('ValueEpisodeCount', item.EpisodeCount);
counts.push(childText);
}
} else if (item.Type === 'MusicGenre' || options.context === 'MusicArtist') {
if (item.AlbumCount) {
childText = item.AlbumCount === 1 ?
globalize.translate('ValueOneAlbum') :
globalize.translate('ValueAlbumCount', item.AlbumCount);
counts.push(childText);
}
if (item.SongCount) {
childText = item.SongCount === 1 ?
globalize.translate('ValueOneSong') :
globalize.translate('ValueSongCount', item.SongCount);
counts.push(childText);
}
if (item.MusicVideoCount) {
childText = item.MusicVideoCount === 1 ?
globalize.translate('ValueOneMusicVideo') :
globalize.translate('ValueMusicVideoCount', item.MusicVideoCount);
counts.push(childText);
}
} else if (item.Type === 'Series') {
childText = item.RecursiveItemCount === 1 ?
globalize.translate('ValueOneEpisode') :
globalize.translate('ValueEpisodeCount', item.RecursiveItemCount);
counts.push(childText);
}
return counts.join(', ');
}
let refreshIndicatorLoaded;
/**
* Imports the refresh indicator element.
*/
function importRefreshIndicator() {
if (!refreshIndicatorLoaded) {
refreshIndicatorLoaded = true;
/* eslint-disable-next-line no-unused-expressions */
import('emby-itemrefreshindicator');
}
}
/**
* Returns the default background class for a card based on a string.
* @param {string} str - Text used to generate the background class.
* @returns {string} CSS classes for default card backgrounds.
*/
export function getDefaultBackgroundClass(str) {
return 'defaultCardBackground defaultCardBackground' + getDefaultColorIndex(str);
}
/**
* Builds the HTML markup for an individual card.
* @param {number} index - Index of the card
* @param {object} item - Item used to generate the card.
* @param {object} apiClient - API client instance.
* @param {object} options - Options used to generate the card.
* @returns {string} HTML markup for the generated card.
*/
function buildCard(index, item, apiClient, options) {
let action = options.action || 'link';
if (action === 'play' && item.IsFolder) {
        // If this hard-coding is ever removed, make sure to test nested photo albums
action = 'link';
} else if (item.MediaType === 'Photo') {
action = 'play';
}
let shape = options.shape;
if (shape === 'mixed') {
shape = null;
const primaryImageAspectRatio = item.PrimaryImageAspectRatio;
if (primaryImageAspectRatio) {
if (primaryImageAspectRatio >= 1.33) {
shape = 'mixedBackdrop';
} else if (primaryImageAspectRatio > 0.71) {
shape = 'mixedSquare';
} else {
shape = 'mixedPortrait';
}
}
shape = shape || 'mixedSquare';
}
// TODO move card creation code to Card component
let className = 'card';
if (shape) {
className += ' ' + shape + 'Card';
}
if (options.cardCssClass) {
className += ' ' + options.cardCssClass;
}
if (options.cardClass) {
className += ' ' + options.cardClass;
}
if (layoutManager.desktop) {
className += ' card-hoverable';
}
if (layoutManager.tv) {
className += ' show-focus';
if (enableFocusTransform) {
className += ' show-animation';
}
}
const imgInfo = getCardImageUrl(item, apiClient, options, shape);
const imgUrl = imgInfo.imgUrl;
const blurhash = imgInfo.blurhash;
const forceName = imgInfo.forceName;
const showTitle = options.showTitle === 'auto' ? true : (options.showTitle || item.Type === 'PhotoAlbum' || item.Type === 'Folder');
const overlayText = options.overlayText;
let cardImageContainerClass = 'cardImageContainer';
const coveredImage = options.coverImage || imgInfo.coverImage;
if (coveredImage) {
cardImageContainerClass += ' coveredImage';
if (item.MediaType === 'Photo' || item.Type === 'PhotoAlbum' || item.Type === 'Folder' || item.ProgramInfo || item.Type === 'Program' || item.Type === 'Recording') {
cardImageContainerClass += ' coveredImage-noScale';
}
}
if (!imgUrl) {
cardImageContainerClass += ' ' + getDefaultBackgroundClass(item.Name);
}
let cardBoxClass = options.cardLayout ? 'cardBox visualCardBox' : 'cardBox';
let footerCssClass;
let progressHtml = indicators.getProgressBarHtml(item);
let innerCardFooter = '';
let footerOverlayed = false;
let logoUrl;
const logoHeight = 40;
if (options.showChannelLogo && item.ChannelPrimaryImageTag) {
logoUrl = apiClient.getScaledImageUrl(item.ChannelId, {
type: 'Primary',
height: logoHeight,
tag: item.ChannelPrimaryImageTag
});
} else if (options.showLogo && item.ParentLogoImageTag) {
logoUrl = apiClient.getScaledImageUrl(item.ParentLogoItemId, {
type: 'Logo',
height: logoHeight,
tag: item.ParentLogoImageTag
});
}
if (overlayText) {
logoUrl = null;
footerCssClass = progressHtml ? 'innerCardFooter fullInnerCardFooter' : 'innerCardFooter';
innerCardFooter += getCardFooterText(item, apiClient, options, showTitle, forceName, overlayText, imgUrl, footerCssClass, progressHtml, logoUrl, false);
footerOverlayed = true;
} else if (progressHtml) {
innerCardFooter += '<div class="innerCardFooter fullInnerCardFooter innerCardFooterClear">';
innerCardFooter += progressHtml;
innerCardFooter += '</div>';
progressHtml = '';
}
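    // Show a numeric badge when the item has more than one media version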
const mediaSourceCount = item.MediaSourceCount || 1;
if (mediaSourceCount > 1 && options.disableIndicators !== true) {
innerCardFooter += '<div class="mediaSourceIndicator">' + mediaSourceCount + '</div>';
}
let outerCardFooter = '';
if (!overlayText && !footerOverlayed) {
footerCssClass = options.cardLayout ? 'cardFooter' : 'cardFooter cardFooter-transparent';
if (logoUrl) {
footerCssClass += ' cardFooter-withlogo';
}
if (!options.cardLayout) {
logoUrl = null;
}
outerCardFooter = getCardFooterText(item, apiClient, options, showTitle, forceName, overlayText, imgUrl, footerCssClass, progressHtml, logoUrl, true);
}
if (outerCardFooter && !options.cardLayout) {
cardBoxClass += ' cardBox-bottompadded';
}
let overlayButtons = '';
if (layoutManager.mobile) {
let overlayPlayButton = options.overlayPlayButton;
if (overlayPlayButton == null && !options.overlayMoreButton && !options.overlayInfoButton && !options.cardLayout) {
overlayPlayButton = item.MediaType === 'Video';
}
const btnCssClass = 'cardOverlayButton cardOverlayButton-br itemAction';
if (options.centerPlayButton) {
overlayButtons += '<button is="paper-icon-button-light" class="' + btnCssClass + ' cardOverlayButton-centered" data-action="play"><span class="material-icons cardOverlayButtonIcon play_arrow"></span></button>';
}
if (overlayPlayButton && !item.IsPlaceHolder && (item.LocationType !== 'Virtual' || !item.MediaType || item.Type === 'Program') && item.Type !== 'Person') {
overlayButtons += '<button is="paper-icon-button-light" class="' + btnCssClass + '" data-action="play"><span class="material-icons cardOverlayButtonIcon play_arrow"></span></button>';
}
if (options.overlayMoreButton) {
overlayButtons += '<button is="paper-icon-button-light" class="' + btnCssClass + '" data-action="menu"><span class="material-icons cardOverlayButtonIcon more_vert"></span></button>';
}
}
if (options.showChildCountIndicator && item.ChildCount) {
className += ' groupedCard';
}
    // cardBox can be its own separate element if an outer footer is ever needed
let cardImageContainerOpen;
let cardImageContainerClose = '';
let cardBoxClose = '';
let cardScalableClose = '';
const cardContentClass = 'cardContent';
let blurhashAttrib = '';
if (blurhash && blurhash.length > 0) {
blurhashAttrib = 'data-blurhash="' + blurhash + '"';
}
if (layoutManager.tv) {
// Don't use the IMG tag with safari because it puts a white border around it
cardImageContainerOpen = imgUrl ? ('<div class="' + cardImageContainerClass + ' ' + cardContentClass + ' lazy" data-src="' + imgUrl + '" ' + blurhashAttrib + '>') : ('<div class="' + cardImageContainerClass + ' ' + cardContentClass + '">');
cardImageContainerClose = '</div>';
} else {
// Don't use the IMG tag with safari because it puts a white border around it
cardImageContainerOpen = imgUrl ? ('<button data-action="' + action + '" class="' + cardImageContainerClass + ' ' + cardContentClass + ' itemAction lazy" data-src="' + imgUrl + '" ' + blurhashAttrib + '>') : ('<button data-action="' + action + '" class="' + cardImageContainerClass + ' ' + cardContentClass + ' itemAction">');
cardImageContainerClose = '</button>';
}
const cardScalableClass = 'cardScalable';
cardImageContainerOpen = '<div class="' + cardBoxClass + '"><div class="' + cardScalableClass + '"><div class="cardPadder cardPadder-' + shape + '"></div>' + cardImageContainerOpen;
cardBoxClose = '</div>';
cardScalableClose = '</div>';
if (options.disableIndicators !== true) {
let indicatorsHtml = '';
if (options.missingIndicator !== false) {
indicatorsHtml += indicators.getMissingIndicator(item);
}
indicatorsHtml += indicators.getSyncIndicator(item);
indicatorsHtml += indicators.getTimerIndicator(item);
indicatorsHtml += indicators.getTypeIndicator(item);
if (options.showGroupCount) {
indicatorsHtml += indicators.getChildCountIndicatorHtml(item, {
minCount: 1
});
} else {
indicatorsHtml += indicators.getPlayedIndicatorHtml(item);
}
if (item.Type === 'CollectionFolder' || item.CollectionType) {
const refreshClass = item.RefreshProgress ? '' : ' class="hide"';
indicatorsHtml += '<div is="emby-itemrefreshindicator"' + refreshClass + ' data-progress="' + (item.RefreshProgress || 0) + '" data-status="' + item.RefreshStatus + '"></div>';
importRefreshIndicator();
}
if (indicatorsHtml) {
cardImageContainerOpen += '<div class="cardIndicators">' + indicatorsHtml + '</div>';
}
}
if (!imgUrl) {
cardImageContainerOpen += getDefaultText(item, options);
}
const tagName = (layoutManager.tv) && !overlayButtons ? 'button' : 'div';
const nameWithPrefix = (item.SortName || item.Name || '');
let prefix = nameWithPrefix.substring(0, Math.min(3, nameWithPrefix.length));
if (prefix) {
prefix = prefix.toUpperCase();
}
let timerAttributes = '';
if (item.TimerId) {
timerAttributes += ' data-timerid="' + item.TimerId + '"';
}
if (item.SeriesTimerId) {
timerAttributes += ' data-seriestimerid="' + item.SeriesTimerId + '"';
}
let actionAttribute;
if (tagName === 'button') {
className += ' itemAction';
actionAttribute = ' data-action="' + action + '"';
} else {
actionAttribute = '';
}
if (item.Type !== 'MusicAlbum' && item.Type !== 'MusicArtist' && item.Type !== 'Audio') {
className += ' card-withuserdata';
}
const positionTicksData = item.UserData && item.UserData.PlaybackPositionTicks ? (' data-positionticks="' + item.UserData.PlaybackPositionTicks + '"') : '';
const collectionIdData = options.collectionId ? (' data-collectionid="' + options.collectionId + '"') : '';
const playlistIdData = options.playlistId ? (' data-playlistid="' + options.playlistId + '"') : '';
const mediaTypeData = item.MediaType ? (' data-mediatype="' + item.MediaType + '"') : '';
const collectionTypeData = item.CollectionType ? (' data-collectiontype="' + item.CollectionType + '"') : '';
const channelIdData = item.ChannelId ? (' data-channelid="' + item.ChannelId + '"') : '';
const contextData = options.context ? (' data-context="' + options.context + '"') : '';
const parentIdData = options.parentId ? (' data-parentid="' + options.parentId + '"') : '';
let additionalCardContent = '';
if (layoutManager.desktop && !options.disableHoverMenu) {
additionalCardContent += getHoverMenuHtml(item, action, options);
}
return '<' + tagName + ' data-index="' + index + '"' + timerAttributes + actionAttribute + ' data-isfolder="' + (item.IsFolder || false) + '" data-serverid="' + (item.ServerId || options.serverId) + '" data-id="' + (item.Id || item.ItemId) + '" data-type="' + item.Type + '"' + mediaTypeData + collectionTypeData + channelIdData + positionTicksData + collectionIdData + playlistIdData + contextData + parentIdData + ' data-prefix="' + prefix + '" class="' + className + '">' + cardImageContainerOpen + innerCardFooter + cardImageContainerClose + overlayButtons + additionalCardContent + cardScalableClose + outerCardFooter + cardBoxClose + '</' + tagName + '>';
}
/**
* Generates HTML markup for the card overlay.
* @param {object} item - Item used to generate the card overlay.
* @param {string} action - Action assigned to the overlay.
* @param {Array} options - Card builder options.
* @returns {string} HTML markup of the card overlay.
*/
function getHoverMenuHtml(item, action, options) {
let html = '';
html += '<div class="cardOverlayContainer itemAction" data-action="' + action + '">';
const btnCssClass = 'cardOverlayButton cardOverlayButton-hover itemAction paper-icon-button-light';
if (playbackManager.canPlay(item)) {
html += '<button is="paper-icon-button-light" class="' + btnCssClass + ' cardOverlayFab-primary" data-action="resume"><span class="material-icons cardOverlayButtonIcon cardOverlayButtonIcon-hover play_arrow"></span></button>';
}
html += '<div class="cardOverlayButton-br flex">';
const userData = item.UserData || {};
if (itemHelper.canMarkPlayed(item)) {
/* eslint-disable-next-line no-unused-expressions */
import('emby-playstatebutton');
html += '<button is="emby-playstatebutton" type="button" data-action="none" class="' + btnCssClass + '" data-id="' + item.Id + '" data-serverid="' + item.ServerId + '" data-itemtype="' + item.Type + '" data-played="' + (userData.Played) + '"><span class="material-icons cardOverlayButtonIcon cardOverlayButtonIcon-hover check"></span></button>';
}
if (itemHelper.canRate(item)) {
const likes = userData.Likes == null ? '' : userData.Likes;
/* eslint-disable-next-line no-unused-expressions */
import('emby-ratingbutton');
html += '<button is="emby-ratingbutton" type="button" data-action="none" class="' + btnCssClass + '" data-id="' + item.Id + '" data-serverid="' + item.ServerId + '" data-itemtype="' + item.Type + '" data-likes="' + likes + '" data-isfavorite="' + (userData.IsFavorite) + '"><span class="material-icons cardOverlayButtonIcon cardOverlayButtonIcon-hover favorite"></span></button>';
}
html += '<button is="paper-icon-button-light" class="' + btnCssClass + '" data-action="menu"><span class="material-icons cardOverlayButtonIcon cardOverlayButtonIcon-hover more_vert"></span></button>';
html += '</div>';
html += '</div>';
return html;
}
/**
* Generates the text or icon used for default card backgrounds.
* @param {object} item - Item used to generate the card overlay.
* @param {object} options - Options used to generate the card overlay.
* @returns {string} HTML markup of the card overlay.
*/
export function getDefaultText(item, options) {
if (item.CollectionType) {
return '<span class="cardImageIcon material-icons ' + imageHelper.getLibraryIcon(item.CollectionType) + '"></span>';
}
switch (item.Type) {
case 'MusicAlbum':
return '<span class="cardImageIcon material-icons album"></span>';
case 'MusicArtist':
case 'Person':
return '<span class="cardImageIcon material-icons person"></span>';
case 'Audio':
return '<span class="cardImageIcon material-icons audiotrack"></span>';
case 'Movie':
return '<span class="cardImageIcon material-icons movie"></span>';
case 'Series':
return '<span class="cardImageIcon material-icons tv"></span>';
case 'Book':
return '<span class="cardImageIcon material-icons book"></span>';
case 'Folder':
return '<span class="cardImageIcon material-icons folder"></span>';
case 'BoxSet':
return '<span class="cardImageIcon material-icons collections"></span>';
case 'Playlist':
return '<span class="cardImageIcon material-icons view_list"></span>';
case 'PhotoAlbum':
return '<span class="cardImageIcon material-icons photo_album"></span>';
}
if (options && options.defaultCardImageIcon) {
return '<span class="cardImageIcon material-icons ' + options.defaultCardImageIcon + '"></span>';
}
const defaultName = isUsingLiveTvNaming(item) ? item.Name : itemHelper.getDisplayName(item);
return '<div class="cardText cardDefaultText">' + defaultName + '</div>';
}
/**
* Builds a set of cards and inserts them into the page.
* @param {Array} items - Array of items used to build the cards.
 * @param {Object} options - Options of the cards to build.
*/
export function buildCards(items, options) {
// Abort if the container has been disposed
if (!document.body.contains(options.itemsContainer)) {
return;
}
if (options.parentContainer) {
if (items.length) {
options.parentContainer.classList.remove('hide');
} else {
options.parentContainer.classList.add('hide');
return;
}
}
const html = buildCardsHtmlInternal(items, options);
if (html) {
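        // Only touch the DOM when the markup actually changed; the markup is cached for reuse only on small result sets (under 50 items)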
if (options.itemsContainer.cardBuilderHtml !== html) {
options.itemsContainer.innerHTML = html;
if (items.length < 50) {
options.itemsContainer.cardBuilderHtml = html;
} else {
options.itemsContainer.cardBuilderHtml = null;
}
}
imageLoader.lazyChildren(options.itemsContainer);
} else {
options.itemsContainer.innerHTML = html;
options.itemsContainer.cardBuilderHtml = null;
}
if (options.autoFocus) {
focusManager.autoFocus(options.itemsContainer, true);
}
}
/**
* Ensures the indicators for a card exist and creates them if they don't exist.
* @param {HTMLDivElement} card - DOM element of the card.
* @param {HTMLDivElement} indicatorsElem - DOM element of the indicators.
* @returns {HTMLDivElement} - DOM element of the indicators.
*/
function ensureIndicators(card, indicatorsElem) {
if (indicatorsElem) {
return indicatorsElem;
}
indicatorsElem = card.querySelector('.cardIndicators');
if (!indicatorsElem) {
const cardImageContainer = card.querySelector('.cardImageContainer');
indicatorsElem = document.createElement('div');
indicatorsElem.classList.add('cardIndicators');
cardImageContainer.appendChild(indicatorsElem);
}
return indicatorsElem;
}
/**
* Adds user data to the card such as progress indicators and played status.
* @param {HTMLDivElement} card - DOM element of the card.
* @param {Object} userData - User data to apply to the card.
*/
function updateUserData(card, userData) {
const type = card.getAttribute('data-type');
const enableCountIndicator = type === 'Series' || type === 'BoxSet' || type === 'Season';
let indicatorsElem = null;
let playedIndicator = null;
let countIndicator = null;
let itemProgressBar = null;
if (userData.Played) {
playedIndicator = card.querySelector('.playedIndicator');
if (!playedIndicator) {
playedIndicator = document.createElement('div');
playedIndicator.classList.add('playedIndicator');
playedIndicator.classList.add('indicator');
indicatorsElem = ensureIndicators(card, indicatorsElem);
indicatorsElem.appendChild(playedIndicator);
}
playedIndicator.innerHTML = '<span class="material-icons indicatorIcon check"></span>';
} else {
playedIndicator = card.querySelector('.playedIndicator');
if (playedIndicator) {
playedIndicator.parentNode.removeChild(playedIndicator);
}
}
if (userData.UnplayedItemCount) {
countIndicator = card.querySelector('.countIndicator');
if (!countIndicator) {
countIndicator = document.createElement('div');
countIndicator.classList.add('countIndicator');
indicatorsElem = ensureIndicators(card, indicatorsElem);
indicatorsElem.appendChild(countIndicator);
}
countIndicator.innerHTML = userData.UnplayedItemCount;
} else if (enableCountIndicator) {
countIndicator = card.querySelector('.countIndicator');
if (countIndicator) {
countIndicator.parentNode.removeChild(countIndicator);
}
}
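    // Rebuild the resume progress bar from the updated user data, creating the inner card footer if it does not exist yet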
const progressHtml = indicators.getProgressBarHtml({
Type: type,
UserData: userData,
MediaType: 'Video'
});
if (progressHtml) {
itemProgressBar = card.querySelector('.itemProgressBar');
if (!itemProgressBar) {
itemProgressBar = document.createElement('div');
itemProgressBar.classList.add('itemProgressBar');
let innerCardFooter = card.querySelector('.innerCardFooter');
if (!innerCardFooter) {
innerCardFooter = document.createElement('div');
innerCardFooter.classList.add('innerCardFooter');
const cardImageContainer = card.querySelector('.cardImageContainer');
cardImageContainer.appendChild(innerCardFooter);
}
innerCardFooter.appendChild(itemProgressBar);
}
itemProgressBar.innerHTML = progressHtml;
} else {
itemProgressBar = card.querySelector('.itemProgressBar');
if (itemProgressBar) {
itemProgressBar.parentNode.removeChild(itemProgressBar);
}
}
}
/**
* Handles when user data has changed.
* @param {Object} userData - User data to apply to the card.
* @param {HTMLElement} scope - DOM element to use as a scope when selecting cards.
*/
export function onUserDataChanged(userData, scope) {
const cards = (scope || document.body).querySelectorAll('.card-withuserdata[data-id="' + userData.ItemId + '"]');
for (let i = 0, length = cards.length; i < length; i++) {
updateUserData(cards[i], userData);
}
}
/**
* Handles when a timer has been created.
* @param {string} programId - ID of the program.
* @param {string} newTimerId - ID of the new timer.
* @param {HTMLElement} itemsContainer - DOM element of the itemsContainer.
*/
export function onTimerCreated(programId, newTimerId, itemsContainer) {
const cells = itemsContainer.querySelectorAll('.card[data-id="' + programId + '"]');
for (let i = 0, length = cells.length; i < length; i++) {
const cell = cells[i];
const icon = cell.querySelector('.timerIndicator');
if (!icon) {
const indicatorsElem = ensureIndicators(cell);
indicatorsElem.insertAdjacentHTML('beforeend', '<span class="material-icons timerIndicator indicatorIcon fiber_manual_record"></span>');
}
cell.setAttribute('data-timerid', newTimerId);
}
}
/**
* Handles when a timer has been cancelled.
* @param {string} timerId - ID of the cancelled timer.
* @param {HTMLElement} itemsContainer - DOM element of the itemsContainer.
*/
export function onTimerCancelled(timerId, itemsContainer) {
const cells = itemsContainer.querySelectorAll('.card[data-timerid="' + timerId + '"]');
for (let i = 0; i < cells.length; i++) {
const cell = cells[i];
const icon = cell.querySelector('.timerIndicator');
if (icon) {
icon.parentNode.removeChild(icon);
}
cell.removeAttribute('data-timerid');
}
}
/**
* Handles when a series timer has been cancelled.
* @param {string} cancelledTimerId - ID of the cancelled timer.
* @param {HTMLElement} itemsContainer - DOM element of the itemsContainer.
*/
export function onSeriesTimerCancelled(cancelledTimerId, itemsContainer) {
const cells = itemsContainer.querySelectorAll('.card[data-seriestimerid="' + cancelledTimerId + '"]');
for (let i = 0; i < cells.length; i++) {
const cell = cells[i];
const icon = cell.querySelector('.timerIndicator');
if (icon) {
icon.parentNode.removeChild(icon);
}
cell.removeAttribute('data-seriestimerid');
}
}
/* eslint-enable indent */
export default {
getCardsHtml: getCardsHtml,
getDefaultBackgroundClass: getDefaultBackgroundClass,
getDefaultText: getDefaultText,
buildCards: buildCards,
onUserDataChanged: onUserDataChanged,
onTimerCreated: onTimerCreated,
onTimerCancelled: onTimerCancelled,
onSeriesTimerCancelled: onSeriesTimerCancelled
};
| 1 | 17,438 | I don't really follow what is happening here, but it looks like this _could_ conflict with the logic on the lines above... should this be an `else if`? | jellyfin-jellyfin-web | js |
@@ -97,10 +97,10 @@ namespace Microsoft.VisualStudio.TestPlatform.ObjectModel.Navigation
out int startLineNumber,
out int endLineNumber)
{
- var startPoint = methodDebugDefinition.GetSequencePoints().OrderBy(s => s.StartLine).FirstOrDefault();
+ var startPoint = methodDebugDefinition.GetSequencePoints().OrderBy(s => s.StartLine).FirstOrDefault(s => s.IsHidden == false);
startLineNumber = startPoint.StartLine;
var endPoint =
- methodDebugDefinition.GetSequencePoints().OrderByDescending(s => s.StartLine).FirstOrDefault();
+ methodDebugDefinition.GetSequencePoints().OrderByDescending(s => s.StartLine).FirstOrDefault(s => s.IsHidden == false);
endLineNumber = endPoint.StartLine;
}
| 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace Microsoft.VisualStudio.TestPlatform.ObjectModel.Navigation
{
#if !NET46
using System;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Reflection.Metadata;
using System.Reflection.Metadata.Ecma335;
/// <summary>
/// The portable pdb reader.
/// </summary>
internal class PortablePdbReader : IDisposable
{
/// <summary>
        /// Used to get the method token
/// </summary>
private static readonly PropertyInfo MethodInfoMethodTokenProperty =
typeof(MethodInfo).GetProperty("MetadataToken");
/// <summary>
/// Metadata reader provider from portable pdb stream
        /// To get metadata reader
/// </summary>
private MetadataReaderProvider provider;
/// <summary>
/// Metadata reader from portable pdb stream
        /// To get method debug info from method info
/// </summary>
private MetadataReader reader;
/// <summary>
/// Initializes a new instance of the <see cref="PortablePdbReader"/> class.
/// </summary>
/// <param name="stream">
/// Portable pdb stream
/// </param>
/// <exception cref="Exception">
        /// Raises an Exception if the given stream is not a portable pdb stream
/// </exception>
public PortablePdbReader(Stream stream)
{
if (!IsPortable(stream))
{
throw new Exception("Given stream is not portable stream");
}
this.provider = MetadataReaderProvider.FromPortablePdbStream(stream);
this.reader = this.provider.GetMetadataReader();
}
/// <summary>
/// Dispose Metadata reader
/// </summary>
public void Dispose()
{
this.provider?.Dispose();
this.provider = null;
this.reader = null;
}
/// <summary>
/// Gets dia navigation data from Metadata reader
/// </summary>
/// <param name="methodInfo">
/// Method info.
/// </param>
/// <returns>
/// The <see cref="DiaNavigationData"/>.
/// </returns>
public DiaNavigationData GetDiaNavigationData(MethodInfo methodInfo)
{
if (methodInfo == null)
{
return null;
}
var handle = GetMethodDebugInformationHandle(methodInfo);
return this.GetDiaNavigationData(handle);
}
internal static MethodDebugInformationHandle GetMethodDebugInformationHandle(MethodInfo methodInfo)
{
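            // The MethodInfo's metadata token identifies a MethodDef row; converting that handle gives the matching row in the portable PDB's MethodDebugInformation table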
var methodToken = (int)MethodInfoMethodTokenProperty.GetValue(methodInfo);
var handle = ((MethodDefinitionHandle)MetadataTokens.Handle(methodToken)).ToDebugInformationHandle();
return handle;
}
private static void GetMethodStartAndEndLineNumber(
MethodDebugInformation methodDebugDefinition,
out int startLineNumber,
out int endLineNumber)
{
var startPoint = methodDebugDefinition.GetSequencePoints().OrderBy(s => s.StartLine).FirstOrDefault();
startLineNumber = startPoint.StartLine;
var endPoint =
methodDebugDefinition.GetSequencePoints().OrderByDescending(s => s.StartLine).FirstOrDefault();
endLineNumber = endPoint.StartLine;
}
/// <summary>
        /// Checks whether the given stream is from a portable pdb or not
/// </summary>
/// <param name="stream">
/// Stream.
/// </param>
/// <returns>
/// The <see cref="bool"/>.
/// </returns>
private static bool IsPortable(Stream stream)
{
// First four bytes should be 'BSJB'
var result = (stream.ReadByte() == 'B') && (stream.ReadByte() == 'S') && (stream.ReadByte() == 'J')
&& (stream.ReadByte() == 'B');
stream.Position = 0;
return result;
}
private DiaNavigationData GetDiaNavigationData(MethodDebugInformationHandle handle)
{
if (this.reader == null)
{
throw new ObjectDisposedException(nameof(PortablePdbReader));
}
DiaNavigationData diaNavigationData = null;
try
{
var methodDebugDefinition = this.reader.GetMethodDebugInformation(handle);
var fileName = this.GetMethodFileName(methodDebugDefinition);
int startLineNumber, endLineNumber;
GetMethodStartAndEndLineNumber(methodDebugDefinition, out startLineNumber, out endLineNumber);
diaNavigationData = new DiaNavigationData(fileName, startLineNumber, endLineNumber);
}
catch (BadImageFormatException exception)
{
EqtTrace.Error("failed to get dia navigation data: {0}", exception);
}
return diaNavigationData;
}
private string GetMethodFileName(MethodDebugInformation methodDebugDefinition)
{
var fileName = string.Empty;
if (!methodDebugDefinition.Document.IsNil)
{
var document = this.reader.GetDocument(methodDebugDefinition.Document);
fileName = this.reader.GetString(document.Name);
}
return fileName;
}
}
#endif
}
| 1 | 11,536 | `s => s.IsHidden == false` What's the purpose of adding this? | microsoft-vstest | .cs |
@@ -45,6 +45,7 @@ const (
optionNameGatewayMode = "gateway-mode"
optionNameClefSignerEnable = "clef-signer-enable"
optionNameClefSignerEndpoint = "clef-signer-endpoint"
+ optionNameClefSignerAddress = "clef-signer-address"
optionNameSwapEndpoint = "swap-endpoint"
optionNameSwapFactoryAddress = "swap-factory-address"
optionNameSwapInitialDeposit = "swap-initial-deposit" | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
optionNameDataDir = "data-dir"
optionNameDBCapacity = "db-capacity"
optionNamePassword = "password"
optionNamePasswordFile = "password-file"
optionNameAPIAddr = "api-addr"
optionNameP2PAddr = "p2p-addr"
optionNameNATAddr = "nat-addr"
optionNameP2PWSEnable = "p2p-ws-enable"
optionNameP2PQUICEnable = "p2p-quic-enable"
optionNameDebugAPIEnable = "debug-api-enable"
optionNameDebugAPIAddr = "debug-api-addr"
optionNameBootnodes = "bootnode"
optionNameNetworkID = "network-id"
optionWelcomeMessage = "welcome-message"
optionCORSAllowedOrigins = "cors-allowed-origins"
optionNameStandalone = "standalone"
optionNameTracingEnabled = "tracing-enable"
optionNameTracingEndpoint = "tracing-endpoint"
optionNameTracingServiceName = "tracing-service-name"
optionNameVerbosity = "verbosity"
optionNameGlobalPinningEnabled = "global-pinning-enable"
optionNamePaymentThreshold = "payment-threshold"
optionNamePaymentTolerance = "payment-tolerance"
optionNamePaymentEarly = "payment-early"
optionNameResolverEndpoints = "resolver-options"
optionNameGatewayMode = "gateway-mode"
optionNameClefSignerEnable = "clef-signer-enable"
optionNameClefSignerEndpoint = "clef-signer-endpoint"
optionNameSwapEndpoint = "swap-endpoint"
optionNameSwapFactoryAddress = "swap-factory-address"
optionNameSwapInitialDeposit = "swap-initial-deposit"
optionNameSwapEnable = "swap-enable"
)
func init() {
cobra.EnableCommandSorting = false
}
type command struct {
root *cobra.Command
config *viper.Viper
passwordReader passwordReader
cfgFile string
homeDir string
}
type option func(*command)
func newCommand(opts ...option) (c *command, err error) {
c = &command{
root: &cobra.Command{
Use: "bee",
Short: "Ethereum Swarm Bee",
SilenceErrors: true,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
return c.initConfig()
},
},
}
for _, o := range opts {
o(c)
}
if c.passwordReader == nil {
c.passwordReader = new(stdInPasswordReader)
}
// Find home directory.
if err := c.setHomeDir(); err != nil {
return nil, err
}
c.initGlobalFlags()
if err := c.initStartCmd(); err != nil {
return nil, err
}
if err := c.initInitCmd(); err != nil {
return nil, err
}
if err := c.initDeployCmd(); err != nil {
return nil, err
}
c.initVersionCmd()
if err := c.initConfigurateOptionsCmd(); err != nil {
return nil, err
}
return c, nil
}
func (c *command) Execute() (err error) {
return c.root.Execute()
}
// Execute parses command line arguments and runs appropriate functions.
func Execute() (err error) {
c, err := newCommand()
if err != nil {
return err
}
return c.Execute()
}
func (c *command) initGlobalFlags() {
globalFlags := c.root.PersistentFlags()
globalFlags.StringVar(&c.cfgFile, "config", "", "config file (default is $HOME/.bee.yaml)")
}
func (c *command) initConfig() (err error) {
config := viper.New()
configName := ".bee"
if c.cfgFile != "" {
// Use config file from the flag.
config.SetConfigFile(c.cfgFile)
} else {
// Search config in home directory with name ".bee" (without extension).
config.AddConfigPath(c.homeDir)
config.SetConfigName(configName)
}
// Environment
config.SetEnvPrefix("bee")
config.AutomaticEnv() // read in environment variables that match
config.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
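	// e.g. the flag "clef-signer-endpoint" can be set with the BEE_CLEF_SIGNER_ENDPOINT environment variable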
if c.homeDir != "" && c.cfgFile == "" {
c.cfgFile = filepath.Join(c.homeDir, configName+".yaml")
}
// If a config file is found, read it in.
if err := config.ReadInConfig(); err != nil {
var e viper.ConfigFileNotFoundError
if !errors.As(err, &e) {
return err
}
}
c.config = config
return nil
}
func (c *command) setHomeDir() (err error) {
if c.homeDir != "" {
return
}
dir, err := os.UserHomeDir()
if err != nil {
return err
}
c.homeDir = dir
return nil
}
func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameDataDir, filepath.Join(c.homeDir, ".bee"), "data directory")
cmd.Flags().Uint64(optionNameDBCapacity, 5000000, fmt.Sprintf("db capacity in chunks, multiply by %d to get approximate capacity in bytes", swarm.ChunkSize))
cmd.Flags().String(optionNamePassword, "", "password for decrypting keys")
cmd.Flags().String(optionNamePasswordFile, "", "path to a file that contains password for decrypting keys")
cmd.Flags().String(optionNameAPIAddr, ":1633", "HTTP API listen address")
cmd.Flags().String(optionNameP2PAddr, ":1634", "P2P listen address")
cmd.Flags().String(optionNameNATAddr, "", "NAT exposed address")
cmd.Flags().Bool(optionNameP2PWSEnable, false, "enable P2P WebSocket transport")
cmd.Flags().Bool(optionNameP2PQUICEnable, false, "enable P2P QUIC transport")
cmd.Flags().StringSlice(optionNameBootnodes, []string{"/dnsaddr/bootnode.ethswarm.org"}, "initial nodes to connect to")
cmd.Flags().Bool(optionNameDebugAPIEnable, false, "enable debug HTTP API")
cmd.Flags().String(optionNameDebugAPIAddr, ":1635", "debug HTTP API listen address")
cmd.Flags().Uint64(optionNameNetworkID, 1, "ID of the Swarm network")
cmd.Flags().StringSlice(optionCORSAllowedOrigins, []string{}, "origins with CORS headers enabled")
cmd.Flags().Bool(optionNameStandalone, false, "whether we want the node to start with no listen addresses for p2p")
cmd.Flags().Bool(optionNameTracingEnabled, false, "enable tracing")
cmd.Flags().String(optionNameTracingEndpoint, "127.0.0.1:6831", "endpoint to send tracing data")
cmd.Flags().String(optionNameTracingServiceName, "bee", "service name identifier for tracing")
cmd.Flags().String(optionNameVerbosity, "info", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
cmd.Flags().String(optionWelcomeMessage, "", "send a welcome message string during handshakes")
cmd.Flags().Bool(optionNameGlobalPinningEnabled, false, "enable global pinning")
cmd.Flags().String(optionNamePaymentThreshold, "10000000000000", "threshold in BZZ where you expect to get paid from your peers")
cmd.Flags().String(optionNamePaymentTolerance, "50000000000000", "excess debt above payment threshold in BZZ where you disconnect from your peer")
cmd.Flags().String(optionNamePaymentEarly, "1000000000000", "amount in BZZ below the peers payment threshold when we initiate settlement")
cmd.Flags().StringSlice(optionNameResolverEndpoints, []string{}, "ENS compatible API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url")
cmd.Flags().Bool(optionNameGatewayMode, false, "disable a set of sensitive features in the api")
cmd.Flags().Bool(optionNameClefSignerEnable, false, "enable clef signer")
cmd.Flags().String(optionNameClefSignerEndpoint, "", "clef signer endpoint")
cmd.Flags().String(optionNameSwapEndpoint, "http://localhost:8545", "swap ethereum blockchain endpoint")
cmd.Flags().String(optionNameSwapFactoryAddress, "", "swap factory address")
cmd.Flags().String(optionNameSwapInitialDeposit, "100000000000000000", "initial deposit if deploying a new chequebook")
cmd.Flags().Bool(optionNameSwapEnable, true, "enable swap")
}
| 1 | 14,003 | I would name this `clef-ethereum-address`. We already have a bunch of addresses in Bee, and people might wrongly think that this is yet another address | ethersphere-bee | go |
@@ -202,6 +202,7 @@ class TestCase(unittest.TestCase):
# ofile.write(mb)
# ofile.close()
+ #@unittest.skip
def test6ChangeBondLength(self):
m = Chem.MolFromSmiles('CC')
rdDepictor.Compute2DCoords(m) | 1 | # Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
#
# $Id: testDepictor.py 2112 2012-07-02 09:47:45Z glandrum $
#
# pylint:disable=E1101,C0111,C0103,R0904
import unittest
import os
import sys
import numpy as np
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit import Geometry
from rdkit import RDConfig
from rdkit.Chem.ChemUtils import AlignDepict
def feq(v1, v2, tol2=1e-4):
return abs(v1 - v2) <= tol2
def ptEq(pt1, pt2, tol=1e-4):
return feq(pt1.x, pt2.x, tol) and feq(pt1.y, pt2.y, tol) and feq(pt1.z, pt2.z, tol)
def getDistMat(mol):
conf = mol.GetConformer()
nat = mol.GetNumAtoms()
nl = nat * (nat - 1) // 2
res = np.zeros(nl, np.float)
for i in range(1, nat):
pi = conf.GetAtomPosition(i)
idx = i * (i - 1) // 2
for j in range(i):
pj = conf.GetAtomPosition(j)
pj -= pi
res[idx + j] = pj.Length()
return res
def compareCoords(m, molFile):
mo = Chem.MolFromMolFile(molFile)
co = mo.GetConformer()
ci = m.GetConformer()
nat = m.GetNumAtoms()
if (nat != mo.GetNumAtoms()):
return 0
for i in range(nat):
pos = ci.GetAtomPosition(i)
opos = co.GetAtomPosition(i)
if not ptEq(pos, opos):
return 0
return 1
def compareWithOld(smilesFile, sdFile):
smiSup = Chem.SmilesMolSupplier(smilesFile, ",", 0, -1)
sdsup = Chem.SDMolSupplier(sdFile)
im = 0
for mol in smiSup:
omol = sdsup[im]
rdDepictor.Compute2DCoords(mol, canonOrient=False)
conf = mol.GetConformer()
oconf = omol.GetConformer()
nat = mol.GetNumAtoms()
for i in range(nat):
pos = conf.GetAtomPosition(i)
opos = oconf.GetAtomPosition(i)
if not ptEq(pos, opos):
print(Chem.MolToMolBlock(omol), file=sys.stderr)
print('> <Failed>\n%d\n' % i, file=sys.stderr)
print("$$$$", file=sys.stderr)
print(Chem.MolToMolBlock(mol), file=sys.stderr)
print('> <Failed>\n%d\n' % i, file=sys.stderr)
print("$$$$", file=sys.stderr)
return 0
im += 1
return 1
def stereoCompare(smilesFile):
smiSup = Chem.SmilesMolSupplier(smilesFile, ",", 0, -1)
for mol in smiSup:
rdDepictor.Compute2DCoords(mol, canonOrient=False)
mb = Chem.MolToMolBlock(mol)
nmol = Chem.MolFromMolBlock(mb)
matches = nmol.GetSubstructMatches(mol, False)
dbnds = [x for x in mol.GetBonds() if (x.GetBondType() == Chem.BondType.DOUBLE and
x.GetStereo() > Chem.BondStereo.STEREOANY) ]
ok = True
for match in matches:
for bnd in dbnds:
obnd = nmol.GetBondBetweenAtoms(
match[bnd.GetBeginAtomIdx()], match[bnd.GetEndAtomIdx()])
assert (obnd.GetBondType() == Chem.BondType.DOUBLE)
if ok:
break
if not ok:
print(Chem.MolToMolBlock(mol), file=sys.stderr)
print("$$$$", file=sys.stderr)
return 0
return 1
class TestCase(unittest.TestCase):
def _test0First200(self):
# this test is disabled because it's not particularly useful and
# causes problems every time anything changes.
fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Depictor', 'test_data',
'first_200.tpsa.csv')
#smiSup = Chem.SmilesMolSupplier(fileN, ",", 0, -1)
ofile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Depictor', 'test_data',
'first_200.python.sdf')
self.assertTrue(compareWithOld(fileN, ofile))
def test1CisTrans(self):
fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Depictor', 'test_data',
"cis_trans_cases.csv")
self.assertTrue(stereoCompare(fileN))
def test2Coords(self):
m1 = Chem.MolFromSmiles('C1CCC1CC')
coordMap = {0: Geometry.Point2D(0, 0),
1: Geometry.Point2D(1.5, 0),
2: Geometry.Point2D(1.5, 1.5),
3: Geometry.Point2D(0, 1.5)}
rdDepictor.Compute2DCoords(m1, coordMap=coordMap)
conf = m1.GetConformer(0)
for i in range(4):
self.assertTrue(
ptEq(conf.GetAtomPosition(i), Geometry.Point3D(coordMap[i].x, coordMap[i].y, 0.0)))
m1 = Chem.MolFromSmiles('CCC')
try:
rdDepictor.Compute2DCoords(m1, coordMap=coordMap)
ok = 0
except ValueError:
ok = 1
self.assertTrue(ok)
def test3IssueSF1526844(self):
t = Chem.MolFromSmiles('c1nc(N)ccc1')
rdDepictor.Compute2DCoords(t, canonOrient=False)
m2 = Chem.MolFromSmiles('c1nc(NC=O)ccc1')
AlignDepict.AlignDepict(m2, t)
expected = [Geometry.Point3D(1.5, 0.0, 0.0), Geometry.Point3D(0.75, -1.299, 0.0),
Geometry.Point3D(-0.75, -1.299, 0.0), Geometry.Point3D(-1.5, -2.5981, 0.0),
Geometry.Point3D(-3.0, -2.5981, 0.0), Geometry.Point3D(-3.75, -3.8971, 0.0),
Geometry.Point3D(-1.5, 0.0, 0.0), Geometry.Point3D(-0.75, 1.2990, 0.0),
Geometry.Point3D(0.75, 1.2990, 0.0)]
nat = m2.GetNumAtoms()
conf = m2.GetConformer()
for i in range(nat):
pos = conf.GetAtomPosition(i)
self.assertTrue(ptEq(pos, expected[i], 0.001))
def test4SamplingSpread(self):
mol = Chem.MolFromMolFile(
os.path.join(RDConfig.RDBaseDir, 'Code/GraphMol/Depictor', 'test_data/7UPJ_xtal.mol'))
# default mode
rdDepictor.Compute2DCoords(mol, canonOrient=False)
self.assertTrue(
compareCoords(mol, os.path.join(RDConfig.RDBaseDir, 'Code/GraphMol/Depictor',
'test_data/7UPJ_default.mol')))
# spread the structure as much as possible by sampling
rdDepictor.Compute2DCoords(mol, canonOrient=False, nFlipsPerSample=3, nSample=100,
sampleSeed=100, permuteDeg4Nodes=1)
self.assertTrue(
compareCoords(mol, os.path.join(RDConfig.RDBaseDir, 'Code/GraphMol/Depictor',
'test_data/7UPJ_spread.mol')))
def test5SamplingMimic3D(self):
mol = Chem.MolFromMolFile(
os.path.join(RDConfig.RDBaseDir, 'Code/GraphMol/Depictor', 'test_data/7UPJ_xtal.mol'))
dmat3D = getDistMat(mol)
# now mimic the coordinate with a very small weight
rdDepictor.Compute2DCoordsMimicDistmat(mol, dmat3D, weightDistMat=0.001)
self.assertTrue(
compareCoords(mol, os.path.join(RDConfig.RDBaseDir, 'Code/GraphMol/Depictor',
'test_data/7UPJ_mimic3D_1.mol')))
# now mimic the coordinate with a very small weight
rdDepictor.Compute2DCoordsMimicDistmat(mol, dmat3D, weightDistMat=0.003)
self.assertTrue(
compareCoords(mol, os.path.join(RDConfig.RDBaseDir, 'Code/GraphMol/Depictor',
'test_data/7UPJ_mimic3D_2.mol')))
#mb = Chem.MolToMolBlock(mol)
#ofile = open('../test_data/7UPJ_mimic3D_2.mol', 'w')
# ofile.write(mb)
# ofile.close()
def test6ChangeBondLength(self):
m = Chem.MolFromSmiles('CC')
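    # The default depiction bond length is 1.5, so the two ethane carbons land at x = -0.75 and +0.75; with bondLength=1.0 they move to -0.5 and +0.5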
rdDepictor.Compute2DCoords(m)
conf = m.GetConformer()
self.assertAlmostEqual(conf.GetAtomPosition(0).x, -0.750, 3)
self.assertAlmostEqual(conf.GetAtomPosition(1).x, 0.750, 3)
rdDepictor.Compute2DCoords(m, bondLength=1.0)
conf = m.GetConformer()
self.assertAlmostEqual(conf.GetAtomPosition(0).x, -0.500, 3)
self.assertAlmostEqual(conf.GetAtomPosition(1).x, 0.500, 3)
rdDepictor.Compute2DCoords(m)
conf = m.GetConformer()
self.assertAlmostEqual(conf.GetAtomPosition(0).x, -0.750, 3)
self.assertAlmostEqual(conf.GetAtomPosition(1).x, 0.750, 3)
def testConstrainedCoords(self):
templ = Chem.MolFromSmiles('c1nccc2n1ccc2')
rdDepictor.Compute2DCoords(templ)
m1 = Chem.MolFromSmiles('c1cccc2ncn3cccc3c21')
rdDepictor.GenerateDepictionMatching2DStructure(m1, templ)
m2 = Chem.MolFromSmiles('c1cc(Cl)cc2ncn3cccc3c21')
rdDepictor.Compute2DCoords(m2)
refPatt1 = Chem.MolFromSmarts('*1****2*1***2')
rdDepictor.GenerateDepictionMatching2DStructure(m2, templ, -1, refPatt1)
fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Depictor', 'test_data',
'1XP0_ligand.sdf')
xp0_lig = Chem.MolFromMolFile(fileN)
xp0_lig_2d = Chem.Mol(xp0_lig)
rdDepictor.GenerateDepictionMatching3DStructure(xp0_lig_2d, xp0_lig)
xp0_ref = Chem.MolFromSmarts('[#6]1~[#7][#6]~[#6]2[#6](=[#8])[#7]~[#6](c3ccccc3)[#7][#7]12')
rdDepictor.GenerateDepictionMatching3DStructure(xp0_lig_2d, xp0_lig, -1, xp0_ref)
if __name__ == '__main__':
rdDepictor.SetPreferCoordGen(False)
unittest.main()
| 1 | 22,621 | you can just remove this | rdkit-rdkit | cpp |
@@ -297,6 +297,13 @@ func buildUpgradeTask(kind, name, openebsNamespace string) *apis.UpgradeTask {
PoolName: name,
},
}
+ case "storagePoolClaim":
+ utaskObj.Name = "upgrade-cstor-pool-" + name
+ utaskObj.Spec.ResourceSpec = apis.ResourceSpec{
+ StoragePoolClaim: &apis.StoragePoolClaim{
+ SPCName: name,
+ },
+ }
case "cstorVolume":
utaskObj.Name = "upgrade-cstor-volume-" + name
utaskObj.Spec.ResourceSpec = apis.ResourceSpec{ | 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"strings"
"text/template"
"time"
apis "github.com/openebs/maya/pkg/apis/openebs.io/upgrade/v1alpha1"
errors "github.com/openebs/maya/pkg/errors/v1alpha1"
templates "github.com/openebs/maya/pkg/upgrade/templates/v1"
utask "github.com/openebs/maya/pkg/upgrade/v1alpha2"
retry "github.com/openebs/maya/pkg/util/retry"
appsv1 "k8s.io/api/apps/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
)
func getDeployment(labels, namespace string) (*appsv1.Deployment, error) {
deployList, err := deployClient.WithNamespace(namespace).List(
&metav1.ListOptions{
LabelSelector: labels,
})
if err != nil {
return nil, err
}
if len(deployList.Items) == 0 {
return nil, errors.Errorf("no deployments found for %s in %s", labels, namespace)
}
return &(deployList.Items[0]), nil
}
func getOpenEBSVersion(d *appsv1.Deployment) (string, error) {
if d.Labels["openebs.io/version"] == "" {
return "", errors.Errorf("missing openebs version for %s", d.Name)
}
return d.Labels["openebs.io/version"], nil
}
func patchDelpoyment(
deployName,
namespace string,
pt types.PatchType,
data []byte,
) error {
_, err := deployClient.WithNamespace(namespace).Patch(
deployName,
pt,
data,
)
if err != nil {
return err
}
err = retry.
Times(60).
Wait(5 * time.Second).
Try(func(attempt uint) error {
rolloutStatus, err1 := deployClient.WithNamespace(namespace).
RolloutStatus(deployName)
if err1 != nil {
return err1
}
if !rolloutStatus.IsRolledout {
return errors.Errorf("failed to rollout because %s", rolloutStatus.Message)
}
return nil
})
if err != nil {
return err
}
return nil
}
func getContainerName(d *appsv1.Deployment) (string, error) {
containerList := d.Spec.Template.Spec.Containers
// verify length of container list
if len(containerList) == 0 {
return "", errors.New("missing container")
}
name := containerList[0].Name
// verify replica container name
if name == "" {
return "", errors.New("missing container name")
}
return name, nil
}
func getBaseImage(deployObj *appsv1.Deployment, name string) (string, error) {
for _, con := range deployObj.Spec.Template.Spec.Containers {
if con.Name == name {
baseImage := strings.Split(con.Image, ":")[0]
if urlPrefix != "" {
// urlPrefix is the url to the directory where the images are present
// the below logic takes the image name from current baseImage and
// appends it to the given urlPrefix
// For example baseImage is abc/quay.io/openebs/jiva
// and urlPrefix is xyz/aws-56546546/openebsdirectory/
// it will take jiva from current url and append it to urlPrefix
// and return xyz/aws-56546546/openebsdirectory/jiva
urlSubstr := strings.Split(baseImage, "/")
baseImage = urlPrefix + urlSubstr[len(urlSubstr)-1]
}
return baseImage, nil
}
}
return "", errors.Errorf("image not found for container %s", name)
}
func patchService(targetServiceLabel, namespace string) error {
targetServiceObj, err := serviceClient.WithNamespace(namespace).List(
metav1.ListOptions{
LabelSelector: targetServiceLabel,
},
)
if err != nil {
return errors.Wrapf(err, "failed to get service for %s", targetServiceLabel)
}
if len(targetServiceObj.Items) == 0 {
return errors.Errorf("no service found for %s in %s", targetServiceLabel, namespace)
}
targetServiceName := targetServiceObj.Items[0].Name
if targetServiceName == "" {
return errors.Errorf("missing service name")
}
version := targetServiceObj.Items[0].
Labels["openebs.io/version"]
if version != currentVersion && version != upgradeVersion {
return errors.Errorf(
"service version %s is neither %s nor %s\n",
version,
currentVersion,
upgradeVersion,
)
}
if version == currentVersion {
tmpl, err := template.New("servicePatch").
Parse(templates.OpenebsVersionPatch)
if err != nil {
return errors.Wrapf(err, "failed to create template for service patch")
}
err = tmpl.Execute(&buffer, upgradeVersion)
if err != nil {
return errors.Wrapf(err, "failed to populate template for service patch")
}
servicePatch := buffer.String()
buffer.Reset()
_, err = serviceClient.WithNamespace(namespace).Patch(
targetServiceName,
types.StrategicMergePatchType,
[]byte(servicePatch),
)
if err != nil {
return errors.Wrapf(err, "failed to patch service %s", targetServiceName)
}
klog.Infof("targetservice %s patched", targetServiceName)
} else {
klog.Infof("service already in %s version", upgradeVersion)
}
return nil
}
// createUtask creates a UpgradeTask CR for the resource
func createUtask(utaskObj *apis.UpgradeTask, openebsNamespace string,
) (*apis.UpgradeTask, error) {
var err error
if utaskObj == nil {
return nil, errors.Errorf("failed to create upgradetask : nil object")
}
utaskObj, err = utaskClient.WithNamespace(openebsNamespace).Create(utaskObj)
if err != nil {
return nil, errors.Wrapf(err, "failed to create upgradetask")
}
return utaskObj, nil
}
func updateUpgradeDetailedStatus(utaskObj *apis.UpgradeTask,
uStatusObj apis.UpgradeDetailedStatuses, openebsNamespace string,
) (*apis.UpgradeTask, error) {
var err error
if !utask.IsValidStatus(uStatusObj) {
return nil, errors.Errorf(
"failed to update upgradetask status: invalid status %v",
uStatusObj,
)
}
uStatusObj.LastUpdatedTime = metav1.Now()
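	// StepWaiting marks the start of a new step, so a new status entry is appended;
	// any other phase updates the most recent step entry in place.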
if uStatusObj.Phase == apis.StepWaiting {
uStatusObj.StartTime = uStatusObj.LastUpdatedTime
utaskObj.Status.UpgradeDetailedStatuses = append(
utaskObj.Status.UpgradeDetailedStatuses,
uStatusObj,
)
} else {
l := len(utaskObj.Status.UpgradeDetailedStatuses)
uStatusObj.StartTime = utaskObj.Status.UpgradeDetailedStatuses[l-1].StartTime
utaskObj.Status.UpgradeDetailedStatuses[l-1] = uStatusObj
}
utaskObj, err = utaskClient.WithNamespace(openebsNamespace).Update(utaskObj)
if err != nil {
return nil, errors.Wrapf(err, "failed to update upgradetask ")
}
return utaskObj, nil
}
// getOrCreateUpgradeTask fetches upgrade task if provided or creates a new upgradetask CR
func getOrCreateUpgradeTask(kind, name, openebsNamespace string) (*apis.UpgradeTask, error) {
var utaskObj *apis.UpgradeTask
var err error
if openebsNamespace == "" {
return nil, errors.Errorf("missing openebsNamespace")
}
if kind == "" {
return nil, errors.Errorf("missing kind for upgradeTask")
}
if name == "" {
return nil, errors.Errorf("missing name for upgradeTask")
}
utaskObj = buildUpgradeTask(kind, name, openebsNamespace)
	// the below logic first tries to fetch the CR; if it is not found,
	// then a new CR is created
utaskObj1, err1 := utaskClient.WithNamespace(openebsNamespace).
Get(utaskObj.Name, metav1.GetOptions{})
if err1 != nil {
if k8serror.IsNotFound(err1) {
utaskObj, err = createUtask(utaskObj, openebsNamespace)
if err != nil {
return nil, err
}
} else {
return nil, err1
}
} else {
utaskObj = utaskObj1
}
if utaskObj.Status.StartTime.IsZero() {
utaskObj.Status.Phase = apis.UpgradeStarted
utaskObj.Status.StartTime = metav1.Now()
}
utaskObj.Status.UpgradeDetailedStatuses = []apis.UpgradeDetailedStatuses{}
utaskObj, err = utaskClient.WithNamespace(openebsNamespace).
Update(utaskObj)
if err != nil {
return nil, errors.Wrapf(err, "failed to update upgradetask")
}
return utaskObj, nil
}
func buildUpgradeTask(kind, name, openebsNamespace string) *apis.UpgradeTask {
// TODO builder
utaskObj := &apis.UpgradeTask{
ObjectMeta: metav1.ObjectMeta{
Namespace: openebsNamespace,
},
Spec: apis.UpgradeTaskSpec{
FromVersion: currentVersion,
ToVersion: upgradeVersion,
ImageTag: imageTag,
ImagePrefix: urlPrefix,
},
Status: apis.UpgradeTaskStatus{
Phase: apis.UpgradeStarted,
StartTime: metav1.Now(),
},
}
switch kind {
case "jivaVolume":
utaskObj.Name = "upgrade-jiva-volume-" + name
utaskObj.Spec.ResourceSpec = apis.ResourceSpec{
JivaVolume: &apis.JivaVolume{
PVName: name,
},
}
case "cstorPool":
utaskObj.Name = "upgrade-cstor-pool-" + name
utaskObj.Spec.ResourceSpec = apis.ResourceSpec{
CStorPool: &apis.CStorPool{
PoolName: name,
},
}
case "cstorVolume":
utaskObj.Name = "upgrade-cstor-volume-" + name
utaskObj.Spec.ResourceSpec = apis.ResourceSpec{
CStorVolume: &apis.CStorVolume{
PVName: name,
},
}
}
return utaskObj
}
| 1 | 17,611 | I think we should avoid creating a dummy CR in the case of SPC. Please see if we can avoid this since we will not be patching anything in this CR. | openebs-maya | go |
@@ -7,6 +7,10 @@
<%= render 'trails' %>
</section>
+<% if locked_features.any? %>
+ <%= render 'locked_features' %>
+<% end %>
+
<% if current_user_has_access_to?(:exercises) %>
<p class="product-headline">Hone your skills by completing these exercises</p>
<%= render 'products/exercises' %> | 1 | <%= render 'mentor_info' %>
<section class='resources'>
<%= render 'learn_repo' %>
<%= render 'learn_live' %>
<%= render 'forum' %>
<%= render 'trails' %>
</section>
<% if current_user_has_access_to?(:exercises) %>
<p class="product-headline">Hone your skills by completing these exercises</p>
<%= render 'products/exercises' %>
<% end %>
<% if current_user_has_access_to?(:workshops) %>
<p class="product-headline">Enroll in our online workshops</p>
<section class='workshops'>
<%= render partial: 'products/workshop', collection: @catalog.workshops %>
</section>
<% end %>
<% if !current_user_has_access_to?(:exercises) %>
<section class='workshops disabled'>
<div class='disabled-text'>
<h2>
<% if current_user_has_access_to?(:workshops) %>
<%= t(
'.no_exercises.title_html',
link: link_to(t('.no_exercises.link'), edit_subscription_path)
) %>
<% elsif current_user_has_access_to_shows? %>
<%= t(
'.no_workshops.title_html',
link: link_to(t('.no_workshops.link'), edit_subscription_path)
) %>
<% else %>
<%= t(
'.no_shows.title_html',
link: link_to(t('.no_shows.link'), edit_subscription_path)
) %>
<% end %>
</h2>
<%= link_to "See what you're missing", products_path %>
</div>
</section>
<% end %>
<% if current_user_has_access_to_shows? %>
<p class="product-headline center">Watch our web shows and screencasts</p>
<section class='shows'>
<%= render @catalog.shows %>
</section>
<% end %>
<% if current_user_has_access_to?(:screencasts) %>
<section class='screencasts'>
<%= render @catalog.screencasts %>
</section>
<% end %>
<p class="product-headline">Read our eBooks to boost your knowledge</p>
<section class='reading'>
<%= render @catalog.books %>
</section>
| 1 | 10,049 | Honestly not sure myself, but do you think it makes sense to move this conditional into the partial? | thoughtbot-upcase | rb |
@@ -70,7 +70,7 @@ def run(args):
sys.exit(usertypes.Exit.ok)
if args.temp_basedir:
- args.basedir = tempfile.mkdtemp()
+ args.basedir = tempfile.mkdtemp(prefix='qutebrowser-prefix-')
quitter = Quitter(args)
objreg.register('quitter', quitter) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Initialization of qutebrowser and application-wide things."""
import os
import sys
import subprocess
import configparser
import functools
import json
import time
import shutil
import tempfile
import atexit
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QDesktopServices, QPixmap, QIcon, QCursor, QWindow
from PyQt5.QtCore import (pyqtSlot, qInstallMessageHandler, QTimer, QUrl,
QObject, Qt, QEvent)
try:
import hunter
except ImportError:
hunter = None
import qutebrowser
import qutebrowser.resources # pylint: disable=unused-import
from qutebrowser.completion.models import instances as completionmodels
from qutebrowser.commands import cmdutils, runners, cmdexc
from qutebrowser.config import style, config, websettings, configexc
from qutebrowser.browser import quickmarks, cookies, cache, adblock, history
from qutebrowser.browser.network import qutescheme, proxy, networkmanager
from qutebrowser.mainwindow import mainwindow
from qutebrowser.misc import readline, ipc, savemanager, sessions, crashsignal
from qutebrowser.misc import utilcmds # pylint: disable=unused-import
from qutebrowser.utils import (log, version, message, utils, qtutils, urlutils,
objreg, usertypes, standarddir, error)
# We import utilcmds to run the cmdutils.register decorators.
qApp = None
def run(args):
"""Initialize everthing and run the application."""
# pylint: disable=too-many-statements
if args.version:
print(version.version())
print()
print()
print(qutebrowser.__copyright__)
print()
print(version.GPL_BOILERPLATE.strip())
sys.exit(usertypes.Exit.ok)
if args.temp_basedir:
args.basedir = tempfile.mkdtemp()
quitter = Quitter(args)
objreg.register('quitter', quitter)
global qApp
qApp = Application(args)
qApp.lastWindowClosed.connect(quitter.on_last_window_closed)
crash_handler = crashsignal.CrashHandler(
app=qApp, quitter=quitter, args=args, parent=qApp)
crash_handler.activate()
objreg.register('crash-handler', crash_handler)
signal_handler = crashsignal.SignalHandler(app=qApp, quitter=quitter,
parent=qApp)
signal_handler.activate()
objreg.register('signal-handler', signal_handler)
try:
sent = ipc.send_to_running_instance(args)
if sent:
sys.exit(usertypes.Exit.ok)
log.init.debug("Starting IPC server...")
server = ipc.IPCServer(args, qApp)
objreg.register('ipc-server', server)
server.got_args.connect(lambda args, cwd:
process_pos_args(args, cwd=cwd, via_ipc=True))
except ipc.AddressInUseError as e:
# This could be a race condition...
log.init.debug("Got AddressInUseError, trying again.")
time.sleep(500)
sent = ipc.send_to_running_instance(args)
if sent:
sys.exit(usertypes.Exit.ok)
else:
ipc.display_error(e, args)
sys.exit(usertypes.Exit.err_ipc)
except ipc.Error as e:
ipc.display_error(e, args)
# We didn't really initialize much so far, so we just quit hard.
sys.exit(usertypes.Exit.err_ipc)
init(args, crash_handler)
ret = qt_mainloop()
return ret
def qt_mainloop():
"""Simple wrapper to get a nicer stack trace for segfaults.
WARNING: misc/crashdialog.py checks the stacktrace for this function
name, so if this is changed, it should be changed there as well!
"""
return qApp.exec_()
def init(args, crash_handler):
"""Initialize everything.
Args:
args: The argparse namespace.
crash_handler: The CrashHandler instance.
"""
log.init.debug("Starting init...")
qApp.setQuitOnLastWindowClosed(False)
qApp.setOrganizationName("qutebrowser")
qApp.setApplicationName("qutebrowser")
qApp.setApplicationVersion(qutebrowser.__version__)
_init_icon()
utils.actute_warning()
try:
_init_modules(args, crash_handler)
except (OSError, UnicodeDecodeError) as e:
error.handle_fatal_exc(e, args, "Error while initializing!",
pre_text="Error while initializing")
sys.exit(usertypes.Exit.err_init)
QTimer.singleShot(0, functools.partial(_process_args, args))
log.init.debug("Initializing eventfilter...")
event_filter = EventFilter(qApp)
qApp.installEventFilter(event_filter)
objreg.register('event-filter', event_filter)
log.init.debug("Connecting signals...")
config_obj = objreg.get('config')
config_obj.style_changed.connect(style.get_stylesheet.cache_clear)
qApp.focusChanged.connect(on_focus_changed)
qApp.focusChanged.connect(message.on_focus_changed)
QDesktopServices.setUrlHandler('http', open_desktopservices_url)
QDesktopServices.setUrlHandler('https', open_desktopservices_url)
QDesktopServices.setUrlHandler('qute', open_desktopservices_url)
log.init.debug("Init done!")
crash_handler.raise_crashdlg()
def _init_icon():
"""Initialize the icon of qutebrowser."""
icon = QIcon()
for size in (16, 24, 32, 48, 64, 96, 128, 256, 512):
filename = ':/icons/qutebrowser-{}x{}.png'.format(size, size)
pixmap = QPixmap(filename)
qtutils.ensure_not_null(pixmap)
icon.addPixmap(pixmap)
qtutils.ensure_not_null(icon)
qApp.setWindowIcon(icon)
def _process_args(args):
"""Open startpage etc. and process commandline args."""
config_obj = objreg.get('config')
for sect, opt, val in args.temp_settings:
try:
config_obj.set('temp', sect, opt, val)
except (configexc.Error, configparser.Error) as e:
message.error('current', "set: {} - {}".format(
e.__class__.__name__, e))
if not args.override_restore:
_load_session(args.session)
session_manager = objreg.get('session-manager')
if not session_manager.did_load:
log.init.debug("Initializing main window...")
window = mainwindow.MainWindow()
if not args.nowindow:
window.show()
qApp.setActiveWindow(window)
process_pos_args(args.command)
_open_startpage()
_open_quickstart(args)
def _load_session(name):
"""Load the default session.
Args:
name: The name of the session to load, or None to read state file.
"""
state_config = objreg.get('state-config')
if name is None:
try:
name = state_config['general']['session']
except KeyError:
# No session given as argument and none in the session file ->
# start without loading a session
return
session_manager = objreg.get('session-manager')
try:
session_manager.load(name)
except sessions.SessionNotFoundError:
message.error('current', "Session {} not found!".format(name))
except sessions.SessionError as e:
message.error('current', "Failed to load session {}: {}".format(
name, e))
try:
del state_config['general']['session']
except KeyError:
pass
# If this was a _restart session, delete it.
if name == '_restart':
session_manager.delete('_restart')
def process_pos_args(args, via_ipc=False, cwd=None):
"""Process positional commandline args.
URLs to open have no prefix, commands to execute begin with a colon.
Args:
args: A list of arguments to process.
via_ipc: Whether the arguments were transmitted over IPC.
cwd: The cwd to use for fuzzy_url.
"""
if via_ipc and not args:
win_id = mainwindow.get_window(via_ipc, force_window=True)
_open_startpage(win_id)
return
win_id = None
for cmd in args:
if cmd.startswith(':'):
if win_id is None:
win_id = mainwindow.get_window(via_ipc, force_tab=True)
log.init.debug("Startup cmd {}".format(cmd))
commandrunner = runners.CommandRunner(win_id)
commandrunner.run_safely_init(cmd[1:])
elif not cmd:
log.init.debug("Empty argument")
win_id = mainwindow.get_window(via_ipc, force_window=True)
else:
win_id = mainwindow.get_window(via_ipc)
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
log.init.debug("Startup URL {}".format(cmd))
try:
url = urlutils.fuzzy_url(cmd, cwd, relative=True)
except urlutils.FuzzyUrlError as e:
message.error(0, "Error in startup argument '{}': {}".format(
cmd, e))
else:
open_target = config.get('general', 'new-instance-open-target')
background = open_target in ('tab-bg', 'tab-bg-silent')
tabbed_browser.tabopen(url, background=background)
def _open_startpage(win_id=None):
"""Open startpage.
The startpage is never opened if the given windows are not empty.
Args:
win_id: If None, open startpage in all empty windows.
If set, open the startpage in the given window.
"""
if win_id is not None:
window_ids = [win_id]
else:
window_ids = objreg.window_registry
for cur_win_id in window_ids:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=cur_win_id)
if tabbed_browser.count() == 0:
log.init.debug("Opening startpage")
for urlstr in config.get('general', 'startpage'):
try:
url = urlutils.fuzzy_url(urlstr, do_search=False)
except urlutils.FuzzyUrlError as e:
message.error(0, "Error when opening startpage: {}".format(
e))
tabbed_browser.tabopen(QUrl('about:blank'))
else:
tabbed_browser.tabopen(url)
def _open_quickstart(args):
"""Open quickstart if it's the first start.
Args:
args: The argparse namespace.
"""
if args.datadir is not None or args.basedir is not None:
# With --datadir or --basedir given, don't open quickstart.
return
state_config = objreg.get('state-config')
try:
quickstart_done = state_config['general']['quickstart-done'] == '1'
except KeyError:
quickstart_done = False
if not quickstart_done:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tabbed_browser.tabopen(
QUrl('http://www.qutebrowser.org/quickstart.html'))
state_config['general']['quickstart-done'] = '1'
def _save_version():
"""Save the current version to the state config."""
state_config = objreg.get('state-config')
state_config['general']['version'] = qutebrowser.__version__
def on_focus_changed(_old, new):
"""Register currently focused main window in the object registry."""
if new is None:
window = None
else:
window = new.window()
if window is None or not isinstance(window, mainwindow.MainWindow):
try:
objreg.delete('last-focused-main-window')
except KeyError:
pass
qApp.restoreOverrideCursor()
else:
objreg.register('last-focused-main-window', window, update=True)
_maybe_hide_mouse_cursor()
@pyqtSlot(QUrl)
def open_desktopservices_url(url):
"""Handler to open an URL via QDesktopServices."""
win_id = mainwindow.get_window(via_ipc=True, force_window=False)
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.tabopen(url)
@config.change_filter('ui', 'hide-mouse-cursor', function=True)
def _maybe_hide_mouse_cursor():
"""Hide the mouse cursor if it isn't yet and it's configured."""
if config.get('ui', 'hide-mouse-cursor'):
if qApp.overrideCursor() is not None:
return
qApp.setOverrideCursor(QCursor(Qt.BlankCursor))
else:
qApp.restoreOverrideCursor()
def _init_modules(args, crash_handler):
"""Initialize all 'modules' which need to be initialized.
Args:
args: The argparse namespace.
crash_handler: The CrashHandler instance.
"""
# pylint: disable=too-many-statements
log.init.debug("Initializing save manager...")
save_manager = savemanager.SaveManager(qApp)
objreg.register('save-manager', save_manager)
save_manager.add_saveable('version', _save_version)
log.init.debug("Initializing network...")
networkmanager.init()
log.init.debug("Initializing readline-bridge...")
readline_bridge = readline.ReadlineBridge()
objreg.register('readline-bridge', readline_bridge)
log.init.debug("Initializing directories...")
standarddir.init(args)
log.init.debug("Initializing config...")
config.init(qApp)
save_manager.init_autosave()
log.init.debug("Initializing web history...")
history.init(qApp)
log.init.debug("Initializing crashlog...")
if not args.no_err_windows:
crash_handler.handle_segfault()
log.init.debug("Initializing sessions...")
sessions.init(qApp)
log.init.debug("Initializing js-bridge...")
js_bridge = qutescheme.JSBridge(qApp)
objreg.register('js-bridge', js_bridge)
log.init.debug("Initializing websettings...")
websettings.init()
log.init.debug("Initializing adblock...")
host_blocker = adblock.HostBlocker()
host_blocker.read_hosts()
objreg.register('host-blocker', host_blocker)
log.init.debug("Initializing quickmarks...")
quickmark_manager = quickmarks.QuickmarkManager(qApp)
objreg.register('quickmark-manager', quickmark_manager)
log.init.debug("Initializing proxy...")
proxy.init()
log.init.debug("Initializing cookies...")
cookie_jar = cookies.CookieJar(qApp)
objreg.register('cookie-jar', cookie_jar)
log.init.debug("Initializing cache...")
diskcache = cache.DiskCache(qApp)
objreg.register('cache', diskcache)
log.init.debug("Initializing completions...")
completionmodels.init()
log.init.debug("Misc initialization...")
_maybe_hide_mouse_cursor()
objreg.get('config').changed.connect(_maybe_hide_mouse_cursor)
class Quitter:
"""Utility class to quit/restart the QApplication.
Attributes:
quit_status: The current quitting status.
_shutting_down: Whether we're currently shutting down.
_args: The argparse namespace.
"""
def __init__(self, args):
self.quit_status = {
'crash': True,
'tabs': False,
'main': False,
}
self._shutting_down = False
self._args = args
@pyqtSlot()
def on_last_window_closed(self):
"""Slot which gets invoked when the last window was closed."""
self.shutdown(last_window=True)
def _get_restart_args(self, pages=(), session=None):
"""Get the current working directory and args to relaunch qutebrowser.
Args:
pages: The pages to re-open.
session: The session to load, or None.
Return:
An (args, cwd) tuple.
args: The commandline as a list of strings.
cwd: The current working directory as a string.
"""
if os.path.basename(sys.argv[0]) == 'qutebrowser':
# Launched via launcher script
args = [sys.argv[0]]
cwd = None
elif hasattr(sys, 'frozen'):
args = [sys.executable]
cwd = os.path.abspath(os.path.dirname(sys.executable))
else:
args = [sys.executable, '-m', 'qutebrowser']
cwd = os.path.join(os.path.abspath(os.path.dirname(
qutebrowser.__file__)), '..')
if not os.path.isdir(cwd):
# Probably running from an python egg. Let's fallback to
# cwd=None and see if that works out.
# See https://github.com/The-Compiler/qutebrowser/issues/323
cwd = None
# Add all open pages so they get reopened.
page_args = []
for win in pages:
page_args.extend(win)
page_args.append('')
# Serialize the argparse namespace into json and pass that to the new
# process via --json-args.
# We do this as there's no way to "unparse" the namespace while
# ignoring some arguments.
argdict = vars(self._args)
argdict['session'] = None
argdict['url'] = []
argdict['command'] = page_args[:-1]
argdict['json_args'] = None
# Ensure the given session (or none at all) gets opened.
if session is None:
argdict['session'] = None
argdict['override_restore'] = True
else:
argdict['session'] = session
argdict['override_restore'] = False
# Dump the data
data = json.dumps(argdict)
args += ['--json-args', data]
log.destroy.debug("args: {}".format(args))
log.destroy.debug("cwd: {}".format(cwd))
return args, cwd
@cmdutils.register(instance='quitter', name='restart')
def restart_cmd(self):
"""Restart qutebrowser while keeping existing tabs open."""
try:
ok = self.restart(session='_restart')
except sessions.SessionError as e:
log.destroy.exception("Failed to save session!")
raise cmdexc.CommandError("Failed to save session: {}!".format(e))
if ok:
self.shutdown()
def restart(self, pages=(), session=None):
"""Inner logic to restart qutebrowser.
The "better" way to restart is to pass a session (_restart usually) as
that'll save the complete state.
However we don't do that (and pass a list of pages instead) when we
restart because of an exception, as that's a lot simpler and we don't
want to risk anything going wrong.
Args:
pages: A list of URLs to open.
session: The session to load, or None.
Return:
True if the restart succeeded, False otherwise.
"""
log.destroy.debug("sys.executable: {}".format(sys.executable))
log.destroy.debug("sys.path: {}".format(sys.path))
log.destroy.debug("sys.argv: {}".format(sys.argv))
log.destroy.debug("frozen: {}".format(hasattr(sys, 'frozen')))
# Save the session if one is given.
if session is not None:
session_manager = objreg.get('session-manager')
session_manager.save(session)
# Open a new process and immediately shutdown the existing one
try:
args, cwd = self._get_restart_args(pages, session)
if cwd is None:
subprocess.Popen(args)
else:
subprocess.Popen(args, cwd=cwd)
except OSError:
log.destroy.exception("Failed to restart")
return False
else:
return True
@cmdutils.register(instance='quitter', name=['quit', 'q'],
ignore_args=True)
def shutdown(self, status=0, session=None, last_window=False):
"""Quit qutebrowser.
Args:
status: The status code to exit with.
session: A session name if saving should be forced.
last_window: If the shutdown was triggered due to the last window
closing.
"""
if self._shutting_down:
return
self._shutting_down = True
log.destroy.debug("Shutting down with status {}, session {}...".format(
status, session))
session_manager = objreg.get('session-manager')
if session is not None:
session_manager.save(session, last_window=last_window,
load_next_time=True)
elif config.get('general', 'save-session'):
session_manager.save(sessions.default, last_window=last_window,
load_next_time=True)
deferrer = False
for win_id in objreg.window_registry:
prompter = objreg.get('prompter', None, scope='window',
window=win_id)
if prompter is not None and prompter.shutdown():
deferrer = True
if deferrer:
            # If shutdown was called while we were asking a question, we're
            # still in a sub-eventloop (which gets quit now) and not in the main
# one.
# This means we need to defer the real shutdown to when we're back
# in the real main event loop, or we'll get a segfault.
log.destroy.debug("Deferring real shutdown because question was "
"active.")
QTimer.singleShot(0, functools.partial(self._shutdown, status))
else:
# If we have no questions to shut down, we are already in the real
# event loop, so we can shut down immediately.
self._shutdown(status)
def _shutdown(self, status): # noqa
"""Second stage of shutdown."""
log.destroy.debug("Stage 2 of shutting down...")
if qApp is None:
# No QApplication exists yet, so quit hard.
sys.exit(status)
# Remove eventfilter
try:
log.destroy.debug("Removing eventfilter...")
qApp.removeEventFilter(objreg.get('event-filter'))
except AttributeError:
pass
# Close all windows
QApplication.closeAllWindows()
# Shut down IPC
try:
objreg.get('ipc-server').shutdown()
except KeyError:
pass
# Save everything
try:
save_manager = objreg.get('save-manager')
except KeyError:
log.destroy.debug("Save manager not initialized yet, so not "
"saving anything.")
else:
for key in save_manager.saveables:
try:
save_manager.save(key, is_exit=True)
except OSError as e:
error.handle_fatal_exc(
e, self._args, "Error while saving!",
pre_text="Error while saving {}".format(key))
# Re-enable faulthandler to stdout, then remove crash log
log.destroy.debug("Deactivating crash log...")
objreg.get('crash-handler').destroy_crashlogfile()
# Delete temp basedir
if self._args.temp_basedir:
atexit.register(shutil.rmtree, self._args.basedir)
# If we don't kill our custom handler here we might get segfaults
log.destroy.debug("Deactiving message handler...")
qInstallMessageHandler(None)
# Now we can hopefully quit without segfaults
log.destroy.debug("Deferring QApplication::exit...")
objreg.get('signal-handler').deactivate()
# We use a singleshot timer to exit here to minimize the likelihood of
# segfaults.
QTimer.singleShot(0, functools.partial(qApp.exit, status))
@cmdutils.register(instance='quitter', name='wq',
completion=[usertypes.Completion.sessions])
def save_and_quit(self, name=sessions.default):
"""Save open pages and quit.
Args:
name: The name of the session.
"""
self.shutdown(session=name)
class Application(QApplication):
"""Main application instance.
Attributes:
_args: ArgumentParser instance.
"""
def __init__(self, args):
"""Constructor.
Args:
Argument namespace from argparse.
"""
qt_args = qtutils.get_args(args)
log.init.debug("Qt arguments: {}, based on {}".format(qt_args, args))
super().__init__(qt_args)
log.init.debug("Initializing application...")
self._args = args
objreg.register('args', args)
objreg.register('app', self)
def __repr__(self):
return utils.get_repr(self)
def exit(self, status):
"""Extend QApplication::exit to log the event."""
log.destroy.debug("Now calling QApplication::exit.")
if self._args.debug_exit:
if hunter is None:
print("Not logging late shutdown because hunter could not be "
"imported!", file=sys.stderr)
else:
print("Now logging late shutdown.", file=sys.stderr)
hunter.trace()
super().exit(status)
class EventFilter(QObject):
"""Global Qt event filter.
Attributes:
_activated: Whether the EventFilter is currently active.
        _handlers: A {QEvent.Type: callable} dict with the handlers for an
event.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._activated = True
self._handlers = {
QEvent.MouseButtonDblClick: self._handle_mouse_event,
QEvent.MouseButtonPress: self._handle_mouse_event,
QEvent.MouseButtonRelease: self._handle_mouse_event,
QEvent.MouseMove: self._handle_mouse_event,
QEvent.KeyPress: self._handle_key_event,
QEvent.KeyRelease: self._handle_key_event,
}
def _handle_key_event(self, event):
"""Handle a key press/release event.
Args:
event: The QEvent which is about to be delivered.
Return:
True if the event should be filtered, False if it's passed through.
"""
if qApp.activeWindow() not in objreg.window_registry.values():
# Some other window (print dialog, etc.) is focused so we pass the
# event through.
return False
try:
man = objreg.get('mode-manager', scope='window', window='current')
return man.eventFilter(event)
except objreg.RegistryUnavailableError:
# No window available yet, or not a MainWindow
return False
def _handle_mouse_event(self, _event):
"""Handle a mouse event.
Args:
_event: The QEvent which is about to be delivered.
Return:
True if the event should be filtered, False if it's passed through.
"""
if qApp.overrideCursor() is None:
# Mouse cursor shown -> don't filter event
return False
else:
# Mouse cursor hidden -> filter event
return True
def eventFilter(self, obj, event):
"""Handle an event.
Args:
obj: The object which will get the event.
event: The QEvent which is about to be delivered.
Return:
True if the event should be filtered, False if it's passed through.
"""
try:
if not self._activated:
return False
if not isinstance(obj, QWindow):
# We already handled this same event at some point earlier, so
# we're not interested in it anymore.
return False
try:
handler = self._handlers[event.type()]
except KeyError:
return False
else:
return handler(event)
except:
# If there is an exception in here and we leave the eventfilter
# activated, we'll get an infinite loop and a stack overflow.
self._activated = False
raise
| 1 | 12,987 | As discussed in IRC (just so it doesn't get lost): This probably should be `-basedir-`, not `-prefix-` | qutebrowser-qutebrowser | py |
@@ -40,7 +40,8 @@ namespace Nethermind.Db
public DbOnTheRocks(string basePath, string dbPath, IDbConfig dbConfig, ILogManager logManager = null) // TODO: check column families
{
- var fullPath = Path.Combine(basePath, dbPath);
+ var directory = PathUtils.GetExecutingDirectory();
+ var fullPath = Path.Combine(directory, basePath, dbPath);
var logger = logManager?.GetClassLogger();
if (!Directory.Exists(fullPath))
{ | 1 | /*
* Copyright (c) 2018 Demerzel Solutions Limited
* This file is part of the Nethermind library.
*
* The Nethermind library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Nethermind library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Reflection;
using Nethermind.Db.Config;
using Nethermind.Logging;
using Nethermind.Store;
using NLog.Filters;
using RocksDbSharp;
namespace Nethermind.Db
{
public abstract class DbOnTheRocks : IDb, IDbWithSpan
{
private static readonly ConcurrentDictionary<string, RocksDb> DbsByPath = new ConcurrentDictionary<string, RocksDb>();
private readonly RocksDb _db;
private WriteBatch _currentBatch;
private WriteOptions _writeOptions;
public abstract string Name { get; }
public DbOnTheRocks(string basePath, string dbPath, IDbConfig dbConfig, ILogManager logManager = null) // TODO: check column families
{
var fullPath = Path.Combine(basePath, dbPath);
var logger = logManager?.GetClassLogger();
if (!Directory.Exists(fullPath))
{
Directory.CreateDirectory(fullPath);
}
if (logger != null)
{
if (logger.IsInfo) logger.Info($"Using database directory {fullPath}");
}
try
{
var options = BuildOptions(dbConfig);
_db = DbsByPath.GetOrAdd(fullPath, path => RocksDb.Open(options, path));
}
catch (DllNotFoundException e) when (e.Message.Contains("libdl"))
{
throw new ApplicationException($"Unable to load 'libdl' necessary to init the RocksDB database. Please run{Environment.NewLine}" +
"sudo apt update && sudo apt install libsnappy-dev libc6-dev libc6");
}
}
protected abstract void UpdateReadMetrics();
protected abstract void UpdateWriteMetrics();
private T ReadConfig<T>(IDbConfig dbConfig, string propertyName)
{
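            // The "State" DB reads the bare property name; every other DB reads the property
            // prefixed with its name plus "Db" (for example, a "Blocks" DB reads "BlocksDb" + propertyName).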
var prefixed = string.Concat(Name == "State" ? string.Empty : string.Concat(Name, "Db"),
propertyName);
try
{
return (T) dbConfig.GetType().GetProperty(prefixed, BindingFlags.Public | BindingFlags.Instance)
.GetValue(dbConfig);
}
catch (Exception e)
{
throw new InvalidDataException($"Unable to read {prefixed} property from DB config", e);
}
}
private DbOptions BuildOptions(IDbConfig dbConfig)
{
var tableOptions = new BlockBasedTableOptions();
tableOptions.SetBlockSize(16 * 1024);
tableOptions.SetPinL0FilterAndIndexBlocksInCache(true);
tableOptions.SetCacheIndexAndFilterBlocks(ReadConfig<bool>(dbConfig, nameof(dbConfig.CacheIndexAndFilterBlocks)));
tableOptions.SetFilterPolicy(BloomFilterPolicy.Create(10, true));
tableOptions.SetFormatVersion(2);
var blockCacheSize = ReadConfig<ulong>(dbConfig, nameof(dbConfig.BlockCacheSize));
var cache = Native.Instance.rocksdb_cache_create_lru(new UIntPtr(blockCacheSize));
tableOptions.SetBlockCache(cache);
var options = new DbOptions();
options.SetCreateIfMissing(true);
options.SetAdviseRandomOnOpen(true);
options.OptimizeForPointLookup(blockCacheSize); // I guess this should be the one option controlled by the DB size property - bind it to LRU cache size
//options.SetCompression(CompressionTypeEnum.rocksdb_snappy_compression);
//options.SetLevelCompactionDynamicLevelBytes(true);
/*
* Multi-Threaded Compactions
* Compactions are needed to remove multiple copies of the same key that may occur if an application overwrites an existing key. Compactions also process deletions of keys. Compactions may occur in multiple threads if configured appropriately.
* The entire database is stored in a set of sstfiles. When a memtable is full, its content is written out to a file in Level-0 (L0). RocksDB removes duplicate and overwritten keys in the memtable when it is flushed to a file in L0. Some files are periodically read in and merged to form larger files - this is called compaction.
* The overall write throughput of an LSM database directly depends on the speed at which compactions can occur, especially when the data is stored in fast storage like SSD or RAM. RocksDB may be configured to issue concurrent compaction requests from multiple threads. It is observed that sustained write rates may increase by as much as a factor of 10 with multi-threaded compaction when the database is on SSDs, as compared to single-threaded compactions.
* TKS: Observed 500MB/s compared to ~100MB/s between multithreaded and single thread compactions on my machine (processor count is returning 12 for 6 cores with hyperthreading)
* TKS: CPU goes to insane 30% usage on idle - compacting only app
*/
options.SetMaxBackgroundCompactions(Environment.ProcessorCount);
//options.SetMaxOpenFiles(32);
options.SetWriteBufferSize(ReadConfig<ulong>(dbConfig, nameof(dbConfig.WriteBufferSize)));
options.SetMaxWriteBufferNumber((int)ReadConfig<uint>(dbConfig, nameof(dbConfig.WriteBufferNumber)));
options.SetMinWriteBufferNumberToMerge(2);
options.SetBlockBasedTableFactory(tableOptions);
options.SetMaxBackgroundFlushes(Environment.ProcessorCount);
options.IncreaseParallelism(Environment.ProcessorCount);
options.SetRecycleLogFileNum(dbConfig.RecycleLogFileNum); // potential optimization for reusing allocated log files
// options.SetLevelCompactionDynamicLevelBytes(true); // only switch on on empty DBs
_writeOptions = new WriteOptions();
_writeOptions.SetSync(dbConfig.WriteAheadLogSync); // potential fix for corruption on hard process termination, may cause performance degradation
return options;
}
public byte[] this[byte[] key]
{
get
{
UpdateReadMetrics();
return _db.Get(key);
}
set
{
UpdateWriteMetrics();
if (_currentBatch != null)
{
if (value == null)
{
_currentBatch.Delete(key);
}
else
{
_currentBatch.Put(key, value);
}
}
else
{
if (value == null)
{
_db.Remove(key, null, _writeOptions);
}
else
{
_db.Put(key, value, null, _writeOptions);
}
}
}
}
public Span<byte> GetSpan(byte[] key)
{
UpdateReadMetrics();
return _db.GetSpan(key);
}
public void DangerousReleaseMemory(in Span<byte> span)
{
_db.DangerousReleaseMemory(in span);
}
public void Remove(byte[] key)
{
_db.Remove(key, null, _writeOptions);
}
public byte[][] GetAll()
{
var iterator = _db.NewIterator();
iterator = iterator.SeekToFirst();
var values = new List<byte[]>();
while (iterator.Valid())
{
values.Add(iterator.Value());
iterator = iterator.Next();
}
iterator.Dispose();
return values.ToArray();
}
private byte[] _keyExistsBuffer = new byte[1];
public bool KeyExists(byte[] key)
{
// seems it has no performance impact
return _db.Get(key) != null;
// return _db.Get(key, 32, _keyExistsBuffer, 0, 0, null, null) != -1;
}
public void StartBatch()
{
_currentBatch = new WriteBatch();
}
public void CommitBatch()
{
_db.Write(_currentBatch, _writeOptions);
_currentBatch.Dispose();
_currentBatch = null;
}
public void Dispose()
{
_db?.Dispose();
_currentBatch?.Dispose();
}
}
} | 1 | 22,664 | basepath can be absoluta path and this needs to be supported | NethermindEth-nethermind | .cs |
@@ -280,7 +280,7 @@ void ConfigPanelWidget::updateIconThemeSettings()
void ConfigPanelWidget::addPosition(const QString& name, int screen, LXQtPanel::Position position)
{
if (LXQtPanel::canPlacedOn(screen, position))
- ui->comboBox_position->addItem(name, QVariant::fromValue((ScreenPosition){screen, position}));
+ ui->comboBox_position->addItem(name, QVariant::fromValue(ScreenPosition{screen, position}));
}
| 1 | /* BEGIN_COMMON_COPYRIGHT_HEADER
* (c)LGPL2+
*
* LXQt - a lightweight, Qt based, desktop toolset
* https://lxqt.org
*
* Copyright: 2010-2011 Razor team
* Authors:
* Marat "Morion" Talipov <[email protected]>
*
* This program or library is free software; you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301 USA
*
* END_COMMON_COPYRIGHT_HEADER */
#include "configpanelwidget.h"
#include "ui_configpanelwidget.h"
#include "../lxqtpanellimits.h"
#include <KWindowSystem/KWindowSystem>
#include <QDebug>
#include <QListView>
#include <QDesktopWidget>
#include <QWindow>
#include <QColorDialog>
#include <QFileDialog>
#include <QStandardPaths>
using namespace LXQt;
struct ScreenPosition
{
int screen;
ILXQtPanel::Position position;
};
Q_DECLARE_METATYPE(ScreenPosition)
ConfigPanelWidget::ConfigPanelWidget(LXQtPanel *panel, QWidget *parent) :
QWidget(parent),
ui(new Ui::ConfigPanelWidget),
mPanel(panel)
{
ui->setupUi(this);
fillComboBox_position();
fillComboBox_alignment();
fillComboBox_icon();
mOldPanelSize = mPanel->panelSize();
mOldIconSize = mPanel->iconSize();
mOldLineCount = mPanel->lineCount();
mOldLength = mPanel->length();
mOldLengthInPercents = mPanel->lengthInPercents();
mOldAlignment = mPanel->alignment();
mOldScreenNum = mPanel->screenNum();
mScreenNum = mOldScreenNum;
mOldPosition = mPanel->position();
mPosition = mOldPosition;
mOldHidable = mPanel->hidable();
mOldVisibleMargin = mPanel->visibleMargin();
mOldAnimation = mPanel->animationTime();
mOldShowDelay = mPanel->showDelay();
ui->spinBox_panelSize->setMinimum(PANEL_MINIMUM_SIZE);
ui->spinBox_panelSize->setMaximum(PANEL_MAXIMUM_SIZE);
mOldFontColor = mPanel->fontColor();
mFontColor = mOldFontColor;
mOldBackgroundColor = mPanel->backgroundColor();
mBackgroundColor = mOldBackgroundColor;
mOldBackgroundImage = mPanel->backgroundImage();
mOldOpacity = mPanel->opacity();
mOldReserveSpace = mPanel->reserveSpace();
// reset configurations from file
reset();
connect(ui->spinBox_panelSize, SIGNAL(valueChanged(int)), this, SLOT(editChanged()));
connect(ui->spinBox_iconSize, SIGNAL(valueChanged(int)), this, SLOT(editChanged()));
connect(ui->spinBox_lineCount, SIGNAL(valueChanged(int)), this, SLOT(editChanged()));
connect(ui->spinBox_length, SIGNAL(valueChanged(int)), this, SLOT(editChanged()));
connect(ui->comboBox_lenghtType, SIGNAL(activated(int)), this, SLOT(widthTypeChanged()));
connect(ui->comboBox_alignment, SIGNAL(activated(int)), this, SLOT(editChanged()));
connect(ui->comboBox_position, SIGNAL(activated(int)), this, SLOT(positionChanged()));
connect(ui->checkBox_hidable, SIGNAL(toggled(bool)), this, SLOT(editChanged()));
connect(ui->checkBox_visibleMargin, SIGNAL(toggled(bool)), this, SLOT(editChanged()));
connect(ui->spinBox_animation, SIGNAL(valueChanged(int)), this, SLOT(editChanged()));
connect(ui->spinBox_delay, SIGNAL(valueChanged(int)), this, SLOT(editChanged()));
connect(ui->checkBox_customFontColor, SIGNAL(toggled(bool)), this, SLOT(editChanged()));
connect(ui->pushButton_customFontColor, SIGNAL(clicked(bool)), this, SLOT(pickFontColor()));
connect(ui->checkBox_customBgColor, SIGNAL(toggled(bool)), this, SLOT(editChanged()));
connect(ui->pushButton_customBgColor, SIGNAL(clicked(bool)), this, SLOT(pickBackgroundColor()));
connect(ui->checkBox_customBgImage, SIGNAL(toggled(bool)), this, SLOT(editChanged()));
connect(ui->lineEdit_customBgImage, SIGNAL(textChanged(QString)), this, SLOT(editChanged()));
connect(ui->pushButton_customBgImage, SIGNAL(clicked(bool)), this, SLOT(pickBackgroundImage()));
connect(ui->slider_opacity, &QSlider::valueChanged, this, &ConfigPanelWidget::editChanged);
connect(ui->checkBox_reserveSpace, &QAbstractButton::toggled, [this](bool checked) { mPanel->setReserveSpace(checked, true); });
connect(ui->groupBox_icon, &QGroupBox::clicked, this, &ConfigPanelWidget::editChanged);
connect(ui->comboBox_icon, QOverload<int>::of(&QComboBox::activated), this, &ConfigPanelWidget::editChanged);
}
/************************************************
*
************************************************/
void ConfigPanelWidget::reset()
{
ui->spinBox_panelSize->setValue(mOldPanelSize);
ui->spinBox_iconSize->setValue(mOldIconSize);
ui->spinBox_lineCount->setValue(mOldLineCount);
ui->comboBox_position->setCurrentIndex(indexForPosition(mOldScreenNum, mOldPosition));
ui->checkBox_hidable->setChecked(mOldHidable);
ui->checkBox_visibleMargin->setChecked(mOldVisibleMargin);
ui->spinBox_animation->setValue(mOldAnimation);
ui->spinBox_delay->setValue(mOldShowDelay);
fillComboBox_alignment();
ui->comboBox_alignment->setCurrentIndex(mOldAlignment + 1);
ui->comboBox_lenghtType->setCurrentIndex(mOldLengthInPercents ? 0 : 1);
widthTypeChanged();
ui->spinBox_length->setValue(mOldLength);
mFontColor.setNamedColor(mOldFontColor.name());
ui->pushButton_customFontColor->setStyleSheet(QString("background: %1").arg(mOldFontColor.name()));
mBackgroundColor.setNamedColor(mOldBackgroundColor.name());
ui->pushButton_customBgColor->setStyleSheet(QString("background: %1").arg(mOldBackgroundColor.name()));
ui->lineEdit_customBgImage->setText(mOldBackgroundImage);
ui->slider_opacity->setValue(mOldOpacity);
ui->checkBox_reserveSpace->setChecked(mOldReserveSpace);
ui->checkBox_customFontColor->setChecked(mOldFontColor.isValid());
ui->checkBox_customBgColor->setChecked(mOldBackgroundColor.isValid());
ui->checkBox_customBgImage->setChecked(QFileInfo(mOldBackgroundImage).exists());
// update position
positionChanged();
}
/************************************************
*
************************************************/
void ConfigPanelWidget::fillComboBox_position()
{
int screenCount = QApplication::desktop()->screenCount();
if (screenCount == 1)
{
addPosition(tr("Top of desktop"), 0, LXQtPanel::PositionTop);
addPosition(tr("Left of desktop"), 0, LXQtPanel::PositionLeft);
addPosition(tr("Right of desktop"), 0, LXQtPanel::PositionRight);
addPosition(tr("Bottom of desktop"), 0, LXQtPanel::PositionBottom);
}
else
{
for (int screenNum = 0; screenNum < screenCount; screenNum++)
{
if (screenNum)
ui->comboBox_position->insertSeparator(9999);
addPosition(tr("Top of desktop %1").arg(screenNum +1), screenNum, LXQtPanel::PositionTop);
addPosition(tr("Left of desktop %1").arg(screenNum +1), screenNum, LXQtPanel::PositionLeft);
addPosition(tr("Right of desktop %1").arg(screenNum +1), screenNum, LXQtPanel::PositionRight);
addPosition(tr("Bottom of desktop %1").arg(screenNum +1), screenNum, LXQtPanel::PositionBottom);
}
}
}
/************************************************
*
************************************************/
void ConfigPanelWidget::fillComboBox_alignment()
{
ui->comboBox_alignment->setItemData(0, QVariant(LXQtPanel::AlignmentLeft));
ui->comboBox_alignment->setItemData(1, QVariant(LXQtPanel::AlignmentCenter));
ui->comboBox_alignment->setItemData(2, QVariant(LXQtPanel::AlignmentRight));
if (mPosition == ILXQtPanel::PositionTop ||
mPosition == ILXQtPanel::PositionBottom)
{
ui->comboBox_alignment->setItemText(0, tr("Left"));
ui->comboBox_alignment->setItemText(1, tr("Center"));
ui->comboBox_alignment->setItemText(2, tr("Right"));
}
else
{
ui->comboBox_alignment->setItemText(0, tr("Top"));
ui->comboBox_alignment->setItemText(1, tr("Center"));
ui->comboBox_alignment->setItemText(2, tr("Bottom"));
};
}
/************************************************
*
************************************************/
void ConfigPanelWidget::fillComboBox_icon()
{
ui->groupBox_icon->setChecked(!mPanel->iconTheme().isEmpty());
QStringList themeList;
QStringList processed;
const QStringList baseDirs = QIcon::themeSearchPaths();
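    // Scan every icon-theme search path and keep only directories that contain a
    // valid, non-hidden index.theme file.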
for (const QString &baseDirName : baseDirs)
{
QDir baseDir(baseDirName);
if (!baseDir.exists())
continue;
const QFileInfoList dirs = baseDir.entryInfoList(QDir::AllDirs | QDir::NoDotAndDotDot, QDir::Name);
for (const QFileInfo &dir : dirs)
{
if (!processed.contains(dir.canonicalFilePath()))
{
processed << dir.canonicalFilePath();
QDir Dir(dir.canonicalFilePath());
QSettings file(Dir.absoluteFilePath(QStringLiteral("index.theme")), QSettings::IniFormat);
if (file.status() == QSettings::NoError
&& !file.value(QStringLiteral("Icon Theme/Directories")).toStringList().join(QLatin1Char(' ')).isEmpty()
&& !file.value(QStringLiteral("Icon Theme/Hidden"), false).toBool())
{
themeList << Dir.dirName();
}
}
}
}
if (!themeList.isEmpty())
{
themeList.sort();
ui->comboBox_icon->insertItems(0, themeList);
QString curTheme = QIcon::themeName();
if (!curTheme.isEmpty())
ui->comboBox_icon->setCurrentText(curTheme);
}
}
/************************************************
*
************************************************/
void ConfigPanelWidget::updateIconThemeSettings()
{
ui->groupBox_icon->setChecked(!mPanel->iconTheme().isEmpty());
QString curTheme = QIcon::themeName();
if (!curTheme.isEmpty())
ui->comboBox_icon->setCurrentText(curTheme);
}
/************************************************
*
************************************************/
void ConfigPanelWidget::addPosition(const QString& name, int screen, LXQtPanel::Position position)
{
if (LXQtPanel::canPlacedOn(screen, position))
ui->comboBox_position->addItem(name, QVariant::fromValue((ScreenPosition){screen, position}));
}
/************************************************
*
************************************************/
int ConfigPanelWidget::indexForPosition(int screen, ILXQtPanel::Position position)
{
for (int i = 0; i < ui->comboBox_position->count(); i++)
{
ScreenPosition sp = ui->comboBox_position->itemData(i).value<ScreenPosition>();
if (screen == sp.screen && position == sp.position)
return i;
}
return -1;
}
/************************************************
*
************************************************/
ConfigPanelWidget::~ConfigPanelWidget()
{
delete ui;
}
/************************************************
*
************************************************/
void ConfigPanelWidget::editChanged()
{
mPanel->setPanelSize(ui->spinBox_panelSize->value(), true);
mPanel->setIconSize(ui->spinBox_iconSize->value(), true);
mPanel->setLineCount(ui->spinBox_lineCount->value(), true);
mPanel->setLength(ui->spinBox_length->value(),
ui->comboBox_lenghtType->currentIndex() == 0,
true);
LXQtPanel::Alignment align = LXQtPanel::Alignment(
ui->comboBox_alignment->itemData(
ui->comboBox_alignment->currentIndex()
).toInt());
mPanel->setAlignment(align, true);
mPanel->setPosition(mScreenNum, mPosition, true);
mPanel->setHidable(ui->checkBox_hidable->isChecked(), true);
mPanel->setVisibleMargin(ui->checkBox_visibleMargin->isChecked(), true);
mPanel->setAnimationTime(ui->spinBox_animation->value(), true);
mPanel->setShowDelay(ui->spinBox_delay->value(), true);
mPanel->setFontColor(ui->checkBox_customFontColor->isChecked() ? mFontColor : QColor(), true);
if (ui->checkBox_customBgColor->isChecked())
{
mPanel->setBackgroundColor(mBackgroundColor, true);
mPanel->setOpacity(ui->slider_opacity->value(), true);
}
else
{
mPanel->setBackgroundColor(QColor(), true);
mPanel->setOpacity(100, true);
}
QString image = ui->checkBox_customBgImage->isChecked() ? ui->lineEdit_customBgImage->text() : QString();
mPanel->setBackgroundImage(image, true);
if (!ui->groupBox_icon->isChecked())
mPanel->setIconTheme(QString());
else if (!ui->comboBox_icon->currentText().isEmpty())
mPanel->setIconTheme(ui->comboBox_icon->currentText());
}
/************************************************
*
************************************************/
void ConfigPanelWidget::widthTypeChanged()
{
int max = getMaxLength();
if (ui->comboBox_lenghtType->currentIndex() == 0)
{
// Percents .............................
int v = ui->spinBox_length->value() * 100.0 / max;
ui->spinBox_length->setRange(1, 100);
ui->spinBox_length->setValue(v);
}
else
{
// Pixels ...............................
int v = max / 100.0 * ui->spinBox_length->value();
ui->spinBox_length->setRange(-max, max);
ui->spinBox_length->setValue(v);
}
}
/************************************************
*
************************************************/
int ConfigPanelWidget::getMaxLength()
{
QDesktopWidget* dw = QApplication::desktop();
if (mPosition == ILXQtPanel::PositionTop ||
mPosition == ILXQtPanel::PositionBottom)
return dw->screenGeometry(mScreenNum).width();
else
return dw->screenGeometry(mScreenNum).height();
}
/************************************************
*
************************************************/
void ConfigPanelWidget::positionChanged()
{
ScreenPosition sp = ui->comboBox_position->itemData(
ui->comboBox_position->currentIndex()).value<ScreenPosition>();
bool updateAlig = (sp.position == ILXQtPanel::PositionTop ||
sp.position == ILXQtPanel::PositionBottom) !=
(mPosition == ILXQtPanel::PositionTop ||
mPosition == ILXQtPanel::PositionBottom);
int oldMax = getMaxLength();
mPosition = sp.position;
mScreenNum = sp.screen;
int newMax = getMaxLength();
if (ui->comboBox_lenghtType->currentIndex() == 1 &&
oldMax != newMax)
{
// Pixels ...............................
int v = ui->spinBox_length->value() * 1.0 * newMax / oldMax;
ui->spinBox_length->setMaximum(newMax);
ui->spinBox_length->setValue(v);
}
if (updateAlig)
fillComboBox_alignment();
editChanged();
}
/************************************************
*
************************************************/
void ConfigPanelWidget::pickFontColor()
{
QColorDialog d(QColor(mFontColor.name()), this);
d.setWindowTitle(tr("Pick color"));
d.setWindowModality(Qt::WindowModal);
if (d.exec() && d.currentColor().isValid())
{
mFontColor.setNamedColor(d.currentColor().name());
ui->pushButton_customFontColor->setStyleSheet(QString("background: %1").arg(mFontColor.name()));
editChanged();
}
}
/************************************************
*
************************************************/
void ConfigPanelWidget::pickBackgroundColor()
{
QColorDialog d(QColor(mBackgroundColor.name()), this);
d.setWindowTitle(tr("Pick color"));
d.setWindowModality(Qt::WindowModal);
if (d.exec() && d.currentColor().isValid())
{
mBackgroundColor.setNamedColor(d.currentColor().name());
ui->pushButton_customBgColor->setStyleSheet(QString("background: %1").arg(mBackgroundColor.name()));
editChanged();
}
}
/************************************************
*
************************************************/
void ConfigPanelWidget::pickBackgroundImage()
{
QString picturesLocation;
picturesLocation = QStandardPaths::writableLocation(QStandardPaths::PicturesLocation);
QFileDialog* d = new QFileDialog(this, tr("Pick image"), picturesLocation, tr("Images (*.png *.gif *.jpg)"));
d->setAttribute(Qt::WA_DeleteOnClose);
d->setWindowModality(Qt::WindowModal);
connect(d, &QFileDialog::fileSelected, ui->lineEdit_customBgImage, &QLineEdit::setText);
d->show();
}
| 1 | 6,381 | Didn't get to the commit message | lxqt-lxqt-panel | cpp |
@@ -0,0 +1,14 @@
+class DesignForDevelopersResourcesController < ApplicationController
+ layout "d4d_resources"
+
+ def index
+ end
+
+ def show
+ if template_exists?(params[:id], params[:controller])
+ render params[:id]
+ else
+ raise ActiveRecord::RecordNotFound
+ end
+ end
+end | 1 | 1 | 6,669 | What about raising `ActionView::MissingTemplate` instead? That's what HighVoltage does. | thoughtbot-upcase | rb |
|
@@ -527,6 +527,7 @@ void nano::json_handler::account_balance ()
auto balance (node.balance_pending (account, include_only_confirmed));
response_l.put ("balance", balance.first.convert_to<std::string> ());
response_l.put ("pending", balance.second.convert_to<std::string> ());
+ response_l.put ("receivable", balance.second.convert_to<std::string> ());
}
response_errors ();
} | 1 | #include <nano/lib/config.hpp>
#include <nano/lib/json_error_response.hpp>
#include <nano/lib/timer.hpp>
#include <nano/node/bootstrap/bootstrap_lazy.hpp>
#include <nano/node/common.hpp>
#include <nano/node/election.hpp>
#include <nano/node/json_handler.hpp>
#include <nano/node/node.hpp>
#include <nano/node/node_rpc_config.hpp>
#include <nano/node/telemetry.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <algorithm>
#include <chrono>
namespace
{
void construct_json (nano::container_info_component * component, boost::property_tree::ptree & parent);
using ipc_json_handler_no_arg_func_map = std::unordered_map<std::string, std::function<void (nano::json_handler *)>>;
ipc_json_handler_no_arg_func_map create_ipc_json_handler_no_arg_func_map ();
auto ipc_json_handler_no_arg_funcs = create_ipc_json_handler_no_arg_func_map ();
bool block_confirmed (nano::node & node, nano::transaction & transaction, nano::block_hash const & hash, bool include_active, bool include_only_confirmed);
const char * epoch_as_string (nano::epoch);
}
nano::json_handler::json_handler (nano::node & node_a, nano::node_rpc_config const & node_rpc_config_a, std::string const & body_a, std::function<void (std::string const &)> const & response_a, std::function<void ()> stop_callback_a) :
body (body_a),
node (node_a),
response (response_a),
stop_callback (stop_callback_a),
node_rpc_config (node_rpc_config_a)
{
}
std::function<void ()> nano::json_handler::create_worker_task (std::function<void (std::shared_ptr<nano::json_handler> const &)> const & action_a)
{
return [rpc_l = shared_from_this (), action_a] () {
try
{
action_a (rpc_l);
}
catch (std::runtime_error const &)
{
json_error_response (rpc_l->response, "Unable to parse JSON");
}
catch (...)
{
json_error_response (rpc_l->response, "Internal server error in RPC");
}
};
}
void nano::json_handler::process_request (bool unsafe_a)
{
try
{
std::stringstream istream (body);
boost::property_tree::read_json (istream, request);
if (node_rpc_config.request_callback)
{
debug_assert (nano::network_constants ().is_dev_network ());
node_rpc_config.request_callback (request);
}
action = request.get<std::string> ("action");
auto no_arg_func_iter = ipc_json_handler_no_arg_funcs.find (action);
if (no_arg_func_iter != ipc_json_handler_no_arg_funcs.cend ())
{
// First try the map of options with no arguments
no_arg_func_iter->second (this);
}
else
{
// Try the rest of the options
if (action == "wallet_seed")
{
if (unsafe_a || node.network_params.network.is_dev_network ())
{
wallet_seed ();
}
else
{
json_error_response (response, "Unsafe RPC not allowed");
}
}
else if (action == "chain")
{
chain ();
}
else if (action == "successors")
{
chain (true);
}
else if (action == "history")
{
response_l.put ("deprecated", "1");
request.put ("head", request.get<std::string> ("hash"));
account_history ();
}
else if (action == "knano_from_raw" || action == "krai_from_raw")
{
mnano_from_raw (nano::kxrb_ratio);
}
else if (action == "knano_to_raw" || action == "krai_to_raw")
{
mnano_to_raw (nano::kxrb_ratio);
}
else if (action == "rai_from_raw")
{
mnano_from_raw (nano::xrb_ratio);
}
else if (action == "rai_to_raw")
{
mnano_to_raw (nano::xrb_ratio);
}
else if (action == "mnano_from_raw" || action == "mrai_from_raw")
{
mnano_from_raw ();
}
else if (action == "mnano_to_raw" || action == "mrai_to_raw")
{
mnano_to_raw ();
}
else if (action == "nano_to_raw")
{
nano_to_raw ();
}
else if (action == "raw_to_nano")
{
raw_to_nano ();
}
else if (action == "password_valid")
{
password_valid ();
}
else if (action == "wallet_locked")
{
password_valid (true);
}
else
{
json_error_response (response, "Unknown command");
}
}
}
catch (std::runtime_error const &)
{
json_error_response (response, "Unable to parse JSON");
}
catch (...)
{
json_error_response (response, "Internal server error in RPC");
}
}
void nano::json_handler::response_errors ()
{
if (!ec && response_l.empty ())
{
// Return an error code if no response data was given
ec = nano::error_rpc::empty_response;
}
if (ec)
{
boost::property_tree::ptree response_error;
response_error.put ("error", ec.message ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_error);
response (ostream.str ());
}
else
{
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
response (ostream.str ());
}
}
std::shared_ptr<nano::wallet> nano::json_handler::wallet_impl ()
{
if (!ec)
{
std::string wallet_text (request.get<std::string> ("wallet"));
nano::wallet_id wallet;
if (!wallet.decode_hex (wallet_text))
{
if (auto existing = node.wallets.open (wallet); existing != nullptr)
{
return existing;
}
else
{
ec = nano::error_common::wallet_not_found;
}
}
else
{
ec = nano::error_common::bad_wallet_number;
}
}
return nullptr;
}
bool nano::json_handler::wallet_locked_impl (nano::transaction const & transaction_a, std::shared_ptr<nano::wallet> const & wallet_a)
{
bool result (false);
if (!ec)
{
if (!wallet_a->store.valid_password (transaction_a))
{
ec = nano::error_common::wallet_locked;
result = true;
}
}
return result;
}
bool nano::json_handler::wallet_account_impl (nano::transaction const & transaction_a, std::shared_ptr<nano::wallet> const & wallet_a, nano::account const & account_a)
{
bool result (false);
if (!ec)
{
if (wallet_a->store.find (transaction_a, account_a) != wallet_a->store.end ())
{
result = true;
}
else
{
ec = nano::error_common::account_not_found_wallet;
}
}
return result;
}
nano::account nano::json_handler::account_impl (std::string account_text, std::error_code ec_a)
{
nano::account result (0);
if (!ec)
{
if (account_text.empty ())
{
account_text = request.get<std::string> ("account");
}
if (result.decode_account (account_text))
{
ec = ec_a;
}
else if (account_text[3] == '-' || account_text[4] == '-')
{
// nano- and xrb- prefixes are deprecated
response_l.put ("deprecated_account_format", "1");
}
}
return result;
}
nano::account_info nano::json_handler::account_info_impl (nano::transaction const & transaction_a, nano::account const & account_a)
{
nano::account_info result;
if (!ec)
{
if (node.store.account.get (transaction_a, account_a, result))
{
ec = nano::error_common::account_not_found;
node.bootstrap_initiator.bootstrap_lazy (account_a, false, false, account_a.to_account ());
}
}
return result;
}
nano::amount nano::json_handler::amount_impl ()
{
nano::amount result (0);
if (!ec)
{
std::string amount_text (request.get<std::string> ("amount"));
if (result.decode_dec (amount_text))
{
ec = nano::error_common::invalid_amount;
}
}
return result;
}
std::shared_ptr<nano::block> nano::json_handler::block_impl (bool signature_work_required)
{
const bool json_block_l = request.get<bool> ("json_block", false);
std::shared_ptr<nano::block> result{ nullptr };
if (!ec)
{
boost::property_tree::ptree block_l;
if (json_block_l)
{
block_l = request.get_child ("block");
}
else
{
std::string block_text (request.get<std::string> ("block"));
std::stringstream block_stream (block_text);
try
{
boost::property_tree::read_json (block_stream, block_l);
}
catch (...)
{
ec = nano::error_blocks::invalid_block;
}
}
if (!ec)
{
if (!signature_work_required)
{
block_l.put ("signature", "0");
block_l.put ("work", "0");
}
result = nano::deserialize_block_json (block_l);
if (result == nullptr)
{
ec = nano::error_blocks::invalid_block;
}
}
}
return result;
}
nano::block_hash nano::json_handler::hash_impl (std::string search_text)
{
nano::block_hash result (0);
if (!ec)
{
std::string hash_text (request.get<std::string> (search_text));
if (result.decode_hex (hash_text))
{
ec = nano::error_blocks::invalid_block_hash;
}
}
return result;
}
nano::amount nano::json_handler::threshold_optional_impl ()
{
nano::amount result (0);
boost::optional<std::string> threshold_text (request.get_optional<std::string> ("threshold"));
if (!ec && threshold_text.is_initialized ())
{
if (result.decode_dec (threshold_text.get ()))
{
ec = nano::error_common::bad_threshold;
}
}
return result;
}
uint64_t nano::json_handler::work_optional_impl ()
{
uint64_t result (0);
boost::optional<std::string> work_text (request.get_optional<std::string> ("work"));
if (!ec && work_text.is_initialized ())
{
if (nano::from_string_hex (work_text.get (), result))
{
ec = nano::error_common::bad_work_format;
}
}
return result;
}
uint64_t nano::json_handler::difficulty_optional_impl (nano::work_version const version_a)
{
auto difficulty (node.default_difficulty (version_a));
boost::optional<std::string> difficulty_text (request.get_optional<std::string> ("difficulty"));
if (!ec && difficulty_text.is_initialized ())
{
if (nano::from_string_hex (difficulty_text.get (), difficulty))
{
ec = nano::error_rpc::bad_difficulty_format;
}
}
return difficulty;
}
uint64_t nano::json_handler::difficulty_ledger (nano::block const & block_a)
{
nano::block_details details (nano::epoch::epoch_0, false, false, false);
bool details_found (false);
auto transaction (node.store.tx_begin_read ());
// Previous block find
std::shared_ptr<nano::block> block_previous (nullptr);
auto previous (block_a.previous ());
if (!previous.is_zero ())
{
block_previous = node.store.block.get (transaction, previous);
}
// Send check
if (block_previous != nullptr)
{
details.is_send = node.store.block.balance (transaction, previous) > block_a.balance ().number ();
details_found = true;
}
// Epoch check
if (block_previous != nullptr)
{
details.epoch = block_previous->sideband ().details.epoch;
}
auto link (block_a.link ());
if (!link.is_zero () && !details.is_send)
{
auto block_link (node.store.block.get (transaction, link.as_block_hash ()));
if (block_link != nullptr && node.store.pending.exists (transaction, nano::pending_key (block_a.account (), link.as_block_hash ())))
{
details.epoch = std::max (details.epoch, block_link->sideband ().details.epoch);
details.is_receive = true;
details_found = true;
}
}
return details_found ? nano::work_threshold (block_a.work_version (), details) : node.default_difficulty (block_a.work_version ());
}
double nano::json_handler::multiplier_optional_impl (nano::work_version const version_a, uint64_t & difficulty)
{
double multiplier (1.);
boost::optional<std::string> multiplier_text (request.get_optional<std::string> ("multiplier"));
if (!ec && multiplier_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<double> (multiplier_text.get (), multiplier);
if (success && multiplier > 0.)
{
difficulty = nano::difficulty::from_multiplier (multiplier, node.default_difficulty (version_a));
}
else
{
ec = nano::error_rpc::bad_multiplier_format;
}
}
return multiplier;
}
nano::work_version nano::json_handler::work_version_optional_impl (nano::work_version const default_a)
{
nano::work_version result = default_a;
boost::optional<std::string> version_text (request.get_optional<std::string> ("version"));
if (!ec && version_text.is_initialized ())
{
if (*version_text == nano::to_string (nano::work_version::work_1))
{
result = nano::work_version::work_1;
}
else
{
ec = nano::error_rpc::bad_work_version;
}
}
return result;
}
namespace
{
bool decode_unsigned (std::string const & text, uint64_t & number)
{
bool result;
size_t end;
try
{
number = std::stoull (text, &end);
result = false;
}
catch (std::invalid_argument const &)
{
result = true;
}
catch (std::out_of_range const &)
{
result = true;
}
result = result || end != text.size ();
return result;
}
}
uint64_t nano::json_handler::count_impl ()
{
uint64_t result (0);
if (!ec)
{
std::string count_text (request.get<std::string> ("count"));
if (decode_unsigned (count_text, result) || result == 0)
{
ec = nano::error_common::invalid_count;
}
}
return result;
}
uint64_t nano::json_handler::count_optional_impl (uint64_t result)
{
boost::optional<std::string> count_text (request.get_optional<std::string> ("count"));
if (!ec && count_text.is_initialized ())
{
if (decode_unsigned (count_text.get (), result))
{
ec = nano::error_common::invalid_count;
}
}
return result;
}
uint64_t nano::json_handler::offset_optional_impl (uint64_t result)
{
boost::optional<std::string> offset_text (request.get_optional<std::string> ("offset"));
if (!ec && offset_text.is_initialized ())
{
if (decode_unsigned (offset_text.get (), result))
{
ec = nano::error_rpc::invalid_offset;
}
}
return result;
}
void nano::json_handler::account_balance ()
{
auto account (account_impl ());
if (!ec)
{
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", true);
auto balance (node.balance_pending (account, include_only_confirmed));
response_l.put ("balance", balance.first.convert_to<std::string> ());
response_l.put ("pending", balance.second.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::account_block_count ()
{
auto account (account_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
if (!ec)
{
response_l.put ("block_count", std::to_string (info.block_count));
}
}
response_errors ();
}
void nano::json_handler::account_create ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
const bool generate_work = rpc_l->request.get<bool> ("work", true);
nano::account new_key;
auto index_text (rpc_l->request.get_optional<std::string> ("index"));
if (index_text.is_initialized ())
{
uint64_t index;
if (decode_unsigned (index_text.get (), index) || index > static_cast<uint64_t> (std::numeric_limits<uint32_t>::max ()))
{
rpc_l->ec = nano::error_common::invalid_index;
}
else
{
new_key = wallet->deterministic_insert (static_cast<uint32_t> (index), generate_work);
}
}
else
{
new_key = wallet->deterministic_insert (generate_work);
}
if (!rpc_l->ec)
{
if (!new_key.is_zero ())
{
rpc_l->response_l.put ("account", new_key.to_account ());
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::account_get ()
{
std::string key_text (request.get<std::string> ("key"));
nano::public_key pub;
if (!pub.decode_hex (key_text))
{
response_l.put ("account", pub.to_account ());
}
else
{
ec = nano::error_common::bad_public_key;
}
response_errors ();
}
void nano::json_handler::account_info ()
{
auto account (account_impl ());
if (!ec)
{
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
const bool include_confirmed = request.get<bool> ("include_confirmed", false);
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
nano::confirmation_height_info confirmation_height_info;
node.store.confirmation_height.get (transaction, account, confirmation_height_info);
if (!ec)
{
response_l.put ("frontier", info.head.to_string ());
response_l.put ("open_block", info.open_block.to_string ());
response_l.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
nano::amount balance_l (info.balance);
std::string balance;
balance_l.encode_dec (balance);
response_l.put ("balance", balance);
nano::amount confirmed_balance_l;
if (include_confirmed)
{
if (info.block_count != confirmation_height_info.height)
{
confirmed_balance_l = node.ledger.balance (transaction, confirmation_height_info.frontier);
}
else
{
// block_height and confirmed height are the same, so can just reuse balance
confirmed_balance_l = balance_l;
}
std::string confirmed_balance;
confirmed_balance_l.encode_dec (confirmed_balance);
response_l.put ("confirmed_balance", confirmed_balance);
}
response_l.put ("modified_timestamp", std::to_string (info.modified));
response_l.put ("block_count", std::to_string (info.block_count));
response_l.put ("account_version", epoch_as_string (info.epoch ()));
auto confirmed_frontier = confirmation_height_info.frontier.to_string ();
if (include_confirmed)
{
response_l.put ("confirmed_height", std::to_string (confirmation_height_info.height));
response_l.put ("confirmed_frontier", confirmed_frontier);
}
else
{
// For backwards compatibility purposes
response_l.put ("confirmation_height", std::to_string (confirmation_height_info.height));
response_l.put ("confirmation_height_frontier", confirmed_frontier);
}
std::shared_ptr<nano::block> confirmed_frontier_block;
if (include_confirmed && confirmation_height_info.height > 0)
{
confirmed_frontier_block = node.store.block.get (transaction, confirmation_height_info.frontier);
}
if (representative)
{
response_l.put ("representative", info.representative.to_account ());
if (include_confirmed)
{
nano::account confirmed_representative{ 0 };
if (confirmed_frontier_block)
{
confirmed_representative = confirmed_frontier_block->representative ();
if (confirmed_representative.is_zero ())
{
confirmed_representative = node.store.block.get (transaction, node.ledger.representative (transaction, confirmation_height_info.frontier))->representative ();
}
}
response_l.put ("confirmed_representative", confirmed_representative.to_account ());
}
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_l.put ("weight", account_weight.convert_to<std::string> ());
}
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
response_l.put ("pending", account_pending.convert_to<std::string> ());
if (include_confirmed)
{
auto account_pending (node.ledger.account_pending (transaction, account, true));
response_l.put ("confirmed_pending", account_pending.convert_to<std::string> ());
}
}
}
}
response_errors ();
}
void nano::json_handler::account_key ()
{
auto account (account_impl ());
if (!ec)
{
response_l.put ("key", account.to_string ());
}
response_errors ();
}
void nano::json_handler::account_list ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree accounts;
auto transaction (node.wallets.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), j (wallet->store.end ()); i != j; ++i)
{
boost::property_tree::ptree entry;
entry.put ("", nano::account (i->first).to_account ());
accounts.push_back (std::make_pair ("", entry));
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::account_move ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string source_text (rpc_l->request.get<std::string> ("source"));
auto accounts_text (rpc_l->request.get_child ("accounts"));
nano::wallet_id source;
if (!source.decode_hex (source_text))
{
auto existing (rpc_l->node.wallets.items.find (source));
if (existing != rpc_l->node.wallets.items.end ())
{
auto source (existing->second);
std::vector<nano::public_key> accounts;
for (auto i (accounts_text.begin ()), n (accounts_text.end ()); i != n; ++i)
{
auto account (rpc_l->account_impl (i->second.get<std::string> ("")));
accounts.push_back (account);
}
auto transaction (rpc_l->node.wallets.tx_begin_write ());
auto error (wallet->store.move (transaction, source->store, accounts));
rpc_l->response_l.put ("moved", error ? "0" : "1");
}
else
{
rpc_l->ec = nano::error_rpc::source_not_found;
}
}
else
{
rpc_l->ec = nano::error_rpc::bad_source;
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::account_remove ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
wallet->store.erase (transaction, account);
rpc_l->response_l.put ("removed", "1");
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::account_representative ()
{
auto account (account_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
if (!ec)
{
response_l.put ("representative", info.representative.to_account ());
}
}
response_errors ();
}
void nano::json_handler::account_representative_set ()
{
node.workers.push_task (create_worker_task ([work_generation_enabled = node.work_generation_enabled ()] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
std::string representative_text (rpc_l->request.get<std::string> ("representative"));
auto representative (rpc_l->account_impl (representative_text, nano::error_rpc::bad_representative_number));
if (!rpc_l->ec)
{
auto work (rpc_l->work_optional_impl ());
if (!rpc_l->ec && work)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
auto block_transaction (rpc_l->node.store.tx_begin_read ());
auto info (rpc_l->account_info_impl (block_transaction, account));
if (!rpc_l->ec)
{
nano::block_details details (info.epoch (), false, false, false);
if (nano::work_difficulty (nano::work_version::work_1, info.head, work) < nano::work_threshold (nano::work_version::work_1, details))
{
rpc_l->ec = nano::error_common::invalid_work;
}
}
}
}
else if (!rpc_l->ec) // work == 0
{
if (!work_generation_enabled)
{
rpc_l->ec = nano::error_common::disabled_work_generation;
}
}
if (!rpc_l->ec)
{
bool generate_work (work == 0); // Disable work generation if "work" option is provided
auto response_a (rpc_l->response);
auto response_data (std::make_shared<boost::property_tree::ptree> (rpc_l->response_l));
wallet->change_async (
account, representative, [response_a, response_data] (std::shared_ptr<nano::block> const & block) {
if (block != nullptr)
{
response_data->put ("block", block->hash ().to_string ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, *response_data);
response_a (ostream.str ());
}
else
{
json_error_response (response_a, "Error generating block");
}
},
work, generate_work);
}
}
// Because of change_async
if (rpc_l->ec)
{
rpc_l->response_errors ();
}
}));
}
void nano::json_handler::account_weight ()
{
auto account (account_impl ());
if (!ec)
{
auto balance (node.weight (account));
response_l.put ("weight", balance.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::accounts_balances ()
{
boost::property_tree::ptree balances;
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
boost::property_tree::ptree entry;
auto balance (node.balance_pending (account, false));
entry.put ("balance", balance.first.convert_to<std::string> ());
entry.put ("pending", balance.second.convert_to<std::string> ());
balances.push_back (std::make_pair (account.to_account (), entry));
}
}
response_l.add_child ("balances", balances);
response_errors ();
}
void nano::json_handler::accounts_create ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
auto count (rpc_l->count_impl ());
if (!rpc_l->ec)
{
const bool generate_work = rpc_l->request.get<bool> ("work", false);
boost::property_tree::ptree accounts;
for (auto i (0); accounts.size () < count; ++i)
{
nano::account new_key (wallet->deterministic_insert (generate_work));
if (!new_key.is_zero ())
{
boost::property_tree::ptree entry;
entry.put ("", new_key.to_account ());
accounts.push_back (std::make_pair ("", entry));
}
}
rpc_l->response_l.add_child ("accounts", accounts);
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::accounts_frontiers ()
{
boost::property_tree::ptree frontiers;
auto transaction (node.store.tx_begin_read ());
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
auto latest (node.ledger.latest (transaction, account));
if (!latest.is_zero ())
{
frontiers.put (account.to_account (), latest.to_string ());
}
}
}
response_l.add_child ("frontiers", frontiers);
response_errors ();
}
void nano::json_handler::accounts_pending ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", true);
const bool sorting = request.get<bool> ("sorting", false);
auto simple (threshold.is_zero () && !source && !sorting); // if simple, response is a list of hashes for each account
boost::property_tree::ptree pending;
auto transaction (node.store.tx_begin_read ());
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
boost::property_tree::ptree peers_l;
for (auto i (node.store.pending.begin (transaction, nano::pending_key (account, 0))), n (node.store.pending.end ()); i != n && nano::pending_key (i->first).account == account && peers_l.size () < count; ++i)
{
nano::pending_key const & key (i->first);
if (block_confirmed (node, transaction, key.hash, include_active, include_only_confirmed))
{
if (simple)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info const & info (i->second);
if (info.amount.number () >= threshold.number ())
{
if (source)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
pending_tree.put ("source", info.source.to_account ());
peers_l.add_child (key.hash.to_string (), pending_tree);
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
if (sorting && !simple)
{
if (source)
{
peers_l.sort ([] (const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("amount") > child2.second.template get<nano::uint128_t> ("amount");
});
}
else
{
peers_l.sort ([] (const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("") > child2.second.template get<nano::uint128_t> ("");
});
}
}
if (!peers_l.empty ())
{
pending.add_child (account.to_account (), peers_l);
}
}
}
response_l.add_child ("blocks", pending);
response_errors ();
}
void nano::json_handler::active_difficulty ()
{
auto include_trend (request.get<bool> ("include_trend", false));
auto const multiplier_active = 1.0;
auto const default_difficulty (node.default_difficulty (nano::work_version::work_1));
auto const default_receive_difficulty (node.default_receive_difficulty (nano::work_version::work_1));
auto const receive_current_denormalized (nano::denormalized_multiplier (multiplier_active, node.network_params.network.publish_thresholds.epoch_2_receive));
response_l.put ("deprecated", "1");
response_l.put ("network_minimum", nano::to_string_hex (default_difficulty));
response_l.put ("network_receive_minimum", nano::to_string_hex (default_receive_difficulty));
response_l.put ("network_current", nano::to_string_hex (nano::difficulty::from_multiplier (multiplier_active, default_difficulty)));
response_l.put ("network_receive_current", nano::to_string_hex (nano::difficulty::from_multiplier (receive_current_denormalized, default_receive_difficulty)));
response_l.put ("multiplier", 1.0);
if (include_trend)
{
boost::property_tree::ptree difficulty_trend_l;
// To keep this RPC backwards-compatible
boost::property_tree::ptree entry;
entry.put ("", "1.000000000000000");
difficulty_trend_l.push_back (std::make_pair ("", entry));
response_l.add_child ("difficulty_trend", difficulty_trend_l);
}
response_errors ();
}
void nano::json_handler::available_supply ()
{
auto genesis_balance (node.balance (node.network_params.ledger.genesis->account ())); // Cold storage genesis
auto landing_balance (node.balance (nano::account ("059F68AAB29DE0D3A27443625C7EA9CDDB6517A8B76FE37727EF6A4D76832AD5"))); // Active unavailable account
auto faucet_balance (node.balance (nano::account ("8E319CE6F3025E5B2DF66DA7AB1467FE48F1679C13DD43BFDB29FA2E9FC40D3B"))); // Faucet account
auto burned_balance ((node.balance_pending (nano::account (0), false)).second); // Burning 0 account
auto available (nano::dev::constants.genesis_amount - genesis_balance - landing_balance - faucet_balance - burned_balance);
response_l.put ("available", available.convert_to<std::string> ());
response_errors ();
}
void nano::json_handler::block_info ()
{
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block.get (transaction, hash));
if (block != nullptr)
{
nano::account account (block->account ().is_zero () ? block->sideband ().account : block->account ());
response_l.put ("block_account", account.to_account ());
bool error_or_pruned (false);
auto amount (node.ledger.amount_safe (transaction, hash, error_or_pruned));
if (!error_or_pruned)
{
response_l.put ("amount", amount.convert_to<std::string> ());
}
auto balance (node.ledger.balance (transaction, hash));
response_l.put ("balance", balance.convert_to<std::string> ());
response_l.put ("height", std::to_string (block->sideband ().height));
response_l.put ("local_timestamp", std::to_string (block->sideband ().timestamp));
response_l.put ("successor", block->sideband ().successor.to_string ());
auto confirmed (node.ledger.block_confirmed (transaction, hash));
response_l.put ("confirmed", confirmed);
bool json_block_l = request.get<bool> ("json_block", false);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
response_l.add_child ("contents", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
response_l.put ("contents", contents);
}
if (block->type () == nano::block_type::state)
{
auto subtype (nano::state_subtype (block->sideband ().details));
response_l.put ("subtype", subtype);
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::block_confirm ()
{
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto block_l (node.store.block.get (transaction, hash));
if (block_l != nullptr)
{
if (!node.ledger.block_confirmed (transaction, hash))
{
// Start new confirmation for unconfirmed (or not being confirmed) block
if (!node.confirmation_height_processor.is_processing_block (hash))
{
node.block_confirm (std::move (block_l));
}
}
else
{
// Add record in confirmation history for confirmed block
nano::election_status status{ block_l, 0, 0, std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()), std::chrono::duration_values<std::chrono::milliseconds>::zero (), 0, 1, 0, nano::election_status_type::active_confirmation_height };
node.active.add_recently_cemented (status);
// Trigger callback for confirmed block
node.block_arrival.add (hash);
auto account (node.ledger.account (transaction, hash));
bool error_or_pruned (false);
auto amount (node.ledger.amount_safe (transaction, hash, error_or_pruned));
bool is_state_send (false);
if (!error_or_pruned)
{
if (auto state = dynamic_cast<nano::state_block *> (block_l.get ()))
{
is_state_send = node.ledger.is_send (transaction, *state);
}
}
node.observers.blocks.notify (status, {}, account, amount, is_state_send);
}
response_l.put ("started", "1");
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::blocks ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
for (boost::property_tree::ptree::value_type & hashes : request.get_child ("hashes"))
{
if (!ec)
{
std::string hash_text = hashes.second.data ();
nano::block_hash hash;
if (!hash.decode_hex (hash_text))
{
auto block (node.store.block.get (transaction, hash));
if (block != nullptr)
{
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
blocks.add_child (hash_text, block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
blocks.put (hash_text, contents);
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
}
response_l.add_child ("blocks", blocks);
response_errors ();
}
void nano::json_handler::blocks_info ()
{
const bool pending = request.get<bool> ("pending", false);
const bool source = request.get<bool> ("source", false);
const bool json_block_l = request.get<bool> ("json_block", false);
const bool include_not_found = request.get<bool> ("include_not_found", false);
boost::property_tree::ptree blocks;
boost::property_tree::ptree blocks_not_found;
auto transaction (node.store.tx_begin_read ());
for (boost::property_tree::ptree::value_type & hashes : request.get_child ("hashes"))
{
if (!ec)
{
std::string hash_text = hashes.second.data ();
nano::block_hash hash;
if (!hash.decode_hex (hash_text))
{
auto block (node.store.block.get (transaction, hash));
if (block != nullptr)
{
boost::property_tree::ptree entry;
nano::account account (block->account ().is_zero () ? block->sideband ().account : block->account ());
entry.put ("block_account", account.to_account ());
bool error_or_pruned (false);
auto amount (node.ledger.amount_safe (transaction, hash, error_or_pruned));
if (!error_or_pruned)
{
entry.put ("amount", amount.convert_to<std::string> ());
}
auto balance (node.ledger.balance (transaction, hash));
entry.put ("balance", balance.convert_to<std::string> ());
entry.put ("height", std::to_string (block->sideband ().height));
entry.put ("local_timestamp", std::to_string (block->sideband ().timestamp));
entry.put ("successor", block->sideband ().successor.to_string ());
auto confirmed (node.ledger.block_confirmed (transaction, hash));
entry.put ("confirmed", confirmed);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
entry.put ("contents", contents);
}
if (block->type () == nano::block_type::state)
{
auto subtype (nano::state_subtype (block->sideband ().details));
entry.put ("subtype", subtype);
}
if (pending)
{
bool exists (false);
auto destination (node.ledger.block_destination (transaction, *block));
if (!destination.is_zero ())
{
exists = node.store.pending.exists (transaction, nano::pending_key (destination, hash));
}
entry.put ("pending", exists ? "1" : "0");
}
if (source)
{
nano::block_hash source_hash (node.ledger.block_source (transaction, *block));
auto block_a (node.store.block.get (transaction, source_hash));
if (block_a != nullptr)
{
auto source_account (node.ledger.account (transaction, source_hash));
entry.put ("source_account", source_account.to_account ());
}
else
{
entry.put ("source_account", "0");
}
}
blocks.push_back (std::make_pair (hash_text, entry));
}
else if (include_not_found)
{
boost::property_tree::ptree entry;
entry.put ("", hash_text);
blocks_not_found.push_back (std::make_pair ("", entry));
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
}
if (!ec)
{
response_l.add_child ("blocks", blocks);
if (include_not_found)
{
response_l.add_child ("blocks_not_found", blocks_not_found);
}
}
response_errors ();
}
void nano::json_handler::block_account ()
{
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
if (node.store.block.exists (transaction, hash))
{
auto account (node.ledger.account (transaction, hash));
response_l.put ("account", account.to_account ());
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::block_count ()
{
response_l.put ("count", std::to_string (node.ledger.cache.block_count));
response_l.put ("unchecked", std::to_string (node.store.unchecked.count (node.store.tx_begin_read ())));
response_l.put ("cemented", std::to_string (node.ledger.cache.cemented_count));
if (node.flags.enable_pruning)
{
response_l.put ("full", std::to_string (node.ledger.cache.block_count - node.ledger.cache.pruned_count));
response_l.put ("pruned", std::to_string (node.ledger.cache.pruned_count));
}
response_errors ();
}
void nano::json_handler::block_create ()
{
std::string type (request.get<std::string> ("type"));
nano::wallet_id wallet (0);
// Default to work_1 if not specified
auto work_version (work_version_optional_impl (nano::work_version::work_1));
auto difficulty_l (difficulty_optional_impl (work_version));
boost::optional<std::string> wallet_text (request.get_optional<std::string> ("wallet"));
if (!ec && wallet_text.is_initialized ())
{
if (wallet.decode_hex (wallet_text.get ()))
{
ec = nano::error_common::bad_wallet_number;
}
}
nano::account account (0);
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (!ec && account_text.is_initialized ())
{
account = account_impl (account_text.get ());
}
nano::account representative (0);
boost::optional<std::string> representative_text (request.get_optional<std::string> ("representative"));
if (!ec && representative_text.is_initialized ())
{
representative = account_impl (representative_text.get (), nano::error_rpc::bad_representative_number);
}
nano::account destination (0);
boost::optional<std::string> destination_text (request.get_optional<std::string> ("destination"));
if (!ec && destination_text.is_initialized ())
{
destination = account_impl (destination_text.get (), nano::error_rpc::bad_destination);
}
nano::block_hash source (0);
boost::optional<std::string> source_text (request.get_optional<std::string> ("source"));
if (!ec && source_text.is_initialized ())
{
if (source.decode_hex (source_text.get ()))
{
ec = nano::error_rpc::bad_source;
}
}
nano::amount amount (0);
boost::optional<std::string> amount_text (request.get_optional<std::string> ("amount"));
if (!ec && amount_text.is_initialized ())
{
if (amount.decode_dec (amount_text.get ()))
{
ec = nano::error_common::invalid_amount;
}
}
auto work (work_optional_impl ());
nano::raw_key prv;
prv.clear ();
nano::block_hash previous (0);
nano::amount balance (0);
if (work == 0 && !node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
if (!ec && wallet != 0 && account != 0)
{
auto existing (node.wallets.items.find (wallet));
if (existing != node.wallets.items.end ())
{
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
wallet_locked_impl (transaction, existing->second);
wallet_account_impl (transaction, existing->second, account);
if (!ec)
{
existing->second->store.fetch (transaction, account, prv);
previous = node.ledger.latest (block_transaction, account);
balance = node.ledger.account_balance (block_transaction, account);
}
}
else
{
ec = nano::error_common::wallet_not_found;
}
}
boost::optional<std::string> key_text (request.get_optional<std::string> ("key"));
if (!ec && key_text.is_initialized ())
{
if (prv.decode_hex (key_text.get ()))
{
ec = nano::error_common::bad_private_key;
}
}
boost::optional<std::string> previous_text (request.get_optional<std::string> ("previous"));
if (!ec && previous_text.is_initialized ())
{
if (previous.decode_hex (previous_text.get ()))
{
ec = nano::error_rpc::bad_previous;
}
}
boost::optional<std::string> balance_text (request.get_optional<std::string> ("balance"));
if (!ec && balance_text.is_initialized ())
{
if (balance.decode_dec (balance_text.get ()))
{
ec = nano::error_rpc::invalid_balance;
}
}
nano::link link (0);
boost::optional<std::string> link_text (request.get_optional<std::string> ("link"));
if (!ec && link_text.is_initialized ())
{
if (link.decode_account (link_text.get ()))
{
if (link.decode_hex (link_text.get ()))
{
ec = nano::error_rpc::bad_link;
}
}
}
else
{
// Retrieve link from source or destination
if (source.is_zero ())
{
link = destination;
}
else
{
link = source;
}
}
if (!ec)
{
auto rpc_l (shared_from_this ());
// Serializes the block contents to the RPC response
auto block_response_put_l = [rpc_l, this] (nano::block const & block_a) {
boost::property_tree::ptree response_l;
response_l.put ("hash", block_a.hash ().to_string ());
response_l.put ("difficulty", nano::to_string_hex (block_a.difficulty ()));
bool json_block_l = request.get<bool> ("json_block", false);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block_a.serialize_json (block_node_l);
response_l.add_child ("block", block_node_l);
}
else
{
std::string contents;
block_a.serialize_json (contents);
response_l.put ("block", contents);
}
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
rpc_l->response (ostream.str ());
};
// Wrapper from argument to lambda capture, to extend the block's scope
auto get_callback_l = [rpc_l, block_response_put_l] (std::shared_ptr<nano::block> const & block_a) {
// Callback upon work generation success or failure
return [block_a, rpc_l, block_response_put_l] (boost::optional<uint64_t> const & work_a) {
if (block_a != nullptr)
{
if (work_a.is_initialized ())
{
block_a->block_work_set (*work_a);
block_response_put_l (*block_a);
}
else
{
rpc_l->ec = nano::error_common::failure_work_generation;
}
}
else
{
rpc_l->ec = nano::error_common::generic;
}
if (rpc_l->ec)
{
rpc_l->response_errors ();
}
};
};
if (prv != 0)
{
nano::account pub (nano::pub_key (prv));
// Fetch account balance & previous for send blocks (if they aren't given directly)
if (!previous_text.is_initialized () && !balance_text.is_initialized ())
{
auto transaction (node.store.tx_begin_read ());
previous = node.ledger.latest (transaction, pub);
balance = node.ledger.account_balance (transaction, pub);
}
// Double check current balance if previous block is specified
else if (previous_text.is_initialized () && balance_text.is_initialized () && type == "send")
{
auto transaction (node.store.tx_begin_read ());
if (node.store.block.exists (transaction, previous) && node.store.block.balance (transaction, previous) != balance.number ())
{
ec = nano::error_rpc::block_create_balance_mismatch;
}
}
// Check for incorrect account key
if (!ec && account_text.is_initialized ())
{
if (account != pub)
{
ec = nano::error_rpc::block_create_public_key_mismatch;
}
}
nano::block_builder builder_l;
std::shared_ptr<nano::block> block_l{ nullptr };
nano::root root_l;
std::error_code ec_build;
if (type == "state")
{
if (previous_text.is_initialized () && !representative.is_zero () && (!link.is_zero () || link_text.is_initialized ()))
{
block_l = builder_l.state ()
.account (pub)
.previous (previous)
.representative (representative)
.balance (balance)
.link (link)
.sign (prv, pub)
.build (ec_build);
if (previous.is_zero ())
{
root_l = pub;
}
else
{
root_l = previous;
}
}
else
{
ec = nano::error_rpc::block_create_requirements_state;
}
}
else if (type == "open")
{
if (representative != 0 && source != 0)
{
block_l = builder_l.open ()
.account (pub)
.source (source)
.representative (representative)
.sign (prv, pub)
.build (ec_build);
root_l = pub;
}
else
{
ec = nano::error_rpc::block_create_requirements_open;
}
}
else if (type == "receive")
{
if (source != 0 && previous != 0)
{
block_l = builder_l.receive ()
.previous (previous)
.source (source)
.sign (prv, pub)
.build (ec_build);
root_l = previous;
}
else
{
ec = nano::error_rpc::block_create_requirements_receive;
}
}
else if (type == "change")
{
if (representative != 0 && previous != 0)
{
block_l = builder_l.change ()
.previous (previous)
.representative (representative)
.sign (prv, pub)
.build (ec_build);
root_l = previous;
}
else
{
ec = nano::error_rpc::block_create_requirements_change;
}
}
else if (type == "send")
{
if (destination != 0 && previous != 0 && balance != 0 && amount != 0)
{
if (balance.number () >= amount.number ())
{
block_l = builder_l.send ()
.previous (previous)
.destination (destination)
.balance (balance.number () - amount.number ())
.sign (prv, pub)
.build (ec_build);
root_l = previous;
}
else
{
ec = nano::error_common::insufficient_balance;
}
}
else
{
ec = nano::error_rpc::block_create_requirements_send;
}
}
else
{
ec = nano::error_blocks::invalid_type;
}
if (!ec && (!ec_build || ec_build == nano::error_common::missing_work))
{
if (work == 0)
{
// Difficulty calculation
if (request.count ("difficulty") == 0)
{
difficulty_l = difficulty_ledger (*block_l);
}
node.work_generate (work_version, root_l, difficulty_l, get_callback_l (block_l), nano::account (pub));
}
else
{
block_l->block_work_set (work);
block_response_put_l (*block_l);
}
}
}
else
{
ec = nano::error_rpc::block_create_key_required;
}
}
// Because of callback
if (ec)
{
response_errors ();
}
}
void nano::json_handler::block_hash ()
{
auto block (block_impl (true));
if (!ec)
{
response_l.put ("hash", block->hash ().to_string ());
}
response_errors ();
}
void nano::json_handler::bootstrap ()
{
std::string address_text = request.get<std::string> ("address");
std::string port_text = request.get<std::string> ("port");
boost::system::error_code address_ec;
auto address (boost::asio::ip::make_address_v6 (address_text, address_ec));
if (!address_ec)
{
uint16_t port;
if (!nano::parse_port (port_text, port))
{
if (!node.flags.disable_legacy_bootstrap)
{
std::string bootstrap_id (request.get<std::string> ("id", ""));
node.bootstrap_initiator.bootstrap (nano::endpoint (address, port), true, bootstrap_id);
response_l.put ("success", "");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_legacy;
}
}
else
{
ec = nano::error_common::invalid_port;
}
}
else
{
ec = nano::error_common::invalid_ip_address;
}
response_errors ();
}
void nano::json_handler::bootstrap_any ()
{
const bool force = request.get<bool> ("force", false);
if (!node.flags.disable_legacy_bootstrap)
{
nano::account start_account (0);
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (account_text.is_initialized ())
{
start_account = account_impl (account_text.get ());
}
std::string bootstrap_id (request.get<std::string> ("id", ""));
node.bootstrap_initiator.bootstrap (force, bootstrap_id, std::numeric_limits<uint32_t>::max (), start_account);
response_l.put ("success", "");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_legacy;
}
response_errors ();
}
void nano::json_handler::bootstrap_lazy ()
{
auto hash (hash_impl ());
const bool force = request.get<bool> ("force", false);
if (!ec)
{
if (!node.flags.disable_lazy_bootstrap)
{
auto existed (node.bootstrap_initiator.current_lazy_attempt () != nullptr);
std::string bootstrap_id (request.get<std::string> ("id", ""));
auto key_inserted (node.bootstrap_initiator.bootstrap_lazy (hash, force, true, bootstrap_id));
bool started = !existed && key_inserted;
response_l.put ("started", started ? "1" : "0");
response_l.put ("key_inserted", key_inserted ? "1" : "0");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_lazy;
}
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::bootstrap_status ()
{
auto attempts_count (node.bootstrap_initiator.attempts.size ());
response_l.put ("bootstrap_threads", std::to_string (node.config.bootstrap_initiator_threads));
response_l.put ("running_attempts_count", std::to_string (attempts_count));
response_l.put ("total_attempts_count", std::to_string (node.bootstrap_initiator.attempts.incremental));
boost::property_tree::ptree connections;
{
nano::lock_guard<nano::mutex> connections_lock (node.bootstrap_initiator.connections->mutex);
connections.put ("clients", std::to_string (node.bootstrap_initiator.connections->clients.size ()));
connections.put ("connections", std::to_string (node.bootstrap_initiator.connections->connections_count));
connections.put ("idle", std::to_string (node.bootstrap_initiator.connections->idle.size ()));
connections.put ("target_connections", std::to_string (node.bootstrap_initiator.connections->target_connections (node.bootstrap_initiator.connections->pulls.size (), attempts_count)));
connections.put ("pulls", std::to_string (node.bootstrap_initiator.connections->pulls.size ()));
}
response_l.add_child ("connections", connections);
boost::property_tree::ptree attempts;
{
nano::lock_guard<nano::mutex> attempts_lock (node.bootstrap_initiator.attempts.bootstrap_attempts_mutex);
for (auto i : node.bootstrap_initiator.attempts.attempts)
{
boost::property_tree::ptree entry;
auto & attempt (i.second);
entry.put ("id", attempt->id);
entry.put ("mode", attempt->mode_text ());
entry.put ("started", static_cast<bool> (attempt->started));
entry.put ("pulling", std::to_string (attempt->pulling));
entry.put ("total_blocks", std::to_string (attempt->total_blocks));
entry.put ("requeued_pulls", std::to_string (attempt->requeued_pulls));
attempt->get_information (entry);
entry.put ("duration", std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - attempt->attempt_start).count ());
attempts.push_back (std::make_pair ("", entry));
}
}
response_l.add_child ("attempts", attempts);
response_errors ();
}
void nano::json_handler::chain (bool successors)
{
successors = successors != request.get<bool> ("reverse", false);
auto hash (hash_impl ("block"));
auto count (count_impl ());
auto offset (offset_optional_impl (0));
if (!ec)
{
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
while (!hash.is_zero () && blocks.size () < count)
{
auto block_l (node.store.block.get (transaction, hash));
if (block_l != nullptr)
{
if (offset > 0)
{
--offset;
}
else
{
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
}
hash = successors ? node.store.block.successor (transaction, hash) : block_l->previous ();
}
else
{
hash.clear ();
}
}
response_l.add_child ("blocks", blocks);
}
response_errors ();
}
void nano::json_handler::confirmation_active ()
{
uint64_t announcements (0);
uint64_t confirmed (0);
boost::optional<std::string> announcements_text (request.get_optional<std::string> ("announcements"));
if (announcements_text.is_initialized ())
{
announcements = strtoul (announcements_text.get ().c_str (), NULL, 10);
}
boost::property_tree::ptree elections;
auto active_elections = node.active.list_active ();
for (auto const & election : active_elections)
{
if (election->confirmation_request_count >= announcements)
{
if (!election->confirmed ())
{
boost::property_tree::ptree entry;
entry.put ("", election->qualified_root.to_string ());
elections.push_back (std::make_pair ("", entry));
}
else
{
++confirmed;
}
}
}
response_l.add_child ("confirmations", elections);
response_l.put ("unconfirmed", elections.size ());
response_l.put ("confirmed", confirmed);
response_errors ();
}
void nano::json_handler::confirmation_height_currently_processing ()
{
auto hash = node.confirmation_height_processor.current ();
if (!hash.is_zero ())
{
response_l.put ("hash", hash.to_string ());
}
else
{
ec = nano::error_rpc::confirmation_height_not_processing;
}
response_errors ();
}
void nano::json_handler::confirmation_history ()
{
boost::property_tree::ptree elections;
boost::property_tree::ptree confirmation_stats;
std::chrono::milliseconds running_total (0);
nano::block_hash hash (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("hash"));
if (hash_text.is_initialized ())
{
hash = hash_impl ();
}
if (!ec)
{
for (auto const & status : node.active.list_recently_cemented ())
{
if (hash.is_zero () || status.winner->hash () == hash)
{
boost::property_tree::ptree election;
election.put ("hash", status.winner->hash ().to_string ());
election.put ("duration", status.election_duration.count ());
election.put ("time", status.election_end.count ());
election.put ("tally", status.tally.to_string_dec ());
election.add ("final", status.final_tally.to_string_dec ());
election.put ("blocks", std::to_string (status.block_count));
election.put ("voters", std::to_string (status.voter_count));
election.put ("request_count", std::to_string (status.confirmation_request_count));
elections.push_back (std::make_pair ("", election));
}
running_total += status.election_duration;
}
}
confirmation_stats.put ("count", elections.size ());
if (elections.size () >= 1)
{
confirmation_stats.put ("average", (running_total.count ()) / elections.size ());
}
response_l.add_child ("confirmation_stats", confirmation_stats);
response_l.add_child ("confirmations", elections);
response_errors ();
}
void nano::json_handler::confirmation_info ()
{
const bool representatives = request.get<bool> ("representatives", false);
const bool contents = request.get<bool> ("contents", true);
const bool json_block_l = request.get<bool> ("json_block", false);
std::string root_text (request.get<std::string> ("root"));
nano::qualified_root root;
if (!root.decode_hex (root_text))
{
auto election (node.active.election (root));
if (election != nullptr && !election->confirmed ())
{
auto info = election->current_status ();
response_l.put ("announcements", std::to_string (info.status.confirmation_request_count));
response_l.put ("voters", std::to_string (info.votes.size ()));
response_l.put ("last_winner", info.status.winner->hash ().to_string ());
nano::uint128_t total (0);
boost::property_tree::ptree blocks;
for (auto const & [tally, block] : info.tally)
{
boost::property_tree::ptree entry;
entry.put ("tally", tally.convert_to<std::string> ());
total += tally;
if (contents)
{
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
entry.put ("contents", contents);
}
}
if (representatives)
{
std::multimap<nano::uint128_t, nano::account, std::greater<nano::uint128_t>> representatives;
for (auto const & [representative, vote] : info.votes)
{
if (block->hash () == vote.hash)
{
auto amount (node.ledger.cache.rep_weights.representation_get (representative));
representatives.emplace (std::move (amount), representative);
}
}
boost::property_tree::ptree representatives_list;
for (auto const & [amount, representative] : representatives)
{
representatives_list.put (representative.to_account (), amount.convert_to<std::string> ());
}
entry.add_child ("representatives", representatives_list);
}
blocks.add_child ((block->hash ()).to_string (), entry);
}
response_l.put ("total_tally", total.convert_to<std::string> ());
response_l.put ("final_tally", info.status.final_tally.to_string_dec ());
response_l.add_child ("blocks", blocks);
}
else
{
ec = nano::error_rpc::confirmation_not_found;
}
}
else
{
ec = nano::error_rpc::invalid_root;
}
response_errors ();
}
void nano::json_handler::confirmation_quorum ()
{
response_l.put ("quorum_delta", node.online_reps.delta ().convert_to<std::string> ());
response_l.put ("online_weight_quorum_percent", std::to_string (node.online_reps.online_weight_quorum));
response_l.put ("online_weight_minimum", node.config.online_weight_minimum.to_string_dec ());
response_l.put ("online_stake_total", node.online_reps.online ().convert_to<std::string> ());
response_l.put ("trended_stake_total", node.online_reps.trended ().convert_to<std::string> ());
response_l.put ("peers_stake_total", node.rep_crawler.total_weight ().convert_to<std::string> ());
if (request.get<bool> ("peer_details", false))
{
boost::property_tree::ptree peers;
for (auto & peer : node.rep_crawler.representatives ())
{
boost::property_tree::ptree peer_node;
peer_node.put ("account", peer.account.to_account ());
peer_node.put ("ip", peer.channel->to_string ());
peer_node.put ("weight", peer.weight.to_string_dec ());
peers.push_back (std::make_pair ("", peer_node));
}
response_l.add_child ("peers", peers);
}
response_errors ();
}
void nano::json_handler::database_txn_tracker ()
{
boost::property_tree::ptree json;
if (node.config.diagnostics_config.txn_tracking.enable)
{
unsigned min_read_time_milliseconds = 0;
boost::optional<std::string> min_read_time_text (request.get_optional<std::string> ("min_read_time"));
if (min_read_time_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<unsigned> (*min_read_time_text, min_read_time_milliseconds);
if (!success)
{
ec = nano::error_common::invalid_amount;
}
}
unsigned min_write_time_milliseconds = 0;
if (!ec)
{
boost::optional<std::string> min_write_time_text (request.get_optional<std::string> ("min_write_time"));
if (min_write_time_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<unsigned> (*min_write_time_text, min_write_time_milliseconds);
if (!success)
{
ec = nano::error_common::invalid_amount;
}
}
}
if (!ec)
{
node.store.serialize_mdb_tracker (json, std::chrono::milliseconds (min_read_time_milliseconds), std::chrono::milliseconds (min_write_time_milliseconds));
response_l.put_child ("txn_tracking", json);
}
}
else
{
ec = nano::error_common::tracking_not_enabled;
}
response_errors ();
}
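/*
 * Illustrative example for delegators (a sketch only; the account strings and raw
 * balance are hypothetical placeholders and the "action" dispatch field is assumed):
 * Request:  { "action": "delegators", "account": "nano_1representative...", "count": "2" }
 * Response: { "delegators": { "nano_1delegator...": "<balance in raw>" } }
 */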
void nano::json_handler::delegators ()
{
auto representative (account_impl ());
auto count (count_optional_impl (1024));
auto threshold (threshold_optional_impl ());
auto start_account_text (request.get_optional<std::string> ("start"));
nano::account start_account (0);
if (!ec && start_account_text.is_initialized ())
{
start_account = account_impl (start_account_text.get ());
}
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
boost::property_tree::ptree delegators;
for (auto i (node.store.account.begin (transaction, start_account.number () + 1)), n (node.store.account.end ()); i != n && delegators.size () < count; ++i)
{
nano::account_info const & info (i->second);
if (info.representative == representative)
{
if (info.balance.number () >= threshold.number ())
{
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
nano::account const & delegator (i->first);
delegators.put (delegator.to_account (), balance);
}
}
}
response_l.add_child ("delegators", delegators);
}
response_errors ();
}
void nano::json_handler::delegators_count ()
{
auto account (account_impl ());
if (!ec)
{
uint64_t count (0);
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.account.begin (transaction)), n (node.store.account.end ()); i != n; ++i)
{
nano::account_info const & info (i->second);
if (info.representative == account)
{
++count;
}
}
response_l.put ("count", std::to_string (count));
}
response_errors ();
}
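/*
 * Illustrative example for deterministic_key (a sketch only; the seed and derived key
 * values are hypothetical placeholders and the "action" dispatch field is assumed):
 * Request:  { "action": "deterministic_key", "seed": "<64 hex chars>", "index": "0" }
 * Response: { "private": "<64 hex chars>", "public": "<64 hex chars>", "account": "nano_1..." }
 */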
void nano::json_handler::deterministic_key ()
{
std::string seed_text (request.get<std::string> ("seed"));
std::string index_text (request.get<std::string> ("index"));
nano::raw_key seed;
if (!seed.decode_hex (seed_text))
{
try
{
uint32_t index (std::stoul (index_text));
nano::raw_key prv = nano::deterministic_key (seed, index);
nano::public_key pub (nano::pub_key (prv));
response_l.put ("private", prv.to_string ());
response_l.put ("public", pub.to_string ());
response_l.put ("account", pub.to_account ());
}
catch (std::logic_error const &)
{
ec = nano::error_common::invalid_index;
}
}
else
{
ec = nano::error_common::bad_seed;
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::epoch_upgrade ()
{
nano::epoch epoch (nano::epoch::invalid);
uint8_t epoch_int (request.get<uint8_t> ("epoch"));
switch (epoch_int)
{
case 1:
epoch = nano::epoch::epoch_1;
break;
case 2:
epoch = nano::epoch::epoch_2;
break;
default:
break;
}
if (epoch != nano::epoch::invalid)
{
uint64_t count_limit (count_optional_impl ());
uint64_t threads (0);
boost::optional<std::string> threads_text (request.get_optional<std::string> ("threads"));
if (!ec && threads_text.is_initialized ())
{
if (decode_unsigned (threads_text.get (), threads))
{
ec = nano::error_rpc::invalid_threads_count;
}
}
std::string key_text (request.get<std::string> ("key"));
nano::raw_key prv;
if (!prv.decode_hex (key_text))
{
if (nano::pub_key (prv) == node.ledger.epoch_signer (node.ledger.epoch_link (epoch)))
{
if (!node.epoch_upgrader (prv, epoch, count_limit, threads))
{
response_l.put ("started", "1");
}
else
{
response_l.put ("started", "0");
}
}
else
{
ec = nano::error_rpc::invalid_epoch_signer;
}
}
else
{
ec = nano::error_common::bad_private_key;
}
}
else
{
ec = nano::error_rpc::invalid_epoch;
}
response_errors ();
}
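/*
 * Illustrative example for frontiers (a sketch only; placeholders are hypothetical
 * and the "action" dispatch field is assumed):
 * Request:  { "action": "frontiers", "account": "nano_1...", "count": "1" }
 * Response: { "frontiers": { "nano_1...": "<head block hash>" } }
 */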
void nano::json_handler::frontiers ()
{
auto start (account_impl ());
auto count (count_impl ());
if (!ec)
{
boost::property_tree::ptree frontiers;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.account.begin (transaction, start)), n (node.store.account.end ()); i != n && frontiers.size () < count; ++i)
{
frontiers.put (i->first.to_account (), i->second.head.to_string ());
}
response_l.add_child ("frontiers", frontiers);
}
response_errors ();
}
void nano::json_handler::account_count ()
{
auto size (node.ledger.cache.account_count.load ());
response_l.put ("count", std::to_string (size));
response_errors ();
}
namespace
{
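// Block visitor used by account_history: renders a single block into the ptree entry
// for the response, honouring the optional raw output and the account filter.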
class history_visitor : public nano::block_visitor
{
public:
history_visitor (nano::json_handler & handler_a, bool raw_a, nano::transaction & transaction_a, boost::property_tree::ptree & tree_a, nano::block_hash const & hash_a, std::vector<nano::public_key> const & accounts_filter_a) :
handler (handler_a),
raw (raw_a),
transaction (transaction_a),
tree (tree_a),
hash (hash_a),
accounts_filter (accounts_filter_a)
{
}
virtual ~history_visitor () = default;
void send_block (nano::send_block const & block_a)
{
if (should_ignore_account (block_a.hashables.destination))
{
return;
}
tree.put ("type", "send");
auto account (block_a.hashables.destination.to_account ());
tree.put ("account", account);
bool error_or_pruned (false);
auto amount (handler.node.ledger.amount_safe (transaction, hash, error_or_pruned).convert_to<std::string> ());
if (!error_or_pruned)
{
tree.put ("amount", amount);
}
if (raw)
{
tree.put ("destination", account);
tree.put ("balance", block_a.hashables.balance.to_string_dec ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void receive_block (nano::receive_block const & block_a)
{
tree.put ("type", "receive");
bool error_or_pruned (false);
auto amount (handler.node.ledger.amount_safe (transaction, hash, error_or_pruned).convert_to<std::string> ());
if (!error_or_pruned)
{
auto source_account (handler.node.ledger.account_safe (transaction, block_a.hashables.source, error_or_pruned));
if (!error_or_pruned)
{
tree.put ("account", source_account.to_account ());
}
tree.put ("amount", amount);
}
if (raw)
{
tree.put ("source", block_a.hashables.source.to_string ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void open_block (nano::open_block const & block_a)
{
if (raw)
{
tree.put ("type", "open");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("source", block_a.hashables.source.to_string ());
tree.put ("opened", block_a.hashables.account.to_account ());
}
else
{
// Report opens as a receive
tree.put ("type", "receive");
}
if (block_a.hashables.source != network_params.ledger.genesis->account ())
{
bool error_or_pruned (false);
auto amount (handler.node.ledger.amount_safe (transaction, hash, error_or_pruned).convert_to<std::string> ());
if (!error_or_pruned)
{
auto source_account (handler.node.ledger.account_safe (transaction, block_a.hashables.source, error_or_pruned));
if (!error_or_pruned)
{
tree.put ("account", source_account.to_account ());
}
tree.put ("amount", amount);
}
}
else
{
tree.put ("account", network_params.ledger.genesis->account ().to_account ());
tree.put ("amount", nano::dev::constants.genesis_amount.convert_to<std::string> ());
}
}
void change_block (nano::change_block const & block_a)
{
if (raw && accounts_filter.empty ())
{
tree.put ("type", "change");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void state_block (nano::state_block const & block_a)
{
if (raw)
{
tree.put ("type", "state");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("link", block_a.hashables.link.to_string ());
tree.put ("balance", block_a.hashables.balance.to_string_dec ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
auto balance (block_a.hashables.balance.number ());
bool error_or_pruned (false);
auto previous_balance (handler.node.ledger.balance_safe (transaction, block_a.hashables.previous, error_or_pruned));
if (error_or_pruned)
{
if (raw)
{
tree.put ("subtype", "unknown");
}
else
{
tree.put ("type", "unknown");
}
}
else if (balance < previous_balance)
{
if (should_ignore_account (block_a.hashables.link.as_account ()))
{
tree.clear ();
return;
}
if (raw)
{
tree.put ("subtype", "send");
}
else
{
tree.put ("type", "send");
}
tree.put ("account", block_a.hashables.link.to_account ());
tree.put ("amount", (previous_balance - balance).convert_to<std::string> ());
}
else
{
if (block_a.hashables.link.is_zero ())
{
if (raw && accounts_filter.empty ())
{
tree.put ("subtype", "change");
}
}
else if (balance == previous_balance && handler.node.ledger.is_epoch_link (block_a.hashables.link))
{
if (raw && accounts_filter.empty ())
{
tree.put ("subtype", "epoch");
tree.put ("account", handler.node.ledger.epoch_signer (block_a.link ()).to_account ());
}
}
else
{
auto source_account (handler.node.ledger.account_safe (transaction, block_a.hashables.link.as_block_hash (), error_or_pruned));
if (!error_or_pruned && should_ignore_account (source_account))
{
tree.clear ();
return;
}
if (raw)
{
tree.put ("subtype", "receive");
}
else
{
tree.put ("type", "receive");
}
if (!error_or_pruned)
{
tree.put ("account", source_account.to_account ());
}
tree.put ("amount", (balance - previous_balance).convert_to<std::string> ());
}
}
}
bool should_ignore_account (nano::public_key const & account)
{
bool ignore (false);
if (!accounts_filter.empty ())
{
if (std::find (accounts_filter.begin (), accounts_filter.end (), account) == accounts_filter.end ())
{
ignore = true;
}
}
return ignore;
}
nano::json_handler & handler;
bool raw;
nano::transaction & transaction;
boost::property_tree::ptree & tree;
nano::block_hash const & hash;
nano::network_params network_params;
std::vector<nano::public_key> const & accounts_filter;
};
}
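/*
 * Illustrative example for account_history (a sketch only; hashes, accounts and amounts
 * are hypothetical placeholders and the "action" dispatch field is assumed):
 * Request:  { "action": "account_history", "account": "nano_1...", "count": "1" }
 * Response: { "account": "nano_1...",
 *             "history": [ { "type": "send", "account": "nano_3...", "amount": "<raw>",
 *                            "local_timestamp": "...", "height": "...", "hash": "<block hash>" } ],
 *             "previous": "<next older block hash>" }
 */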
void nano::json_handler::account_history ()
{
std::vector<nano::public_key> accounts_to_filter;
const auto accounts_filter_node = request.get_child_optional ("account_filter");
if (accounts_filter_node.is_initialized ())
{
for (auto & a : (*accounts_filter_node))
{
auto account (account_impl (a.second.get<std::string> ("")));
if (!ec)
{
accounts_to_filter.push_back (account);
}
else
{
break;
}
}
}
nano::account account;
nano::block_hash hash;
bool reverse (request.get_optional<bool> ("reverse") == true);
auto head_str (request.get_optional<std::string> ("head"));
auto transaction (node.store.tx_begin_read ());
auto count (count_impl ());
auto offset (offset_optional_impl (0));
if (head_str)
{
if (!hash.decode_hex (*head_str))
{
if (node.store.block.exists (transaction, hash))
{
account = node.ledger.account (transaction, hash);
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
else
{
account = account_impl ();
if (!ec)
{
if (reverse)
{
auto info (account_info_impl (transaction, account));
if (!ec)
{
hash = info.open_block;
}
}
else
{
hash = node.ledger.latest (transaction, account);
}
}
}
if (!ec)
{
boost::property_tree::ptree history;
bool output_raw (request.get_optional<bool> ("raw") == true);
response_l.put ("account", account.to_account ());
auto block (node.store.block.get (transaction, hash));
while (block != nullptr && count > 0)
{
if (offset > 0)
{
--offset;
}
else
{
boost::property_tree::ptree entry;
history_visitor visitor (*this, output_raw, transaction, entry, hash, accounts_to_filter);
block->visit (visitor);
if (!entry.empty ())
{
entry.put ("local_timestamp", std::to_string (block->sideband ().timestamp));
entry.put ("height", std::to_string (block->sideband ().height));
entry.put ("hash", hash.to_string ());
if (output_raw)
{
entry.put ("work", nano::to_string_hex (block->block_work ()));
entry.put ("signature", block->block_signature ().to_string ());
}
history.push_back (std::make_pair ("", entry));
--count;
}
}
hash = reverse ? node.store.block.successor (transaction, hash) : block->previous ();
block = node.store.block.get (transaction, hash);
}
response_l.add_child ("history", history);
if (!hash.is_zero ())
{
response_l.put (reverse ? "next" : "previous", hash.to_string ());
}
}
response_errors ();
}
void nano::json_handler::keepalive ()
{
if (!ec)
{
std::string address_text (request.get<std::string> ("address"));
std::string port_text (request.get<std::string> ("port"));
uint16_t port;
if (!nano::parse_port (port_text, port))
{
node.keepalive (address_text, port);
response_l.put ("started", "1");
}
else
{
ec = nano::error_common::invalid_port;
}
}
response_errors ();
}
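/*
 * Illustrative example for key_create (a sketch only; the generated values are random
 * and the "action" dispatch field is assumed):
 * Request:  { "action": "key_create" }
 * Response: { "private": "<64 hex chars>", "public": "<64 hex chars>", "account": "nano_1..." }
 */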
void nano::json_handler::key_create ()
{
nano::keypair pair;
response_l.put ("private", pair.prv.to_string ());
response_l.put ("public", pair.pub.to_string ());
response_l.put ("account", pair.pub.to_account ());
response_errors ();
}
void nano::json_handler::key_expand ()
{
std::string key_text (request.get<std::string> ("key"));
nano::raw_key prv;
if (!prv.decode_hex (key_text))
{
nano::public_key pub (nano::pub_key (prv));
response_l.put ("private", prv.to_string ());
response_l.put ("public", pub.to_string ());
response_l.put ("account", pub.to_account ());
}
else
{
ec = nano::error_common::bad_private_key;
}
response_errors ();
}
void nano::json_handler::ledger ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
if (!ec)
{
nano::account start (0);
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (account_text.is_initialized ())
{
start = account_impl (account_text.get ());
}
uint64_t modified_since (0);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
if (decode_unsigned (modified_since_text.get (), modified_since))
{
ec = nano::error_rpc::invalid_timestamp;
}
}
const bool sorting = request.get<bool> ("sorting", false);
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
boost::property_tree::ptree accounts;
auto transaction (node.store.tx_begin_read ());
if (!ec && !sorting) // Simple
{
for (auto i (node.store.account.begin (transaction, start)), n (node.store.account.end ()); i != n && accounts.size () < count; ++i)
{
nano::account_info const & info (i->second);
if (info.modified >= modified_since && (pending || info.balance.number () >= threshold.number ()))
{
nano::account const & account (i->first);
boost::property_tree::ptree response_a;
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
if (info.balance.number () + account_pending < threshold.number ())
{
continue;
}
response_a.put ("pending", account_pending.convert_to<std::string> ());
}
response_a.put ("frontier", info.head.to_string ());
response_a.put ("open_block", info.open_block.to_string ());
response_a.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
response_a.put ("balance", balance);
response_a.put ("modified_timestamp", std::to_string (info.modified));
response_a.put ("block_count", std::to_string (info.block_count));
if (representative)
{
response_a.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_a.put ("weight", account_weight.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), response_a));
}
}
}
else if (!ec) // Sorting
{
std::vector<std::pair<nano::uint128_union, nano::account>> ledger_l;
for (auto i (node.store.account.begin (transaction, start)), n (node.store.account.end ()); i != n; ++i)
{
nano::account_info const & info (i->second);
nano::uint128_union balance (info.balance);
if (info.modified >= modified_since)
{
ledger_l.emplace_back (balance, i->first);
}
}
std::sort (ledger_l.begin (), ledger_l.end ());
std::reverse (ledger_l.begin (), ledger_l.end ());
nano::account_info info;
for (auto i (ledger_l.begin ()), n (ledger_l.end ()); i != n && accounts.size () < count; ++i)
{
node.store.account.get (transaction, i->second, info);
if (pending || info.balance.number () >= threshold.number ())
{
nano::account const & account (i->second);
boost::property_tree::ptree response_a;
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
if (info.balance.number () + account_pending < threshold.number ())
{
continue;
}
response_a.put ("pending", account_pending.convert_to<std::string> ());
}
response_a.put ("frontier", info.head.to_string ());
response_a.put ("open_block", info.open_block.to_string ());
response_a.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
std::string balance;
(i->first).encode_dec (balance);
response_a.put ("balance", balance);
response_a.put ("modified_timestamp", std::to_string (info.modified));
response_a.put ("block_count", std::to_string (info.block_count));
if (representative)
{
response_a.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_a.put ("weight", account_weight.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), response_a));
}
}
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::mnano_from_raw (nano::uint128_t ratio)
{
auto amount (amount_impl ());
response_l.put ("deprecated", "1");
if (!ec)
{
auto result (amount.number () / ratio);
response_l.put ("amount", result.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::mnano_to_raw (nano::uint128_t ratio)
{
auto amount (amount_impl ());
response_l.put ("deprecated", "1");
if (!ec)
{
auto result (amount.number () * ratio);
if (result > amount.number ())
{
response_l.put ("amount", result.convert_to<std::string> ());
}
else
{
ec = nano::error_common::invalid_amount_big;
}
}
response_errors ();
}
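/*
 * Illustrative example for nano_to_raw (a sketch only, assuming Mxrb_ratio is 10^30 raw;
 * the "action" dispatch field is assumed):
 * Request:  { "action": "nano_to_raw", "amount": "1" }
 * Response: { "amount": "1000000000000000000000000000000" }
 */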
void nano::json_handler::nano_to_raw ()
{
auto amount (amount_impl ());
if (!ec)
{
auto result (amount.number () * nano::Mxrb_ratio);
if (result > amount.number ())
{
response_l.put ("amount", result.convert_to<std::string> ());
}
else
{
ec = nano::error_common::invalid_amount_big;
}
}
response_errors ();
}
void nano::json_handler::raw_to_nano ()
{
auto amount (amount_impl ());
if (!ec)
{
auto result (amount.number () / nano::Mxrb_ratio);
response_l.put ("amount", result.convert_to<std::string> ());
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::node_id ()
{
if (!ec)
{
response_l.put ("private", node.node_id.prv.to_string ());
response_l.put ("public", node.node_id.pub.to_string ());
response_l.put ("as_account", node.node_id.pub.to_account ());
response_l.put ("node_id", node.node_id.pub.to_node_id ());
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::node_id_delete ()
{
response_l.put ("deprecated", "1");
response_errors ();
}
void nano::json_handler::password_change ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
if (!rpc_l->ec)
{
std::string password_text (rpc_l->request.get<std::string> ("password"));
bool error (wallet->store.rekey (transaction, password_text));
rpc_l->response_l.put ("changed", error ? "0" : "1");
if (!error)
{
rpc_l->node.logger.try_log ("Wallet password changed");
}
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::password_enter ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string password_text (rpc_l->request.get<std::string> ("password"));
auto transaction (wallet->wallets.tx_begin_write ());
auto error (wallet->enter_password (transaction, password_text));
rpc_l->response_l.put ("valid", error ? "0" : "1");
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::password_valid (bool wallet_locked)
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto valid (wallet->store.valid_password (transaction));
if (!wallet_locked)
{
response_l.put ("valid", valid ? "1" : "0");
}
else
{
response_l.put ("locked", valid ? "0" : "1");
}
}
response_errors ();
}
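/*
 * Illustrative example for peers (a sketch only; the endpoint and values are hypothetical
 * placeholders and the "action" dispatch field is assumed):
 * Request:  { "action": "peers", "peer_details": "true" }
 * Response: { "peers": { "[::ffff:192.0.2.1]:7075": { "protocol_version": "...",
 *                        "node_id": "node_1...", "type": "tcp" } } }
 */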
void nano::json_handler::peers ()
{
boost::property_tree::ptree peers_l;
const bool peer_details = request.get<bool> ("peer_details", false);
auto peers_list (node.network.list (std::numeric_limits<size_t>::max ()));
std::sort (peers_list.begin (), peers_list.end (), [] (const auto & lhs, const auto & rhs) {
return lhs->get_endpoint () < rhs->get_endpoint ();
});
for (auto i (peers_list.begin ()), n (peers_list.end ()); i != n; ++i)
{
std::stringstream text;
auto channel (*i);
text << channel->to_string ();
if (peer_details)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("protocol_version", std::to_string (channel->get_network_version ()));
auto node_id_l (channel->get_node_id_optional ());
if (node_id_l.is_initialized ())
{
pending_tree.put ("node_id", node_id_l.get ().to_node_id ());
}
else
{
pending_tree.put ("node_id", "");
}
pending_tree.put ("type", channel->get_type () == nano::transport::transport_type::tcp ? "tcp" : "udp");
peers_l.push_back (boost::property_tree::ptree::value_type (text.str (), pending_tree));
}
else
{
peers_l.push_back (boost::property_tree::ptree::value_type (text.str (), boost::property_tree::ptree (std::to_string (channel->get_network_version ()))));
}
}
response_l.add_child ("peers", peers_l);
response_errors ();
}
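/*
 * Illustrative example for pending (a sketch only; the account and hashes are hypothetical
 * placeholders and the "action" dispatch field is assumed). The simple form (no
 * threshold/source/min_version/sorting) returns a list of hashes:
 * Request:  { "action": "pending", "account": "nano_1...", "count": "2" }
 * Response: { "blocks": [ "<block hash>", "<block hash>" ] }
 * With "threshold", "source" or "min_version" set, "blocks" becomes a map keyed by hash instead.
 */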
void nano::json_handler::pending ()
{
auto account (account_impl ());
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool min_version = request.get<bool> ("min_version", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", true);
const bool sorting = request.get<bool> ("sorting", false);
auto simple (threshold.is_zero () && !source && !min_version && !sorting); // if simple, response is a list of hashes
const bool should_sort = sorting && !simple;
if (!ec)
{
boost::property_tree::ptree peers_l;
auto transaction (node.store.tx_begin_read ());
// The ptree container is used if there are any child nodes (e.g. source/min_version), otherwise the amount container is used.
std::vector<std::pair<std::string, boost::property_tree::ptree>> hash_ptree_pairs;
std::vector<std::pair<std::string, nano::uint128_t>> hash_amount_pairs;
for (auto i (node.store.pending.begin (transaction, nano::pending_key (account, 0))), n (node.store.pending.end ()); i != n && nano::pending_key (i->first).account == account && (should_sort || peers_l.size () < count); ++i)
{
nano::pending_key const & key (i->first);
if (block_confirmed (node, transaction, key.hash, include_active, include_only_confirmed))
{
if (simple)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info const & info (i->second);
if (info.amount.number () >= threshold.number ())
{
if (source || min_version)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
if (source)
{
pending_tree.put ("source", info.source.to_account ());
}
if (min_version)
{
pending_tree.put ("min_version", epoch_as_string (info.epoch));
}
if (should_sort)
{
hash_ptree_pairs.emplace_back (key.hash.to_string (), pending_tree);
}
else
{
peers_l.add_child (key.hash.to_string (), pending_tree);
}
}
else
{
if (should_sort)
{
hash_amount_pairs.emplace_back (key.hash.to_string (), info.amount.number ());
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
}
if (should_sort)
{
if (source || min_version)
{
auto mid = hash_ptree_pairs.size () <= count ? hash_ptree_pairs.end () : hash_ptree_pairs.begin () + count;
std::partial_sort (hash_ptree_pairs.begin (), mid, hash_ptree_pairs.end (), [] (const auto & lhs, const auto & rhs) {
return lhs.second.template get<nano::uint128_t> ("amount") > rhs.second.template get<nano::uint128_t> ("amount");
});
for (std::size_t i = 0; i < hash_ptree_pairs.size () && i < count; ++i)
{
peers_l.add_child (hash_ptree_pairs[i].first, hash_ptree_pairs[i].second);
}
}
else
{
auto mid = hash_amount_pairs.size () <= count ? hash_amount_pairs.end () : hash_amount_pairs.begin () + count;
std::partial_sort (hash_amount_pairs.begin (), mid, hash_amount_pairs.end (), [] (const auto & lhs, const auto & rhs) {
return lhs.second > rhs.second;
});
for (std::size_t i = 0; i < hash_amount_pairs.size () && i < count; ++i)
{
peers_l.put (hash_amount_pairs[i].first, hash_amount_pairs[i].second.convert_to<std::string> ());
}
}
}
response_l.add_child ("blocks", peers_l);
}
response_errors ();
}
void nano::json_handler::pending_exists ()
{
auto hash (hash_impl ());
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", true);
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block.get (transaction, hash));
if (block != nullptr)
{
auto exists (false);
auto destination (node.ledger.block_destination (transaction, *block));
if (!destination.is_zero ())
{
exists = node.store.pending.exists (transaction, nano::pending_key (destination, hash));
}
exists = exists && (block_confirmed (node, transaction, block->hash (), include_active, include_only_confirmed));
response_l.put ("exists", exists ? "1" : "0");
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::process ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
const bool is_async = rpc_l->request.get<bool> ("async", false);
auto block (rpc_l->block_impl (true));
// State blocks subtype check
if (!rpc_l->ec && block->type () == nano::block_type::state)
{
std::string subtype_text (rpc_l->request.get<std::string> ("subtype", ""));
if (!subtype_text.empty ())
{
std::shared_ptr<nano::state_block> block_state (std::static_pointer_cast<nano::state_block> (block));
auto transaction (rpc_l->node.store.tx_begin_read ());
if (!block_state->hashables.previous.is_zero () && !rpc_l->node.store.block.exists (transaction, block_state->hashables.previous))
{
rpc_l->ec = nano::error_process::gap_previous;
}
else
{
auto balance (rpc_l->node.ledger.account_balance (transaction, block_state->hashables.account));
if (subtype_text == "send")
{
if (balance <= block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
// Send with previous == 0 fails balance check. No previous != 0 check required
}
else if (subtype_text == "receive")
{
if (balance > block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
// Receive can point to an open block. No previous != 0 check required
}
else if (subtype_text == "open")
{
if (!block_state->hashables.previous.is_zero ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_previous;
}
}
else if (subtype_text == "change")
{
if (balance != block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
else if (block_state->hashables.previous.is_zero ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_previous;
}
}
else if (subtype_text == "epoch")
{
if (balance != block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
else if (!rpc_l->node.ledger.is_epoch_link (block_state->hashables.link))
{
rpc_l->ec = nano::error_rpc::invalid_subtype_epoch_link;
}
}
else
{
rpc_l->ec = nano::error_rpc::invalid_subtype;
}
}
}
}
if (!rpc_l->ec)
{
if (!nano::work_validate_entry (*block))
{
if (!is_async)
{
auto result (rpc_l->node.process_local (block));
switch (result.code)
{
case nano::process_result::progress:
{
rpc_l->response_l.put ("hash", block->hash ().to_string ());
break;
}
case nano::process_result::gap_previous:
{
rpc_l->ec = nano::error_process::gap_previous;
break;
}
case nano::process_result::gap_source:
{
rpc_l->ec = nano::error_process::gap_source;
break;
}
case nano::process_result::old:
{
rpc_l->ec = nano::error_process::old;
break;
}
case nano::process_result::bad_signature:
{
rpc_l->ec = nano::error_process::bad_signature;
break;
}
case nano::process_result::negative_spend:
{
// TODO once we get RPC versioning, this should be changed to "negative spend"
rpc_l->ec = nano::error_process::negative_spend;
break;
}
case nano::process_result::balance_mismatch:
{
rpc_l->ec = nano::error_process::balance_mismatch;
break;
}
case nano::process_result::unreceivable:
{
rpc_l->ec = nano::error_process::unreceivable;
break;
}
case nano::process_result::block_position:
{
rpc_l->ec = nano::error_process::block_position;
break;
}
case nano::process_result::gap_epoch_open_pending:
{
rpc_l->ec = nano::error_process::gap_epoch_open_pending;
break;
}
case nano::process_result::fork:
{
const bool force = rpc_l->request.get<bool> ("force", false);
if (force)
{
rpc_l->node.active.erase (*block);
rpc_l->node.block_processor.force (block);
rpc_l->response_l.put ("hash", block->hash ().to_string ());
}
else
{
rpc_l->ec = nano::error_process::fork;
}
break;
}
case nano::process_result::insufficient_work:
{
rpc_l->ec = nano::error_process::insufficient_work;
break;
}
default:
{
rpc_l->ec = nano::error_process::other;
break;
}
}
}
else
{
if (block->type () == nano::block_type::state)
{
rpc_l->node.process_local_async (block);
rpc_l->response_l.put ("started", "1");
}
else
{
rpc_l->ec = nano::error_common::is_not_state_block;
}
}
}
else
{
rpc_l->ec = nano::error_blocks::work_low;
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::pruned_exists ()
{
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
if (node.ledger.pruning)
{
auto exists (node.store.pruned.exists (transaction, hash));
response_l.put ("exists", exists ? "1" : "0");
}
else
{
ec = nano::error_rpc::pruning_disabled;
}
}
response_errors ();
}
void nano::json_handler::receive ()
{
auto wallet (wallet_impl ());
auto account (account_impl ());
auto hash (hash_impl ("block"));
if (!ec)
{
auto wallet_transaction (node.wallets.tx_begin_read ());
wallet_locked_impl (wallet_transaction, wallet);
wallet_account_impl (wallet_transaction, wallet, account);
if (!ec)
{
auto block_transaction (node.store.tx_begin_read ());
if (node.ledger.block_or_pruned_exists (block_transaction, hash))
{
nano::pending_info pending_info;
if (!node.store.pending.get (block_transaction, nano::pending_key (account, hash), pending_info))
{
auto work (work_optional_impl ());
if (!ec && work)
{
nano::account_info info;
nano::root head;
nano::epoch epoch = pending_info.epoch;
if (!node.store.account.get (block_transaction, account, info))
{
head = info.head;
// When receiving, the epoch version is the higher of the previous and the source blocks
epoch = std::max (info.epoch (), epoch);
}
else
{
head = account;
}
nano::block_details details (epoch, false, true, false);
if (nano::work_difficulty (nano::work_version::work_1, head, work) < nano::work_threshold (nano::work_version::work_1, details))
{
ec = nano::error_common::invalid_work;
}
}
else if (!ec) // && work == 0
{
if (!node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
}
if (!ec)
{
// Representative is only used by receive_action when opening accounts
// Set a wallet default representative for new accounts
nano::account representative (wallet->store.representative (wallet_transaction));
bool generate_work (work == 0); // Disable work generation if "work" option is provided
auto response_a (response);
wallet->receive_async (
hash, representative, nano::dev::constants.genesis_amount, account, [response_a] (std::shared_ptr<nano::block> const & block_a) {
if (block_a != nullptr)
{
boost::property_tree::ptree response_l;
response_l.put ("block", block_a->hash ().to_string ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
response_a (ostream.str ());
}
else
{
json_error_response (response_a, "Error generating block");
}
},
work, generate_work);
}
}
else
{
ec = nano::error_process::unreceivable;
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
}
// The response is sent asynchronously by receive_async; only errors are reported here
if (ec)
{
response_errors ();
}
}
void nano::json_handler::receive_minimum ()
{
if (!ec)
{
response_l.put ("amount", node.config.receive_minimum.to_string_dec ());
}
response_errors ();
}
void nano::json_handler::receive_minimum_set ()
{
auto amount (amount_impl ());
if (!ec)
{
node.config.receive_minimum = amount;
response_l.put ("success", "");
}
response_errors ();
}
void nano::json_handler::representatives ()
{
auto count (count_optional_impl ());
if (!ec)
{
const bool sorting = request.get<bool> ("sorting", false);
boost::property_tree::ptree representatives;
auto rep_amounts = node.ledger.cache.rep_weights.get_rep_amounts ();
if (!sorting) // Simple
{
std::map<nano::account, nano::uint128_t> ordered (rep_amounts.begin (), rep_amounts.end ());
for (auto & rep_amount : ordered)
{
auto const & account (rep_amount.first);
auto const & amount (rep_amount.second);
representatives.put (account.to_account (), amount.convert_to<std::string> ());
if (representatives.size () > count)
{
break;
}
}
}
else // Sorting
{
std::vector<std::pair<nano::uint128_t, std::string>> representation;
for (auto & rep_amount : rep_amounts)
{
auto const & account (rep_amount.first);
auto const & amount (rep_amount.second);
representation.emplace_back (amount, account.to_account ());
}
std::sort (representation.begin (), representation.end ());
std::reverse (representation.begin (), representation.end ());
for (auto i (representation.begin ()), n (representation.end ()); i != n && representatives.size () < count; ++i)
{
representatives.put (i->second, (i->first).convert_to<std::string> ());
}
}
response_l.add_child ("representatives", representatives);
}
response_errors ();
}
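/*
 * Illustrative example for representatives_online (a sketch only; accounts and weights are
 * hypothetical placeholders and the "action" dispatch field is assumed):
 * Request:  { "action": "representatives_online", "weight": "true" }
 * Response: { "representatives": { "nano_1...": { "weight": "<raw>" } } }
 * Without "weight", "representatives" is a plain list of the online representative accounts.
 */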
void nano::json_handler::representatives_online ()
{
const auto accounts_node = request.get_child_optional ("accounts");
const bool weight = request.get<bool> ("weight", false);
std::vector<nano::public_key> accounts_to_filter;
if (accounts_node.is_initialized ())
{
for (auto & a : (*accounts_node))
{
auto account (account_impl (a.second.get<std::string> ("")));
if (!ec)
{
accounts_to_filter.push_back (account);
}
else
{
break;
}
}
}
if (!ec)
{
boost::property_tree::ptree representatives;
auto reps (node.online_reps.list ());
for (auto & i : reps)
{
if (accounts_node.is_initialized ())
{
if (accounts_to_filter.empty ())
{
break;
}
auto found_acc = std::find (accounts_to_filter.begin (), accounts_to_filter.end (), i);
if (found_acc == accounts_to_filter.end ())
{
continue;
}
else
{
accounts_to_filter.erase (found_acc);
}
}
if (weight)
{
boost::property_tree::ptree weight_node;
auto account_weight (node.ledger.weight (i));
weight_node.put ("weight", account_weight.convert_to<std::string> ());
representatives.add_child (i.to_account (), weight_node);
}
else
{
boost::property_tree::ptree entry;
entry.put ("", i.to_account ());
representatives.push_back (std::make_pair ("", entry));
}
}
response_l.add_child ("representatives", representatives);
}
response_errors ();
}
void nano::json_handler::republish ()
{
auto count (count_optional_impl (1024U));
uint64_t sources (0);
uint64_t destinations (0);
boost::optional<std::string> sources_text (request.get_optional<std::string> ("sources"));
if (!ec && sources_text.is_initialized ())
{
if (decode_unsigned (sources_text.get (), sources))
{
ec = nano::error_rpc::invalid_sources;
}
}
boost::optional<std::string> destinations_text (request.get_optional<std::string> ("destinations"));
if (!ec && destinations_text.is_initialized ())
{
if (decode_unsigned (destinations_text.get (), destinations))
{
ec = nano::error_rpc::invalid_destinations;
}
}
auto hash (hash_impl ());
if (!ec)
{
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block.get (transaction, hash));
if (block != nullptr)
{
std::deque<std::shared_ptr<nano::block>> republish_bundle;
for (auto i (0); !hash.is_zero () && i < count; ++i)
{
block = node.store.block.get (transaction, hash);
if (sources != 0) // Republish source chain
{
nano::block_hash source (node.ledger.block_source (transaction, *block));
auto block_a (node.store.block.get (transaction, source));
std::vector<nano::block_hash> hashes;
while (block_a != nullptr && hashes.size () < sources)
{
hashes.push_back (source);
source = block_a->previous ();
block_a = node.store.block.get (transaction, source);
}
std::reverse (hashes.begin (), hashes.end ());
for (auto & hash_l : hashes)
{
block_a = node.store.block.get (transaction, hash_l);
republish_bundle.push_back (std::move (block_a));
boost::property_tree::ptree entry_l;
entry_l.put ("", hash_l.to_string ());
blocks.push_back (std::make_pair ("", entry_l));
}
}
republish_bundle.push_back (std::move (block)); // Republish block
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
if (destinations != 0) // Republish destination chain
{
auto block_b (node.store.block.get (transaction, hash));
auto destination (node.ledger.block_destination (transaction, *block_b));
if (!destination.is_zero ())
{
if (!node.store.pending.exists (transaction, nano::pending_key (destination, hash)))
{
nano::block_hash previous (node.ledger.latest (transaction, destination));
auto block_d (node.store.block.get (transaction, previous));
nano::block_hash source;
std::vector<nano::block_hash> hashes;
while (block_d != nullptr && hash != source)
{
hashes.push_back (previous);
source = node.ledger.block_source (transaction, *block_d);
previous = block_d->previous ();
block_d = node.store.block.get (transaction, previous);
}
std::reverse (hashes.begin (), hashes.end ());
if (hashes.size () > destinations)
{
hashes.resize (destinations);
}
for (auto & hash_l : hashes)
{
block_d = node.store.block.get (transaction, hash_l);
republish_bundle.push_back (std::move (block_d));
boost::property_tree::ptree entry_l;
entry_l.put ("", hash_l.to_string ());
blocks.push_back (std::make_pair ("", entry_l));
}
}
}
}
hash = node.store.block.successor (transaction, hash);
}
node.network.flood_block_many (std::move (republish_bundle), nullptr, 25);
response_l.put ("success", ""); // obsolete
response_l.add_child ("blocks", blocks);
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::search_pending ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto error (wallet->search_pending (wallet->wallets.tx_begin_read ()));
response_l.put ("started", !error);
}
response_errors ();
}
void nano::json_handler::search_pending_all ()
{
if (!ec)
{
node.wallets.search_pending_all ();
response_l.put ("success", "");
}
response_errors ();
}
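/*
 * Illustrative example for send (a sketch only; the wallet id, accounts and amount are
 * hypothetical placeholders and the "action" dispatch field is assumed):
 * Request:  { "action": "send", "wallet": "<wallet id>", "source": "nano_1...",
 *             "destination": "nano_3...", "amount": "<raw>", "id": "unique-send-id" }
 * Response: { "block": "<hash of the created send block>" }
 */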
void nano::json_handler::send ()
{
auto wallet (wallet_impl ());
auto amount (amount_impl ());
// Sending 0 amount is invalid with state blocks
if (!ec && amount.is_zero ())
{
ec = nano::error_common::invalid_amount;
}
std::string source_text (request.get<std::string> ("source"));
auto source (account_impl (source_text, nano::error_rpc::bad_source));
std::string destination_text (request.get<std::string> ("destination"));
auto destination (account_impl (destination_text, nano::error_rpc::bad_destination));
if (!ec)
{
auto work (work_optional_impl ());
nano::uint128_t balance (0);
if (!ec && work == 0 && !node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
wallet_locked_impl (transaction, wallet);
wallet_account_impl (transaction, wallet, source);
auto info (account_info_impl (block_transaction, source));
if (!ec)
{
balance = (info.balance).number ();
}
if (!ec && work)
{
nano::block_details details (info.epoch (), true, false, false);
if (nano::work_difficulty (nano::work_version::work_1, info.head, work) < nano::work_threshold (nano::work_version::work_1, details))
{
ec = nano::error_common::invalid_work;
}
}
}
if (!ec)
{
bool generate_work (work == 0); // Disable work generation if "work" option is provided
boost::optional<std::string> send_id (request.get_optional<std::string> ("id"));
auto response_a (response);
auto response_data (std::make_shared<boost::property_tree::ptree> (response_l));
wallet->send_async (
source, destination, amount.number (), [balance, amount, response_a, response_data] (std::shared_ptr<nano::block> const & block_a) {
if (block_a != nullptr)
{
response_data->put ("block", block_a->hash ().to_string ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, *response_data);
response_a (ostream.str ());
}
else
{
if (balance >= amount.number ())
{
json_error_response (response_a, "Error generating block");
}
else
{
std::error_code ec (nano::error_common::insufficient_balance);
json_error_response (response_a, ec.message ());
}
}
},
work, generate_work, send_id);
}
}
// The response is sent asynchronously by send_async; only errors are reported here
if (ec)
{
response_errors ();
}
}
void nano::json_handler::sign ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
// Retrieving hash
nano::block_hash hash (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("hash"));
if (hash_text.is_initialized ())
{
hash = hash_impl ();
}
// Retrieving block
std::shared_ptr<nano::block> block;
if (!ec && request.count ("block"))
{
block = block_impl (true);
if (block != nullptr)
{
hash = block->hash ();
}
}
// Neither a hash nor a block was supplied
if (!ec && hash.is_zero ())
{
ec = nano::error_blocks::invalid_block;
}
// A raw hash was supplied but signing arbitrary hashes is disabled in the node RPC config
else if (!ec && !hash.is_zero () && block == nullptr && !node_rpc_config.enable_sign_hash)
{
ec = nano::error_rpc::sign_hash_disabled;
}
if (!ec)
{
nano::raw_key prv;
prv.clear ();
// Retrieving private key from request
boost::optional<std::string> key_text (request.get_optional<std::string> ("key"));
if (key_text.is_initialized ())
{
if (prv.decode_hex (key_text.get ()))
{
ec = nano::error_common::bad_private_key;
}
}
else
{
// Retrieving private key from wallet
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
boost::optional<std::string> wallet_text (request.get_optional<std::string> ("wallet"));
if (wallet_text.is_initialized () && account_text.is_initialized ())
{
auto account (account_impl ());
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
wallet_locked_impl (transaction, wallet);
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
wallet->store.fetch (transaction, account, prv);
}
}
}
}
// Signing
if (prv != 0)
{
nano::public_key pub (nano::pub_key (prv));
nano::signature signature (nano::sign_message (prv, pub, hash));
response_l.put ("signature", signature.to_string ());
if (block != nullptr)
{
block->signature_set (signature);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
response_l.add_child ("block", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
response_l.put ("block", contents);
}
}
}
else
{
ec = nano::error_rpc::block_create_key_required;
}
}
response_errors ();
}
void nano::json_handler::stats ()
{
auto sink = node.stats.log_sink_json ();
std::string type (request.get<std::string> ("type", ""));
bool use_sink = false;
if (type == "counters")
{
node.stats.log_counters (*sink);
use_sink = true;
}
else if (type == "objects")
{
construct_json (collect_container_info (node, "node").get (), response_l);
}
else if (type == "samples")
{
node.stats.log_samples (*sink);
use_sink = true;
}
else if (type == "database")
{
node.store.serialize_memory_stats (response_l);
}
else
{
ec = nano::error_rpc::invalid_missing_type;
}
if (!ec && use_sink)
{
auto stat_tree_l (*static_cast<boost::property_tree::ptree *> (sink->to_object ()));
stat_tree_l.put ("stat_duration_seconds", node.stats.last_reset ().count ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, stat_tree_l);
response (ostream.str ());
}
else
{
response_errors ();
}
}
void nano::json_handler::stats_clear ()
{
node.stats.clear ();
response_l.put ("success", "");
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
response (ostream.str ());
}
void nano::json_handler::stop ()
{
response_l.put ("success", "");
response_errors ();
if (!ec)
{
node.stop ();
stop_callback ();
}
}
void nano::json_handler::telemetry ()
{
auto rpc_l (shared_from_this ());
auto address_text (request.get_optional<std::string> ("address"));
auto port_text (request.get_optional<std::string> ("port"));
if (address_text.is_initialized () || port_text.is_initialized ())
{
// Check both are specified
std::shared_ptr<nano::transport::channel> channel;
if (address_text.is_initialized () && port_text.is_initialized ())
{
uint16_t port;
if (!nano::parse_port (*port_text, port))
{
boost::asio::ip::address address;
if (!nano::parse_address (*address_text, address))
{
nano::endpoint endpoint (address, port);
if (address.is_loopback () && port == rpc_l->node.network.endpoint ().port ())
{
// Requesting telemetry metrics locally
auto telemetry_data = nano::local_telemetry_data (rpc_l->node.ledger, rpc_l->node.network, rpc_l->node.config.bandwidth_limit, rpc_l->node.network_params, rpc_l->node.startup_time, rpc_l->node.default_difficulty (nano::work_version::work_1), rpc_l->node.node_id);
nano::jsonconfig config_l;
auto const should_ignore_identification_metrics = false;
auto err = telemetry_data.serialize_json (config_l, should_ignore_identification_metrics);
auto const & ptree = config_l.get_tree ();
if (!err)
{
rpc_l->response_l.insert (rpc_l->response_l.begin (), ptree.begin (), ptree.end ());
}
rpc_l->response_errors ();
return;
}
else
{
channel = node.network.find_channel (nano::transport::map_endpoint_to_v6 (endpoint));
if (!channel)
{
ec = nano::error_rpc::peer_not_found;
}
}
}
else
{
ec = nano::error_common::invalid_ip_address;
}
}
else
{
ec = nano::error_common::invalid_port;
}
}
else
{
ec = nano::error_rpc::requires_port_and_address;
}
if (!ec)
{
debug_assert (channel);
if (node.telemetry)
{
node.telemetry->get_metrics_single_peer_async (channel, [rpc_l] (auto const & telemetry_response_a) {
if (!telemetry_response_a.error)
{
nano::jsonconfig config_l;
auto const should_ignore_identification_metrics = false;
auto err = telemetry_response_a.telemetry_data.serialize_json (config_l, should_ignore_identification_metrics);
auto const & ptree = config_l.get_tree ();
if (!err)
{
rpc_l->response_l.insert (rpc_l->response_l.begin (), ptree.begin (), ptree.end ());
}
else
{
rpc_l->ec = nano::error_rpc::generic;
}
}
else
{
rpc_l->ec = nano::error_rpc::generic;
}
rpc_l->response_errors ();
});
}
else
{
response_errors ();
}
}
else
{
response_errors ();
}
}
else
{
// By default, consolidated (average or mode) telemetry metrics are returned,
// setting "raw" to true returns metrics from all nodes requested.
auto raw = request.get_optional<bool> ("raw");
auto output_raw = raw.value_or (false);
if (node.telemetry)
{
auto telemetry_responses = node.telemetry->get_metrics ();
if (output_raw)
{
boost::property_tree::ptree metrics;
for (auto & telemetry_metrics : telemetry_responses)
{
nano::jsonconfig config_l;
auto const should_ignore_identification_metrics = false;
auto err = telemetry_metrics.second.serialize_json (config_l, should_ignore_identification_metrics);
config_l.put ("address", telemetry_metrics.first.address ());
config_l.put ("port", telemetry_metrics.first.port ());
if (!err)
{
metrics.push_back (std::make_pair ("", config_l.get_tree ()));
}
else
{
ec = nano::error_rpc::generic;
}
}
response_l.put_child ("metrics", metrics);
}
else
{
nano::jsonconfig config_l;
std::vector<nano::telemetry_data> telemetry_datas;
telemetry_datas.reserve (telemetry_responses.size ());
std::transform (telemetry_responses.begin (), telemetry_responses.end (), std::back_inserter (telemetry_datas), [] (auto const & endpoint_telemetry_data) {
return endpoint_telemetry_data.second;
});
auto average_telemetry_metrics = nano::consolidate_telemetry_data (telemetry_datas);
// Don't add node_id/signature in consolidated metrics
auto const should_ignore_identification_metrics = true;
auto err = average_telemetry_metrics.serialize_json (config_l, should_ignore_identification_metrics);
auto const & ptree = config_l.get_tree ();
if (!err)
{
response_l.insert (response_l.begin (), ptree.begin (), ptree.end ());
}
else
{
ec = nano::error_rpc::generic;
}
}
}
response_errors ();
}
}
void nano::json_handler::unchecked ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto count (count_optional_impl ());
if (!ec)
{
boost::property_tree::ptree unchecked;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked.begin (transaction)), n (node.store.unchecked.end ()); i != n && unchecked.size () < count; ++i)
{
nano::unchecked_info const & info (i->second);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
unchecked.add_child (info.block->hash ().to_string (), block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
unchecked.put (info.block->hash ().to_string (), contents);
}
}
response_l.add_child ("blocks", unchecked);
}
response_errors ();
}
void nano::json_handler::unchecked_clear ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto transaction (rpc_l->node.store.tx_begin_write ({ tables::unchecked }));
rpc_l->node.store.unchecked.clear (transaction);
rpc_l->response_l.put ("success", "");
rpc_l->response_errors ();
}));
}
void nano::json_handler::unchecked_get ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked.begin (transaction)), n (node.store.unchecked.end ()); i != n; ++i)
{
nano::unchecked_key const & key (i->first);
if (key.hash == hash)
{
nano::unchecked_info const & info (i->second);
response_l.put ("modified_timestamp", std::to_string (info.modified));
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
response_l.add_child ("contents", block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
response_l.put ("contents", contents);
}
break;
}
}
if (response_l.empty ())
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::unchecked_keys ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto count (count_optional_impl ());
nano::block_hash key (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("key"));
if (!ec && hash_text.is_initialized ())
{
if (key.decode_hex (hash_text.get ()))
{
ec = nano::error_rpc::bad_key;
}
}
if (!ec)
{
boost::property_tree::ptree unchecked;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked.begin (transaction, nano::unchecked_key (key, 0))), n (node.store.unchecked.end ()); i != n && unchecked.size () < count; ++i)
{
boost::property_tree::ptree entry;
nano::unchecked_info const & info (i->second);
entry.put ("key", i->first.key ().to_string ());
entry.put ("hash", info.block->hash ().to_string ());
entry.put ("modified_timestamp", std::to_string (info.modified));
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
entry.put ("contents", contents);
}
unchecked.push_back (std::make_pair ("", entry));
}
response_l.add_child ("unchecked", unchecked);
}
response_errors ();
}
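/*
 * Illustrative example for unopened (a sketch only; the account and sum are hypothetical
 * placeholders and the "action" dispatch field is assumed):
 * Request:  { "action": "unopened", "count": "1" }
 * Response: { "accounts": { "nano_1...": "<sum of pending amounts in raw>" } }
 */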
void nano::json_handler::unopened ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
nano::account start (1); // exclude burn account by default
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (account_text.is_initialized ())
{
start = account_impl (account_text.get ());
}
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto iterator (node.store.pending.begin (transaction, nano::pending_key (start, 0)));
auto end (node.store.pending.end ());
nano::account current_account (start);
nano::uint128_t current_account_sum{ 0 };
boost::property_tree::ptree accounts;
while (iterator != end && accounts.size () < count)
{
nano::pending_key key (iterator->first);
nano::account account (key.account);
nano::pending_info info (iterator->second);
if (node.store.account.exists (transaction, account))
{
if (account.number () == std::numeric_limits<nano::uint256_t>::max ())
{
break;
}
// Skip existing accounts
iterator = node.store.pending.begin (transaction, nano::pending_key (account.number () + 1, 0));
}
else
{
if (account != current_account)
{
if (current_account_sum > 0)
{
if (current_account_sum >= threshold.number ())
{
accounts.put (current_account.to_account (), current_account_sum.convert_to<std::string> ());
}
current_account_sum = 0;
}
current_account = account;
}
current_account_sum += info.amount.number ();
++iterator;
}
}
// Handle the final accumulated account once the iterator reaches the end
if (accounts.size () < count && current_account_sum > 0 && current_account_sum >= threshold.number ())
{
accounts.put (current_account.to_account (), current_account_sum.convert_to<std::string> ());
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::uptime ()
{
response_l.put ("seconds", std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - node.startup_time).count ());
response_errors ();
}
void nano::json_handler::version ()
{
response_l.put ("rpc_version", "1");
response_l.put ("store_version", std::to_string (node.store_version ()));
response_l.put ("protocol_version", std::to_string (node.network_params.protocol.protocol_version));
response_l.put ("node_vendor", boost::str (boost::format ("Nano %1%") % NANO_VERSION_STRING));
response_l.put ("store_vendor", node.store.vendor_get ());
response_l.put ("network", node.network_params.network.get_current_network_as_string ());
response_l.put ("network_identifier", node.network_params.ledger.genesis->hash ().to_string ());
response_l.put ("build_info", BUILD_INFO);
response_errors ();
}
void nano::json_handler::validate_account_number ()
{
auto account (account_impl ());
(void)account;
response_l.put ("valid", ec ? "0" : "1");
ec = std::error_code (); // clear the error; an invalid account is reported via "valid" rather than as an RPC error
response_errors ();
}
void nano::json_handler::wallet_add ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string key_text (rpc_l->request.get<std::string> ("key"));
nano::raw_key key;
if (!key.decode_hex (key_text))
{
const bool generate_work = rpc_l->request.get<bool> ("work", true);
auto pub (wallet->insert_adhoc (key, generate_work));
if (!pub.is_zero ())
{
rpc_l->response_l.put ("account", pub.to_account ());
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
else
{
rpc_l->ec = nano::error_common::bad_private_key;
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::wallet_add_watch ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction))
{
for (auto & accounts : rpc_l->request.get_child ("accounts"))
{
auto account (rpc_l->account_impl (accounts.second.data ()));
if (!rpc_l->ec)
{
if (wallet->insert_watch (transaction, account))
{
rpc_l->ec = nano::error_common::bad_public_key;
}
}
}
if (!rpc_l->ec)
{
rpc_l->response_l.put ("success", "");
}
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::wallet_info ()
{
auto wallet (wallet_impl ());
if (!ec)
{
nano::uint128_t balance (0);
nano::uint128_t pending (0);
uint64_t count (0);
uint64_t block_count (0);
uint64_t cemented_block_count (0);
uint64_t deterministic_count (0);
uint64_t adhoc_count (0);
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info account_info{};
if (!node.store.account.get (block_transaction, account, account_info))
{
block_count += account_info.block_count;
}
nano::confirmation_height_info confirmation_info{};
if (!node.store.confirmation_height.get (block_transaction, account, confirmation_info))
{
cemented_block_count += confirmation_info.height;
}
balance += account_info.balance.number ();
pending += node.ledger.account_pending (block_transaction, account);
nano::key_type key_type (wallet->store.key_type (i->second));
if (key_type == nano::key_type::deterministic)
{
deterministic_count++;
}
else if (key_type == nano::key_type::adhoc)
{
adhoc_count++;
}
++count;
}
uint32_t deterministic_index (wallet->store.deterministic_index_get (transaction));
response_l.put ("balance", balance.convert_to<std::string> ());
response_l.put ("pending", pending.convert_to<std::string> ());
response_l.put ("accounts_count", std::to_string (count));
response_l.put ("accounts_block_count", std::to_string (block_count));
response_l.put ("accounts_cemented_block_count", std::to_string (cemented_block_count));
response_l.put ("deterministic_count", std::to_string (deterministic_count));
response_l.put ("adhoc_count", std::to_string (adhoc_count));
response_l.put ("deterministic_index", std::to_string (deterministic_index));
}
response_errors ();
}
void nano::json_handler::wallet_balances ()
{
auto wallet (wallet_impl ());
auto threshold (threshold_optional_impl ());
if (!ec)
{
boost::property_tree::ptree balances;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::uint128_t balance = node.ledger.account_balance (block_transaction, account);
if (balance >= threshold.number ())
{
boost::property_tree::ptree entry;
nano::uint128_t pending = node.ledger.account_pending (block_transaction, account);
entry.put ("balance", balance.convert_to<std::string> ());
entry.put ("pending", pending.convert_to<std::string> ());
balances.push_back (std::make_pair (account.to_account (), entry));
}
}
response_l.add_child ("balances", balances);
}
response_errors ();
}
void nano::json_handler::wallet_change_seed ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string seed_text (rpc_l->request.get<std::string> ("seed"));
nano::raw_key seed;
if (!seed.decode_hex (seed_text))
{
auto count (static_cast<uint32_t> (rpc_l->count_optional_impl (0)));
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction))
{
nano::public_key account (wallet->change_seed (transaction, seed, count));
rpc_l->response_l.put ("success", "");
rpc_l->response_l.put ("last_restored_account", account.to_account ());
auto index (wallet->store.deterministic_index_get (transaction));
debug_assert (index > 0);
rpc_l->response_l.put ("restored_count", std::to_string (index));
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
else
{
rpc_l->ec = nano::error_common::bad_seed;
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::wallet_contains ()
{
auto account (account_impl ());
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto exists (wallet->store.find (transaction, account) != wallet->store.end ());
response_l.put ("exists", exists ? "1" : "0");
}
response_errors ();
}
void nano::json_handler::wallet_create ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
nano::raw_key seed;
auto seed_text (rpc_l->request.get_optional<std::string> ("seed"));
if (seed_text.is_initialized () && seed.decode_hex (seed_text.get ()))
{
rpc_l->ec = nano::error_common::bad_seed;
}
if (!rpc_l->ec)
{
auto wallet_id = random_wallet_id ();
auto wallet (rpc_l->node.wallets.create (wallet_id));
auto existing (rpc_l->node.wallets.items.find (wallet_id));
if (existing != rpc_l->node.wallets.items.end ())
{
rpc_l->response_l.put ("wallet", wallet_id.to_string ());
}
else
{
rpc_l->ec = nano::error_common::wallet_lmdb_max_dbs;
}
if (!rpc_l->ec && seed_text.is_initialized ())
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
nano::public_key account (wallet->change_seed (transaction, seed));
rpc_l->response_l.put ("last_restored_account", account.to_account ());
auto index (wallet->store.deterministic_index_get (transaction));
debug_assert (index > 0);
rpc_l->response_l.put ("restored_count", std::to_string (index));
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::wallet_destroy ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
std::string wallet_text (rpc_l->request.get<std::string> ("wallet"));
nano::wallet_id wallet;
if (!wallet.decode_hex (wallet_text))
{
auto existing (rpc_l->node.wallets.items.find (wallet));
if (existing != rpc_l->node.wallets.items.end ())
{
rpc_l->node.wallets.destroy (wallet);
bool destroyed (rpc_l->node.wallets.items.find (wallet) == rpc_l->node.wallets.items.end ());
rpc_l->response_l.put ("destroyed", destroyed ? "1" : "0");
}
else
{
rpc_l->ec = nano::error_common::wallet_not_found;
}
}
else
{
rpc_l->ec = nano::error_common::bad_wallet_number;
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::wallet_export ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
std::string json;
wallet->store.serialize_json (transaction, json);
response_l.put ("json", json);
}
response_errors ();
}
void nano::json_handler::wallet_frontiers ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree frontiers;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
auto latest (node.ledger.latest (block_transaction, account));
if (!latest.is_zero ())
{
frontiers.put (account.to_account (), latest.to_string ());
}
}
response_l.add_child ("frontiers", frontiers);
}
response_errors ();
}
void nano::json_handler::wallet_history ()
{
uint64_t modified_since (1);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
if (decode_unsigned (modified_since_text.get (), modified_since))
{
ec = nano::error_rpc::invalid_timestamp;
}
}
auto wallet (wallet_impl ());
if (!ec)
{
std::multimap<uint64_t, boost::property_tree::ptree, std::greater<uint64_t>> entries;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!node.store.account.get (block_transaction, account, info))
{
auto timestamp (info.modified);
auto hash (info.head);
while (timestamp >= modified_since && !hash.is_zero ())
{
auto block (node.store.block.get (block_transaction, hash));
timestamp = block != nullptr ? block->sideband ().timestamp : 0;
if (block != nullptr && timestamp >= modified_since)
{
boost::property_tree::ptree entry;
std::vector<nano::public_key> no_filter;
history_visitor visitor (*this, false, block_transaction, entry, hash, no_filter);
block->visit (visitor);
if (!entry.empty ())
{
entry.put ("block_account", account.to_account ());
entry.put ("hash", hash.to_string ());
entry.put ("local_timestamp", std::to_string (timestamp));
entries.insert (std::make_pair (timestamp, entry));
}
hash = block->previous ();
}
else
{
hash.clear ();
}
}
}
}
boost::property_tree::ptree history;
for (auto i (entries.begin ()), n (entries.end ()); i != n; ++i)
{
history.push_back (std::make_pair ("", i->second));
}
response_l.add_child ("history", history);
}
response_errors ();
}
void nano::json_handler::wallet_key_valid ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto valid (wallet->store.valid_password (transaction));
response_l.put ("valid", valid ? "1" : "0");
}
response_errors ();
}
void nano::json_handler::wallet_ledger ()
{
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
uint64_t modified_since (0);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
modified_since = strtoul (modified_since_text.get ().c_str (), NULL, 10);
}
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree accounts;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!node.store.account.get (block_transaction, account, info))
{
if (info.modified >= modified_since)
{
boost::property_tree::ptree entry;
entry.put ("frontier", info.head.to_string ());
entry.put ("open_block", info.open_block.to_string ());
entry.put ("representative_block", node.ledger.representative (block_transaction, info.head).to_string ());
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
entry.put ("balance", balance);
entry.put ("modified_timestamp", std::to_string (info.modified));
entry.put ("block_count", std::to_string (info.block_count));
if (representative)
{
entry.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
entry.put ("weight", account_weight.convert_to<std::string> ());
}
if (pending)
{
auto account_pending (node.ledger.account_pending (block_transaction, account));
entry.put ("pending", account_pending.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), entry));
}
}
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::wallet_lock ()
{
auto wallet (wallet_impl ());
if (!ec)
{
nano::raw_key empty;
empty.clear ();
wallet->store.password.value_set (empty);
response_l.put ("locked", "1");
node.logger.try_log ("Wallet locked");
}
response_errors ();
}
void nano::json_handler::wallet_pending ()
{
auto wallet (wallet_impl ());
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool min_version = request.get<bool> ("min_version", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", true);
if (!ec)
{
boost::property_tree::ptree pending;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
boost::property_tree::ptree peers_l;
for (auto ii (node.store.pending.begin (block_transaction, nano::pending_key (account, 0))), nn (node.store.pending.end ()); ii != nn && nano::pending_key (ii->first).account == account && peers_l.size () < count; ++ii)
{
nano::pending_key key (ii->first);
if (block_confirmed (node, block_transaction, key.hash, include_active, include_only_confirmed))
{
if (threshold.is_zero () && !source)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info info (ii->second);
if (info.amount.number () >= threshold.number ())
{
if (source || min_version)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
if (source)
{
pending_tree.put ("source", info.source.to_account ());
}
if (min_version)
{
pending_tree.put ("min_version", epoch_as_string (info.epoch));
}
peers_l.add_child (key.hash.to_string (), pending_tree);
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
if (!peers_l.empty ())
{
pending.add_child (account.to_account (), peers_l);
}
}
response_l.add_child ("blocks", pending);
}
response_errors ();
}
void nano::json_handler::wallet_representative ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
response_l.put ("representative", wallet->store.representative (transaction).to_account ());
}
response_errors ();
}
void nano::json_handler::wallet_representative_set ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
std::string representative_text (rpc_l->request.get<std::string> ("representative"));
auto representative (rpc_l->account_impl (representative_text, nano::error_rpc::bad_representative_number));
if (!rpc_l->ec)
{
bool update_existing_accounts (rpc_l->request.get<bool> ("update_existing_accounts", false));
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction) || !update_existing_accounts)
{
wallet->store.representative_set (transaction, representative);
rpc_l->response_l.put ("set", "1");
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
// Change representative for all wallet accounts
if (!rpc_l->ec && update_existing_accounts)
{
std::vector<nano::account> accounts;
{
auto transaction (rpc_l->node.wallets.tx_begin_read ());
auto block_transaction (rpc_l->node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!rpc_l->node.store.account.get (block_transaction, account, info))
{
if (info.representative != representative)
{
accounts.push_back (account);
}
}
}
}
for (auto & account : accounts)
{
wallet->change_async (
account, representative, [] (std::shared_ptr<nano::block> const &) {}, 0, false);
}
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::wallet_republish ()
{
auto wallet (wallet_impl ());
auto count (count_impl ());
if (!ec)
{
boost::property_tree::ptree blocks;
std::deque<std::shared_ptr<nano::block>> republish_bundle;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
auto latest (node.ledger.latest (block_transaction, account));
std::shared_ptr<nano::block> block;
std::vector<nano::block_hash> hashes;
while (!latest.is_zero () && hashes.size () < count)
{
hashes.push_back (latest);
block = node.store.block.get (block_transaction, latest);
if (block != nullptr)
{
latest = block->previous ();
}
else
{
latest.clear ();
}
}
std::reverse (hashes.begin (), hashes.end ());
for (auto & hash : hashes)
{
block = node.store.block.get (block_transaction, hash);
republish_bundle.push_back (std::move (block));
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
}
}
node.network.flood_block_many (std::move (republish_bundle), nullptr, 25);
response_l.add_child ("blocks", blocks);
}
response_errors ();
}
void nano::json_handler::wallet_seed ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
if (wallet->store.valid_password (transaction))
{
nano::raw_key seed;
wallet->store.seed (seed, transaction);
response_l.put ("seed", seed.to_string ());
}
else
{
ec = nano::error_common::wallet_locked;
}
}
response_errors ();
}
void nano::json_handler::wallet_work_get ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree works;
auto transaction (node.wallets.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
uint64_t work (0);
auto error_work (wallet->store.work_get (transaction, account, work));
(void)error_work;
works.put (account.to_account (), nano::to_string_hex (work));
}
response_l.add_child ("works", works);
}
response_errors ();
}
void nano::json_handler::work_generate ()
{
boost::optional<nano::account> account;
auto account_opt (request.get_optional<std::string> ("account"));
// Default to work_1 if not specified
auto work_version (work_version_optional_impl (nano::work_version::work_1));
if (!ec && account_opt.is_initialized ())
{
account = account_impl (account_opt.get ());
}
if (!ec)
{
auto hash (hash_impl ());
auto difficulty (difficulty_optional_impl (work_version));
multiplier_optional_impl (work_version, difficulty);
if (!ec && (difficulty > node.max_work_generate_difficulty (work_version) || difficulty < nano::work_threshold_entry (work_version, nano::block_type::state)))
{
ec = nano::error_rpc::difficulty_limit;
}
// Retrieving optional block
std::shared_ptr<nano::block> block;
if (!ec && request.count ("block"))
{
block = block_impl (true);
if (block != nullptr)
{
if (hash != block->root ())
{
ec = nano::error_rpc::block_root_mismatch;
}
if (request.count ("version") == 0)
{
work_version = block->work_version ();
}
else if (!ec && work_version != block->work_version ())
{
ec = nano::error_rpc::block_work_version_mismatch;
}
// Difficulty calculation
if (!ec && request.count ("difficulty") == 0 && request.count ("multiplier") == 0)
{
difficulty = difficulty_ledger (*block);
}
// If optional block difficulty is higher than requested difficulty, send error
if (!ec && block->difficulty () >= difficulty)
{
ec = nano::error_rpc::block_work_enough;
}
}
}
if (!ec && response_l.empty ())
{
auto use_peers (request.get<bool> ("use_peers", false));
auto rpc_l (shared_from_this ());
auto callback = [rpc_l, hash, work_version, this] (boost::optional<uint64_t> const & work_a) {
if (work_a)
{
boost::property_tree::ptree response_l;
response_l.put ("hash", hash.to_string ());
uint64_t work (work_a.value ());
response_l.put ("work", nano::to_string_hex (work));
std::stringstream ostream;
auto result_difficulty (nano::work_difficulty (work_version, hash, work));
response_l.put ("difficulty", nano::to_string_hex (result_difficulty));
auto result_multiplier = nano::difficulty::to_multiplier (result_difficulty, node.default_difficulty (work_version));
response_l.put ("multiplier", nano::to_string (result_multiplier));
boost::property_tree::write_json (ostream, response_l);
rpc_l->response (ostream.str ());
}
else
{
json_error_response (rpc_l->response, "Cancelled");
}
};
if (!use_peers)
{
if (node.local_work_generation_enabled ())
{
auto error = node.distributed_work.make (work_version, hash, {}, difficulty, callback, {});
if (error)
{
ec = nano::error_common::failure_work_generation;
}
}
else
{
ec = nano::error_common::disabled_local_work_generation;
}
}
else
{
if (!account_opt.is_initialized ())
{
// Fetch account from block if not given
auto transaction_l (node.store.tx_begin_read ());
if (node.store.block.exists (transaction_l, hash))
{
account = node.store.block.account (transaction_l, hash);
}
}
auto secondary_work_peers_l (request.get<bool> ("secondary_work_peers", false));
auto const & peers_l (secondary_work_peers_l ? node.config.secondary_work_peers : node.config.work_peers);
if (node.work_generation_enabled (peers_l))
{
node.work_generate (work_version, hash, difficulty, callback, account, secondary_work_peers_l);
}
else
{
ec = nano::error_common::disabled_work_generation;
}
}
}
}
// Because of callback
if (ec)
{
response_errors ();
}
}
void nano::json_handler::work_cancel ()
{
auto hash (hash_impl ());
if (!ec)
{
node.observers.work_cancel.notify (hash);
response_l.put ("success", "");
}
response_errors ();
}
void nano::json_handler::work_get ()
{
auto wallet (wallet_impl ());
auto account (account_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
uint64_t work (0);
auto error_work (wallet->store.work_get (transaction, account, work));
(void)error_work;
response_l.put ("work", nano::to_string_hex (work));
}
}
response_errors ();
}
void nano::json_handler::work_set ()
{
node.workers.push_task (create_worker_task ([] (std::shared_ptr<nano::json_handler> const & rpc_l) {
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
auto work (rpc_l->work_optional_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
wallet->store.work_put (transaction, account, work);
rpc_l->response_l.put ("success", "");
}
}
rpc_l->response_errors ();
}));
}
void nano::json_handler::work_validate ()
{
auto hash (hash_impl ());
auto work (work_optional_impl ());
// Default to work_1 if not specified
auto work_version (work_version_optional_impl (nano::work_version::work_1));
auto difficulty (difficulty_optional_impl (work_version));
multiplier_optional_impl (work_version, difficulty);
if (!ec)
{
/* Transition to epoch_2 difficulty levels breaks previous behavior.
* When difficulty is not given, the default difficulty to validate changes when the first epoch_2 block is seen, breaking previous behavior.
* For this reason, when difficulty is not given, the "valid" field is no longer included in the response to break loudly any client expecting it.
* Instead, use the new fields:
* * valid_all: the work is valid at the current highest difficulty threshold
* * valid_receive: the work is valid for a receive block in an epoch_2 upgraded account
*/
auto result_difficulty (nano::work_difficulty (work_version, hash, work));
if (request.count ("difficulty"))
{
response_l.put ("valid", (result_difficulty >= difficulty) ? "1" : "0");
}
response_l.put ("valid_all", (result_difficulty >= node.default_difficulty (work_version)) ? "1" : "0");
response_l.put ("valid_receive", (result_difficulty >= nano::work_threshold (work_version, nano::block_details (nano::epoch::epoch_2, false, true, false))) ? "1" : "0");
response_l.put ("difficulty", nano::to_string_hex (result_difficulty));
auto result_multiplier = nano::difficulty::to_multiplier (result_difficulty, node.default_difficulty (work_version));
response_l.put ("multiplier", nano::to_string (result_multiplier));
}
response_errors ();
}
void nano::json_handler::work_peer_add ()
{
std::string address_text = request.get<std::string> ("address");
std::string port_text = request.get<std::string> ("port");
uint16_t port;
if (!nano::parse_port (port_text, port))
{
node.config.work_peers.push_back (std::make_pair (address_text, port));
response_l.put ("success", "");
}
else
{
ec = nano::error_common::invalid_port;
}
response_errors ();
}
void nano::json_handler::work_peers ()
{
boost::property_tree::ptree work_peers_l;
for (auto i (node.config.work_peers.begin ()), n (node.config.work_peers.end ()); i != n; ++i)
{
boost::property_tree::ptree entry;
entry.put ("", boost::str (boost::format ("%1%:%2%") % i->first % i->second));
work_peers_l.push_back (std::make_pair ("", entry));
}
response_l.add_child ("work_peers", work_peers_l);
response_errors ();
}
void nano::json_handler::work_peers_clear ()
{
node.config.work_peers.clear ();
response_l.put ("success", "");
response_errors ();
}
void nano::inprocess_rpc_handler::process_request (std::string const &, std::string const & body_a, std::function<void (std::string const &)> response_a)
{
// Note that if the rpc action is async, the shared_ptr<json_handler> lifetime will be extended by the action handler
auto handler (std::make_shared<nano::json_handler> (node, node_rpc_config, body_a, response_a, [this] () {
this->stop_callback ();
this->stop ();
}));
handler->process_request ();
}
void nano::inprocess_rpc_handler::process_request_v2 (rpc_handler_request_params const & params_a, std::string const & body_a, std::function<void (std::shared_ptr<std::string> const &)> response_a)
{
std::string body_l = params_a.json_envelope (body_a);
auto handler (std::make_shared<nano::ipc::flatbuffers_handler> (node, ipc_server, nullptr, node.config.ipc_config));
handler->process_json (reinterpret_cast<const uint8_t *> (body_l.data ()), body_l.size (), response_a);
}
namespace
{
void construct_json (nano::container_info_component * component, boost::property_tree::ptree & parent)
{
// We are a leaf node, print name and exit
if (!component->is_composite ())
{
auto & leaf_info = static_cast<nano::container_info_leaf *> (component)->get_info ();
boost::property_tree::ptree child;
child.put ("count", leaf_info.count);
child.put ("size", leaf_info.count * leaf_info.sizeof_element);
parent.add_child (leaf_info.name, child);
return;
}
auto composite = static_cast<nano::container_info_composite *> (component);
boost::property_tree::ptree current;
for (auto & child : composite->get_children ())
{
construct_json (child.get (), current);
}
parent.add_child (composite->get_name (), current);
}
// Any RPC handlers which require no arguments (excl default arguments) should go here.
// This is to prevent large if/else chains which compilers can have limits for (MSVC for instance has 128).
ipc_json_handler_no_arg_func_map create_ipc_json_handler_no_arg_func_map ()
{
ipc_json_handler_no_arg_func_map no_arg_funcs;
no_arg_funcs.emplace ("account_balance", &nano::json_handler::account_balance);
no_arg_funcs.emplace ("account_block_count", &nano::json_handler::account_block_count);
no_arg_funcs.emplace ("account_count", &nano::json_handler::account_count);
no_arg_funcs.emplace ("account_create", &nano::json_handler::account_create);
no_arg_funcs.emplace ("account_get", &nano::json_handler::account_get);
no_arg_funcs.emplace ("account_history", &nano::json_handler::account_history);
no_arg_funcs.emplace ("account_info", &nano::json_handler::account_info);
no_arg_funcs.emplace ("account_key", &nano::json_handler::account_key);
no_arg_funcs.emplace ("account_list", &nano::json_handler::account_list);
no_arg_funcs.emplace ("account_move", &nano::json_handler::account_move);
no_arg_funcs.emplace ("account_remove", &nano::json_handler::account_remove);
no_arg_funcs.emplace ("account_representative", &nano::json_handler::account_representative);
no_arg_funcs.emplace ("account_representative_set", &nano::json_handler::account_representative_set);
no_arg_funcs.emplace ("account_weight", &nano::json_handler::account_weight);
no_arg_funcs.emplace ("accounts_balances", &nano::json_handler::accounts_balances);
no_arg_funcs.emplace ("accounts_create", &nano::json_handler::accounts_create);
no_arg_funcs.emplace ("accounts_frontiers", &nano::json_handler::accounts_frontiers);
no_arg_funcs.emplace ("accounts_pending", &nano::json_handler::accounts_pending);
no_arg_funcs.emplace ("active_difficulty", &nano::json_handler::active_difficulty);
no_arg_funcs.emplace ("available_supply", &nano::json_handler::available_supply);
no_arg_funcs.emplace ("block_info", &nano::json_handler::block_info);
no_arg_funcs.emplace ("block", &nano::json_handler::block_info);
no_arg_funcs.emplace ("block_confirm", &nano::json_handler::block_confirm);
no_arg_funcs.emplace ("blocks", &nano::json_handler::blocks);
no_arg_funcs.emplace ("blocks_info", &nano::json_handler::blocks_info);
no_arg_funcs.emplace ("block_account", &nano::json_handler::block_account);
no_arg_funcs.emplace ("block_count", &nano::json_handler::block_count);
no_arg_funcs.emplace ("block_create", &nano::json_handler::block_create);
no_arg_funcs.emplace ("block_hash", &nano::json_handler::block_hash);
no_arg_funcs.emplace ("bootstrap", &nano::json_handler::bootstrap);
no_arg_funcs.emplace ("bootstrap_any", &nano::json_handler::bootstrap_any);
no_arg_funcs.emplace ("bootstrap_lazy", &nano::json_handler::bootstrap_lazy);
no_arg_funcs.emplace ("bootstrap_status", &nano::json_handler::bootstrap_status);
no_arg_funcs.emplace ("confirmation_active", &nano::json_handler::confirmation_active);
no_arg_funcs.emplace ("confirmation_height_currently_processing", &nano::json_handler::confirmation_height_currently_processing);
no_arg_funcs.emplace ("confirmation_history", &nano::json_handler::confirmation_history);
no_arg_funcs.emplace ("confirmation_info", &nano::json_handler::confirmation_info);
no_arg_funcs.emplace ("confirmation_quorum", &nano::json_handler::confirmation_quorum);
no_arg_funcs.emplace ("database_txn_tracker", &nano::json_handler::database_txn_tracker);
no_arg_funcs.emplace ("delegators", &nano::json_handler::delegators);
no_arg_funcs.emplace ("delegators_count", &nano::json_handler::delegators_count);
no_arg_funcs.emplace ("deterministic_key", &nano::json_handler::deterministic_key);
no_arg_funcs.emplace ("epoch_upgrade", &nano::json_handler::epoch_upgrade);
no_arg_funcs.emplace ("frontiers", &nano::json_handler::frontiers);
no_arg_funcs.emplace ("frontier_count", &nano::json_handler::account_count);
no_arg_funcs.emplace ("keepalive", &nano::json_handler::keepalive);
no_arg_funcs.emplace ("key_create", &nano::json_handler::key_create);
no_arg_funcs.emplace ("key_expand", &nano::json_handler::key_expand);
no_arg_funcs.emplace ("ledger", &nano::json_handler::ledger);
no_arg_funcs.emplace ("node_id", &nano::json_handler::node_id);
no_arg_funcs.emplace ("node_id_delete", &nano::json_handler::node_id_delete);
no_arg_funcs.emplace ("password_change", &nano::json_handler::password_change);
no_arg_funcs.emplace ("password_enter", &nano::json_handler::password_enter);
no_arg_funcs.emplace ("wallet_unlock", &nano::json_handler::password_enter);
no_arg_funcs.emplace ("peers", &nano::json_handler::peers);
no_arg_funcs.emplace ("pending", &nano::json_handler::pending);
no_arg_funcs.emplace ("pending_exists", &nano::json_handler::pending_exists);
no_arg_funcs.emplace ("process", &nano::json_handler::process);
no_arg_funcs.emplace ("pruned_exists", &nano::json_handler::pruned_exists);
no_arg_funcs.emplace ("receive", &nano::json_handler::receive);
no_arg_funcs.emplace ("receive_minimum", &nano::json_handler::receive_minimum);
no_arg_funcs.emplace ("receive_minimum_set", &nano::json_handler::receive_minimum_set);
no_arg_funcs.emplace ("representatives", &nano::json_handler::representatives);
no_arg_funcs.emplace ("representatives_online", &nano::json_handler::representatives_online);
no_arg_funcs.emplace ("republish", &nano::json_handler::republish);
no_arg_funcs.emplace ("search_pending", &nano::json_handler::search_pending);
no_arg_funcs.emplace ("search_pending_all", &nano::json_handler::search_pending_all);
no_arg_funcs.emplace ("send", &nano::json_handler::send);
no_arg_funcs.emplace ("sign", &nano::json_handler::sign);
no_arg_funcs.emplace ("stats", &nano::json_handler::stats);
no_arg_funcs.emplace ("stats_clear", &nano::json_handler::stats_clear);
no_arg_funcs.emplace ("stop", &nano::json_handler::stop);
no_arg_funcs.emplace ("telemetry", &nano::json_handler::telemetry);
no_arg_funcs.emplace ("unchecked", &nano::json_handler::unchecked);
no_arg_funcs.emplace ("unchecked_clear", &nano::json_handler::unchecked_clear);
no_arg_funcs.emplace ("unchecked_get", &nano::json_handler::unchecked_get);
no_arg_funcs.emplace ("unchecked_keys", &nano::json_handler::unchecked_keys);
no_arg_funcs.emplace ("unopened", &nano::json_handler::unopened);
no_arg_funcs.emplace ("uptime", &nano::json_handler::uptime);
no_arg_funcs.emplace ("validate_account_number", &nano::json_handler::validate_account_number);
no_arg_funcs.emplace ("version", &nano::json_handler::version);
no_arg_funcs.emplace ("wallet_add", &nano::json_handler::wallet_add);
no_arg_funcs.emplace ("wallet_add_watch", &nano::json_handler::wallet_add_watch);
no_arg_funcs.emplace ("wallet_balances", &nano::json_handler::wallet_balances);
no_arg_funcs.emplace ("wallet_change_seed", &nano::json_handler::wallet_change_seed);
no_arg_funcs.emplace ("wallet_contains", &nano::json_handler::wallet_contains);
no_arg_funcs.emplace ("wallet_create", &nano::json_handler::wallet_create);
no_arg_funcs.emplace ("wallet_destroy", &nano::json_handler::wallet_destroy);
no_arg_funcs.emplace ("wallet_export", &nano::json_handler::wallet_export);
no_arg_funcs.emplace ("wallet_frontiers", &nano::json_handler::wallet_frontiers);
no_arg_funcs.emplace ("wallet_history", &nano::json_handler::wallet_history);
no_arg_funcs.emplace ("wallet_info", &nano::json_handler::wallet_info);
no_arg_funcs.emplace ("wallet_balance_total", &nano::json_handler::wallet_info);
no_arg_funcs.emplace ("wallet_key_valid", &nano::json_handler::wallet_key_valid);
no_arg_funcs.emplace ("wallet_ledger", &nano::json_handler::wallet_ledger);
no_arg_funcs.emplace ("wallet_lock", &nano::json_handler::wallet_lock);
no_arg_funcs.emplace ("wallet_pending", &nano::json_handler::wallet_pending);
no_arg_funcs.emplace ("wallet_representative", &nano::json_handler::wallet_representative);
no_arg_funcs.emplace ("wallet_representative_set", &nano::json_handler::wallet_representative_set);
no_arg_funcs.emplace ("wallet_republish", &nano::json_handler::wallet_republish);
no_arg_funcs.emplace ("wallet_work_get", &nano::json_handler::wallet_work_get);
no_arg_funcs.emplace ("work_generate", &nano::json_handler::work_generate);
no_arg_funcs.emplace ("work_cancel", &nano::json_handler::work_cancel);
no_arg_funcs.emplace ("work_get", &nano::json_handler::work_get);
no_arg_funcs.emplace ("work_set", &nano::json_handler::work_set);
no_arg_funcs.emplace ("work_validate", &nano::json_handler::work_validate);
no_arg_funcs.emplace ("work_peer_add", &nano::json_handler::work_peer_add);
no_arg_funcs.emplace ("work_peers", &nano::json_handler::work_peers);
no_arg_funcs.emplace ("work_peers_clear", &nano::json_handler::work_peers_clear);
return no_arg_funcs;
}
/** Due to the asynchronous nature of updating confirmation heights, it can also be necessary to check active roots */
bool block_confirmed (nano::node & node, nano::transaction & transaction, nano::block_hash const & hash, bool include_active, bool include_only_confirmed)
{
bool is_confirmed = false;
if (include_active && !include_only_confirmed)
{
is_confirmed = true;
}
// Check whether the confirmation height is set
else if (node.ledger.block_confirmed (transaction, hash))
{
is_confirmed = true;
}
// This just checks it's not currently undergoing an active transaction
else if (!include_only_confirmed)
{
auto block (node.store.block.get (transaction, hash));
is_confirmed = (block != nullptr && !node.active.active (*block));
}
return is_confirmed;
}
const char * epoch_as_string (nano::epoch epoch)
{
switch (epoch)
{
case nano::epoch::epoch_2:
return "2";
case nano::epoch::epoch_1:
return "1";
default:
return "0";
}
}
}
| 1 | 16,871 | Is it kept for compatibility? | nanocurrency-nano-node | cpp |
@@ -537,10 +537,16 @@ func (m *StorageMinerNodeConnector) getMinerWorkerAddress(ctx context.Context, t
return address.Undef, xerrors.Errorf("failed to get tip state: %w", err)
}
- _, waddr, err := m.stateViewer.StateView(root).MinerControlAddresses(ctx, m.minerAddr)
+ view := m.stateViewer.StateView(root)
+ _, waddr, err := view.MinerControlAddresses(ctx, m.minerAddr)
if err != nil {
return address.Undef, xerrors.Errorf("failed to get miner control addresses: %w", err)
}
- return waddr, nil
+ workerSigner, err := view.AccountSignerAddress(ctx, waddr)
+ if err != nil {
+ return address.Undef, xerrors.Errorf("failed to lookup signing address for worker address: %s: %w", waddr.String(), err)
+ }
+
+ return workerSigner, nil
} | 1 | package storageminerconnector
import (
"context"
"errors"
"github.com/filecoin-project/go-address"
storagenode "github.com/filecoin-project/go-storage-miner/apis/node"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/internal/pkg/block"
"github.com/filecoin-project/go-filecoin/internal/pkg/chain"
"github.com/filecoin-project/go-filecoin/internal/pkg/chainsampler"
"github.com/filecoin-project/go-filecoin/internal/pkg/encoding"
"github.com/filecoin-project/go-filecoin/internal/pkg/message"
appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state"
"github.com/filecoin-project/go-filecoin/internal/pkg/types"
"github.com/filecoin-project/go-filecoin/internal/pkg/vm"
"github.com/filecoin-project/go-filecoin/internal/pkg/wallet"
)
type chainReader interface {
SampleChainRandomness(ctx context.Context, head block.TipSetKey, tag crypto.DomainSeparationTag, sampleHeight abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
GetTipSetStateRoot(ctx context.Context, tipKey block.TipSetKey) (cid.Cid, error)
GetTipSet(key block.TipSetKey) (block.TipSet, error)
Head() block.TipSetKey
}
// StorageMinerNodeConnector is a struct which satisfies the go-storage-miner
// needs of "the node," e.g. interacting with the blockchain, persisting sector
// states to disk, and so forth.
type StorageMinerNodeConnector struct {
minerAddr address.Address
chainHeightScheduler *chainsampler.HeightThresholdScheduler
chainState chainReader
outbox *message.Outbox
waiter *msg.Waiter
wallet *wallet.Wallet
stateViewer *appstate.Viewer
}
var _ storagenode.Interface = new(StorageMinerNodeConnector)
// NewStorageMinerNodeConnector produces a StorageMinerNodeConnector, which adapts
// types in this codebase to the interface representing "the node" which is
// expected by the go-storage-miner project.
func NewStorageMinerNodeConnector(minerAddress address.Address, chainStore *chain.Store, chainState chainReader, outbox *message.Outbox, waiter *msg.Waiter, wallet *wallet.Wallet, stateViewer *appstate.Viewer) *StorageMinerNodeConnector {
return &StorageMinerNodeConnector{
minerAddr: minerAddress,
chainHeightScheduler: chainsampler.NewHeightThresholdScheduler(chainStore),
chainState: chainState,
outbox: outbox,
waiter: waiter,
wallet: wallet,
stateViewer: stateViewer,
}
}
// StartHeightListener starts the scheduler that manages height listeners.
func (m *StorageMinerNodeConnector) StartHeightListener(ctx context.Context, htc <-chan interface{}) {
m.chainHeightScheduler.StartHeightListener(ctx, htc)
}
// StopHeightListener stops the scheduler that manages height listeners.
func (m *StorageMinerNodeConnector) StopHeightListener() {
m.chainHeightScheduler.Stop()
}
func (m *StorageMinerNodeConnector) handleNewTipSet(ctx context.Context, previousHead block.TipSet) (block.TipSet, error) {
return m.chainHeightScheduler.HandleNewTipSet(ctx, previousHead)
}
// SendSelfDeals creates self-deals and sends them to the network.
func (m *StorageMinerNodeConnector) SendSelfDeals(ctx context.Context, startEpoch, endEpoch abi.ChainEpoch, pieces ...abi.PieceInfo) (cid.Cid, error) {
waddr, err := m.getMinerWorkerAddress(ctx, m.chainState.Head())
if err != nil {
return cid.Undef, err
}
proposals := make([]market.ClientDealProposal, len(pieces))
for i, piece := range pieces {
proposals[i] = market.ClientDealProposal{
Proposal: market.DealProposal{
PieceCID: piece.PieceCID,
PieceSize: piece.Size,
Client: waddr,
Provider: m.minerAddr,
StartEpoch: startEpoch,
EndEpoch: endEpoch,
StoragePricePerEpoch: abi.NewTokenAmount(0),
ProviderCollateral: abi.NewTokenAmount(0),
ClientCollateral: abi.NewTokenAmount(0),
},
}
buf, err := encoding.Encode(proposals[i])
if err != nil {
return cid.Undef, err
}
sig, err := m.wallet.SignBytes(buf, waddr)
if err != nil {
return cid.Undef, err
}
proposals[i].ClientSignature = sig
}
params := market.PublishStorageDealsParams{Deals: proposals}
mcid, cerr, err := m.outbox.Send(
ctx,
waddr,
builtin.StorageMarketActorAddr,
types.ZeroAttoFIL,
types.NewGasPrice(1),
types.GasUnits(300),
true,
builtin.MethodsMarket.PublishStorageDeals,
&params,
)
if err != nil {
return cid.Undef, err
}
err = <-cerr
if err != nil {
return cid.Undef, err
}
return mcid, nil
}
// WaitForSelfDeals blocks until the provided storage deal-publishing message is
// mined into a block, producing a slice of deal IDs and an exit code when it is
// mined into a block (or an error, if encountered).
func (m *StorageMinerNodeConnector) WaitForSelfDeals(ctx context.Context, mcid cid.Cid) ([]abi.DealID, uint8, error) {
receiptChan := make(chan *vm.MessageReceipt)
errChan := make(chan error)
go func() {
err := m.waiter.Wait(ctx, mcid, func(b *block.Block, message *types.SignedMessage, r *vm.MessageReceipt) error {
receiptChan <- r
return nil
})
if err != nil {
errChan <- err
}
}()
select {
case receipt := <-receiptChan:
if receipt.ExitCode != 0 {
return nil, (uint8)(receipt.ExitCode), nil
}
var ret market.PublishStorageDealsReturn
err := encoding.Decode(receipt.ReturnValue, &ret)
if err != nil {
return nil, 0, err
}
dealIds := make([]uint64, len(ret.IDs))
for i, id := range ret.IDs {
dealIds[i] = uint64(id)
}
dealIdsPrime := make([]abi.DealID, len(dealIds))
for idx := range dealIds {
dealIdsPrime[idx] = abi.DealID(dealIds[idx])
}
return dealIdsPrime, 0, nil
case err := <-errChan:
return nil, 0, err
case <-ctx.Done():
return nil, 0, errors.New("context ended prematurely")
}
}
// SendPreCommitSector creates a pre-commit sector message and sends it to the
// network.
func (m *StorageMinerNodeConnector) SendPreCommitSector(ctx context.Context, sectorNum abi.SectorNumber, sealedCID cid.Cid, sealRandEpoch, expiration abi.ChainEpoch, pieces ...storagenode.PieceWithDealInfo) (cid.Cid, error) {
waddr, err := m.getMinerWorkerAddress(ctx, m.chainState.Head())
if err != nil {
return cid.Undef, err
}
dealIds := make([]abi.DealID, len(pieces))
for i, piece := range pieces {
dealIds[i] = piece.DealInfo.DealID
}
params := miner.SectorPreCommitInfo{
RegisteredProof: abi.RegisteredProof_StackedDRG32GiBSeal,
SectorNumber: sectorNum,
SealedCID: sealedCID,
SealRandEpoch: sealRandEpoch,
DealIDs: dealIds,
Expiration: expiration,
}
mcid, cerr, err := m.outbox.Send(
ctx,
waddr,
m.minerAddr,
types.ZeroAttoFIL,
types.NewGasPrice(1),
types.GasUnits(300),
true,
builtin.MethodsMiner.PreCommitSector,
&params,
)
if err != nil {
return cid.Undef, err
}
err = <-cerr
if err != nil {
return cid.Undef, err
}
return mcid, nil
}
// WaitForPreCommitSector blocks until the pre-commit sector message is mined
// into a block, returning the block's height and message's exit code (or an
// error if one is encountered).
func (m *StorageMinerNodeConnector) WaitForPreCommitSector(ctx context.Context, mcid cid.Cid) (abi.ChainEpoch, uint8, error) {
return m.waitForMessageHeight(ctx, mcid)
}
// SendProveCommitSector creates a commit sector message and sends it to the
// network.
func (m *StorageMinerNodeConnector) SendProveCommitSector(ctx context.Context, sectorNum abi.SectorNumber, proof []byte, deals ...abi.DealID) (cid.Cid, error) {
waddr, err := m.getMinerWorkerAddress(ctx, m.chainState.Head())
if err != nil {
return cid.Undef, err
}
params := miner.ProveCommitSectorParams{
SectorNumber: sectorNum,
Proof: proof,
}
mcid, cerr, err := m.outbox.Send(
ctx,
waddr,
m.minerAddr,
types.ZeroAttoFIL,
types.NewGasPrice(1),
types.GasUnits(300),
true,
builtin.MethodsMiner.ProveCommitSector,
&params,
)
if err != nil {
return cid.Undef, err
}
err = <-cerr
if err != nil {
return cid.Undef, err
}
return mcid, nil
}
// WaitForProveCommitSector blocks until the provided pre-commit message has
// been mined into the chainStore, producing the height of the block in which the
// message was mined (and the message's exit code) or an error if any is
// encountered.
func (m *StorageMinerNodeConnector) WaitForProveCommitSector(ctx context.Context, mcid cid.Cid) (uint8, error) {
_, exitCode, err := m.waitForMessageHeight(ctx, mcid)
return exitCode, err
}
// GetSealTicket produces the seal ticket used when pre-committing a sector.
func (m *StorageMinerNodeConnector) GetSealTicket(ctx context.Context, tok storagenode.TipSetToken) (storagenode.SealTicket, error) {
var tsk block.TipSetKey
if err := tsk.UnmarshalCBOR(tok); err != nil {
return storagenode.SealTicket{}, xerrors.Errorf("failed to marshal TipSetToken into a TipSetKey: %w", err)
}
ts, err := m.chainState.GetTipSet(tsk)
if err != nil {
return storagenode.SealTicket{}, xerrors.Errorf("getting head ts for SealTicket failed: %w", err)
}
h, err := ts.Height()
if err != nil {
return storagenode.SealTicket{}, err
}
entropy, err := encoding.Encode(m.minerAddr)
if err != nil {
return storagenode.SealTicket{}, err
}
r, err := m.chainState.SampleChainRandomness(ctx, tsk, crypto.DomainSeparationTag_SealRandomness, h-miner.ChainFinalityish, entropy)
if err != nil {
return storagenode.SealTicket{}, xerrors.Errorf("getting randomness for SealTicket failed: %w", err)
}
return storagenode.SealTicket{
BlockHeight: uint64(h),
TicketBytes: abi.SealRandomness(r),
}, nil
}
func (m *StorageMinerNodeConnector) GetChainHead(ctx context.Context) (storagenode.TipSetToken, abi.ChainEpoch, error) {
tsk := m.chainState.Head()
ts, err := m.chainState.GetTipSet(tsk)
if err != nil {
return nil, 0, xerrors.Errorf("failed to get tip: %w", err)
}
h, err := ts.Height()
if err != nil {
return nil, 0, xerrors.Errorf("failed to get tipset height: %w")
}
tok, err := encoding.Encode(tsk)
if err != nil {
return nil, 0, xerrors.Errorf("failed to marshal TipSetKey to CBOR byte slice for TipSetToken: %w", err)
}
return tok, h, nil
}
// GetSealSeed is used to acquire the interactive seal seed for the provided pre-commit
// message, and provides channels to accommodate chainStore re-orgs. The caller is
// responsible for choosing an interval-value, which is a quantity of blocks to
// wait (after the block in which the pre-commit message is mined) before
// computing and sampling a seed.
func (m *StorageMinerNodeConnector) GetSealSeed(ctx context.Context, preCommitMsg cid.Cid, interval uint64) (<-chan storagenode.SealSeed, <-chan storagenode.SeedInvalidated, <-chan storagenode.FinalityReached, <-chan storagenode.GetSealSeedError) {
sc := make(chan storagenode.SealSeed)
ec := make(chan storagenode.GetSealSeedError)
ic := make(chan storagenode.SeedInvalidated)
dc := make(chan storagenode.FinalityReached)
go func() {
h, exitCode, err := m.waitForMessageHeight(ctx, preCommitMsg)
if err != nil {
ec <- storagenode.NewGetSealSeedError(err, storagenode.GetSealSeedFailedError)
return
}
if exitCode != 0 {
err := xerrors.Errorf("non-zero exit code for pre-commit message %d", exitCode)
ec <- storagenode.NewGetSealSeedError(err, storagenode.GetSealSeedFailedError)
return
}
listener := m.chainHeightScheduler.AddListener(h + abi.ChainEpoch(interval))
// translate tipset key to seal seed handler
for {
select {
case key := <-listener.HitCh:
ts, err := m.chainState.GetTipSet(key)
if err != nil {
ec <- storagenode.NewGetSealSeedError(err, storagenode.GetSealSeedFatalError)
break
}
tsHeight, err := ts.Height()
if err != nil {
ec <- storagenode.NewGetSealSeedError(err, storagenode.GetSealSeedFatalError)
break
}
entropy, err := encoding.Encode(m.minerAddr)
if err != nil {
ec <- storagenode.NewGetSealSeedError(err, storagenode.GetSealSeedFatalError)
break
}
randomness, err := m.chainState.SampleChainRandomness(ctx, key,
crypto.DomainSeparationTag_InteractiveSealChallengeSeed, tsHeight, entropy)
if err != nil {
ec <- storagenode.NewGetSealSeedError(err, storagenode.GetSealSeedFatalError)
break
}
sc <- storagenode.SealSeed{
BlockHeight: uint64(tsHeight),
TicketBytes: abi.InteractiveSealRandomness(randomness),
}
case err := <-listener.ErrCh:
ec <- storagenode.NewGetSealSeedError(err, storagenode.GetSealSeedFailedError)
case <-listener.InvalidCh:
ic <- storagenode.SeedInvalidated{}
case <-listener.DoneCh:
dc <- storagenode.FinalityReached{}
return
case <-ctx.Done():
m.chainHeightScheduler.CancelListener(listener)
return
}
}
}()
return sc, ic, dc, ec
}
type heightAndExitCode struct {
exitCode uint8
height abi.ChainEpoch
}
func (m *StorageMinerNodeConnector) waitForMessageHeight(ctx context.Context, mcid cid.Cid) (abi.ChainEpoch, uint8, error) {
height := make(chan heightAndExitCode)
errChan := make(chan error)
go func() {
err := m.waiter.Wait(ctx, mcid, func(b *block.Block, message *types.SignedMessage, r *vm.MessageReceipt) error {
height <- heightAndExitCode{
height: b.Height,
exitCode: (uint8)(r.ExitCode),
}
return nil
})
if err != nil {
errChan <- err
}
}()
select {
case h := <-height:
return h.height, h.exitCode, nil
case err := <-errChan:
return 0, 0, err
case <-ctx.Done():
return 0, 0, errors.New("context ended prematurely")
}
}
func (m *StorageMinerNodeConnector) GetMinerWorkerAddress(ctx context.Context, tok storagenode.TipSetToken) (address.Address, error) {
var tsk block.TipSetKey
if err := encoding.Decode(tok, &tsk); err != nil {
return address.Undef, xerrors.Errorf("failed to marshal TipSetToken into a TipSetKey: %w", err)
}
return m.getMinerWorkerAddress(ctx, tsk)
}
func (m *StorageMinerNodeConnector) SendReportFaults(ctx context.Context, sectorIDs ...abi.SectorNumber) (cid.Cid, error) {
waddr, err := m.getMinerWorkerAddress(ctx, m.chainState.Head())
if err != nil {
return cid.Undef, err
}
bf := abi.NewBitField()
for _, id := range sectorIDs {
bf.Set(uint64(id))
}
params := miner.DeclareTemporaryFaultsParams{
SectorNumbers: bf,
// TODO: use a real value here
Duration: abi.ChainEpoch(miner.ProvingPeriod),
}
mcid, cerr, err := m.outbox.Send(
ctx,
waddr,
m.minerAddr,
types.ZeroAttoFIL,
types.NewGasPrice(1),
types.GasUnits(300),
true,
builtin.MethodsMiner.DeclareTemporaryFaults,
&params,
)
if err != nil {
return cid.Undef, err
}
err = <-cerr
if err != nil {
return cid.Undef, err
}
return mcid, nil
}
func (m *StorageMinerNodeConnector) WaitForReportFaults(ctx context.Context, msgCid cid.Cid) (uint8, error) {
_, exitCode, err := m.waitForMessageHeight(ctx, msgCid)
return exitCode, err
}
func (m *StorageMinerNodeConnector) GetSealedCID(ctx context.Context, tok storagenode.TipSetToken, sectorNum abi.SectorNumber) (sealedCID cid.Cid, wasFound bool, err error) {
var tsk block.TipSetKey
if err := tsk.UnmarshalCBOR(tok); err != nil {
return cid.Undef, false, xerrors.Errorf("failed to marshal TipSetToken into a TipSetKey: %w", err)
}
root, err := m.chainState.GetTipSetStateRoot(ctx, tsk)
if err != nil {
return cid.Undef, false, xerrors.Errorf("failed to get tip state: %w", err)
}
preCommitInfo, found, err := m.stateViewer.StateView(root).MinerGetPrecommittedSector(ctx, m.minerAddr, uint64(sectorNum))
if !found || err != nil {
return cid.Undef, found, err
}
return preCommitInfo.Info.SealedCID, true, nil
}
func (m *StorageMinerNodeConnector) CheckPieces(ctx context.Context, sectorNum abi.SectorNumber, pieces []storagenode.PieceWithDealInfo) *storagenode.CheckPiecesError {
return nil
}
func (m *StorageMinerNodeConnector) CheckSealing(ctx context.Context, commD []byte, dealIDs []abi.DealID, ticket storagenode.SealTicket) *storagenode.CheckSealingError {
return nil
}
func (m *StorageMinerNodeConnector) WalletHas(ctx context.Context, addr address.Address) (bool, error) {
return m.wallet.HasAddress(addr), nil
}
func (m *StorageMinerNodeConnector) getMinerWorkerAddress(ctx context.Context, tsk block.TipSetKey) (address.Address, error) {
root, err := m.chainState.GetTipSetStateRoot(ctx, tsk)
if err != nil {
return address.Undef, xerrors.Errorf("failed to get tip state: %w", err)
}
_, waddr, err := m.stateViewer.StateView(root).MinerControlAddresses(ctx, m.minerAddr)
if err != nil {
return address.Undef, xerrors.Errorf("failed to get miner control addresses: %w", err)
}
return waddr, nil
}
| 1 | 23,158 | I would find it pretty reasonable to add a MinerSigner method on the state view that puts these together. | filecoin-project-venus | go |
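A minimal sketch of the reviewer's suggestion above, assuming a hypothetical MinerSigner method added to the state view type; the method name, receiver, and error wrapping are illustrative only, but the two lookups it combines (MinerControlAddresses and AccountSignerAddress) are exactly the ones the patch performs in getMinerWorkerAddress:

// MinerSigner is a hypothetical convenience lookup that resolves the signing
// address behind a miner's worker address in one call, combining the two
// view queries that getMinerWorkerAddress currently performs separately.
func (v *View) MinerSigner(ctx context.Context, maddr address.Address) (address.Address, error) {
	// Resolve the miner actor's worker (control) address.
	_, waddr, err := v.MinerControlAddresses(ctx, maddr)
	if err != nil {
		return address.Undef, xerrors.Errorf("failed to get miner control addresses: %w", err)
	}
	// Resolve the account actor's signing key address behind the worker address.
	signer, err := v.AccountSignerAddress(ctx, waddr)
	if err != nil {
		return address.Undef, xerrors.Errorf("failed to lookup signing address for worker address %s: %w", waddr, err)
	}
	return signer, nil
}

// With such a helper, the connector's getMinerWorkerAddress could reduce to a
// single view call:
//   view := m.stateViewer.StateView(root)
//   return view.MinerSigner(ctx, m.minerAddr)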
@@ -310,6 +310,10 @@ class DBUpgrader {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS +
" ADD COLUMN " + PodDBAdapter.KEY_FEED_SKIP_ENDING + " INTEGER DEFAULT 0;");
}
+ if (oldVersion < 1090001) { // fixme / todo: fix version
+ db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS +
+ " ADD COLUMN " + PodDBAdapter.KEY_EPISODE_NOTIFICATION + " INTEGER DEFAULT 0;");
+ }
}
} | 1 | package de.danoeh.antennapod.core.storage;
import android.content.ContentValues;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.media.MediaMetadataRetriever;
import android.util.Log;
import de.danoeh.antennapod.core.feed.FeedItem;
import static de.danoeh.antennapod.core.feed.FeedPreferences.SPEED_USE_GLOBAL;
class DBUpgrader {
/**
* Upgrades the given database to a new schema version
*/
static void upgrade(final SQLiteDatabase db, final int oldVersion, final int newVersion) {
if (oldVersion <= 1) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS + " ADD COLUMN "
+ PodDBAdapter.KEY_TYPE + " TEXT");
}
if (oldVersion <= 2) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_LINK + " TEXT");
}
if (oldVersion <= 3) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_ITEM_IDENTIFIER + " TEXT");
}
if (oldVersion <= 4) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS + " ADD COLUMN "
+ PodDBAdapter.KEY_FEED_IDENTIFIER + " TEXT");
}
if (oldVersion <= 5) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG
+ " ADD COLUMN " + PodDBAdapter.KEY_REASON_DETAILED + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_DOWNLOAD_LOG
+ " ADD COLUMN " + PodDBAdapter.KEY_DOWNLOADSTATUS_TITLE + " TEXT");
}
if (oldVersion <= 6) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_CHAPTER_TYPE + " INTEGER");
}
if (oldVersion <= 7) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_PLAYBACK_COMPLETION_DATE
+ " INTEGER");
}
if (oldVersion <= 8) {
final int KEY_ID_POSITION = 0;
final int KEY_MEDIA_POSITION = 1;
// Add feeditem column to feedmedia table
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_FEEDITEM
+ " INTEGER");
Cursor feeditemCursor = db.query(PodDBAdapter.TABLE_NAME_FEED_ITEMS,
new String[]{PodDBAdapter.KEY_ID, PodDBAdapter.KEY_MEDIA}, "? > 0",
new String[]{PodDBAdapter.KEY_MEDIA}, null, null, null);
if (feeditemCursor.moveToFirst()) {
db.beginTransaction();
ContentValues contentValues = new ContentValues();
do {
long mediaId = feeditemCursor.getLong(KEY_MEDIA_POSITION);
contentValues.put(PodDBAdapter.KEY_FEEDITEM, feeditemCursor.getLong(KEY_ID_POSITION));
db.update(PodDBAdapter.TABLE_NAME_FEED_MEDIA, contentValues, PodDBAdapter.KEY_ID + "=?", new String[]{String.valueOf(mediaId)});
contentValues.clear();
} while (feeditemCursor.moveToNext());
db.setTransactionSuccessful();
db.endTransaction();
}
feeditemCursor.close();
}
if (oldVersion <= 9) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD
+ " INTEGER DEFAULT 1");
}
if (oldVersion <= 10) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN flattr_status"
+ " INTEGER");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN flattr_status"
+ " INTEGER");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_PLAYED_DURATION
+ " INTEGER");
}
if (oldVersion <= 11) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_USERNAME
+ " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_PASSWORD
+ " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN image"
+ " INTEGER");
}
if (oldVersion <= 12) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_IS_PAGED + " INTEGER DEFAULT 0");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_NEXT_PAGE_LINK + " TEXT");
}
if (oldVersion <= 13) {
// remove duplicate rows in "Chapters" table that were created because of a bug.
db.execSQL(String.format("DELETE FROM %s WHERE %s NOT IN " +
"(SELECT MIN(%s) as %s FROM %s GROUP BY %s,%s,%s,%s,%s)",
PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,
PodDBAdapter.KEY_ID,
PodDBAdapter.KEY_ID,
PodDBAdapter.KEY_ID,
PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS,
PodDBAdapter.KEY_TITLE,
PodDBAdapter.KEY_START,
PodDBAdapter.KEY_FEEDITEM,
PodDBAdapter.KEY_LINK,
PodDBAdapter.KEY_CHAPTER_TYPE));
}
if (oldVersion <= 14) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " INTEGER");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " SET " + PodDBAdapter.KEY_AUTO_DOWNLOAD + " = "
+ "(SELECT " + PodDBAdapter.KEY_AUTO_DOWNLOAD
+ " FROM " + PodDBAdapter.TABLE_NAME_FEEDS
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEEDS + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_FEED + ")");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_HIDE + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_LAST_UPDATE_FAILED + " INTEGER DEFAULT 0");
// create indexes
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_FEED);
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDMEDIA_FEEDITEM);
db.execSQL(PodDBAdapter.CREATE_INDEX_QUEUE_FEEDITEM);
db.execSQL(PodDBAdapter.CREATE_INDEX_SIMPLECHAPTERS_FEEDITEM);
}
if (oldVersion <= 15) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + " INTEGER DEFAULT -1");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=0"
+ " WHERE " + PodDBAdapter.KEY_DOWNLOADED + "=0");
Cursor c = db.rawQuery("SELECT " + PodDBAdapter.KEY_FILE_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " WHERE " + PodDBAdapter.KEY_DOWNLOADED + "=1 "
+ " AND " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=-1", null);
if (c.moveToFirst()) {
MediaMetadataRetriever mmr = new MediaMetadataRetriever();
do {
String fileUrl = c.getString(0);
try {
mmr.setDataSource(fileUrl);
byte[] image = mmr.getEmbeddedPicture();
if (image != null) {
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=1"
+ " WHERE " + PodDBAdapter.KEY_FILE_URL + "='" + fileUrl + "'");
} else {
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " SET " + PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE + "=0"
+ " WHERE " + PodDBAdapter.KEY_FILE_URL + "='" + fileUrl + "'");
}
} catch (Exception e) {
e.printStackTrace();
}
} while (c.moveToNext());
}
c.close();
}
if (oldVersion <= 16) {
String selectNew = "SELECT " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " INNER JOIN " + PodDBAdapter.TABLE_NAME_FEED_MEDIA + " ON "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID + "="
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_FEEDITEM
+ " LEFT OUTER JOIN " + PodDBAdapter.TABLE_NAME_QUEUE + " ON "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_ID + "="
+ PodDBAdapter.TABLE_NAME_QUEUE + "." + PodDBAdapter.KEY_FEEDITEM
+ " WHERE "
+ PodDBAdapter.TABLE_NAME_FEED_ITEMS + "." + PodDBAdapter.KEY_READ + " = 0 AND " // unplayed
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_DOWNLOADED + " = 0 AND " // undownloaded
+ PodDBAdapter.TABLE_NAME_FEED_MEDIA + "." + PodDBAdapter.KEY_POSITION + " = 0 AND " // not partially played
+ PodDBAdapter.TABLE_NAME_QUEUE + "." + PodDBAdapter.KEY_ID + " IS NULL"; // not in queue
String sql = "UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " SET " + PodDBAdapter.KEY_READ + "=" + FeedItem.NEW
+ " WHERE " + PodDBAdapter.KEY_ID + " IN (" + selectNew + ")";
Log.d("Migration", "SQL: " + sql);
db.execSQL(sql);
}
if (oldVersion <= 17) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_AUTO_DELETE_ACTION + " INTEGER DEFAULT 0");
}
if (oldVersion < 1030005) {
db.execSQL("UPDATE FeedItems SET auto_download=0 WHERE " +
"(read=1 OR id IN (SELECT feeditem FROM FeedMedia WHERE position>0 OR downloaded=1)) " +
"AND id NOT IN (SELECT feeditem FROM Queue)");
}
if (oldVersion < 1040001) {
db.execSQL(PodDBAdapter.CREATE_TABLE_FAVORITES);
}
if (oldVersion < 1040002) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_MEDIA
+ " ADD COLUMN " + PodDBAdapter.KEY_LAST_PLAYED_TIME + " INTEGER DEFAULT 0");
}
if (oldVersion < 1040013) {
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_PUBDATE);
db.execSQL(PodDBAdapter.CREATE_INDEX_FEEDITEMS_READ);
}
if (oldVersion < 1050003) {
// Migrates feed list filter data
db.beginTransaction();
// Change to intermediate values to avoid overwriting in the following find/replace
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'unplayed', 'noplay')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'not_queued', 'noqueue')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'not_downloaded', 'nodl')");
// Replace played, queued, and downloaded with their opposites
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'played', 'unplayed')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'queued', 'not_queued')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'downloaded', 'not_downloaded')");
// Now replace intermediates for unplayed, not queued, etc. with their opposites
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'noplay', 'played')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'noqueue', 'queued')");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'nodl', 'downloaded')");
// Paused doesn't have an opposite, so unplayed is the next best option
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + "\n" +
"SET " + PodDBAdapter.KEY_HIDE + " = replace(" + PodDBAdapter.KEY_HIDE + ", 'paused', 'unplayed')");
db.setTransactionSuccessful();
db.endTransaction();
// and now get ready for autodownload filters
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_INCLUDE_FILTER + " TEXT DEFAULT ''");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_EXCLUDE_FILTER + " TEXT DEFAULT ''");
// and now auto refresh
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_KEEP_UPDATED + " INTEGER DEFAULT 1");
}
if (oldVersion < 1050004) {
            // prevent old timestamps from being misinterpreted as ETags
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " SET " + PodDBAdapter.KEY_LASTUPDATE + "=NULL");
}
if (oldVersion < 1060200) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_CUSTOM_TITLE + " TEXT");
}
if (oldVersion < 1060596) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE_URL + " TEXT");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE_URL + " TEXT");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + " SET " + PodDBAdapter.KEY_IMAGE_URL + " = ("
+ " SELECT " + PodDBAdapter.KEY_DOWNLOAD_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_IMAGES
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEED_ITEMS + ".image)");
db.execSQL("UPDATE " + PodDBAdapter.TABLE_NAME_FEEDS + " SET " + PodDBAdapter.KEY_IMAGE_URL + " = ("
+ " SELECT " + PodDBAdapter.KEY_DOWNLOAD_URL
+ " FROM " + PodDBAdapter.TABLE_NAME_FEED_IMAGES
+ " WHERE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES + "." + PodDBAdapter.KEY_ID
+ " = " + PodDBAdapter.TABLE_NAME_FEEDS + ".image)");
db.execSQL("DROP TABLE " + PodDBAdapter.TABLE_NAME_FEED_IMAGES);
}
if (oldVersion < 1070400) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FEED_PLAYBACK_SPEED + " REAL DEFAULT " + SPEED_USE_GLOBAL);
}
if (oldVersion < 1070401) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_SORT_ORDER + " TEXT");
}
if (oldVersion < 1090000) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS
+ " ADD COLUMN " + PodDBAdapter.KEY_FEED_VOLUME_ADAPTION + " INTEGER DEFAULT 0");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_SIMPLECHAPTERS
+ " ADD COLUMN " + PodDBAdapter.KEY_IMAGE_URL + " TEXT DEFAULT NULL");
}
if (oldVersion < 1090001) {
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS +
" ADD COLUMN " + PodDBAdapter.KEY_FEED_SKIP_INTRO + " INTEGER DEFAULT 0;");
db.execSQL("ALTER TABLE " + PodDBAdapter.TABLE_NAME_FEEDS +
" ADD COLUMN " + PodDBAdapter.KEY_FEED_SKIP_ENDING + " INTEGER DEFAULT 0;");
}
}
}
| 1 | 17,657 | It's stored in `PodDBAdapter.VERSION`. I usually use the expected release version code for that change. As this will be released in AntennaPod 2.2.0, the code would be `2020000`. | AntennaPod-AntennaPod | java |
@@ -150,7 +150,7 @@ func (task *UpdateTask) execCQLStmts(ver string, stmts []string) error {
log.Println(rmspaceRegex.ReplaceAllString(stmt, " "))
e := task.db.Exec(stmt)
if e != nil {
- return fmt.Errorf("error executing CQL statement:%v", e)
+ return fmt.Errorf("error executing statement:%v", e)
}
}
log.Printf("---- Done ----\n") | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package schema
import (
// In this context md5 is just used for versioning the current schema. It is a weak cryptographic primitive and
// should not be used for anything more important (password hashes etc.). Marking it as #nosec because of how it's
// being used.
"crypto/md5" // #nosec
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"sort"
"strings"
)
type (
// UpdateTask represents a task
// that executes a cassandra schema upgrade
UpdateTask struct {
db DB
config *UpdateConfig
}
// manifest is a value type that represents
// the deserialized manifest.json file within
// a schema version directory
manifest struct {
CurrVersion string
MinCompatibleVersion string
Description string
SchemaUpdateCqlFiles []string
md5 string
}
// changeSet represents all the changes
// corresponding to a single schema version
changeSet struct {
version string
manifest *manifest
cqlStmts []string
}
// byVersion is a comparator type
// for sorting a set of version
// strings
byVersion []string
)
const (
manifestFileName = "manifest.json"
)
var (
whitelistedCQLPrefixes = [4]string{"CREATE", "ALTER", "INSERT", "DROP"}
)
// newUpdateSchemaTask returns a new instance of UpdateTask
func newUpdateSchemaTask(db DB, config *UpdateConfig) *UpdateTask {
return &UpdateTask{
db: db,
config: config,
}
}
// Run executes the task
func (task *UpdateTask) Run() error {
config := task.config
log.Printf("UpdateSchemeTask started, config=%+v\n", config)
if config.IsDryRun {
if err := task.setupDryrunDatabase(); err != nil {
return fmt.Errorf("error creating dryrun database:%v", err.Error())
}
}
currVer, err := task.db.ReadSchemaVersion()
if err != nil {
return fmt.Errorf("error reading current schema version:%v", err.Error())
}
updates, err := task.buildChangeSet(currVer)
if err != nil {
return err
}
err = task.executeUpdates(currVer, updates)
if err != nil {
return err
}
log.Printf("UpdateSchemeTask done\n")
return nil
}
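// executeUpdates applies each change set in order and records the new schema version after every
// successful step, so a rerun after a failure resumes from the last fully applied version.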
func (task *UpdateTask) executeUpdates(currVer string, updates []changeSet) error {
if len(updates) == 0 {
log.Printf("found zero updates from current version %v", currVer)
return nil
}
for _, cs := range updates {
err := task.execCQLStmts(cs.version, cs.cqlStmts)
if err != nil {
return err
}
err = task.updateSchemaVersion(currVer, &cs)
if err != nil {
return err
}
log.Printf("Schema updated from %v to %v\n", currVer, cs.version)
currVer = cs.version
}
return nil
}
func (task *UpdateTask) execCQLStmts(ver string, stmts []string) error {
log.Printf("---- Executing updates for version %v ----\n", ver)
for _, stmt := range stmts {
log.Println(rmspaceRegex.ReplaceAllString(stmt, " "))
e := task.db.Exec(stmt)
if e != nil {
return fmt.Errorf("error executing CQL statement:%v", e)
}
}
log.Printf("---- Done ----\n")
return nil
}
func (task *UpdateTask) updateSchemaVersion(oldVer string, cs *changeSet) error {
err := task.db.UpdateSchemaVersion(cs.version, cs.manifest.MinCompatibleVersion)
if err != nil {
return fmt.Errorf("failed to update schema_version table, err=%v", err.Error())
}
err = task.db.WriteSchemaUpdateLog(oldVer, cs.manifest.CurrVersion, cs.manifest.md5, cs.manifest.Description)
if err != nil {
return fmt.Errorf("failed to add entry to schema_update_history, err=%v", err.Error())
}
return nil
}
func (task *UpdateTask) buildChangeSet(currVer string) ([]changeSet, error) {
config := task.config
verDirs, err := readSchemaDir(config.SchemaDir, currVer, config.TargetVersion)
if err != nil {
return nil, fmt.Errorf("error listing schema dir:%v", err.Error())
}
var result []changeSet
for _, vd := range verDirs {
dirPath := config.SchemaDir + "/" + vd
m, e := readManifest(dirPath)
if e != nil {
return nil, fmt.Errorf("error processing manifest for version %v:%v", vd, e.Error())
}
if m.CurrVersion != dirToVersion(vd) {
return nil, fmt.Errorf("manifest version doesn't match with dirname, dir=%v,manifest.version=%v",
vd, m.CurrVersion)
}
stmts, e := task.parseSQLStmts(dirPath, m)
if e != nil {
return nil, e
}
e = validateCQLStmts(stmts)
if e != nil {
return nil, fmt.Errorf("error processing version %v:%v", vd, e.Error())
}
cs := changeSet{}
cs.manifest = m
cs.cqlStmts = stmts
cs.version = m.CurrVersion
result = append(result, cs)
}
return result, nil
}
func (task *UpdateTask) parseSQLStmts(dir string, manifest *manifest) ([]string, error) {
result := make([]string, 0, 4)
for _, file := range manifest.SchemaUpdateCqlFiles {
path := dir + "/" + file
stmts, err := ParseFile(path)
if err != nil {
return nil, fmt.Errorf("error parsing file %v, err=%v", path, err)
}
result = append(result, stmts...)
}
if len(result) == 0 {
return nil, fmt.Errorf("found 0 updates in dir %v", dir)
}
return result, nil
}
func validateCQLStmts(stmts []string) error {
for _, stmt := range stmts {
valid := false
for _, prefix := range whitelistedCQLPrefixes {
if strings.HasPrefix(stmt, prefix) {
valid = true
break
}
}
if !valid {
return fmt.Errorf("CQL prefix not in whitelist, stmt=%v", stmt)
}
}
return nil
}
func readManifest(dirPath string) (*manifest, error) {
filePath := dirPath + "/" + manifestFileName
jsonStr, err := ioutil.ReadFile(filePath)
if err != nil {
return nil, err
}
jsonBlob := []byte(jsonStr)
var manifest manifest
err = json.Unmarshal(jsonBlob, &manifest)
if err != nil {
return nil, err
}
currVer, err := parseValidateVersion(manifest.CurrVersion)
if err != nil {
return nil, fmt.Errorf("invalid CurrVersion in manifest")
}
manifest.CurrVersion = currVer
minVer, err := parseValidateVersion(manifest.MinCompatibleVersion)
if err != nil {
return nil, err
}
if len(manifest.MinCompatibleVersion) == 0 {
return nil, fmt.Errorf("invalid MinCompatibleVersion in manifest")
}
manifest.MinCompatibleVersion = minVer
if len(manifest.SchemaUpdateCqlFiles) == 0 {
return nil, fmt.Errorf("manifest missing SchemaUpdateCqlFiles")
}
// See comment above. This is an appropriate usage of md5.
// #nosec
md5Bytes := md5.Sum(jsonBlob)
manifest.md5 = hex.EncodeToString(md5Bytes[:])
return &manifest, nil
}
// readSchemaDir returns a sorted list of subdir names that hold
// the schema changes for versions in the range startVer < ver <= endVer
// when endVer is empty this method returns all subdir names that are greater than startVer
// this method has an assumption that the subdirs containing the
// schema changes will be of the form vx.x, where x.x is the version
// returns error when
// - endVer is non-empty and startVer >= endVer
// - endVer is empty and no subdirs have version >= startVer
// - endVer is non-empty and subdir with version == endVer is not found
func readSchemaDir(dir string, startVer string, endVer string) ([]string, error) {
subdirs, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
hasEndVer := len(endVer) > 0
if hasEndVer && cmpVersion(startVer, endVer) >= 0 {
return nil, fmt.Errorf("startVer (%v) must be less than endVer (%v)", startVer, endVer)
}
var endFound bool
var highestVer string
var result []string
for _, dir := range subdirs {
if !dir.IsDir() {
continue
}
dirname := dir.Name()
if !versionStrRegex.MatchString(dirname) {
continue
}
ver := dirToVersion(dirname)
if len(highestVer) == 0 {
highestVer = ver
} else if cmpVersion(ver, highestVer) > 0 {
highestVer = ver
}
highcmp := 0
lowcmp := cmpVersion(ver, startVer)
if hasEndVer {
highcmp = cmpVersion(ver, endVer)
}
if lowcmp <= 0 || highcmp > 0 {
continue // out of range
}
endFound = endFound || (highcmp == 0)
result = append(result, dirname)
}
	// when endVer is specified, at least one result MUST be found since startVer < endVer
if hasEndVer && !endFound {
return nil, fmt.Errorf("version dir not found for target version %v", endVer)
}
// when endVer is empty and no result is found, then the highest version
// found must be equal to startVer, else return error
if !hasEndVer && len(result) == 0 {
if len(highestVer) == 0 || cmpVersion(startVer, highestVer) != 0 {
return nil, fmt.Errorf("no subdirs found with version >= %v", startVer)
}
return result, nil
}
sort.Sort(byVersion(result))
return result, nil
}
// sets up a temporary dryrun database for
// executing the cassandra schema update
func (task *UpdateTask) setupDryrunDatabase() error {
setupConfig := &SetupConfig{
Overwrite: true,
InitialVersion: "0.0",
}
setupTask := newSetupSchemaTask(task.db, setupConfig)
return setupTask.Run()
}
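// dirToVersion strips the leading "v" from a schema directory name, e.g. "v1.2" -> "1.2".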
func dirToVersion(dir string) string {
return dir[1:]
}
func (v byVersion) Len() int {
return len(v)
}
func (v byVersion) Less(i, j int) bool {
v1 := dirToVersion(v[i])
v2 := dirToVersion(v[j])
return cmpVersion(v1, v2) < 0
}
func (v byVersion) Swap(i, j int) {
v[i], v[j] = v[j], v[i]
}
| 1 | 10,578 | What about method name itself? Do we run this for SQL too? | temporalio-temporal | go |
@@ -42,7 +42,7 @@ public class SalesforceHybridTestActivity extends SalesforceDroidGapActivity {
static String refreshToken = "5Aep861KIwKdekr90IDidO4EhfJiYo3fzEvTvsEgM9sfDpGX0qFFeQzHG2mZeUH_.XNSBE0Iz38fnWsyYYkUgTz";
static String authToken = "--will-be-set-through-refresh--";
static String identityUrl = "https://test.salesforce.com";
- static String instanceUrl = "https://cs1.salesforce.com";
+ static String instanceUrl = "https://mobilesdk.cs1.my.salesforce.com";
static String loginUrl = "https://test.salesforce.com";
static String orgId = "00DS0000003E98jMAC";
static String userId = "005S0000004s2iyIAA"; | 1 | /*
* Copyright (c) 2013, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.phonegap.ui;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.rest.ClientManager;
import com.salesforce.androidsdk.rest.ClientManager.LoginOptions;
/**
* Sub-class of SalesforceDroidGapActivity that authenticates using hard-coded credentials
*
*/
public class SalesforceHybridTestActivity extends SalesforceDroidGapActivity {
static String username = "[email protected]";
static String accountName = username + " (SalesforceHybridTest)";
static String refreshToken = "5Aep861KIwKdekr90IDidO4EhfJiYo3fzEvTvsEgM9sfDpGX0qFFeQzHG2mZeUH_.XNSBE0Iz38fnWsyYYkUgTz";
static String authToken = "--will-be-set-through-refresh--";
static String identityUrl = "https://test.salesforce.com";
static String instanceUrl = "https://cs1.salesforce.com";
static String loginUrl = "https://test.salesforce.com";
static String orgId = "00DS0000003E98jMAC";
static String userId = "005S0000004s2iyIAA";
@Override
protected ClientManager buildClientManager() {
final ClientManager clientManager = super.buildClientManager();
final LoginOptions loginOptions = SalesforceSDKManager.getInstance().getLoginOptions();
clientManager.createNewAccount(accountName,
username, refreshToken,
authToken, instanceUrl,
loginUrl, identityUrl,
loginOptions.oauthClientId, orgId,
userId, null);
return clientManager;
}
}
| 1 | 15,597 | `instanceUrl` should be `cs1.salesforce.com`. `communityUrl` would be `mobilesdk.cs1.my.salesforce.com`. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -200,7 +200,10 @@ adios2_error adios2_variable_name(char *name, size_t *size,
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
*size = variableBase->m_Name.size();
- variableBase->m_Name.copy(name, *size);
+ if (name)
+ {
+ variableBase->m_Name.copy(name, *size);
+ }
return adios2_error_none;
}
catch (...) | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* adios2_c_variable.cpp
*
* Created on: Nov 10, 2017
* Author: William F Godoy [email protected]
*/
#include "adios2_c_variable.h"
#include "adios2/core/Variable.h"
#include "adios2/helper/adiosFunctions.h"
#include "adios2_c_internal.h"
namespace
{
adios2_shapeid adios2_ToShapeID(const adios2::ShapeID shapeIDCpp,
const std::string &hint)
{
adios2_shapeid shapeID = adios2_shapeid_unknown;
switch (shapeIDCpp)
{
case (adios2::ShapeID::GlobalValue):
shapeID = adios2_shapeid_global_value;
break;
case (adios2::ShapeID::GlobalArray):
shapeID = adios2_shapeid_global_array;
break;
case (adios2::ShapeID::JoinedArray):
shapeID = adios2_shapeid_joined_array;
break;
case (adios2::ShapeID::LocalValue):
shapeID = adios2_shapeid_local_value;
break;
case (adios2::ShapeID::LocalArray):
shapeID = adios2_shapeid_local_array;
break;
default:
throw std::invalid_argument("ERROR: invalid adios2_shapeid, " + hint +
"\n");
}
return shapeID;
}
} // end anonymous namespace
#ifdef __cplusplus
extern "C" {
#endif
adios2_error adios2_set_shape(adios2_variable *variable, const size_t ndims,
const size_t *shape)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call to "
"adios2_set_shape");
adios2::helper::CheckForNullptr(shape, "for start, in call to "
"adios2_set_shape");
adios2::core::VariableBase *variableBase =
reinterpret_cast<adios2::core::VariableBase *>(variable);
const adios2::Dims shapeV(shape, shape + ndims);
variableBase->SetShape(shapeV);
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_set_shape"));
}
}
adios2_error adios2_set_block_selection(adios2_variable *variable,
const size_t block_id)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call to "
"adios2_set_block_selection");
adios2::core::VariableBase *variableBase =
reinterpret_cast<adios2::core::VariableBase *>(variable);
variableBase->SetBlockSelection(block_id);
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_set_block_selection"));
}
}
adios2_error adios2_set_selection(adios2_variable *variable, const size_t ndims,
const size_t *start, const size_t *count)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call to "
"adios2_set_selection");
adios2::helper::CheckForNullptr(start, "for start, in call to "
"adios2_set_selection");
adios2::helper::CheckForNullptr(count, "for count, in call to "
"adios2_set_selection");
adios2::core::VariableBase *variableBase =
reinterpret_cast<adios2::core::VariableBase *>(variable);
const adios2::Dims startV(start, start + ndims);
const adios2::Dims countV(count, count + ndims);
variableBase->SetSelection({startV, countV});
variableBase->CheckDimensions("in call to adios2_set_selection");
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_set_selection"));
}
}
adios2_error adios2_set_memory_selection(adios2_variable *variable,
const size_t ndims,
const size_t *memory_start,
const size_t *memory_count)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call to "
"adios2_set_memory_selection");
adios2::helper::CheckForNullptr(memory_start,
"for start, in call to "
"adios2_set_memory_selection");
adios2::helper::CheckForNullptr(memory_count,
"for count, in call to "
"adios2_set_memory_selection");
adios2::core::VariableBase *variableBase =
reinterpret_cast<adios2::core::VariableBase *>(variable);
const adios2::Dims memoryStartV(memory_start, memory_start + ndims);
const adios2::Dims memoryCountV(memory_count, memory_count + ndims);
variableBase->SetMemorySelection({memoryStartV, memoryCountV});
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_set_memory_selection"));
}
}
adios2_error adios2_set_step_selection(adios2_variable *variable,
const size_t step_start,
const size_t step_count)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call to "
"adios2_set_step_selection");
adios2::core::VariableBase *variableBase =
reinterpret_cast<adios2::core::VariableBase *>(variable);
variableBase->SetStepSelection(
adios2::Box<size_t>{step_start, step_count});
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_set_selection"));
}
}
adios2_error adios2_variable_name(char *name, size_t *size,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(
variable,
"for const adios2_variable, in call to adios2_variable_name");
adios2::helper::CheckForNullptr(
name, "for char* name, in call to adios2_variable_name");
adios2::helper::CheckForNullptr(
size, "for size_t* length, in call to adios2_variable_name");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
*size = variableBase->m_Name.size();
variableBase->m_Name.copy(name, *size);
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_name"));
}
}
adios2_error adios2_variable_type(adios2_type *type,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(
variable,
"for const adios2_variable, in call to adios2_variable_type");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
auto type_s = variableBase->m_Type;
if (type_s == adios2::helper::GetType<std::string>())
{
*type = adios2_type_string;
}
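        // Compare the variable's C++ type string against each supported C type and set the matching adios2_type value.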
#define make_case(T) \
else if (type_s == adios2::helper::GetType<MapAdios2Type<T>::Type>()) \
{ \
*type = T; \
}
ADIOS2_FOREACH_C_TYPE_1ARG(make_case)
#undef make_case
else { *type = adios2_type_unknown; }
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_type"));
}
}
adios2_error adios2_variable_type_string(char *type, size_t *size,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for const adios2_variable, in call to "
"adios2_variable_type_string");
adios2::helper::CheckForNullptr(
type, "for char* type, in call to adios2_variable_type_string");
adios2::helper::CheckForNullptr(
size, "for size_t* length, in call to adios2_variable_type_string");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
*size = variableBase->m_Type.size();
variableBase->m_Type.copy(type, *size);
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_type_string"));
}
}
adios2_error adios2_variable_shapeid(adios2_shapeid *shapeid,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for const adios2_variable, in call to "
"adios2_variable_shapeid");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
*shapeid = adios2_ToShapeID(variableBase->m_ShapeID,
"in call to adios2_variable_shapeid");
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_shapeid"));
}
}
adios2_error adios2_variable_ndims(size_t *ndims,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for const adios2_variable, in call to "
"adios2_variable_ndims");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
*ndims = variableBase->m_Shape.size();
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_ndims"));
}
}
adios2_error adios2_variable_shape(size_t *shape,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for const adios2_variable, in call to "
"adios2_variable_shape");
adios2::helper::CheckForNullptr(shape, "for size_t* shape, in call to "
"adios2_variable_shape");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
std::copy(variableBase->m_Shape.begin(), variableBase->m_Shape.end(),
shape);
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_shape"));
}
}
adios2_error adios2_variable_start(size_t *start,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for const adios2_variable, in call to "
"adios2_variable_start");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
std::copy(variableBase->m_Start.begin(), variableBase->m_Start.end(),
start);
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_shape"));
}
}
adios2_error adios2_variable_count(size_t *count,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for const adios2_variable, in call to "
"adios2_variable_count");
adios2::helper::CheckForNullptr(count,
"for const adios2_count, in call to "
"adios2_variable_count");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
std::copy(variableBase->m_Count.begin(), variableBase->m_Count.end(),
count);
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_count"));
}
}
adios2_error adios2_variable_steps_start(size_t *steps_start,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for const adios2_variable, in call to "
"adios2_variable_steps_start");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
*steps_start = variableBase->m_AvailableStepsStart;
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_steps_start"));
}
}
adios2_error adios2_variable_steps(size_t *steps,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for const adios2_variable, in call to "
"adios2_variable_steps");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
*steps = variableBase->m_AvailableStepsCount;
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_steps"));
}
}
adios2_error adios2_selection_size(size_t *size,
const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call to "
"adios2_selection_size");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
*size = variableBase->SelectionSize();
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_selection_size"));
}
}
adios2_error adios2_add_operation(size_t *operation_index,
adios2_variable *variable,
adios2_operator *op, const char *key,
const char *value)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call to "
"adios2_add_operation");
adios2::helper::CheckForNullptr(op, "for adios2_operator, in call to "
"adios2_add_operation");
adios2::helper::CheckForNullptr(key, "for char* key, in call to "
"adios2_add_operation");
adios2::helper::CheckForNullptr(value, "for char* value, in call to "
"adios2_add_operation");
adios2::core::VariableBase *variableBase =
reinterpret_cast<adios2::core::VariableBase *>(variable);
adios2::core::Operator *opCpp =
reinterpret_cast<adios2::core::Operator *>(op);
*operation_index =
variableBase->AddOperation(*opCpp, adios2::Params{{key, value}});
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_add_operation"));
}
}
adios2_error adios2_set_operation_parameter(adios2_variable *variable,
const size_t operation_id,
const char *key, const char *value)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call to "
"adios2_set_operation_parameter");
adios2::core::VariableBase *variableBase =
reinterpret_cast<adios2::core::VariableBase *>(variable);
variableBase->SetOperationParameter(operation_id, key, value);
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_set_operation_parameter"));
}
}
adios2_error adios2_variable_min(void *min, const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call "
"to adios2_variable_min");
adios2::helper::CheckForNullptr(
min, "for void* min, in call to adios2_variable_min");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
const std::string type(variableBase->m_Type);
if (type == "compound")
{
// not supported
}
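        // Dispatch on the runtime type string, downcast to the typed Variable<T>, and copy its recorded minimum into the caller's buffer.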
#define declare_template_instantiation(T) \
else if (type == adios2::helper::GetType<T>()) \
{ \
T *minT = reinterpret_cast<T *>(min); \
const adios2::core::Variable<T> *variableT = \
dynamic_cast<const adios2::core::Variable<T> *>(variableBase); \
*minT = variableT->m_Min; \
}
ADIOS2_FOREACH_STDTYPE_1ARG(declare_template_instantiation)
#undef declare_template_instantiation
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_min"));
}
}
adios2_error adios2_variable_max(void *max, const adios2_variable *variable)
{
try
{
adios2::helper::CheckForNullptr(variable,
"for adios2_variable, in call "
"to adios2_variable_max");
adios2::helper::CheckForNullptr(
max, "for void* max, in call to adios2_variable_max");
const adios2::core::VariableBase *variableBase =
reinterpret_cast<const adios2::core::VariableBase *>(variable);
const std::string type(variableBase->m_Type);
if (type == "compound")
{
// not supported
}
#define declare_template_instantiation(T) \
else if (type == adios2::helper::GetType<T>()) \
{ \
T *maxT = reinterpret_cast<T *>(max); \
const adios2::core::Variable<T> *variableT = \
dynamic_cast<const adios2::core::Variable<T> *>(variableBase); \
*maxT = variableT->m_Max; \
}
ADIOS2_FOREACH_STDTYPE_1ARG(declare_template_instantiation)
#undef declare_template_instantiation
return adios2_error_none;
}
catch (...)
{
return static_cast<adios2_error>(
adios2::helper::ExceptionToError("adios2_variable_max"));
}
}
#ifdef __cplusplus
} // end extern C
#endif
| 1 | 12,497 | Let's use `if(name != nullptr)` to remove ambiguity | ornladios-ADIOS2 | cpp |
@@ -1027,4 +1027,12 @@ public class SurfaceNamer extends NameFormatterDelegator {
public String getReleaseAnnotation(ReleaseLevel releaseLevel) {
return getNotImplementedString("SurfaceNamer.getReleaseAnnotation");
}
+
+ public String getMetadataIdentifier() {
+ return getNotImplementedString("SurfaceNamer.getMetadataIdentifier");
+ }
+
+ public String getMetadataName() {
+ return getNotImplementedString("SurfaceNamer.getMetadataName");
+ }
} | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import com.google.api.codegen.ReleaseLevel;
import com.google.api.codegen.config.FieldConfig;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.config.ResourceNameConfig;
import com.google.api.codegen.config.ResourceNameType;
import com.google.api.codegen.config.SingleResourceNameConfig;
import com.google.api.codegen.config.VisibilityConfig;
import com.google.api.codegen.util.CommonRenderingUtil;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.NameFormatter;
import com.google.api.codegen.util.NameFormatterDelegator;
import com.google.api.codegen.util.NamePath;
import com.google.api.codegen.util.SymbolTable;
import com.google.api.codegen.util.TypeNameConverter;
import com.google.api.codegen.viewmodel.ServiceMethodType;
import com.google.api.tools.framework.aspects.documentation.model.DocumentationUtil;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.ProtoElement;
import com.google.api.tools.framework.model.TypeRef;
import io.grpc.Status;
import java.util.ArrayList;
import java.util.List;
/**
* A SurfaceNamer provides language-specific names for specific components of a view for a surface.
*
* <p>Naming is composed of two steps:
*
* <p>1. Composing a Name instance with the name pieces 2. Formatting the Name for the particular
* type of identifier needed.
*
* <p>This class delegates step 2 to the provided name formatter, which generally would be a
* language-specific namer.
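 *
 * <p>For example, {@code publicMethodName(Name.from("get").join(Name.from("shard_id")))} composes
 * the name pieces and then lets the supplied NameFormatter render them (e.g. "getShardId" in a
 * camel-case language). The field name here is purely illustrative; the rendered identifier
 * depends entirely on the formatter in use.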
*/
public class SurfaceNamer extends NameFormatterDelegator {
private final ModelTypeFormatter modelTypeFormatter;
private final TypeNameConverter typeNameConverter;
private final String packageName;
public SurfaceNamer(
NameFormatter languageNamer,
ModelTypeFormatter modelTypeFormatter,
TypeNameConverter typeNameConverter,
String packageName) {
super(languageNamer);
this.modelTypeFormatter = modelTypeFormatter;
this.typeNameConverter = typeNameConverter;
this.packageName = packageName;
}
public ModelTypeFormatter getModelTypeFormatter() {
return modelTypeFormatter;
}
public TypeNameConverter getTypeNameConverter() {
return typeNameConverter;
}
public String getPackageName() {
return packageName;
}
public String getNotImplementedString(String feature) {
return "$ NOT IMPLEMENTED: " + feature + " $";
}
/** The full path to the source file */
public String getSourceFilePath(String path, String publicClassName) {
return getNotImplementedString("SurfaceNamer.getSourceFilePath");
}
/** The name of the class that implements a particular proto interface. */
public String getApiWrapperClassName(Interface interfaze) {
return publicClassName(Name.upperCamel(interfaze.getSimpleName(), "Api"));
}
/** The name of the implementation class that implements a particular proto interface. */
public String getApiWrapperClassImplName(Interface interfaze) {
return getNotImplementedString("SurfaceNamer.getApiWrapperClassImplName");
}
/** The name of the class that implements snippets for a particular proto interface. */
public String getApiSnippetsClassName(Interface interfaze) {
return publicClassName(Name.upperCamel(interfaze.getSimpleName(), "ApiSnippets"));
}
/** The name of the class that contains paged list response wrappers. */
public String getPagedResponseWrappersClassName() {
return publicClassName(Name.upperCamel("PagedResponseWrappers"));
}
protected Name getResourceTypeNameObject(ResourceNameConfig resourceNameConfig) {
String entityName = resourceNameConfig.getEntityName();
ResourceNameType resourceNameType = resourceNameConfig.getResourceNameType();
switch (resourceNameType) {
case ANY:
return getAnyResourceTypeName();
case FIXED:
return Name.from(entityName).join("name_fixed");
case ONEOF:
// Remove suffix "_oneof". This allows the collection oneof config to "share" an entity name
// with a collection config.
entityName = removeSuffix(entityName, "_oneof");
return Name.from(entityName).join("name_oneof");
case SINGLE:
return Name.from(entityName).join("name");
case NONE:
default:
throw new UnsupportedOperationException("unexpected entity name type");
}
}
protected Name getAnyResourceTypeName() {
return Name.from("resource_name");
}
private static String removeSuffix(String original, String suffix) {
if (original.endsWith(suffix)) {
original = original.substring(0, original.length() - suffix.length());
}
return original;
}
public String getResourceTypeName(ResourceNameConfig resourceNameConfig) {
return publicClassName(getResourceTypeNameObject(resourceNameConfig));
}
public String getResourceParameterName(ResourceNameConfig resourceNameConfig) {
return localVarName(getResourceTypeNameObject(resourceNameConfig));
}
public String getResourcePropertyName(ResourceNameConfig resourceNameConfig) {
return publicMethodName(getResourceTypeNameObject(resourceNameConfig));
}
public String getResourceEnumName(ResourceNameConfig resourceNameConfig) {
return getResourceTypeNameObject(resourceNameConfig).toUpperUnderscore().toUpperCase();
}
public String getResourceTypeParseMethodName(
ModelTypeTable typeTable, FieldConfig resourceFieldConfig) {
return getNotImplementedString("SurfaceNamer.getResourceTypeParseMethodName");
}
/**
* The name of the iterate method of the PagedListResponse type for a field, returning the
* resource type iterate method if available
*/
public String getPagedResponseIterateMethod(
FeatureConfig featureConfig, FieldConfig fieldConfig) {
if (featureConfig.useResourceNameFormatOption(fieldConfig)) {
Name resourceName = getResourceTypeNameObject(fieldConfig.getResourceNameConfig());
return publicMethodName(Name.from("iterate_all_as").join(resourceName));
} else {
return getPagedResponseIterateMethod();
}
}
/** The name of the iterate method of the PagedListResponse type for a field */
public String getPagedResponseIterateMethod() {
return publicMethodName(Name.from("iterate_all_elements"));
}
/** The name of the create method for the resource one-of for the given field config */
public String getResourceOneofCreateMethod(ModelTypeTable typeTable, FieldConfig fieldConfig) {
return getAndSaveResourceTypeName(typeTable, fieldConfig.getMessageFieldConfig())
+ "."
+ publicMethodName(Name.from("from"));
}
/** The name of the constructor for the service client. The client is VKit generated, not GRPC. */
public String getApiWrapperClassConstructorName(Interface interfaze) {
return publicClassName(Name.upperCamel(interfaze.getSimpleName(), "Api"));
}
/**
   * The name of the example of the constructor for the service client. The client is VKit generated,
* not GRPC.
*/
public String getApiWrapperClassConstructorExampleName(Interface interfaze) {
return getApiWrapperClassConstructorName(interfaze);
}
/** Constructor name for the type with the given nickname. */
public String getTypeConstructor(String typeNickname) {
return typeNickname;
}
/**
* The name of a variable that holds an instance of the class that implements a particular proto
* interface.
*/
public String getApiWrapperVariableName(Interface interfaze) {
return localVarName(Name.upperCamel(interfaze.getSimpleName(), "Api"));
}
/**
* The name of a variable that holds an instance of the module that contains the implementation of
* a particular proto interface. So far it is used by just NodeJS.
*/
public String getApiWrapperModuleName() {
return getNotImplementedString("SurfaceNamer.getApiWrapperModuleName");
}
/**
* The version of a variable that holds an instance of the module that contains the implementation
* of a particular proto interface. So far it is used by just NodeJS.
*/
public String getApiWrapperModuleVersion() {
return getNotImplementedString("SurfaceNamer.getApiWrapperModuleVersion");
}
/**
* The name of the settings class for a particular proto interface; not used in most languages.
*/
public String getApiSettingsClassName(Interface interfaze) {
return publicClassName(Name.upperCamel(interfaze.getSimpleName(), "Settings"));
}
/** The function name to retrieve default client option */
public String getDefaultApiSettingsFunctionName(Interface service) {
return getNotImplementedString("SurfaceNamer.getDefaultClientOptionFunctionName");
}
/**
* The name of a variable that holds the settings class for a particular proto interface; not used
* in most languages.
*/
public String getApiSettingsVariableName(Interface interfaze) {
return localVarName(Name.upperCamel(interfaze.getSimpleName(), "Settings"));
}
/**
* The name of the builder class for the settings class for a particular proto interface; not used
* in most languages.
*/
public String getApiSettingsBuilderVarName(Interface interfaze) {
return localVarName(Name.upperCamel(interfaze.getSimpleName(), "SettingsBuilder"));
}
/** The variable name for the given identifier that is formatted. */
public String getFormattedVariableName(Name identifier) {
return localVarName(Name.from("formatted").join(identifier));
}
/** The name of the field. */
public String getFieldName(Field field) {
return publicFieldName(Name.from(field.getSimpleName()));
}
/** The function name to set the given proto field. */
public String getFieldSetFunctionName(FeatureConfig featureConfig, FieldConfig fieldConfig) {
Field field = fieldConfig.getField();
if (featureConfig.useResourceNameFormatOption(fieldConfig)) {
return getResourceNameFieldSetFunctionName(fieldConfig.getMessageFieldConfig());
} else {
return getFieldSetFunctionName(field);
}
}
/** The function name to set the given proto field. */
public String getFieldSetFunctionName(Field field) {
return getFieldSetFunctionName(field.getType(), Name.from(field.getSimpleName()));
}
/** The function name to set a field having the given type and name. */
public String getFieldSetFunctionName(TypeRef type, Name identifier) {
if (type.isMap()) {
return publicMethodName(Name.from("put", "all").join(identifier));
} else if (type.isRepeated()) {
return publicMethodName(Name.from("add", "all").join(identifier));
} else {
return publicMethodName(Name.from("set").join(identifier));
}
}
/** The function name to set a field that is a resource name class. */
public String getResourceNameFieldSetFunctionName(FieldConfig fieldConfig) {
TypeRef type = fieldConfig.getField().getType();
Name identifier = Name.from(fieldConfig.getField().getSimpleName());
Name resourceName = getResourceTypeNameObject(fieldConfig.getResourceNameConfig());
if (type.isMap()) {
return getNotImplementedString("SurfaceNamer.getResourceNameFieldSetFunctionName:map-type");
} else if (type.isRepeated()) {
return publicMethodName(
Name.from("add", "all").join(identifier).join("with").join(resourceName).join("list"));
} else {
return publicMethodName(Name.from("set").join(identifier).join("with").join(resourceName));
}
}
/** The function name to get the given proto field. */
public String getFieldGetFunctionName(FeatureConfig featureConfig, FieldConfig fieldConfig) {
Field field = fieldConfig.getField();
if (featureConfig.useResourceNameFormatOption(fieldConfig)) {
return getResourceNameFieldGetFunctionName(fieldConfig.getMessageFieldConfig());
} else {
return getFieldGetFunctionName(field);
}
}
/** The function name to get the given proto field. */
public String getFieldGetFunctionName(Field field) {
return getFieldGetFunctionName(field.getType(), Name.from(field.getSimpleName()));
}
/** The function name to get a field having the given type and name. */
public String getFieldGetFunctionName(TypeRef type, Name identifier) {
if (type.isRepeated() && !type.isMap()) {
return publicMethodName(Name.from("get").join(identifier).join("list"));
} else {
return publicMethodName(Name.from("get").join(identifier));
}
}
/** The function name to get a field that is a resource name class. */
public String getResourceNameFieldGetFunctionName(FieldConfig fieldConfig) {
TypeRef type = fieldConfig.getField().getType();
Name identifier = Name.from(fieldConfig.getField().getSimpleName());
Name resourceName = getResourceTypeNameObject(fieldConfig.getResourceNameConfig());
if (type.isMap()) {
return getNotImplementedString("SurfaceNamer.getResourceNameFieldGetFunctionName:map-type");
} else if (type.isRepeated()) {
return publicMethodName(
Name.from("get").join(identifier).join("list_as").join(resourceName).join("list"));
} else {
return publicMethodName(Name.from("get").join(identifier).join("as").join(resourceName));
}
}
/**
* The function name to get the count of elements in the given field.
*
* @throws IllegalArgumentException if the field is not a repeated field.
*/
public String getFieldCountGetFunctionName(Field field) {
if (field.isRepeated()) {
return publicMethodName(Name.from("get", field.getSimpleName(), "count"));
} else {
throw new IllegalArgumentException(
"Non-repeated field " + field.getSimpleName() + " has no count function.");
}
}
/**
* The function name to get an element by index from the given field.
*
* @throws IllegalArgumentException if the field is not a repeated field.
*/
public String getByIndexGetFunctionName(Field field) {
if (field.isRepeated()) {
return publicMethodName(Name.from("get", field.getSimpleName()));
} else {
throw new IllegalArgumentException(
"Non-repeated field " + field.getSimpleName() + " has no get-by-index function.");
}
}
/** The name of the example package */
public String getExamplePackageName() {
return getNotImplementedString("SurfaceNamer.getExamplePackageName");
}
/** The local (unqualified) name of the package */
public String getLocalPackageName() {
return getNotImplementedString("SurfaceNamer.getLocalPackageName");
}
/** The local (unqualified) name of the example package */
public String getLocalExamplePackageName() {
return getNotImplementedString("SurfaceNamer.getLocalExamplePackageName");
}
/**
* The name of a path template constant for the given collection, to be held in an API wrapper
* class.
*/
public String getPathTemplateName(
Interface service, SingleResourceNameConfig resourceNameConfig) {
return inittedConstantName(Name.from(resourceNameConfig.getEntityName(), "path", "template"));
}
/** The name of a getter function to get a particular path template for the given collection. */
public String getPathTemplateNameGetter(
Interface service, SingleResourceNameConfig resourceNameConfig) {
return publicMethodName(
Name.from("get", resourceNameConfig.getEntityName(), "name", "template"));
}
/** The name of the path template resource, in human format. */
public String getPathTemplateResourcePhraseName(SingleResourceNameConfig resourceNameConfig) {
return Name.from(resourceNameConfig.getEntityName()).toPhrase();
}
/** The function name to format the entity for the given collection. */
public String getFormatFunctionName(
Interface service, SingleResourceNameConfig resourceNameConfig) {
return staticFunctionName(Name.from("format", resourceNameConfig.getEntityName(), "name"));
}
/**
* The function name to parse a variable from the string representing the entity for the given
* collection.
*/
public String getParseFunctionName(String var, SingleResourceNameConfig resourceNameConfig) {
return staticFunctionName(
Name.from("parse", var, "from", resourceNameConfig.getEntityName(), "name"));
}
/** The entity name for the given collection. */
public String getEntityName(SingleResourceNameConfig resourceNameConfig) {
return localVarName(Name.from(resourceNameConfig.getEntityName()));
}
/** The parameter name for the entity for the given collection config. */
public String getEntityNameParamName(SingleResourceNameConfig resourceNameConfig) {
return localVarName(Name.from(resourceNameConfig.getEntityName(), "name"));
}
/** The parameter name for the given lower-case field name. */
public String getParamName(String var) {
return localVarName(Name.from(var));
}
public String getPropertyName(String var) {
return publicMethodName(Name.from(var));
}
/** The documentation name of a parameter for the given lower-case field name. */
public String getParamDocName(String var) {
return localVarName(Name.from(var));
}
/** The method name of the retry filter for the given key */
public String retryFilterMethodName(String key) {
return privateMethodName(Name.from(key).join("retry").join("filter"));
}
/** The method name of the retry backoff for the given key */
public String retryBackoffMethodName(String key) {
return privateMethodName(Name.from("get").join(key).join("retry").join("backoff"));
}
/** The method name of the timeout backoff for the given key */
public String timeoutBackoffMethodName(String key) {
return privateMethodName(Name.from("get").join(key).join("timeout").join("backoff"));
}
/** The page streaming descriptor name for the given method. */
public String getPageStreamingDescriptorName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "PageStreamingDescriptor"));
}
/** The name of the constant to hold the page streaming descriptor for the given method. */
public String getPageStreamingDescriptorConstName(Method method) {
return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("page_str_desc"));
}
/** The page streaming factory name for the given method. */
public String getPagedListResponseFactoryName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "PagedListResponseFactory"));
}
/** The name of the constant to hold the page streaming factory for the given method. */
public String getPagedListResponseFactoryConstName(Method method) {
return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("page_str_fact"));
}
/** The name of the constant to hold the bundling descriptor for the given method. */
public String getBundlingDescriptorConstName(Method method) {
return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("bundling_desc"));
}
/** The key to use in a dictionary for the given method. */
public String getMethodKey(Method method) {
return keyName(Name.upperCamel(method.getSimpleName()));
}
/** The path to the client config for the given interface. */
public String getClientConfigPath(Interface service) {
return getNotImplementedString("SurfaceNamer.getClientConfigPath");
}
/** Human-friendly name of this service */
public String getServicePhraseName(Interface service) {
return Name.upperCamel(service.getSimpleName()).toPhrase();
}
/**
* The type name of the Grpc server class. This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcServerTypeName(Interface service) {
return getNotImplementedString("SurfaceNamer.getGrpcServerTypeName");
}
/**
* The type name of the Grpc client class. This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcClientTypeName(Interface service) {
return getNotImplementedString("SurfaceNamer.getGrpcClientTypeName");
}
/**
* Gets the type name of the Grpc client class, saves it to the type table provided, and returns
* the nickname.
*/
public String getAndSaveNicknameForGrpcClientTypeName(
ModelTypeTable typeTable, Interface service) {
return typeTable.getAndSaveNicknameFor(getGrpcClientTypeName(service));
}
/**
* The type name of the Grpc container class. This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcContainerTypeName(Interface service) {
NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service));
String publicClassName =
publicClassName(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Grpc"));
return qualifiedName(namePath.withHead(publicClassName));
}
/**
   * The type name of the Grpc service class. This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcServiceClassName(Interface service) {
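    // Mirrors the gRPC-generated nesting, e.g. FooGrpc.FooImplBase for a service named Foo.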
NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service));
String grpcContainerName =
publicClassName(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Grpc"));
String serviceClassName =
publicClassName(Name.upperCamelKeepUpperAcronyms(service.getSimpleName(), "ImplBase"));
return qualifiedName(namePath.withHead(grpcContainerName).append(serviceClassName));
}
/**
* The type name of the method constant in the Grpc container class. This needs to match what Grpc
* generates for the particular language.
*/
public String getGrpcMethodConstant(Method method) {
return inittedConstantName(
Name.from("method").join(Name.upperCamelKeepUpperAcronyms(method.getSimpleName())));
}
/** The variable name of the rerouted gRPC client. Used in C# */
public String getReroutedGrpcClientVarName(MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getGrpcClientName");
}
/** The method name to create a rerouted gRPC client. Used in C# */
public String getReroutedGrpcMethodName(MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getReroutedGrpcMethodName");
}
/** The name of the surface method which can call the given API method. */
public String getApiMethodName(Method method, VisibilityConfig visibility) {
return visibility.methodName(this, Name.upperCamel(method.getSimpleName()));
}
/** The name of the async surface method which can call the given API method. */
public String getAsyncApiMethodName(Method method, VisibilityConfig visibility) {
return visibility.methodName(this, Name.upperCamel(method.getSimpleName()).join("async"));
}
/** The name of the example for the method. */
public String getApiMethodExampleName(Interface interfaze, Method method) {
return getApiMethodName(method, VisibilityConfig.PUBLIC);
}
/** The name of the example for the async variant of the given method. */
public String getAsyncApiMethodExampleName(Interface interfaze, Method method) {
return getAsyncApiMethodName(method, VisibilityConfig.PUBLIC);
}
/** The name of the GRPC streaming surface method which can call the given API method. */
public String getGrpcStreamingApiMethodName(Method method, VisibilityConfig visibility) {
return getApiMethodName(method, visibility);
}
/**
* The name of the example of the GRPC streaming surface method which can call the given API
* method.
*/
public String getGrpcStreamingApiMethodExampleName(Interface interfaze, Method method) {
return getGrpcStreamingApiMethodName(method, VisibilityConfig.PUBLIC);
}
/**
* The name of a variable to hold a value for the given proto message field (such as a flattened
* parameter).
*/
public String getVariableName(Field field) {
return localVarName(Name.from(field.getSimpleName()));
}
/** Returns true if the request object param type for the given field should be imported. */
public boolean shouldImportRequestObjectParamType(Field field) {
return true;
}
/**
* Returns true if the request object param element type for the given field should be imported.
*/
public boolean shouldImportRequestObjectParamElementType(Field field) {
return true;
}
/** Converts the given text to doc lines in the format of the current language. */
public List<String> getDocLines(String text) {
return CommonRenderingUtil.getDocLines(text);
}
/** Provides the doc lines for the given proto element in the current language. */
public List<String> getDocLines(ProtoElement element) {
return getDocLines(DocumentationUtil.getDescription(element));
}
/** Provides the doc lines for the given method element in the current language. */
public List<String> getDocLines(Method method, MethodConfig methodConfig) {
return getDocLines(method);
}
/** The doc lines that declare what exception(s) are thrown for an API method. */
public List<String> getThrowsDocLines() {
return new ArrayList<>();
}
/** The doc lines that describe the return value for an API method. */
public List<String> getReturnDocLines(
SurfaceTransformerContext context, MethodConfig methodConfig, Synchronicity synchronicity) {
return new ArrayList<>();
}
/** The public access modifier for the current language. */
public String getPublicAccessModifier() {
return "public";
}
/** The private access modifier for the current language. */
public String getPrivateAccessModifier() {
return "private";
}
/** The name used in Grpc for the given API method. This needs to match what Grpc generates. */
public String getGrpcMethodName(Method method) {
// This might seem silly, but it makes clear what we're dealing with (upper camel).
// This is language-independent because of gRPC conventions.
return Name.upperCamelKeepUpperAcronyms(method.getSimpleName()).toUpperCamel();
}
/** The type name for retry settings. */
public String getRetrySettingsTypeName() {
return getNotImplementedString("SurfaceNamer.getRetrySettingsClassName");
}
/** The type name for an optional array argument; not used in most languages. */
public String getOptionalArrayTypeName() {
return getNotImplementedString("SurfaceNamer.getOptionalArrayTypeName");
}
/** The return type name in a dynamic language for the given method. */
public String getDynamicLangReturnTypeName(Method method, MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getDynamicReturnTypeName");
}
/** The return type name in a static language for the given method. */
public String getStaticLangReturnTypeName(Method method, MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getStaticLangReturnTypeName");
}
/** The return type name in a static language that is used by the caller */
public String getStaticLangCallerReturnTypeName(Method method, MethodConfig methodConfig) {
return getStaticLangReturnTypeName(method, methodConfig);
}
/** The async return type name in a static language for the given method. */
public String getStaticLangAsyncReturnTypeName(Method method, MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getStaticLangAsyncReturnTypeName");
}
/**
* Computes the nickname of the operation response type name for the given method, saves it in the
* given type table, and returns it.
*/
public String getAndSaveOperationResponseTypeName(
Method method, ModelTypeTable typeTable, MethodConfig methodConfig) {
return getNotImplementedString("SurfaceNamer.getAndSaveOperationResponseTypeName");
}
/**
   * In languages with pointers, strip the pointer, leaving only the base type. E.g., in C, "int*"
* would become "int".
*/
public String valueType(String type) {
return getNotImplementedString("SurfaceNamer.valueType");
}
/** The async return type name in a static language that is used by the caller */
public String getStaticLangCallerAsyncReturnTypeName(Method method, MethodConfig methodConfig) {
return getStaticLangAsyncReturnTypeName(method, methodConfig);
}
/** The GRPC streaming server type name for a given method. */
public String getStreamingServerName(Method method) {
return getNotImplementedString("SurfaceNamer.getStreamingServerName");
}
/** The name of the return type of the given grpc streaming method. */
public String getGrpcStreamingApiReturnTypeName(Method method) {
return publicClassName(
Name.upperCamel(method.getOutputType().getMessageType().getSimpleName()));
}
/** The name of the paged callable variant of the given method. */
public String getPagedCallableMethodName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "PagedCallable"));
}
/** The name of the example for the paged callable variant. */
public String getPagedCallableMethodExampleName(Interface interfaze, Method method) {
return getPagedCallableMethodName(method);
}
/** The name of the callable for the paged callable variant of the given method. */
public String getPagedCallableName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "PagedCallable"));
}
/** The name of the plain callable variant of the given method. */
public String getCallableMethodName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "Callable"));
}
/** The name of the plain callable variant of the given method. */
public String getCallableAsyncMethodName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "CallableAsync"));
}
/** The name of the example for the plain callable variant. */
public String getCallableMethodExampleName(Interface interfaze, Method method) {
return getCallableMethodName(method);
}
/** The name of the operation callable variant of the given method. */
public String getOperationCallableMethodName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "OperationCallable"));
}
/** The name of the example for the operation callable variant of the given method. */
public String getOperationCallableMethodExampleName(Interface interfaze, Method method) {
return getOperationCallableMethodName(method);
}
/** The name of the plain callable for the given method. */
public String getCallableName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "Callable"));
}
/** The name of the operation callable for the given method. */
public String getOperationCallableName(Method method) {
return privateFieldName(Name.upperCamel(method.getSimpleName(), "OperationCallable"));
}
/** The name of the settings member name for the given method. */
public String getSettingsMemberName(Method method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "Settings"));
}
/** The getter function name for the settings for the given method. */
public String getSettingsFunctionName(Method method) {
return getSettingsMemberName(method);
}
/** The name of a method to apply modifications to this method request. */
public String getModifyMethodName(Method method) {
return getNotImplementedString("SurfaceNamer.getModifyMethodName");
}
/** The type name of call options */
public String getCallSettingsTypeName(Interface service) {
return publicClassName(Name.upperCamel(service.getSimpleName(), "Settings"));
}
/** The function name to retrieve default call option */
public String getDefaultCallSettingsFunctionName(Interface service) {
return publicMethodName(Name.upperCamel(service.getSimpleName(), "Settings"));
}
/**
* The generic-aware response type name for the given type. For example, in Java, this will be the
* type used for ListenableFuture<...>.
*/
public String getGenericAwareResponseTypeName(TypeRef outputType) {
return getNotImplementedString("SurfaceNamer.getGenericAwareResponseType");
}
/**
* Computes the nickname of the paged response type name for the given method and resources field,
* saves it in the given type table, and returns it.
*/
public String getAndSavePagedResponseTypeName(
Method method, ModelTypeTable typeTable, FieldConfig resourcesFieldConfig) {
return getNotImplementedString("SurfaceNamer.getAndSavePagedResponseTypeName");
}
/** The inner type name of the paged response type for the given method and resources field. */
public String getPagedResponseTypeInnerName(
Method method, ModelTypeTable typeTable, Field resourcesField) {
return getNotImplementedString("SurfaceNamer.getAndSavePagedResponseTypeInnerName");
}
/**
   * Computes the nickname of the async paged response type name for the given method and resources field, saves it in
* the given type table, and returns it.
*/
public String getAndSaveAsyncPagedResponseTypeName(
Method method, ModelTypeTable typeTable, FieldConfig resourcesFieldConfig) {
return getNotImplementedString("SurfaceNamer.getAndSavePagedAsyncResponseTypeName");
}
/**
* Computes the nickname of the response type name for the given resource type, as used by the
* caller, saves it in the given type table, and returns it.
*/
public String getAndSaveCallerPagedResponseTypeName(
Method method, ModelTypeTable typeTable, FieldConfig resourcesFieldConfig) {
return getAndSavePagedResponseTypeName(method, typeTable, resourcesFieldConfig);
}
/**
* Computes the nickname of the response type name for the given resource type, as used by the
* caller, saves it in the given type table, and returns it.
*/
public String getAndSaveCallerAsyncPagedResponseTypeName(
Method method, ModelTypeTable typeTable, FieldConfig resourcesFieldConfig) {
return getAndSaveAsyncPagedResponseTypeName(method, typeTable, resourcesFieldConfig);
}
/** The class name of the generated resource type from the entity name. */
public String getAndSaveResourceTypeName(ModelTypeTable typeTable, FieldConfig fieldConfig) {
String resourceClassName =
publicClassName(getResourceTypeNameObject(fieldConfig.getResourceNameConfig()));
return typeTable.getAndSaveNicknameForTypedResourceName(fieldConfig, resourceClassName);
}
/** The class name of the generated resource type from the entity name. */
public String getAndSaveElementResourceTypeName(
ModelTypeTable typeTable, FieldConfig fieldConfig) {
String resourceClassName =
publicClassName(getResourceTypeNameObject(fieldConfig.getResourceNameConfig()));
return typeTable.getAndSaveNicknameForResourceNameElementType(fieldConfig, resourceClassName);
}
/** The test case name for the given method. */
public String getTestCaseName(SymbolTable symbolTable, Method method) {
Name testCaseName = symbolTable.getNewSymbol(Name.upperCamel(method.getSimpleName(), "Test"));
return publicMethodName(testCaseName);
}
/** The exception test case name for the given method. */
public String getExceptionTestCaseName(SymbolTable symbolTable, Method method) {
Name testCaseName =
symbolTable.getNewSymbol(Name.upperCamel(method.getSimpleName(), "ExceptionTest"));
return publicMethodName(testCaseName);
}
/** The unit test class name for the given API service. */
public String getUnitTestClassName(Interface service) {
return publicClassName(Name.upperCamel(service.getSimpleName(), "Test"));
}
/** The smoke test class name for the given API service. */
public String getSmokeTestClassName(Interface service) {
return publicClassName(Name.upperCamel(service.getSimpleName(), "Smoke", "Test"));
}
/** The class name of the mock gRPC service for the given API service. */
public String getMockServiceClassName(Interface service) {
return publicClassName(Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName()));
}
/** The class name of a variable to hold the mock gRPC service for the given API service. */
public String getMockServiceVarName(Interface service) {
return localVarName(Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName()));
}
/** The class name of the mock gRPC service implementation for the given API service. */
public String getMockGrpcServiceImplName(Interface service) {
return publicClassName(
Name.upperCamelKeepUpperAcronyms("Mock", service.getSimpleName(), "Impl"));
}
/** The file name for an API service. */
public String getServiceFileName(Interface service) {
return getNotImplementedString("SurfaceNamer.getServiceFileName");
}
/** The file name for the example of an API service. */
public String getExampleFileName(Interface service) {
return getNotImplementedString("SurfaceNamer.getExampleFileName");
}
/**
   * The fully qualified class name of an API service. TODO: Support the general pattern of
* package + class name in NameFormatter.
*/
public String getFullyQualifiedApiWrapperClassName(Interface interfaze) {
return getNotImplementedString("SurfaceNamer.getFullyQualifiedApiWrapperClassName");
}
/** The name of the variable that will hold the stub for a service. */
public String getStubName(Interface service) {
return privateFieldName(Name.upperCamel(service.getSimpleName(), "Stub"));
}
/** The name of the function that will create a stub. */
public String getCreateStubFunctionName(Interface service) {
return privateMethodName(
Name.upperCamel("Create", service.getSimpleName(), "Stub", "Function"));
}
/** The name of the array which will hold the methods for a given stub. */
public String getStubMethodsArrayName(Interface service) {
return privateMethodName(Name.upperCamel(service.getSimpleName(), "Stub", "Methods"));
}
/** The name of the import for a specific grpcClient */
public String getGrpcClientImportName(Interface service) {
return getNotImplementedString("SurfaceNamer.getGrpcClientImportName");
}
/** The fully qualified type name for the stub of a service. */
public String getFullyQualifiedStubType(Interface service) {
return getNotImplementedString("SurfaceNamer.getFullyQualifiedStubType");
}
/** The name of the variable to hold the grpc client of a service. */
public String getGrpcClientVariableName(Interface service) {
return localVarName(Name.upperCamel(service.getSimpleName(), "Client"));
}
/** The qualified namespace of a service. */
public String getNamespace(Interface service) {
NamePath namePath = typeNameConverter.getNamePath(modelTypeFormatter.getFullNameFor(service));
return qualifiedName(namePath.withoutHead());
}
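  /** The import of the service file for the given service. */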
public String getServiceFileImportFromService(Interface service) {
return getNotImplementedString("SurfaceNamer.getServiceFileImportFromService");
}
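  /** The import of the proto file for the given service. */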
public String getProtoFileImportFromService(Interface service) {
return getNotImplementedString("SurfaceNamer.getProtoFileImportFromService");
}
/**
* Returns the service name with common suffixes removed.
*
* <p>For example: "LoggingServiceV2" becomes Name("Logging")
*/
public Name getReducedServiceName(Interface service) {
String name = service.getSimpleName().replaceAll("V[0-9]+$", "");
name = name.replaceAll("Service$", "");
return Name.upperCamel(name);
}
/** The name of an RPC status code */
public String getStatusCodeName(Status.Code code) {
return privateMethodName(Name.upperUnderscore(code.toString()));
}
  /** The name of a retry definition. */
public String getRetryDefinitionName(String retryDefinitionKey) {
return privateMethodName(Name.from(retryDefinitionKey));
}
/** The name of the IAM resource getter function. */
public String getIamResourceGetterFunctionName(Field field) {
return getNotImplementedString("SurfaceNamer.getIamResourceGetterFunctionName");
}
/** The example name of the IAM resource getter function. */
public String getIamResourceGetterFunctionExampleName(Interface service, Field field) {
return getIamResourceGetterFunctionName(field);
}
/** The parameter name of the IAM resource. */
public String getIamResourceParamName(Field field) {
return localVarName(Name.upperCamel(field.getParent().getSimpleName()));
}
/** Inject random value generator code to the given string. */
public String injectRandomStringGeneratorCode(String randomString) {
return getNotImplementedString("SurfaceNamer.getRandomStringValue");
}
/** Function used to register the GRPC server. */
public String getServerRegisterFunctionName(Interface service) {
return getNotImplementedString("SurfaceNamer.getServerRegisterFunctionName");
}
/** The type name of the API callable class for this service method type. */
public String getApiCallableTypeName(ServiceMethodType serviceMethodType) {
return getNotImplementedString("SurfaceNamer.getApiCallableTypeName");
}
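  /** The annotation corresponding to the given release level. */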
public String getReleaseAnnotation(ReleaseLevel releaseLevel) {
return getNotImplementedString("SurfaceNamer.getReleaseAnnotation");
}
}
| 1 | 20,233 | I wonder if it would make sense to have a separate namer for metadata? Metadata files tend to be expressed in a different language from the repo language, and have mutually exclusive concepts. So, `PackageMetadataNamer`. | googleapis-gapic-generator | java |
@@ -37,6 +37,15 @@ import org.apache.logging.log4j.Logger;
public class BftValidatorsValidationRule implements AttachedBlockHeaderValidationRule {
private static final Logger LOGGER = LogManager.getLogger();
+ private final boolean extraDataValidatorsAndVoteMustBeEmpty;
+
+ public BftValidatorsValidationRule() {
+ this(false);
+ }
+
+ public BftValidatorsValidationRule(final boolean extraDataValidatorsAndVoteMustBeEmpty) {
+ this.extraDataValidatorsAndVoteMustBeEmpty = extraDataValidatorsAndVoteMustBeEmpty;
+ }
@Override
public boolean validate( | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.common.bft.headervalidationrules;
import org.hyperledger.besu.consensus.common.bft.BftContext;
import org.hyperledger.besu.consensus.common.bft.BftExtraData;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.mainnet.AttachedBlockHeaderValidationRule;
import org.hyperledger.besu.ethereum.rlp.RLPException;
import java.util.Collection;
import java.util.NavigableSet;
import java.util.TreeSet;
import com.google.common.collect.Iterables;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Ensures the validators listed in the block header match those tracked in memory (which were in turn
 * created by tracking votes included on the block chain).
*/
public class BftValidatorsValidationRule implements AttachedBlockHeaderValidationRule {
private static final Logger LOGGER = LogManager.getLogger();
@Override
public boolean validate(
final BlockHeader header, final BlockHeader parent, final ProtocolContext context) {
try {
final BftContext bftContext = context.getConsensusState(BftContext.class);
final BftExtraData bftExtraData = bftContext.getBlockInterface().getExtraData(header);
final NavigableSet<Address> sortedReportedValidators =
new TreeSet<>(bftExtraData.getValidators());
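      // The validators in the extra data must be reported in ascending (sorted) order.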
if (!Iterables.elementsEqual(bftExtraData.getValidators(), sortedReportedValidators)) {
LOGGER.info(
"Invalid block header: Validators are not sorted in ascending order. Expected {} but got {}.",
sortedReportedValidators,
bftExtraData.getValidators());
return false;
}
final Collection<Address> storedValidators =
bftContext.getValidatorProvider().getValidatorsAfterBlock(parent);
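      // The reported validators must exactly match those tracked for the parent block.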
if (!Iterables.elementsEqual(bftExtraData.getValidators(), storedValidators)) {
LOGGER.info(
"Invalid block header: Incorrect validators. Expected {} but got {}.",
storedValidators,
bftExtraData.getValidators());
return false;
}
} catch (final RLPException ex) {
LOGGER.info(
"Invalid block header: ExtraData field was unable to be deserialized into an BFT Struct.",
ex);
return false;
} catch (final IllegalArgumentException ex) {
LOGGER.info("Invalid block header: Failed to verify extra data", ex);
return false;
}
return true;
}
}
| 1 | 25,695 | This rule is quite specific to qbft and so I don't think it should be part of the common rules. Would rather the common bft code didn't know anything about contract based voting/validator governance. | hyperledger-besu | java |
@@ -163,6 +163,15 @@ public interface Set<T> extends Traversable<T>, Function1<T, Boolean>, Serializa
*/
Set<T> removeAll(Iterable<? extends T> elements);
+ /**
+ * Reverses the order of the elements in this set.
+ *
+ * @return A new set consisting of the elements in reverse order of this set
+ */
+ default Set<T> reverse() {
+ throw new UnsupportedOperationException("Operation not supported");
+ }
+
/**
* Converts this Vavr {@code Set} to a {@code java.util.Set} while preserving characteristics
* like insertion order ({@code LinkedHashSet}) and sort order ({@code SortedSet}). | 1 | /* ____ ______________ ________________________ __________
* \ \/ / \ \/ / __/ / \ \/ / \
* \______/___/\___\______/___/_____/___/\___\______/___/\___\
*
* Copyright 2019 Vavr, http://vavr.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vavr.collection;
import io.vavr.*;
import io.vavr.control.Option;
import java.io.Serializable;
import java.util.Comparator;
import java.util.function.*;
/**
* An immutable {@code Set} interface.
* <p>
* CAUTION: The Vavr {@code Set} implementations generally support {@code null} elements. However {@code SortedSet}
* implementations require an element {@code Comparator}, which may not support {@code null} elements.
* <p>
* Examples:
*
* <pre>{@code Set<?> addNull(Set<?> set) {
*
* // CAUTION: Do not expect a Set to accept null values in general!
* return set.add(null);
*
* }
*
* void test() {
*
* // ok
* addNull(HashSet.of(1));
*
* // ok
* addNull(TreeSet.of(nullsFirst(naturalOrder()), 1));
*
* // ok
* addNull(TreeSet.empty());
*
* // throws NPE!
* addNull(TreeSet.of(1));
*
* }}</pre>
*
* <p>
* Basic operations:
*
* <ul>
* <li>{@link #add(Object)}</li>
* <li>{@link #addAll(Iterable)}</li>
* <li>{@link #diff(Set)}</li>
* <li>{@link #intersect(Set)}</li>
* <li>{@link #remove(Object)}</li>
* <li>{@link #removeAll(Iterable)}</li>
* <li>{@link #union(Set)}</li>
* </ul>
*
* Conversion:
*
* <ul>
* <li>{@link #toJavaSet()}</li>
* </ul>
*
* @param <T> Component type
*/
@SuppressWarnings("deprecation")
public interface Set<T> extends Traversable<T>, Function1<T, Boolean>, Serializable {
long serialVersionUID = 1L;
/**
* Narrows a widened {@code Set<? extends T>} to {@code Set<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param set A {@code Set}.
* @param <T> Component type of the {@code Set}.
* @return the given {@code set} instance as narrowed type {@code Set<T>}.
*/
@SuppressWarnings("unchecked")
static <T> Set<T> narrow(Set<? extends T> set) {
return (Set<T>) set;
}
/**
* Add the given element to this set, if it is not already contained.
*
* @param element The element to be added.
* @return A new set containing all elements of this set and also {@code element}.
*/
Set<T> add(T element);
/**
* Adds all of the given elements to this set, if not already contained.
*
* @param elements The elements to be added.
* @return A new set containing all elements of this set and the given {@code elements}, if not already contained.
*/
Set<T> addAll(Iterable<? extends T> elements);
/**
* Tests if a given {@code element} is contained in this {@code Set}.
* <p>
* This method is equivalent to {@link #contains(Object)}.
*
* @param element the element to test for membership.
* @return {@code true} if the given {@code element} is contained, {@code false} otherwise.
* @deprecated Will be removed
*/
@Override
@Deprecated
default Boolean apply(T element) {
return contains(element);
}
/**
* Calculates the difference between this set and another set.
* <p>
* See also {@link #removeAll(Iterable)}.
*
* @param that Elements to be removed from this set.
* @return A new Set containing all elements of this set which are not located in {@code that} set.
*/
Set<T> diff(Set<? extends T> that);
/**
* Computes the intersection between this set and another set.
* <p>
* See also {@link #retainAll(Iterable)}.
*
* @param that the set to intersect with.
* @return A new Set consisting of all elements that are both in this set and in the given set {@code that}.
*/
Set<T> intersect(Set<? extends T> that);
/**
* Removes a specific element from this set, if present.
*
* @param element The element to be removed from this set.
* @return A new set consisting of the elements of this set, without the given {@code element}.
*/
Set<T> remove(T element);
/**
* Removes all of the given elements from this set, if present.
*
* @param elements The elements to be removed from this set.
* @return A new set consisting of the elements of this set, without the given {@code elements}.
*/
Set<T> removeAll(Iterable<? extends T> elements);
/**
* Converts this Vavr {@code Set} to a {@code java.util.Set} while preserving characteristics
* like insertion order ({@code LinkedHashSet}) and sort order ({@code SortedSet}).
*
* @return a new {@code java.util.Set} instance
*/
@Override
java.util.Set<T> toJavaSet();
/**
* Adds all of the elements of {@code that} set to this set, if not already present.
* <p>
* See also {@link #addAll(Iterable)}.
*
* @param that The set to form the union with.
* @return A new set that contains all distinct elements of this and {@code that} set.
*/
Set<T> union(Set<? extends T> that);
// -- Adjusted return types of Traversable methods
@Override
<R> Set<R> collect(PartialFunction<? super T, ? extends R> partialFunction);
@Override
boolean contains(T element);
@Override
Set<T> distinct();
@Override
Set<T> distinctBy(Comparator<? super T> comparator);
@Override
<U> Set<T> distinctBy(Function<? super T, ? extends U> keyExtractor);
@Override
Set<T> drop(int n);
@Override
Set<T> dropRight(int n);
@Override
Set<T> dropUntil(Predicate<? super T> predicate);
@Override
Set<T> dropWhile(Predicate<? super T> predicate);
@Override
Set<T> filter(Predicate<? super T> predicate);
@Override
Set<T> filterNot(Predicate<? super T> predicate);
@Deprecated
@Override
Set<T> reject(Predicate<? super T> predicate);
@Override
<U> Set<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper);
@Override
<C> Map<C, ? extends Set<T>> groupBy(Function<? super T, ? extends C> classifier);
@Override
Iterator<? extends Set<T>> grouped(int size);
@Override
Set<T> init();
@Override
Option<? extends Set<T>> initOption();
@Override
default boolean isDistinct() {
return true;
}
@Override
Iterator<T> iterator();
@Override
int length();
@Override
<U> Set<U> map(Function<? super T, ? extends U> mapper);
@Override
Set<T> orElse(Iterable<? extends T> other);
@Override
Set<T> orElse(Supplier<? extends Iterable<? extends T>> supplier);
@Override
Tuple2<? extends Set<T>, ? extends Set<T>> partition(Predicate<? super T> predicate);
@Override
Set<T> peek(Consumer<? super T> action);
@Override
Set<T> replace(T currentElement, T newElement);
@Override
Set<T> replaceAll(T currentElement, T newElement);
@Override
Set<T> retainAll(Iterable<? extends T> elements);
@Override
Set<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation);
@Override
<U> Set<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation);
@Override
<U> Set<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation);
@Override
Iterator<? extends Set<T>> slideBy(Function<? super T, ?> classifier);
@Override
Iterator<? extends Set<T>> sliding(int size);
@Override
Iterator<? extends Set<T>> sliding(int size, int step);
@Override
Tuple2<? extends Set<T>, ? extends Set<T>> span(Predicate<? super T> predicate);
@Override
Set<T> tail();
@Override
Option<? extends Set<T>> tailOption();
@Override
Set<T> take(int n);
@Override
Set<T> takeRight(int n);
@Override
Set<T> takeUntil(Predicate<? super T> predicate);
@Override
Set<T> takeWhile(Predicate<? super T> predicate);
@Override
<T1, T2> Tuple2<? extends Set<T1>, ? extends Set<T2>> unzip(Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper);
@Override
<T1, T2, T3> Tuple3<? extends Set<T1>, ? extends Set<T2>, ? extends Set<T3>> unzip3(Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper);
@Override
<U> Set<Tuple2<T, U>> zip(Iterable<? extends U> that);
@Override
<U, R> Set<R> zipWith(Iterable<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper);
@Override
<U> Set<Tuple2<T, U>> zipAll(Iterable<? extends U> that, T thisElem, U thatElem);
@Override
Set<Tuple2<T, Integer>> zipWithIndex();
@Override
<U> Set<U> zipWithIndex(BiFunction<? super T, ? super Integer, ? extends U> mapper);
}
| 1 | 13,338 | This makes our life harder than it needs to be. A Set is unordered by definition. Please remove this method from Set and add it to SortedSet, but with no default implementation. We need to duplicate it then to LinkedHashSet but that's ok, it is the best we can do. Could you please add it also to SortedMap and LinkedHashMap? We are then able to close the original issue. | vavr-io-vavr | java |
@@ -366,6 +366,10 @@ class S3Connection(AWSAuthConnection):
if version_id is not None:
params['VersionId'] = version_id
+ if response_headers is not None:
+ for header, value in response_headers.items():
+ params[header] = value
+
http_request = self.build_base_http_request(method, path, auth_path,
headers=headers, host=host,
params=params) | 1 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import base64
from boto.compat import six, urllib
import time
from boto.auth import detect_potential_s3sigv4
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
from boto.exception import BotoClientError, S3ResponseError
def check_lowercase_bucketname(n):
"""
Bucket names must not contain uppercase characters. We check for
this by appending a lowercase character and testing with islower().
Note this also covers cases like numeric bucket names with dashes.
>>> check_lowercase_bucketname("Aaaa")
Traceback (most recent call last):
...
BotoClientError: S3Error: Bucket names cannot contain upper-case
characters when using either the sub-domain or virtual hosting calling
format.
>>> check_lowercase_bucketname("1234-5678-9123")
True
>>> check_lowercase_bucketname("abcdefg1234")
True
"""
if not (n + 'a').islower():
raise BotoClientError("Bucket names cannot contain upper-case " \
"characters when using either the sub-domain or virtual " \
"hosting calling format.")
return True
def assert_case_insensitive(f):
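    # Decorator: when the wrapped call receives exactly three positional
    # arguments, the third (the bucket name) is checked for upper-case characters.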
def wrapper(*args, **kwargs):
if len(args) == 3 and check_lowercase_bucketname(args[2]):
pass
return f(*args, **kwargs)
return wrapper
class _CallingFormat(object):
def get_bucket_server(self, server, bucket):
return ''
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '%s://' % protocol
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
def build_host(self, server, bucket):
if bucket == '':
return server
else:
return self.get_bucket_server(server, bucket)
def build_auth_path(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path = ''
if bucket != '':
path = '/' + bucket
return path + '/%s' % urllib.parse.quote(key)
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
return '/%s' % urllib.parse.quote(key)
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
class OrdinaryCallingFormat(_CallingFormat):
def get_bucket_server(self, server, bucket):
return server
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path_base = '/'
if bucket:
path_base += "%s/" % bucket
return path_base + urllib.parse.quote(key)
class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '//'
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
class Location(object):
DEFAULT = '' # US Classic Region
EU = 'EU'
USWest = 'us-west-1'
USWest2 = 'us-west-2'
SAEast = 'sa-east-1'
APNortheast = 'ap-northeast-1'
APSoutheast = 'ap-southeast-1'
APSoutheast2 = 'ap-southeast-2'
CNNorth1 = 'cn-north-1'
class NoHostProvided(object):
# An identifying object to help determine whether the user provided a
# ``host`` or not. Never instantiated.
pass
class HostRequiredError(BotoClientError):
pass
class S3Connection(AWSAuthConnection):
DefaultHost = boto.config.get('s3', 'host', 's3.amazonaws.com')
DefaultCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat')
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=NoHostProvided, debug=0, https_connection_factory=None,
calling_format=DefaultCallingFormat, path='/',
provider='aws', bucket_class=Bucket, security_token=None,
suppress_consec_slashes=True, anon=False,
validate_certs=None, profile_name=None):
no_host_provided = False
if host is NoHostProvided:
no_host_provided = True
host = self.DefaultHost
if isinstance(calling_format, six.string_types):
calling_format=boto.utils.find_class(calling_format)()
self.calling_format = calling_format
self.bucket_class = bucket_class
self.anon = anon
super(S3Connection, self).__init__(host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
path=path, provider=provider, security_token=security_token,
suppress_consec_slashes=suppress_consec_slashes,
validate_certs=validate_certs, profile_name=profile_name)
# We need to delay until after the call to ``super`` before checking
# to see if SigV4 is in use.
if no_host_provided:
if 'hmac-v4-s3' in self._required_auth_capability():
raise HostRequiredError(
"When using SigV4, you must specify a 'host' parameter."
)
@detect_potential_s3sigv4
def _required_auth_capability(self):
if self.anon:
return ['anon']
else:
return ['s3']
def __iter__(self):
for bucket in self.get_all_buckets():
yield bucket
def __contains__(self, bucket_name):
return not (self.lookup(bucket_name) is None)
def set_bucket_class(self, bucket_class):
"""
        Set the Bucket class associated with this connection. By default, this
        would be the boto.s3.bucket.Bucket class but if you want to subclass that
        for some reason this allows you to associate your new class.
:type bucket_class: class
:param bucket_class: A subclass of Bucket that can be more specific
"""
self.bucket_class = bucket_class
def build_post_policy(self, expiration_time, conditions):
"""
Taken from the AWS book Python examples and modified for use with boto
"""
assert isinstance(expiration_time, time.struct_time), \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def build_post_form_args(self, bucket_name, key, expires_in=6000,
acl=None, success_action_redirect=None,
max_content_length=None,
http_method='http', fields=None,
conditions=None, storage_class='STANDARD',
server_side_encryption=None):
"""
        Taken from the AWS book Python examples and modified for use with boto.
        This only returns the arguments required for the post form, not the
        actual form. This does not return the file input field, which also
        needs to be added.
:type bucket_name: string
:param bucket_name: Bucket to submit to
:type key: string
:param key: Key name, optionally add ${filename} to the end to
attach the submitted filename
:type expires_in: integer
:param expires_in: Time (in seconds) before this expires, defaults
to 6000
:type acl: string
:param acl: A canned ACL. One of:
* private
* public-read
* public-read-write
* authenticated-read
* bucket-owner-read
* bucket-owner-full-control
:type success_action_redirect: string
:param success_action_redirect: URL to redirect to on success
:type max_content_length: integer
:param max_content_length: Maximum size for this file
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:type storage_class: string
:param storage_class: Storage class to use for storing the object.
Valid values: STANDARD | REDUCED_REDUNDANCY
:type server_side_encryption: string
:param server_side_encryption: Specifies server-side encryption
algorithm to use when Amazon S3 creates an object.
Valid values: None | AES256
:rtype: dict
:return: A dictionary containing field names/values as well as
a url to POST to
.. code-block:: python
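
            # Illustrative sketch only -- the bucket name, key and limits below
            # are made-up values, and ``conn`` is assumed to be an S3Connection.
            post = conn.build_post_form_args(
                'example-bucket',
                'uploads/${filename}',
                expires_in=600,
                acl='public-read',
                max_content_length=10485760)
            # post['action'] is the URL to POST the form to;
            # post['fields'] lists the form fields (policy, signature, key, ...).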
"""
if fields is None:
fields = []
if conditions is None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({"name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({"name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
if self.provider.security_token:
fields.append({'name': 'x-amz-security-token',
'value': self.provider.security_token})
conditions.append('{"x-amz-security-token": "%s"}' % self.provider.security_token)
if storage_class:
fields.append({'name': 'x-amz-storage-class',
'value': storage_class})
conditions.append('{"x-amz-storage-class": "%s"}' % storage_class)
if server_side_encryption:
fields.append({'name': 'x-amz-server-side-encryption',
'value': server_side_encryption})
conditions.append('{"x-amz-server-side-encryption": "%s"}' % server_side_encryption)
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId",
"value": self.aws_access_key_id})
# Add signature for encoded policy document as the
# 'signature' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
url = '%s://%s/' % (http_method,
self.calling_format.build_host(self.server_name(),
bucket_name))
return {"action": url, "fields": fields}
def generate_url_sigv4(self, expires_in, method, bucket='', key='',
headers=None, force_http=False,
response_headers=None, version_id=None,
iso_date=None):
path = self.calling_format.build_path_base(bucket, key)
auth_path = self.calling_format.build_auth_path(bucket, key)
host = self.calling_format.build_host(self.server_name(), bucket)
# For presigned URLs we should ignore the port if it's HTTPS
if host.endswith(':443'):
host = host[:-4]
params = {}
if version_id is not None:
params['VersionId'] = version_id
http_request = self.build_base_http_request(method, path, auth_path,
headers=headers, host=host,
params=params)
return self._auth_handler.presign(http_request, expires_in,
iso_date=iso_date)
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None):
if self._auth_handler.capability[0] == 'hmac-v4-s3':
# Handle the special sigv4 case
return self.generate_url_sigv4(expires_in, method, bucket=bucket,
key=key, headers=headers, force_http=force_http,
response_headers=response_headers, version_id=version_id)
headers = headers or {}
if expires_in_absolute:
expires = int(expires_in)
else:
expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
auth_path = self.get_path(auth_path)
# optional version_id and response_headers need to be added to
# the query param list.
extra_qp = []
if version_id is not None:
extra_qp.append("versionId=%s" % version_id)
if response_headers:
for k, v in response_headers.items():
extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
if self.provider.security_token:
headers['x-amz-security-token'] = self.provider.security_token
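        # versionId and response header overrides are part of what gets signed,
        # so fold them into auth_path before computing the canonical string.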
if extra_qp:
delimiter = '?' if '?' not in auth_path else '&'
auth_path += delimiter + '&'.join(extra_qp)
c_string = boto.utils.canonical_string(method, auth_path, headers,
expires, self.provider)
b64_hmac = self._auth_handler.sign_string(c_string)
encoded_canonical = urllib.parse.quote(b64_hmac, safe='')
self.calling_format.build_path_base(bucket, key)
if query_auth:
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
else:
query_part = ''
if headers:
hdr_prefix = self.provider.header_prefix
for k, v in headers.items():
if k.startswith(hdr_prefix):
# headers used for sig generation must be
# included in the url also.
extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
if extra_qp:
delimiter = '?' if not query_part else '&'
query_part += delimiter + '&'.join(extra_qp)
if force_http:
protocol = 'http'
port = 80
else:
protocol = self.protocol
port = self.port
return self.calling_format.build_url_base(self, protocol,
self.server_name(port),
bucket, key) + query_part
def get_all_buckets(self, headers=None):
response = self.make_request('GET', headers=headers)
body = response.read()
if response.status > 300:
raise self.provider.storage_response_error(
response.status, response.reason, body)
rs = ResultSet([('Bucket', self.bucket_class)])
h = handler.XmlHandler(rs, self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return rs
def get_canonical_user_id(self, headers=None):
"""
Convenience method that returns the "CanonicalUserID" of the
        user whose credentials are associated with the connection.
The only way to get this value is to do a GET request on the
service which returns all buckets associated with the account.
As part of that response, the canonical userid is returned.
This method simply does all of that and then returns just the
user id.
:rtype: string
:return: A string containing the canonical user id.
"""
rs = self.get_all_buckets(headers=headers)
return rs.owner.id
def get_bucket(self, bucket_name, validate=True, headers=None):
"""
Retrieves a bucket by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised. If
you are unsure if the bucket exists or not, you can use the
``S3Connection.lookup`` method, which will either return a valid bucket
or ``None``.
If ``validate=False`` is passed, no request is made to the service (no
charge/communication delay). This is only safe to do if you are **sure**
the bucket exists.
If the default ``validate=True`` is passed, a request is made to the
service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched
a list of keys (but with a max limit set to ``0``, always returning an empty
list) in the bucket (& included better error messages), at an
increased expense. As of Boto v2.25.0, this now performs a HEAD request
(less expensive but worse error messages).
If you were relying on parsing the error message before, you should call
something like::
bucket = conn.get_bucket('<bucket_name>', validate=False)
bucket.get_all_keys(maxkeys=0)
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
:param validate: If ``True``, it will try to verify the bucket exists
on the service-side. (Default: ``True``)
"""
if validate:
return self.head_bucket(bucket_name, headers=headers)
else:
return self.bucket_class(self, bucket_name)
def head_bucket(self, bucket_name, headers=None):
"""
Determines if a bucket exists by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:returns: A <Bucket> object
"""
response = self.make_request('HEAD', bucket_name, headers=headers)
body = response.read()
if response.status == 200:
return self.bucket_class(self, bucket_name)
elif response.status == 403:
# For backward-compatibility, we'll populate part of the exception
# with the most-common default.
err = self.provider.storage_response_error(
response.status,
response.reason,
body
)
err.error_code = 'AccessDenied'
err.error_message = 'Access Denied'
raise err
elif response.status == 404:
# For backward-compatibility, we'll populate part of the exception
# with the most-common default.
err = self.provider.storage_response_error(
response.status,
response.reason,
body
)
err.error_code = 'NoSuchBucket'
err.error_message = 'The specified bucket does not exist'
raise err
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def lookup(self, bucket_name, validate=True, headers=None):
"""
Attempts to get a bucket from S3.
Works identically to ``S3Connection.get_bucket``, save for that it
will return ``None`` if the bucket does not exist instead of throwing
an exception.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
        :param validate: If ``True``, it will try to verify the bucket exists
            on the service-side, as with ``get_bucket``. (Default: ``True``)
"""
try:
bucket = self.get_bucket(bucket_name, validate, headers=headers)
except:
bucket = None
return bucket
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
"""
Creates a new located bucket. By default it's in the USA. You can pass
Location.EU to create a European bucket (S3) or European Union bucket
(GCS).
:type bucket_name: string
:param bucket_name: The name of the new bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: str
:param location: The location of the new bucket. You can use one of the
constants in :class:`boto.s3.connection.Location` (e.g. Location.EU,
Location.USWest, etc.).
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
"""
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header: policy}
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConfiguration><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConfiguration>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def delete_bucket(self, bucket, headers=None):
"""
Removes an S3 bucket.
In order to remove the bucket, it must first be empty. If the bucket is
not empty, an ``S3ResponseError`` will be raised.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
"""
response = self.make_request('DELETE', bucket, headers=headers)
body = response.read()
if response.status != 204:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
query_args=None, sender=None, override_num_retries=None,
retry_handler=None):
if isinstance(bucket, self.bucket_class):
bucket = bucket.name
if isinstance(key, Key):
key = key.name
path = self.calling_format.build_path_base(bucket, key)
boto.log.debug('path=%s' % path)
auth_path = self.calling_format.build_auth_path(bucket, key)
boto.log.debug('auth_path=%s' % auth_path)
host = self.calling_format.build_host(self.server_name(), bucket)
if query_args:
path += '?' + query_args
boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
boto.log.debug('auth_path=%s' % auth_path)
return super(S3Connection, self).make_request(
method, path, headers,
data, host, auth_path, sender,
override_num_retries=override_num_retries,
retry_handler=retry_handler
)
| 1 | 11,454 | we can replace `for` with `params.update(response_headers)` | boto-boto | py |
@@ -118,13 +118,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Https
return certificate2;
}
-#if NETSTANDARD1_3
- // conversion X509Certificate to X509Certificate2 not supported
- // https://github.com/dotnet/corefx/issues/4510
return null;
-#else
- return new X509Certificate2(certificate);
-#endif
}
private class HttpsAdaptedConnection : IAdaptedConnection | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.IO;
using System.Net.Security;
using System.Security.Cryptography.X509Certificates;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core.Adapter.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Https.Internal;
using Microsoft.Extensions.Logging;
namespace Microsoft.AspNetCore.Server.Kestrel.Https
{
public class HttpsConnectionAdapter : IConnectionAdapter
{
private static readonly ClosedAdaptedConnection _closedAdaptedConnection = new ClosedAdaptedConnection();
private readonly HttpsConnectionAdapterOptions _options;
private readonly ILogger _logger;
public HttpsConnectionAdapter(HttpsConnectionAdapterOptions options)
: this(options, loggerFactory: null)
{
}
public HttpsConnectionAdapter(HttpsConnectionAdapterOptions options, ILoggerFactory loggerFactory)
{
if (options == null)
{
throw new ArgumentNullException(nameof(options));
}
if (options.ServerCertificate == null)
{
throw new ArgumentException("The server certificate parameter is required.");
}
_options = options;
_logger = loggerFactory?.CreateLogger(nameof(HttpsConnectionAdapter));
}
public bool IsHttps => true;
public async Task<IAdaptedConnection> OnConnectionAsync(ConnectionAdapterContext context)
{
SslStream sslStream;
bool certificateRequired;
if (_options.ClientCertificateMode == ClientCertificateMode.NoCertificate)
{
sslStream = new SslStream(context.ConnectionStream);
certificateRequired = false;
}
else
{
sslStream = new SslStream(context.ConnectionStream, leaveInnerStreamOpen: false,
userCertificateValidationCallback: (sender, certificate, chain, sslPolicyErrors) =>
{
if (certificate == null)
{
return _options.ClientCertificateMode != ClientCertificateMode.RequireCertificate;
}
if (_options.ClientCertificateValidation == null)
{
if (sslPolicyErrors != SslPolicyErrors.None)
{
return false;
}
}
var certificate2 = ConvertToX509Certificate2(certificate);
if (certificate2 == null)
{
return false;
}
if (_options.ClientCertificateValidation != null)
{
if (!_options.ClientCertificateValidation(certificate2, chain, sslPolicyErrors))
{
return false;
}
}
return true;
});
certificateRequired = true;
}
try
{
await sslStream.AuthenticateAsServerAsync(_options.ServerCertificate, certificateRequired,
_options.SslProtocols, _options.CheckCertificateRevocation);
}
catch (IOException ex)
{
_logger?.LogInformation(1, ex, "Failed to authenticate HTTPS connection.");
sslStream.Dispose();
return _closedAdaptedConnection;
}
return new HttpsAdaptedConnection(sslStream);
}
private static X509Certificate2 ConvertToX509Certificate2(X509Certificate certificate)
{
if (certificate == null)
{
return null;
}
X509Certificate2 certificate2 = certificate as X509Certificate2;
if (certificate2 != null)
{
return certificate2;
}
#if NETSTANDARD1_3
// conversion X509Certificate to X509Certificate2 not supported
// https://github.com/dotnet/corefx/issues/4510
return null;
#else
return new X509Certificate2(certificate);
#endif
}
private class HttpsAdaptedConnection : IAdaptedConnection
{
private readonly SslStream _sslStream;
public HttpsAdaptedConnection(SslStream sslStream)
{
_sslStream = sslStream;
}
public Stream ConnectionStream => _sslStream;
public void PrepareRequest(IFeatureCollection requestFeatures)
{
var clientCertificate = ConvertToX509Certificate2(_sslStream.RemoteCertificate);
if (clientCertificate != null)
{
requestFeatures.Set<ITlsConnectionFeature>(new TlsConnectionFeature { ClientCertificate = clientCertificate });
}
requestFeatures.Get<IHttpRequestFeature>().Scheme = "https";
}
}
private class ClosedAdaptedConnection : IAdaptedConnection
{
public Stream ConnectionStream { get; } = new ClosedStream();
public void PrepareRequest(IFeatureCollection requestFeatures)
{
}
}
}
}
| 1 | 13,130 | Any chance this is available in .NET Core 2.0 now? | aspnet-KestrelHttpServer | .cs |
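If the answer to the question above is yes (i.e. the X509Certificate-to-X509Certificate2 conversion works on the targeted runtimes), the helper could presumably drop the preprocessor branch altogether. A sketch only, not the project's final code:

```csharp
// Sketch under the assumption that new X509Certificate2(X509Certificate)
// is supported on all targeted frameworks (the point being questioned).
private static X509Certificate2 ConvertToX509Certificate2(X509Certificate certificate)
{
    if (certificate == null)
    {
        return null;
    }

    // Reuse the instance if it is already an X509Certificate2,
    // otherwise construct one from the base certificate.
    return certificate as X509Certificate2 ?? new X509Certificate2(certificate);
}
```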
@@ -8,16 +8,15 @@ package blocksync
import (
"context"
- "net"
"github.com/golang/protobuf/proto"
+ peerstore "github.com/libp2p/go-libp2p-peerstore"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
- "github.com/iotexproject/iotex-core/p2p/node"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/routine" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blocksync
import (
"context"
"net"
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/p2p/node"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/routine"
"github.com/iotexproject/iotex-core/proto"
)
type (
// Unicast sends a unicast message to the given address
Unicast func(addr net.Addr, msg proto.Message) error
// Neighbors returns the neighbors' addresses
Neighbors func() []net.Addr
)
// Config represents the config to setup blocksync
type Config struct {
unicastHandler Unicast
neighborsHandler Neighbors
}
// Option is the option to override the blocksync config
type Option func(cfg *Config) error
// WithUnicast is the option to set the unicast callback
func WithUnicast(unicastHandler Unicast) Option {
return func(cfg *Config) error {
cfg.unicastHandler = unicastHandler
return nil
}
}
// WithNeighbors is the option to set the neighbors callback
func WithNeighbors(neighborsHandler Neighbors) Option {
return func(cfg *Config) error {
cfg.neighborsHandler = neighborsHandler
return nil
}
}
// BlockSync defines the interface of blocksyncer
type BlockSync interface {
lifecycle.StartStopper
TargetHeight() uint64
ProcessSyncRequest(sender string, sync *iproto.BlockSync) error
ProcessBlock(blk *block.Block) error
ProcessBlockSync(blk *block.Block) error
}
// blockSyncer implements BlockSync interface
type blockSyncer struct {
ackBlockCommit bool // acknowledges latest committed block
ackBlockSync bool // acknowledges old block from sync request
ackSyncReq bool // acknowledges incoming Sync request
commitHeight uint64 // last commit block height
buf *blockBuffer
worker *syncWorker
bc blockchain.Blockchain
unicastHandler Unicast
neighborsHandler Neighbors
chaser *routine.RecurringTask
}
// NewBlockSyncer returns a new block syncer instance
func NewBlockSyncer(
cfg config.Config,
chain blockchain.Blockchain,
ap actpool.ActPool,
opts ...Option,
) (BlockSync, error) {
bufSize := cfg.BlockSync.BufferSize
if cfg.IsFullnode() {
bufSize <<= 3
}
buf := &blockBuffer{
blocks: make(map[uint64]*block.Block),
bc: chain,
ap: ap,
size: bufSize,
}
bsCfg := Config{}
for _, opt := range opts {
if err := opt(&bsCfg); err != nil {
return nil, err
}
}
bs := &blockSyncer{
ackBlockCommit: cfg.IsDelegate() || cfg.IsFullnode(),
ackBlockSync: cfg.IsDelegate() || cfg.IsFullnode(),
ackSyncReq: cfg.IsDelegate() || cfg.IsFullnode(),
bc: chain,
buf: buf,
unicastHandler: bsCfg.unicastHandler,
neighborsHandler: bsCfg.neighborsHandler,
worker: newSyncWorker(chain.ChainID(), cfg, bsCfg.unicastHandler, bsCfg.neighborsHandler, buf),
}
bs.chaser = routine.NewRecurringTask(bs.Chase, cfg.BlockSync.Interval*10)
return bs, nil
}
// TargetHeight returns the target height to sync to
func (bs *blockSyncer) TargetHeight() uint64 {
bs.worker.mu.RLock()
defer bs.worker.mu.RUnlock()
return bs.worker.targetHeight
}
// Start starts a block syncer
func (bs *blockSyncer) Start(ctx context.Context) error {
log.L().Debug("Starting block syncer.")
bs.commitHeight = bs.buf.CommitHeight()
if err := bs.chaser.Start(ctx); err != nil {
return err
}
return bs.worker.Start(ctx)
}
// Stop stops a block syncer
func (bs *blockSyncer) Stop(ctx context.Context) error {
log.L().Debug("Stopping block syncer.")
if err := bs.chaser.Stop(ctx); err != nil {
return err
}
return bs.worker.Stop(ctx)
}
// ProcessBlock processes an incoming latest committed block
func (bs *blockSyncer) ProcessBlock(blk *block.Block) error {
if !bs.ackBlockCommit {
// node is not meant to handle latest committed block, simply exit
return nil
}
var needSync bool
moved, re := bs.buf.Flush(blk)
switch re {
case bCheckinLower:
log.L().Debug("Drop block lower than buffer's accept height.")
case bCheckinExisting:
log.L().Debug("Drop block exists in buffer.")
case bCheckinHigher:
needSync = true
case bCheckinValid:
needSync = !moved
case bCheckinSkipNil:
needSync = false
}
if needSync {
bs.worker.SetTargetHeight(blk.Height())
}
return nil
}
func (bs *blockSyncer) ProcessBlockSync(blk *block.Block) error {
if !bs.ackBlockSync {
// node is not meant to handle sync block, simply exit
return nil
}
bs.buf.Flush(blk)
if bs.bc.TipHeight() == bs.TargetHeight() {
bs.worker.SetTargetHeight(bs.TargetHeight() + bs.buf.bufSize())
}
return nil
}
// ProcessSyncRequest processes a block sync request
func (bs *blockSyncer) ProcessSyncRequest(sender string, sync *iproto.BlockSync) error {
if !bs.ackSyncReq {
// node is not meant to handle sync request, simply exit
return nil
}
for i := sync.Start; i <= sync.End; i++ {
blk, err := bs.bc.GetBlockByHeight(i)
if err != nil {
return err
}
// TODO: send back multiple blocks in one shot
if err := bs.unicastHandler(
node.NewTCPNode(sender),
&iproto.BlockContainer{Block: blk.ConvertToBlockPb()},
); err != nil {
log.L().Warn("Failed to response to ProcessSyncRequest.", zap.Error(err))
}
}
return nil
}
// Chase sets the block sync target height to be blockchain height + 1
func (bs *blockSyncer) Chase() {
if bs.commitHeight != bs.buf.CommitHeight() {
bs.commitHeight = bs.buf.CommitHeight()
return
}
// commit height hasn't changed since last chase interval
bs.worker.SetTargetHeight(bs.bc.TipHeight() + 1)
log.L().Info("Chaser is chasing.", zap.Uint64("stuck", bs.commitHeight))
}
| 1 | 14,598 | I wonder if it's necessary to let the app pass in the context. What app-level context should be sent? If not, is it enough for the p2p agent to compose a context with network info there? | iotexproject-iotex-core | go |
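To make the trade-off in the comment above concrete, here is a hypothetical sketch of the second option, where the p2p agent composes the context with network info itself instead of requiring callers to pass one in. All identifiers (`p2pInfo`, `withP2PContext`) are made up for illustration and do not come from iotex-core:

```go
// Hypothetical sketch: the p2p agent decorates a base context with
// network info, so the app does not have to supply a context itself.
package main

import (
	"context"
	"fmt"
)

// p2pInfo is a made-up stand-in for whatever network info the agent knows.
type p2pInfo struct {
	ChainID uint32
	PeerID  string
}

type p2pInfoKey struct{}

// withP2PContext attaches network info to a context; the agent would call
// this internally before invoking handlers such as ProcessSyncRequest.
func withP2PContext(ctx context.Context, info p2pInfo) context.Context {
	return context.WithValue(ctx, p2pInfoKey{}, info)
}

// p2pInfoFrom extracts the info again inside a handler.
func p2pInfoFrom(ctx context.Context) (p2pInfo, bool) {
	info, ok := ctx.Value(p2pInfoKey{}).(p2pInfo)
	return info, ok
}

func main() {
	ctx := withP2PContext(context.Background(), p2pInfo{ChainID: 1, PeerID: "peer-0"})
	if info, ok := p2pInfoFrom(ctx); ok {
		fmt.Println(info.ChainID, info.PeerID)
	}
}
```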
@@ -26,8 +26,11 @@
#define LBANN_INPUT_LAYER_INSTANTIATE
#include "lbann/layers/io/input_layer.hpp"
-#include "lbann/utils/profiling.hpp"
+
#include "lbann/callbacks/imcomm.hpp"
+#include "lbann/execution_contexts/execution_context.hpp"
+#include "lbann/execution_contexts/sgd_execution_context.hpp"
+#include "lbann/utils/profiling.hpp"
#include "lbann/utils/serialize.hpp"
namespace lbann { | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_INPUT_LAYER_INSTANTIATE
#include "lbann/layers/io/input_layer.hpp"
#include "lbann/utils/profiling.hpp"
#include "lbann/callbacks/imcomm.hpp"
#include "lbann/utils/serialize.hpp"
namespace lbann {
template <typename TensorDataType,
data_layout T_layout,
El::Device Dev>
void input_layer<TensorDataType, T_layout, Dev>::
setup_dims(DataReaderMetaData& dr_metadata) {
data_type_layer<TensorDataType>::setup_dims(dr_metadata);
for (int i = 0; i < this->get_num_children(); ++i) {
this->set_output_dims(get_data_dims(dr_metadata, i), i);
}
}
template <typename TensorDataType,
data_layout T_layout,
El::Device Dev>
void input_layer<TensorDataType, T_layout, Dev>::setup_data(size_t max_mini_batch_size) {
data_type_layer<TensorDataType>::setup_data(max_mini_batch_size);
// Resize output to maximum mini-batch size
for (int i = 0; i < this->get_num_children(); ++i) {
auto& output = this->get_activations(i);
output.Resize(output.Height(), max_mini_batch_size);
}
}
template <typename TensorDataType,
data_layout T_layout,
El::Device Dev>
void input_layer<TensorDataType, T_layout, Dev>::fp_setup_outputs(El::Int mini_batch_size) {
/// During model setup there is no valid execution context, but
/// during execution there is a context
if(this->m_model->has_valid_execution_context()) {
auto& c = static_cast<sgd_execution_context&>(this->m_model->get_execution_context());
auto mode = c.get_execution_mode();
data_coordinator& dc = c.get_trainer().get_data_coordinator();
// Determine model mini-batch size and effective mini-batch size
// Note: If inter-model communication is activated, the effective
// mini-batch is equal to the global mini-batch size.
/// @todo This functionality should probably be moved elsewhere
mini_batch_size = dc.get_current_mini_batch_size(mode);
auto effective_mini_batch_size = mini_batch_size;
for (auto&& cb : this->m_model->get_callbacks()) {
if (dynamic_cast<callback::imcomm*>(cb) != nullptr) {
effective_mini_batch_size = dc.get_current_global_mini_batch_size(mode);
break;
}
}
// Set mini-batch size in model
c.set_current_mini_batch_size(mini_batch_size);
c.set_effective_mini_batch_size(effective_mini_batch_size);
}
// Initialize matrices
data_type_layer<TensorDataType>::fp_setup_outputs(mini_batch_size);
}
template <typename TensorDataType,
data_layout T_layout,
El::Device Dev>
void input_layer<TensorDataType, T_layout, Dev>::fp_compute() {
execution_mode mode = this->m_model->get_execution_context().get_execution_mode();
buffered_data_coordinator<TensorDataType>& dc = static_cast<buffered_data_coordinator<TensorDataType>&>(this->m_model->get_execution_context().get_trainer().get_data_coordinator());
// partitioned_io_buffer<TensorDataType>* io_buffer = dc.get_active_buffer(mode);
// generic_io_buffer<TensorDataType>* io_buffer = dc.m_io_buffers[dc.get_active_buffer_idx(mode) % dc.m_io_buffers.size()];
// if(dynamic_cast<partitioned_io_buffer<TensorDataType>*>(io_buffer) != nullptr) {
// Use the predetermined size of the mini-batch to set the current
// batch size for the neural network
int num_samples_in_batch = dc.get_current_mini_batch_size(mode);
dc.update_num_samples_processed(mode, num_samples_in_batch);
std::map<input_data_type, AbsDistMatrixType*> input_buffers;
input_buffers[input_data_type::SAMPLES] = &(this->get_activations(0));
if(this->m_expected_num_child_layers > 1) {
if(is_for_regression()) {
input_buffers[input_data_type::RESPONSES] = &(this->get_activations(1));
}else {
input_buffers[input_data_type::LABELS] = &(this->get_activations(1));
}
}
dc.distribute_from_local_matrix(mode, input_buffers);
#ifdef LBANN_HAS_DISTCONV
if (this->distconv_enabled()) {
get_distconv_adapter().fp_compute();
}
#endif // LBANN_HAS_DISTCONV
}
template <typename TensorDataType,
data_layout T_layout,
El::Device Dev>
std::vector<int> input_layer<TensorDataType, T_layout, Dev>::
get_data_dims(DataReaderMetaData& dr_metadata, int child_index) const {
if(child_index == 0) {
return dr_metadata.data_dims[data_reader_target_mode::INPUT];
}else if(child_index == 1) {
return dr_metadata.data_dims[this->m_data_reader_mode];
}else {
LBANN_ERROR("get_data_dims: Invalid child index");
}
return std::vector<int>(1, 0);
}
#ifdef LBANN_HAS_DISTCONV
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
input_distconv_adapter<TensorDataType, T_layout, Dev>::
input_distconv_adapter(Layer& layer, const bool shuffle_required)
: data_type_distconv_adapter<TensorDataType>(layer),
m_shuffle_required(shuffle_required) {
// Input data is only processed when its consumer layer is also
// enabled for distconv
for (int i = 0; i < layer.get_num_children(); ++i) {
m_is_input_processed.push_back(layer.get_child_layers()[i]->distconv_enabled());
}
if (m_shuffle_required) {
m_shufflers.resize(layer.get_num_children());
}
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
bool input_distconv_adapter<TensorDataType, T_layout, Dev>::
is_input_processed(size_t index) const {
if (index >= m_is_input_processed.size()) {
LBANN_ERROR("Invalid index: ", index);
}
return m_is_input_processed[index];
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
typename input_distconv_adapter<TensorDataType, T_layout, Dev>::TensorHostShuffler&
input_distconv_adapter<TensorDataType, T_layout, Dev>::get_shuffler(
const TensorHost &src, const TensorHost &dst, int mat_idx) {
size_t cur_mb_size = src.get_shape()[dc::get_sample_dim()];
auto src_buf = m_shuffler_src_buf.get();
auto dst_buf = m_shuffler_dst_buf.get();
int shfl_idx = -1;
const auto& context = this->layer().get_model()->get_execution_context();
if (cur_mb_size == context.get_trainer().get_max_mini_batch_size()) {
shfl_idx = 0;
} else {
// The last remaining mini-batches for the train, validation, and
// testing modes
auto mode = context.get_execution_mode();
shfl_idx = 1 + static_cast<int>(mode);
}
assert_always(shfl_idx >= 0 && shfl_idx < 4);
auto &shfl = m_shufflers[mat_idx][shfl_idx];
if (shfl == nullptr) {
shfl = make_unique<TensorHostShuffler>(
src, dst, src_buf, dst_buf);
}
return *shfl;
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
void input_distconv_adapter<TensorDataType, T_layout, Dev>::setup_fp_tensors() {
const auto sample_dist = dc::get_hydrogen_data_parallel_distribution(
dc::get_num_dims(this->layer()));
for (int mat_idx = 0; mat_idx < this->layer().get_num_children(); ++mat_idx) {
if (!is_input_processed(mat_idx)) continue;
const auto shape = this->get_activations_shape(mat_idx);
auto local_shape = shape;
if (m_shuffle_required) {
local_shape[dc::get_sample_dim()] = 0;
} else {
local_shape = 0;
}
// Use the same MPI communicator for both IO buffers. This seems
// to work around MPI errors likely caused by the alltoallv for
// shuffling.
const dc::LocaleMPI loc(dc::get_mpi_comm(), false);
auto dist = this->get_activations_dist();
if (mat_idx == 1) {
// assumes no halo for the ground-truth data
dist.clear_overlap();
}
auto dist_no_halo = dist;
dist_no_halo.clear_overlap();
const auto original_host_tensor_dist = m_shuffle_required ?
sample_dist : dist_no_halo;
// Create a view to the host LBANN matrix
m_original_host_tensors.emplace_back(
make_unique<TensorHost>(shape, loc, original_host_tensor_dist, local_shape));
// When shuffled, host tensor will have the same distribution as
// the final output; otherwise, it is just a view to the host
// LBANN matrix, so no overlap.
auto host_tensor_dist = m_shuffle_required ? dist : dist_no_halo;
m_host_tensors.emplace_back(
make_unique<TensorHost>(shape, loc, host_tensor_dist));
if (m_shuffle_required) {
// TODO: This is a temporary hack. Should use
// CUDAHostPooledAllocator, but the shuffler is
// only specialized for BaseAllocator.
size_t buf_size = m_host_tensors.back()->get_local_real_size()
* sizeof(TensorDataType);
TensorDataType *buf = nullptr;
CHECK_CUDA(cudaMallocHost(&buf, buf_size));
// Note buf should be deallocated.
dc::tensor::View(*m_host_tensors.back(), buf);
setup_shuffler_buffers(*m_original_host_tensors.back(),
*m_host_tensors.back());
}
}
this->setup_activations();
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
std::unique_ptr<typename input_distconv_adapter<TensorDataType, T_layout, Dev>::TensorDevType>
input_distconv_adapter<TensorDataType, T_layout, Dev>::
setup_activations_i(int index) const {
if (!is_input_processed(index)) return nullptr;
if (index == 0) {
return data_type_distconv_adapter<TensorDataType>::
setup_activations_i(index);
} else {
assert_eq(index, 1);
// Note: the default setup_activations_i can't be used because
// the distribution might need to be changed to remove
// overlap. This can be fixed by making each tensor have a
// different distribution.
const dc::LocaleMPI loc(dc::get_mpi_comm(), false);
auto dist = this->get_activations_dist();
dist.clear_overlap();
const auto shape = get_activations_shape(index);
const auto local_shape = get_activations_local_shape(index);
auto t = make_unique<TensorDevType>(shape, loc, dist, local_shape);
assert0(t->allocate());
t->zero(hydrogen::cuda::GetDefaultStream());
return t;
}
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
dc::Shape input_distconv_adapter<TensorDataType, T_layout, Dev>::
get_activations_local_shape(int index) const {
// No enforced local shape as the activations tensor is always
// copied from the El matrix.
return dc::Shape(dc::get_num_dims(this->layer()), 0);
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
dc::Shape input_distconv_adapter<TensorDataType, T_layout, Dev>::
get_activations_shape(int index) const {
if (index == 0) {
return data_type_distconv_adapter<TensorDataType>::
get_activations_shape(index);
} else {
assert_eq(index, 1);
// TODO: This is a temporary hack. The label tensor shape should
// be set based on the shape set by the data reader, but the data
// reader does not provide it. Using the same shape as the data
// tensor works fine for the U-Net model.
auto shape = this->get_activations_shape(0);
auto label_size = data_type_distconv_adapter<TensorDataType>::
get_activations_shape(1).reduce_prod();
// std::getenv returns nullptr when the variable is unset; check the raw
// pointer before parsing to avoid constructing a std::string from nullptr.
const char* env = std::getenv("DISTCONV_LABEL_NUM_CHANNELS");
auto num_channels = (env != nullptr && *env != '\0')
? std::stoi(env) : label_size / shape.reduce_prod();
shape[-2] = num_channels;
return shape;
}
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
void input_distconv_adapter<TensorDataType, T_layout, Dev>::
setup_shuffler_buffers(const TensorHost &src, const TensorHost &dst) {
auto shuffler_src_size = TensorHostShuffler::get_buf_size(src);
if (m_shuffler_src_buf_size < shuffler_src_size) {
m_shuffler_src_buf_size = shuffler_src_size;
m_shuffler_src_buf =
std::unique_ptr<TensorDataType>(static_cast<TensorDataType*>(
dc::util::aligned_malloc(m_shuffler_src_buf_size)));
}
auto shuffler_dst_size = TensorHostShuffler::get_buf_size(dst);
if (m_shuffler_dst_buf_size < shuffler_dst_size) {
m_shuffler_dst_buf_size = shuffler_dst_size;
m_shuffler_dst_buf =
std::unique_ptr<TensorDataType>(static_cast<TensorDataType*>(
dc::util::aligned_malloc(m_shuffler_dst_buf_size)));
}
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
bool input_distconv_adapter<TensorDataType, T_layout, Dev>::
child_copy_required(size_t output_index) const {
// Not required when label is not handled.
if (output_index == 1 && !is_input_processed(1)) {
return false;
} else {
return data_type_distconv_adapter<TensorDataType>::
child_copy_required(output_index);
}
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
bool input_distconv_adapter<TensorDataType, T_layout, Dev>::
child_shuffle_required(size_t output_index) const {
// Not required when label is not handled.
if (output_index == 1 && !is_input_processed(1)) {
return false;
} else {
return data_type_distconv_adapter<TensorDataType>::
child_shuffle_required(output_index);
}
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
void input_distconv_adapter<TensorDataType, T_layout, Dev>::fp_compute() {
auto &l = dynamic_cast<input_layer<
TensorDataType, T_layout, Dev>&>(this->layer());
auto stream = hydrogen::cuda::GetDefaultStream();
// Note that the mini-batch size of the data reader is not
// actually the one for the current mini-batch as the mini-batch
// index is already updated by fp_compute.
const int mb_size = static_cast<sgd_execution_context&>(
l.get_model()->get_execution_context()).get_current_mini_batch_size();
for (int mat_idx = 0; mat_idx < l.get_num_children(); ++mat_idx) {
if (!is_input_processed(mat_idx)) continue;
// TODO: This is disabled as it raises an error when the HDF5 data
// reader with hyperslab labels is used. Remove this assertion or
// reshape the activation tensor (mat_idx=1).
// assert_eq(mb_size * dc::get_number_of_io_partitions(),
// l.get_activations(mat_idx).Width());
auto &original_tensor = *m_original_host_tensors[mat_idx];
auto &host_tensor = *m_host_tensors[mat_idx];
auto &device_tensor = this->get_activations(mat_idx);
// Adjust the mini-batch size
original_tensor.set_outermost_dimension(mb_size);
host_tensor.set_outermost_dimension(mb_size);
device_tensor.set_outermost_dimension(mb_size);
// Setup view
assert0(dc::tensor::View(
original_tensor,
l.get_activations(mat_idx).LockedBuffer()));
// Shuffle if necessary
if (m_shuffle_required) {
get_shuffler(
original_tensor, host_tensor, mat_idx).shuffle_forward(
original_tensor.get_const_base_ptr(),
host_tensor.get_base_ptr());
} else {
// The input buffer is already partitioned
assert0(dc::tensor::View(
host_tensor, original_tensor.get_const_buffer()));
}
// After this, there is no inter-process communication, so it's
// safe to exit if the local tensor is empty.
if (host_tensor.get_local_size() == 0) {
continue;
}
prof_region_begin("copy-to-device", prof_colors[1], false);
assert0(dc::tensor::Copy(
device_tensor, host_tensor, stream));
prof_region_end("copy-to-device", false);
}
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
const input_distconv_adapter<TensorDataType, T_layout, Dev>&
input_layer<TensorDataType, T_layout, Dev>::get_distconv_adapter() const {
return dynamic_cast<const input_distconv_adapter<
TensorDataType, T_layout, Dev>&>(
data_type_layer<TensorDataType>::get_distconv_adapter());
}
template <typename TensorDataType,
data_layout T_layout, El::Device Dev>
input_distconv_adapter<TensorDataType, T_layout, Dev>&
input_layer<TensorDataType, T_layout, Dev>::get_distconv_adapter() {
return const_cast<input_distconv_adapter<
TensorDataType, T_layout, Dev>&>(
static_cast<const input_layer<
TensorDataType, T_layout, Dev>&>(*this).get_distconv_adapter());
}
template <typename TensorDataType,
data_layout T_layout,
El::Device Dev>
bool input_layer<TensorDataType, T_layout, Dev>::
keep_original_outputs(int index) const {
// The original output matrices are always needed as we copy them
// into distconv tensors.
return true;
}
#endif // LBANN_HAS_DISTCONV
#define PROTO_DEVICE(T, Device) \
template class input_layer<T, data_layout::DATA_PARALLEL, Device>
#include "lbann/macros/instantiate_device.hpp"
}// namespace lbann
| 1 | 16,159 | Why do we need both includes here? | LLNL-lbann | cpp |
@@ -600,7 +600,7 @@ public class SmartStore {
List<String> soupNames = new ArrayList<String>();
Cursor cursor = null;
try {
- cursor = DBHelper.getInstance(db).query(db, SOUP_NAMES_TABLE, new String[]{SOUP_NAME_COL}, null, null, null);
+ cursor = DBHelper.getInstance(db).query(db, SOUP_NAMES_TABLE, new String[]{SOUP_NAME_COL}, SOUP_NAME_COL, null, null);
if (cursor.moveToFirst()) {
do {
soupNames.add(cursor.getString(0)); | 1 | /*
* Copyright (c) 2012-2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartstore.store;
import android.content.ContentValues;
import android.database.Cursor;
import android.text.TextUtils;
import android.util.Log;
import com.salesforce.androidsdk.smartstore.store.LongOperation.LongOperationType;
import com.salesforce.androidsdk.smartstore.store.QuerySpec.QueryType;
import net.sqlcipher.database.SQLiteDatabase;
import net.sqlcipher.database.SQLiteOpenHelper;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Smart store
*
* Provides a secure means for SalesforceMobileSDK Container-based applications to store objects in a persistent
* and searchable manner. Similar in some ways to CouchDB, SmartStore stores documents as JSON values.
* SmartStore is inspired by the Apple Newton OS Soup/Store model.
* The main challenge here is how to effectively store documents with dynamic fields, and still allow indexing and searching.
*/
public class SmartStore {
// Default
public static final int DEFAULT_PAGE_SIZE = 10;
// Table to keep track of soup names
protected static final String SOUP_NAMES_TABLE = "soup_names";
// Fts table suffix
public static final String FTS_SUFFIX = "_fts";
// Table to keep track of soup's index specs
protected static final String SOUP_INDEX_MAP_TABLE = "soup_index_map";
// Table to keep track of status of long operations in flight
protected static final String LONG_OPERATIONS_STATUS_TABLE = "long_operations_status";
// Columns of the soup index map table
protected static final String SOUP_NAME_COL = "soupName";
protected static final String PATH_COL = "path";
protected static final String COLUMN_NAME_COL = "columnName";
protected static final String COLUMN_TYPE_COL = "columnType";
// Columns of a soup table
protected static final String ID_COL = "id";
protected static final String CREATED_COL = "created";
protected static final String LAST_MODIFIED_COL = "lastModified";
protected static final String SOUP_COL = "soup";
// Column of a fts soup table
protected static final String DOCID_COL = "docid";
// Columns of long operations status table
protected static final String TYPE_COL = "type";
protected static final String DETAILS_COL = "details";
protected static final String STATUS_COL = "status";
// JSON fields added to soup element on insert/update
public static final String SOUP_ENTRY_ID = "_soupEntryId";
public static final String SOUP_LAST_MODIFIED_DATE = "_soupLastModifiedDate";
// Predicates
protected static final String SOUP_NAME_PREDICATE = SOUP_NAME_COL + " = ?";
protected static final String ID_PREDICATE = ID_COL + " = ?";
protected static final String DOCID_PREDICATE = DOCID_COL + " =?";
// Backing database
protected SQLiteDatabase dbLocal;
protected SQLiteOpenHelper dbOpenHelper;
private String passcode;
/**
* Changes the encryption key on the smartstore.
*
* @param db Database object.
* @param newKey New encryption key.
*/
public static synchronized void changeKey(SQLiteDatabase db, String newKey) {
synchronized(db) {
if (newKey != null && !newKey.trim().equals("")) {
db.execSQL("PRAGMA rekey = '" + newKey + "'");
}
}
}
/**
* Create soup index map table to keep track of soups' index specs
* Create soup name map table to keep track of soup name to table name mappings
* Called when the database is first created
*
* @param db
*/
public static void createMetaTables(SQLiteDatabase db) {
synchronized(db) {
// Create soup_index_map table
StringBuilder sb = new StringBuilder();
sb.append("CREATE TABLE ").append(SOUP_INDEX_MAP_TABLE).append(" (")
.append(SOUP_NAME_COL).append(" TEXT")
.append(",").append(PATH_COL).append(" TEXT")
.append(",").append(COLUMN_NAME_COL).append(" TEXT")
.append(",").append(COLUMN_TYPE_COL).append(" TEXT")
.append(")");
db.execSQL(sb.toString());
// Add index on soup_name column
db.execSQL(String.format("CREATE INDEX %s on %s ( %s )", SOUP_INDEX_MAP_TABLE + "_0", SOUP_INDEX_MAP_TABLE, SOUP_NAME_COL));
// Create soup_names table
// The table name for the soup will simply be table_<soupId>
sb = new StringBuilder();
sb.append("CREATE TABLE ").append(SOUP_NAMES_TABLE).append(" (")
.append(ID_COL).append(" INTEGER PRIMARY KEY AUTOINCREMENT")
.append(",").append(SOUP_NAME_COL).append(" TEXT")
.append(")");
db.execSQL(sb.toString());
// Add index on soup_name column
db.execSQL(String.format("CREATE INDEX %s on %s ( %s )", SOUP_NAMES_TABLE + "_0", SOUP_NAMES_TABLE, SOUP_NAME_COL));
// Create alter_soup_status table
createLongOperationsStatusTable(db);
}
}
/**
* Create long_operations_status table
* @param db
*/
public static void createLongOperationsStatusTable(SQLiteDatabase db) {
synchronized(SmartStore.class) {
StringBuilder sb = new StringBuilder();
sb.append("CREATE TABLE IF NOT EXISTS ").append(LONG_OPERATIONS_STATUS_TABLE).append(" (")
.append(ID_COL).append(" INTEGER PRIMARY KEY AUTOINCREMENT")
.append(",").append(TYPE_COL).append(" TEXT")
.append(",").append(DETAILS_COL).append(" TEXT")
.append(",").append(STATUS_COL).append(" TEXT")
.append(", ").append(CREATED_COL).append(" INTEGER")
.append(", ").append(LAST_MODIFIED_COL).append(" INTEGER")
.append(")");
db.execSQL(sb.toString());
}
}
/**
* @param db
*/
@Deprecated
public SmartStore(SQLiteDatabase db) {
this.dbLocal = db;
}
/**
* Relies on SQLiteOpenHelper for database handling.
*
* @param dbOpenHelper DB open helper.
* @param passcode Passcode.
*/
public SmartStore(SQLiteOpenHelper dbOpenHelper, String passcode) {
this.dbOpenHelper = dbOpenHelper;
this.passcode = passcode;
}
/**
* Return db
*/
public SQLiteDatabase getDatabase() {
if (dbLocal != null) {
return dbLocal;
} else {
return this.dbOpenHelper.getWritableDatabase(passcode);
}
}
/**
* Get database size
*/
public int getDatabaseSize() {
return (int) (new File(getDatabase().getPath()).length()); // XXX That cast will be trouble if the file is more than 2GB
}
/**
* Start transaction
*/
public void beginTransaction() {
getDatabase().beginTransaction();
}
/**
* End transaction (commit or rollback)
*/
public void endTransaction() {
getDatabase().endTransaction();
}
/**
* Mark transaction as successful (next call to endTransaction will be a commit)
*/
public void setTransactionSuccessful() {
getDatabase().setTransactionSuccessful();
}
/**
* Register a soup
*
* Create table for soupName with a column for the soup itself and columns for paths specified in indexSpecs
* Create indexes on the new table to make lookup faster
* Create rows in soup index map table for indexSpecs
* @param soupName
* @param indexSpecs
*/
public void registerSoup(String soupName, IndexSpec[] indexSpecs) {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
if (soupName == null) throw new SmartStoreException("Bogus soup name:" + soupName);
if (indexSpecs.length == 0) throw new SmartStoreException("No indexSpecs specified for soup: " + soupName);
if (hasSoup(soupName)) return; // soup already exist - do nothing
// First get a table name
String soupTableName = null;
ContentValues soupMapValues = new ContentValues();
soupMapValues.put(SOUP_NAME_COL, soupName);
try {
db.beginTransaction();
long soupId = DBHelper.getInstance(db).insert(db, SOUP_NAMES_TABLE, soupMapValues);
soupTableName = getSoupTableName(soupId);
db.setTransactionSuccessful();
} finally {
db.endTransaction();
}
// Do the rest - create table / indexes
registerSoupUsingTableName(soupName, indexSpecs, soupTableName);
}
}
/**
* Helper method for registerSoup
*
* @param soupName
* @param indexSpecs
* @param soupTableName
*/
protected void registerSoupUsingTableName(String soupName, IndexSpec[] indexSpecs, String soupTableName) {
// Prepare SQL for creating soup table and its indices
StringBuilder createTableStmt = new StringBuilder(); // to create new soup table
StringBuilder createFtsStmt = new StringBuilder(); // to create fts table
List<String> createIndexStmts = new ArrayList<String>(); // to create indices on new soup table
List<ContentValues> soupIndexMapInserts = new ArrayList<ContentValues>(); // to be inserted in soup index map table
IndexSpec[] indexSpecsToCache = new IndexSpec[indexSpecs.length];
List<String> columnsForFts = new ArrayList<String>();
createTableStmt.append("CREATE TABLE ").append(soupTableName).append(" (")
.append(ID_COL).append(" INTEGER PRIMARY KEY AUTOINCREMENT")
.append(", ").append(SOUP_COL).append(" TEXT")
.append(", ").append(CREATED_COL).append(" INTEGER")
.append(", ").append(LAST_MODIFIED_COL).append(" INTEGER");
int i = 0;
for (IndexSpec indexSpec : indexSpecs) {
// for create table
String columnName = soupTableName + "_" + i;
String columnType = indexSpec.type.getColumnType();
createTableStmt.append(", ").append(columnName).append(" ").append(columnType);
// for fts
if (indexSpec.type == Type.full_text) {
columnsForFts.add(columnName);
}
// for insert
ContentValues values = new ContentValues();
values.put(SOUP_NAME_COL, soupName);
values.put(PATH_COL, indexSpec.path);
values.put(COLUMN_NAME_COL, columnName);
values.put(COLUMN_TYPE_COL, indexSpec.type.toString());
soupIndexMapInserts.add(values);
// for create index
String indexName = soupTableName + "_" + i + "_idx";
createIndexStmts.add(String.format("CREATE INDEX %s on %s ( %s )", indexName, soupTableName, columnName));
// for the cache
indexSpecsToCache[i] = new IndexSpec(indexSpec.path, indexSpec.type, columnName);
i++;
}
createTableStmt.append(")");
// fts
if (columnsForFts.size() > 0) {
createFtsStmt.append(String.format("CREATE VIRTUAL TABLE %s%s USING fts4(%s)", soupTableName, FTS_SUFFIX, TextUtils.join(",", columnsForFts)));
}
// Run SQL for creating soup table and its indices
final SQLiteDatabase db = getDatabase();
db.execSQL(createTableStmt.toString());
if (columnsForFts.size() > 0) {
db.execSQL(createFtsStmt.toString());
}
for (String createIndexStmt : createIndexStmts) {
db.execSQL(createIndexStmt.toString());
}
try {
db.beginTransaction();
for (ContentValues values : soupIndexMapInserts) {
DBHelper.getInstance(db).insert(db, SOUP_INDEX_MAP_TABLE, values);
}
db.setTransactionSuccessful();
// Add to soupNameToTableNamesMap
DBHelper.getInstance(db).cacheTableName(soupName, soupTableName);
// Add to soupNameToIndexSpecsMap
DBHelper.getInstance(db).cacheIndexSpecs(soupName, indexSpecsToCache);
} finally {
db.endTransaction();
}
}
/**
* Finish long operations that were interrupted
*/
public void resumeLongOperations() {
synchronized(SmartStore.class) {
for (LongOperation longOperation : getLongOperations()) {
try {
longOperation.run();
} catch (Exception e) {
Log.e("SmartStore.resumeLongOperations", "Unexpected error", e);
}
}
}
}
/**
* @return unfinished long operations
*/
public LongOperation[] getLongOperations() {
List<LongOperation> longOperations = new ArrayList<LongOperation>();
synchronized(SmartStore.class) {
Cursor cursor = null;
final SQLiteDatabase db = getDatabase();
try {
cursor = DBHelper.getInstance(db).query(db,
LONG_OPERATIONS_STATUS_TABLE, new String[] {ID_COL, TYPE_COL, DETAILS_COL, STATUS_COL},
null, null, null);
if (cursor.moveToFirst()) {
do {
try {
long rowId = cursor.getLong(0);
LongOperationType operationType = LongOperationType.valueOf(cursor.getString(1));
JSONObject details = new JSONObject(cursor.getString(2));
String statusStr = cursor.getString(3);
longOperations.add(operationType.getOperation(this, rowId, details, statusStr));
}
catch (Exception e) {
Log.e("SmartStore.getLongOperations", "Unexpected error", e);
}
}
while (cursor.moveToNext());
}
} finally {
safeClose(cursor);
}
}
return longOperations.toArray(new LongOperation[0]);
}
/**
* Alter soup
*
* @param soupName
* @param indexSpecs array of index specs
* @param reIndexData
* @throws JSONException
*/
public void alterSoup(String soupName, IndexSpec[] indexSpecs,
boolean reIndexData) throws JSONException {
AlterSoupLongOperation operation = new AlterSoupLongOperation(this, soupName, indexSpecs, reIndexData);
operation.run();
}
/**
* Re-index all soup elements for passed indexPaths
* NB: only indexPath that have IndexSpec on them will be indexed
*
* @param soupName
* @param indexPaths
* @param handleTx
*/
public void reIndexSoup(String soupName, String[] indexPaths, boolean handleTx) {
synchronized(SmartStore.class) {
final SQLiteDatabase db = getDatabase();
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
// Getting index specs from indexPaths
Map<String, IndexSpec> mapAllSpecs = IndexSpec.mapForIndexSpecs(getSoupIndexSpecs(soupName));
List<IndexSpec> indexSpecsList = new ArrayList<IndexSpec>();
for (String indexPath : indexPaths) {
if (mapAllSpecs.containsKey(indexPath)) {
indexSpecsList.add(mapAllSpecs.get(indexPath));
}
else {
Log.w("SmartStore.reIndexSoup", "Cannot re-index " + indexPath + " - it does not have an index");
}
}
IndexSpec[] indexSpecs = indexSpecsList.toArray(new IndexSpec[0]);
if (indexSpecs.length == 0) {
// Nothing to do
return;
}
boolean hasFts = IndexSpec.hasFTS(indexSpecs);
if (handleTx) {
db.beginTransaction();
}
Cursor cursor = null;
try {
cursor = DBHelper.getInstance(db).query(db, soupTableName, new String[] {ID_COL, SOUP_COL}, null, null, null);
if (cursor.moveToFirst()) {
do {
String soupEntryId = cursor.getString(0);
String soupRaw = cursor.getString(1);
try {
JSONObject soupElt = new JSONObject(soupRaw);
ContentValues contentValues = new ContentValues();
projectIndexedPaths(soupElt, contentValues, indexSpecs, null);
DBHelper.getInstance(db).update(db, soupTableName, contentValues, ID_PREDICATE, soupEntryId + "");
// Fts
if (hasFts) {
String soupTableNameFts = soupTableName + FTS_SUFFIX;
ContentValues contentValuesFts = new ContentValues();
projectIndexedPaths(soupElt, contentValuesFts, indexSpecs, Type.full_text);
DBHelper.getInstance(db).update(db, soupTableNameFts, contentValuesFts, DOCID_PREDICATE, soupEntryId + "");
}
}
catch (JSONException e) {
Log.w("SmartStore.alterSoup", "Could not parse soup element " + soupEntryId, e);
// Should not have happen - just keep going
}
}
while (cursor.moveToNext());
}
}
finally {
if (handleTx) {
db.setTransactionSuccessful();
db.endTransaction();
}
safeClose(cursor);
}
}
}
/**
* Return indexSpecs of soup
*
* @param soupName
* @return
*/
public IndexSpec[] getSoupIndexSpecs(String soupName) {
synchronized(SmartStore.class) {
final SQLiteDatabase db = getDatabase();
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
return DBHelper.getInstance(db).getIndexSpecs(db, soupName);
}
}
/**
* Clear all rows from a soup
* @param soupName
*/
public void clearSoup(String soupName) {
synchronized(SmartStore.class) {
final SQLiteDatabase db = getDatabase();
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
db.beginTransaction();
try {
DBHelper.getInstance(db).delete(db, soupTableName, null);
if (hasFTS(soupName)) {
DBHelper.getInstance(db).delete(db, soupTableName + FTS_SUFFIX, null);
}
} finally {
db.setTransactionSuccessful();
db.endTransaction();
}
}
}
/**
* Check if soup exists
*
* @param soupName
* @return true if soup exists, false otherwise
*/
public boolean hasSoup(String soupName) {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
return DBHelper.getInstance(db).getSoupTableName(db, soupName) != null;
}
}
/**
* Destroy a soup
*
* Drop table for soupName
* Cleanup entries in soup index map table
* @param soupName
*/
public void dropSoup(String soupName) {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName != null) {
db.execSQL("DROP TABLE IF EXISTS " + soupTableName);
if (hasFTS(soupName)) {
db.execSQL("DROP TABLE IF EXISTS " + soupTableName + FTS_SUFFIX);
}
try {
db.beginTransaction();
DBHelper.getInstance(db).delete(db, SOUP_NAMES_TABLE, SOUP_NAME_PREDICATE, soupName);
DBHelper.getInstance(db).delete(db, SOUP_INDEX_MAP_TABLE, SOUP_NAME_PREDICATE, soupName);
db.setTransactionSuccessful();
// Remove from cache
DBHelper.getInstance(db).removeFromCache(soupName);
} finally {
db.endTransaction();
}
}
}
}
/**
* Destroy all the soups in the smartstore
*/
public void dropAllSoups() {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
List<String> soupNames = getAllSoupNames();
for(String soupName : soupNames) {
dropSoup(soupName);
}
}
}
/**
* @return all soup names in the smartstore
*/
public List<String> getAllSoupNames() {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
List<String> soupNames = new ArrayList<String>();
Cursor cursor = null;
try {
cursor = DBHelper.getInstance(db).query(db, SOUP_NAMES_TABLE, new String[]{SOUP_NAME_COL}, null, null, null);
if (cursor.moveToFirst()) {
do {
soupNames.add(cursor.getString(0));
}
while (cursor.moveToNext());
}
}
finally {
safeClose(cursor);
}
return soupNames;
}
}
/**
* Run a query given by its query Spec, only returned results from selected page
* @param querySpec
* @param pageIndex
* @throws JSONException
*/
public JSONArray query(QuerySpec querySpec, int pageIndex) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
QueryType qt = querySpec.queryType;
String sql = convertSmartSql(querySpec.smartSql);
// Page
int offsetRows = querySpec.pageSize * pageIndex;
int numberRows = querySpec.pageSize;
String limit = offsetRows + "," + numberRows;
Cursor cursor = null;
try {
cursor = DBHelper.getInstance(db).limitRawQuery(db, sql, limit, querySpec.getArgs());
JSONArray results = new JSONArray();
if (cursor.moveToFirst()) {
do {
// Smart queries
if (qt == QueryType.smart) {
results.put(getDataFromRow(cursor));
}
// Exact/like/range queries
else {
results.put(new JSONObject(cursor.getString(0)));
}
} while (cursor.moveToNext());
}
return results;
} finally {
safeClose(cursor);
}
}
}
/**
* Return JSONArray for one row of data from cursor
* @param cursor
* @return
* @throws JSONException
*/
private JSONArray getDataFromRow(Cursor cursor) throws JSONException {
JSONArray row = new JSONArray();
int columnCount = cursor.getColumnCount();
for (int i=0; i<columnCount; i++) {
String raw = cursor.getString(i);
// Is this column holding a serialized json object?
if (cursor.getColumnName(i).endsWith(SOUP_COL)) {
row.put(new JSONObject(raw));
// Note: we could end up returning a string if you aliased the column
}
else {
// TODO Leverage cursor.getType once our min api is 11 or above
// For now, we do our best to guess
// Is it holding an integer?
try {
Long n = Long.parseLong(raw);
row.put(n);
// Note: we could end up returning an integer for a string column if you have a string value that contains just an integer
}
// Is it holding a floating point number?
catch (NumberFormatException e) {
try {
Double d = Double.parseDouble(raw);
// No exception, let's get the value straight from the cursor
// XXX Double.parseDouble(cursor.getString(i)) is sometimes different from cursor.getDouble(i) !!!
d = cursor.getDouble(i);
row.put(d);
// Note: we could end up returning an integer for a string column if you have a string value that contains just an integer
}
// It must be holding a string then
catch (NumberFormatException ne) {
row.put(raw);
}
}
}
}
return row;
}
/**
* @param querySpec
* @return count of results for a query
*/
public int countQuery(QuerySpec querySpec) {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
String countSql = convertSmartSql(querySpec.countSmartSql);
return DBHelper.getInstance(db).countRawCountQuery(db, countSql, querySpec.getArgs());
}
}
/**
* @param smartSql
* @return
*/
public String convertSmartSql(String smartSql) {
final SQLiteDatabase db = getDatabase();
synchronized (db) {
return SmartSqlHelper.getInstance(db).convertSmartSql(db, smartSql);
}
}
/**
* Create (and commits)
* Note: Passed soupElt is modified (last modified date and soup entry id fields)
* @param soupName
* @param soupElt
* @return soupElt created or null if creation failed
* @throws JSONException
*/
public JSONObject create(String soupName, JSONObject soupElt) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
return create(soupName, soupElt, true);
}
}
/**
* Create
* Note: Passed soupElt is modified (last modified date and soup entry id fields)
* @param soupName
* @param soupElt
* @return
* @throws JSONException
*/
public JSONObject create(String soupName, JSONObject soupElt, boolean handleTx) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
IndexSpec[] indexSpecs = DBHelper.getInstance(db).getIndexSpecs(db, soupName);
try {
if (handleTx) {
db.beginTransaction();
}
long now = System.currentTimeMillis();
long soupEntryId = DBHelper.getInstance(db).getNextId(db, soupTableName);
// Adding fields to soup element
soupElt.put(SOUP_ENTRY_ID, soupEntryId);
soupElt.put(SOUP_LAST_MODIFIED_DATE, now);
ContentValues contentValues = new ContentValues();
contentValues.put(ID_COL, soupEntryId);
contentValues.put(SOUP_COL, "");
contentValues.put(CREATED_COL, now);
contentValues.put(LAST_MODIFIED_COL, now);
contentValues.put(SOUP_COL, soupElt.toString());
projectIndexedPaths(soupElt, contentValues, indexSpecs, null);
// Inserting into database
boolean success = DBHelper.getInstance(db).insert(db, soupTableName, contentValues) == soupEntryId;
// Fts
if (success && hasFTS(soupName)) {
String soupTableNameFts = soupTableName + FTS_SUFFIX;
ContentValues contentValuesFts = new ContentValues();
projectIndexedPaths(soupElt, contentValuesFts, indexSpecs, Type.full_text);
// InsertHelper not working against virtual fts table
db.insert(soupTableNameFts, null, contentValuesFts);
}
// Commit if successful
if (success) {
if (handleTx) {
db.setTransactionSuccessful();
}
return soupElt;
} else {
return null;
}
}
finally {
if (handleTx) {
db.endTransaction();
}
}
}
}
/**
* @param soupName
* @return true if soup has at least one full-text search index
*/
private boolean hasFTS(String soupName) {
SQLiteDatabase db = getDatabase();
synchronized (db) {
return DBHelper.getInstance(db).hasFTS(db, soupName);
}
}
/**
* Populate content values by projecting index specs that match typeFilter (or all if typeFilter is null)
* @param soupElt
* @param contentValues
* @param indexSpecs
* @param typeFilter pass null for all
*/
private void projectIndexedPaths(JSONObject soupElt, ContentValues contentValues, IndexSpec[] indexSpecs, Type typeFilter) {
for (IndexSpec indexSpec : indexSpecs) {
if (typeFilter == null || typeFilter == indexSpec.type) {
projectIndexedPath(soupElt, contentValues, indexSpec);
}
}
}
/**
* @param soupElt
* @param contentValues
* @param indexSpec
*/
private void projectIndexedPath(JSONObject soupElt, ContentValues contentValues, IndexSpec indexSpec) {
Object value = project(soupElt, indexSpec.path);
switch (indexSpec.type) {
case integer:
contentValues.put(indexSpec.columnName, value != null ? ((Number) value).longValue() : null); break;
case string:
case full_text:
contentValues.put(indexSpec.columnName, value != null ? value.toString() : null); break;
case floating:
contentValues.put(indexSpec.columnName, value != null ? ((Number) value).doubleValue() : null); break;
}
}
/**
* Retrieve
* @param soupName
* @param soupEntryIds
* @return JSONArray of JSONObject's with the given soupEntryIds
* @throws JSONException
*/
public JSONArray retrieve(String soupName, Long... soupEntryIds) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
Cursor cursor = null;
try {
JSONArray result = new JSONArray();
cursor = DBHelper.getInstance(db).query(db, soupTableName, new String[] {SOUP_COL}, null, null, getSoupEntryIdsPredicate(soupEntryIds), (String[]) null);
if (!cursor.moveToFirst()) {
return result;
}
do {
String raw = cursor.getString(cursor.getColumnIndex(SOUP_COL));
result.put(new JSONObject(raw));
}
while (cursor.moveToNext());
return result;
}
finally {
safeClose(cursor);
}
}
}
/**
* Update (and commits)
* Note: Passed soupElt is modified (last modified date and soup entry id fields)
* @param soupName
* @param soupElt
* @param soupEntryId
* @return soupElt updated or null if update failed
* @throws JSONException
*/
public JSONObject update(String soupName, JSONObject soupElt, long soupEntryId) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
return update(soupName, soupElt, soupEntryId, true);
}
}
/**
* Update
* Note: Passed soupElt is modified (last modified date and soup entry id fields)
* @param soupName
* @param soupElt
* @param soupEntryId
* @return
* @throws JSONException
*/
public JSONObject update(String soupName, JSONObject soupElt, long soupEntryId, boolean handleTx) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
try {
if (handleTx) {
db.beginTransaction();
}
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
IndexSpec[] indexSpecs = DBHelper.getInstance(db).getIndexSpecs(db, soupName);
long now = System.currentTimeMillis();
// In the case of an upsert with external id, _soupEntryId won't be in soupElt
soupElt.put(SOUP_ENTRY_ID, soupEntryId);
// Updating last modified field in soup element
soupElt.put(SOUP_LAST_MODIFIED_DATE, now);
// Preparing data for row
ContentValues contentValues = new ContentValues();
contentValues.put(SOUP_COL, soupElt.toString());
contentValues.put(LAST_MODIFIED_COL, now);
projectIndexedPaths(soupElt, contentValues, indexSpecs, null);
// Updating database
boolean success = DBHelper.getInstance(db).update(db, soupTableName, contentValues, ID_PREDICATE, soupEntryId + "") == 1;
// Fts
if (success && hasFTS(soupName)) {
String soupTableNameFts = soupTableName + FTS_SUFFIX;
ContentValues contentValuesFts = new ContentValues();
projectIndexedPaths(soupElt, contentValuesFts, indexSpecs, Type.full_text);
success = DBHelper.getInstance(db).update(db, soupTableNameFts, contentValuesFts, DOCID_PREDICATE, soupEntryId + "") == 1;
}
if (success) {
if (handleTx) {
db.setTransactionSuccessful();
}
return soupElt;
} else {
return null;
}
} finally {
if (handleTx) {
db.endTransaction();
}
}
}
}
/**
* Upsert (and commits)
* @param soupName
* @param soupElt
* @param externalIdPath
* @return soupElt upserted or null if upsert failed
* @throws JSONException
*/
public JSONObject upsert(String soupName, JSONObject soupElt, String externalIdPath) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
return upsert(soupName, soupElt, externalIdPath, true);
}
}
/**
* Upsert (and commits) expecting _soupEntryId in soupElt for updates
* @param soupName
* @param soupElt
* @return
* @throws JSONException
*/
public JSONObject upsert(String soupName, JSONObject soupElt) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
return upsert(soupName, soupElt, SOUP_ENTRY_ID);
}
}
/**
* Upsert
* @param soupName
* @param soupElt
* @param externalIdPath
* @param handleTx
* @return
* @throws JSONException
*/
public JSONObject upsert(String soupName, JSONObject soupElt, String externalIdPath, boolean handleTx) throws JSONException {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
long entryId = -1;
if (externalIdPath.equals(SOUP_ENTRY_ID)) {
if (soupElt.has(SOUP_ENTRY_ID)) {
entryId = soupElt.getLong(SOUP_ENTRY_ID);
}
} else {
Object externalIdObj = project(soupElt, externalIdPath);
if (externalIdObj != null) {
entryId = lookupSoupEntryId(soupName, externalIdPath, externalIdObj + "");
}
}
// If we have an entryId, let's do an update, otherwise let's do a create
if (entryId != -1) {
return update(soupName, soupElt, entryId, handleTx);
} else {
return create(soupName, soupElt, handleTx);
}
}
}
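// Editorial sketch (not part of the original file): how the upsert API above might be
// called by client code. The soup name, field names and the "store" instance are
// hypothetical; the point is the external-id path, which upsert() resolves through
// lookupSoupEntryId() before deciding between update() and create().
//
// JSONObject employee = new JSONObject();
// employee.put("externalId", "E-42");
// employee.put("name", "Ada");
// JSONObject saved = store.upsert("employees", employee, "externalId");
// if (saved != null) {
//     long entryId = saved.getLong(SOUP_ENTRY_ID); // set by create()/update()
// }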
/**
* Look for a soup element where fieldPath's value is fieldValue
* Return its soupEntryId
* Return -1 if not found
* Throw an exception if fieldPath is not indexed
* Throw an exception if more than one soup element is found
*
* @param soupName
* @param fieldPath
* @param fieldValue
*/
public long lookupSoupEntryId(String soupName, String fieldPath, String fieldValue) {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
String columnName = DBHelper.getInstance(db).getColumnNameForPath(db, soupName, fieldPath);
Cursor cursor = null;
try {
cursor = db.query(soupTableName, new String[] {ID_COL}, columnName + " = ?", new String[] { fieldValue }, null, null, null);
if (cursor.getCount() > 1) {
throw new SmartStoreException(String.format("There are more than one soup elements where %s is %s", fieldPath, fieldValue));
}
if (cursor.moveToFirst()) {
return cursor.getLong(0);
} else {
return -1; // not found
}
} finally {
safeClose(cursor);
}
}
}
/**
* Delete (and commits)
* @param soupName
* @param soupEntryIds
*/
public void delete(String soupName, Long... soupEntryIds) {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
delete(soupName, soupEntryIds, true);
}
}
/**
* Delete
* @param soupName
* @param soupEntryIds
* @param handleTx
*/
public void delete(String soupName, Long[] soupEntryIds, boolean handleTx) {
final SQLiteDatabase db = getDatabase();
synchronized(db) {
String soupTableName = DBHelper.getInstance(db).getSoupTableName(db, soupName);
if (soupTableName == null) throw new SmartStoreException("Soup: " + soupName + " does not exist");
if (handleTx) {
db.beginTransaction();
}
try {
db.delete(soupTableName, getSoupEntryIdsPredicate(soupEntryIds), (String []) null);
if (hasFTS(soupName)) {
db.delete(soupTableName + FTS_SUFFIX, getDocidsPredicate(soupEntryIds), (String[]) null);
}
if (handleTx) {
db.setTransactionSuccessful();
}
} finally {
if (handleTx) {
db.endTransaction();
}
}
}
}
/**
* @return predicate to match soup entries by id
*/
private String getSoupEntryIdsPredicate(Long[] soupEntryIds) {
return ID_COL + " IN (" + TextUtils.join(",", soupEntryIds)+ ")";
}
/**
* @return predicate to match entries by docid
*/
private String getDocidsPredicate(Long[] docids) {
return DOCID_COL + " IN (" + TextUtils.join(",", docids)+ ")";
}
/**
* @param soupId
* @return database table name backing the soup with the given id
*/
public static String getSoupTableName(long soupId) {
return "TABLE_" + soupId;
}
/**
* Close the given cursor if it is not null.
* @param cursor
*/
private void safeClose(Cursor cursor) {
if (cursor != null) {
cursor.close();
}
}
/**
* @param soup
* @param path
* @return object at path in soup
*/
public static Object project(JSONObject soup, String path) {
if (soup == null) {
return null;
}
if (path == null || path.equals("")) {
return soup;
}
String[] pathElements = path.split("[.]");
Object o = soup;
for (String pathElement : pathElements) {
if (o != null) {
o = ((JSONObject) o).opt(pathElement);
}
}
return o;
}
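// Editorial sketch (not part of the original file): what project() returns for a few
// paths; the JSON content below is made up purely for illustration.
//
// JSONObject soup = new JSONObject("{\"address\":{\"city\":\"Paris\",\"zip\":\"75001\"}}");
// project(soup, "address.city");    // -> "Paris"
// project(soup, "address.country"); // -> null (path not present)
// project(soup, "");                // -> the soup object itself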
/**
* Enum for column type
*/
public enum Type {
string("TEXT"), integer("INTEGER"), floating("REAL"), full_text("TEXT");
private String columnType;
private Type(String columnType) {
this.columnType = columnType;
}
public String getColumnType() {
return columnType;
}
}
/**
* Exception thrown by smart store
*
*/
public static class SmartStoreException extends RuntimeException {
public SmartStoreException(String message) {
super(message);
}
private static final long serialVersionUID = -6369452803270075464L;
}
}
| 1 | 14,766 | SmartStoreInspectorTest expected results in a certain order | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -310,6 +310,11 @@ const conduit::Node& data_reader_jag_conduit::get_conduit_node(const conduit::No
}
bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) const {
+
+ if (m_io_thread_pool != nullptr && m_using_random_node.count(m_io_thread_pool->get_local_thread_id())) {
+ LBANN_ERROR("previously retrieved a random conduit node from data_store, so shouldn't be here");
+ }
+
const sample_t& s = m_sample_list[i];
const std::string& sample_name = s.second;
const std::string path = sample_name + key; | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
////////////////////////////////////////////////////////////////////////////////
#include "lbann/data_readers/data_reader_jag_conduit.hpp"
#include "lbann/io/data_buffers/partitioned_io_buffer.hpp"
#include "lbann/data_store/data_store_jag.hpp"
#include "lbann/models/model.hpp"
#ifdef LBANN_HAS_CONDUIT
#include "lbann/utils/file_utils.hpp" // for add_delimiter() in load()
#include "lbann/data_readers/opencv_extensions.hpp"
#include <limits> // numeric_limits
#include <algorithm> // max_element
#include <numeric> // accumulate
#include <functional> // multiplies
#include <type_traits>// is_same
#include <set>
#include <map>
#include "lbann/data_readers/image_utils.hpp"
#include <omp.h>
#include "lbann/utils/timer.hpp"
#include "lbann/utils/glob.hpp"
#include "lbann/utils/peek_map.hpp"
#include "conduit/conduit_relay.hpp"
#include "conduit/conduit_relay_io_hdf5.hpp"
#include <cereal/archives/binary.hpp>
#include <sstream>
#define SAMPLE_ID_PAD 7
// This macro may be moved to a global scope
#define _THROW_LBANN_EXCEPTION_(_CLASS_NAME_,_MSG_) { \
std::stringstream _err; \
_err << __FILE__ << ' ' << __LINE__ << " :: " \
<< (_CLASS_NAME_) << "::" << (_MSG_); \
throw lbann_exception(_err.str()); \
}
#define _THROW_LBANN_EXCEPTION2_(_CLASS_NAME_,_MSG1_,_MSG2_) { \
std::stringstream _err; \
_err << __FILE__ << ' ' << __LINE__ << " :: " \
<< (_CLASS_NAME_) << "::" << (_MSG1_) << (_MSG2_); \
throw lbann_exception(_err.str()); \
}
// This comes after all the headers, and is only visible within the current implementation file.
// To make sure, we put '#undef _CN_' at the end of this file
#define _CN_ "data_reader_jag_conduit"
namespace lbann {
std::unordered_map<std::string, int> data_reader_jag_conduit::m_num_local_readers;
const std::set<std::string> data_reader_jag_conduit::non_numeric_vars = {
"fusion_reaction",
"fusion_model_reaction",
"radial_profile",
"postp_timeseries_vars",
"name",
"solver",
"mesh_def",
"hs_volume_integral",
"fusion_model_sv",
"shell_model",
"shape_model",
"ablation_cv_model",
"infalling_model",
"radiation_model",
"hotspot_model",
"shape_model_initial_velocity_amplitude",
"stopping_model",
"energy_balance_model_ablation_cv_model",
"solver_method",
"conduction_model_conductivity",
"solver_mode"
};
void data_reader_jag_conduit::set_io_buffer_type(const std::string io_buffer) {
m_io_buffer_type = io_buffer;
}
void data_reader_jag_conduit::set_local_id(const std::string role) {
m_local_reader_id = m_num_local_readers[role]++;
}
int data_reader_jag_conduit::get_local_id(const std::string role) const {
return m_local_reader_id;
}
void data_reader_jag_conduit::set_leading_reader(data_reader_jag_conduit* r) {
m_leading_reader = r;
}
data_reader_jag_conduit* data_reader_jag_conduit::get_leading_reader() {
return m_leading_reader;
}
void data_reader_jag_conduit::shuffle_indices(rng_gen& gen) {
if ((m_leading_reader != this) && (m_leading_reader != nullptr)) {
m_shuffled_indices = m_leading_reader->get_shuffled_indices();
return;
}
generic_data_reader::shuffle_indices(gen);
m_sample_list.compute_epochs_file_usage(get_shuffled_indices(), get_mini_batch_size(), *m_comm);
}
int data_reader_jag_conduit::compute_max_num_parallel_readers() {
if (m_io_buffer_type == "partitioned") {
set_num_parallel_readers(partitioned_io_buffer::compute_max_num_parallel_readers(
0, get_mini_batch_size(),
get_num_parallel_readers(), get_comm()));
set_sample_stride(get_num_parallel_readers());
set_iteration_stride(1);
} else {
_THROW_LBANN_EXCEPTION_(get_type(), " unknown io_buffer type: " + m_io_buffer_type);
}
return get_num_parallel_readers();
}
bool data_reader_jag_conduit::check_num_parallel_readers(long data_set_size) {
return true;
}
data_reader_jag_conduit::data_reader_jag_conduit(const std::shared_ptr<cv_process>& pp, bool shuffle)
: generic_data_reader(shuffle) {
set_defaults();
if (!pp) {
_THROW_LBANN_EXCEPTION_(get_type(), " construction error: no image processor");
}
m_master_pps = lbann::make_unique<cv_process>(*pp);
}
void data_reader_jag_conduit::copy_members(const data_reader_jag_conduit& rhs) {
m_independent = rhs.m_independent;
m_independent_groups = rhs.m_independent_groups;
m_dependent = rhs.m_dependent;
m_dependent_groups = rhs.m_dependent_groups;
m_image_width = rhs.m_image_width;
m_image_height = rhs.m_image_height;
m_image_num_channels = rhs.m_image_num_channels;
m_num_img_srcs = rhs.m_num_img_srcs;
m_split_channels = rhs.m_split_channels;
set_linearized_image_size();
m_is_data_loaded = rhs.m_is_data_loaded;
m_emi_image_keys = rhs.m_emi_image_keys;
m_scalar_keys = rhs.m_scalar_keys;
m_input_keys = rhs.m_input_keys;
if (!rhs.m_master_pps) {
_THROW_LBANN_EXCEPTION_(get_type(), " construction error: no image processor");
}
m_master_pps = lbann::make_unique<cv_process>(*rhs.m_master_pps);
m_uniform_input_type = rhs.m_uniform_input_type;
m_output_scalar_prefix = rhs.m_output_scalar_prefix;
m_output_image_prefix = rhs.m_output_image_prefix;
m_input_prefix = rhs.m_input_prefix;
m_scalar_filter = rhs.m_scalar_filter;
m_scalar_prefix_filter = rhs.m_scalar_prefix_filter;
m_input_filter = rhs.m_input_filter;
m_input_prefix_filter = rhs.m_input_prefix_filter;
m_io_buffer_type = rhs.m_io_buffer_type;
m_local_reader_id = rhs.m_local_reader_id;
//TODO: need to make sure this is what we want
m_leading_reader = rhs.m_leading_reader;
El::Copy(rhs.m_data_cache, m_data_cache);
El::Copy(rhs.m_response_cache, m_response_cache);
El::Copy(rhs.m_label_cache, m_label_cache);
m_cached_data_mb_size = rhs.m_cached_data_mb_size;
m_cached_response_mb_size = rhs.m_cached_response_mb_size;
m_cached_label_mb_size = rhs.m_cached_label_mb_size;
m_image_normalization_params = rhs.m_image_normalization_params;
m_scalar_normalization_params = rhs.m_scalar_normalization_params;
m_input_normalization_params = rhs.m_input_normalization_params;
m_sample_list = rhs.m_sample_list;
m_list_per_trainer = rhs.m_list_per_trainer;
m_list_per_model = rhs.m_list_per_model;
}
data_reader_jag_conduit::data_reader_jag_conduit(const data_reader_jag_conduit& rhs)
: generic_data_reader(rhs) {
copy_members(rhs);
}
data_reader_jag_conduit& data_reader_jag_conduit::operator=(const data_reader_jag_conduit& rhs) {
// check for self-assignment
if (this == &rhs) {
return (*this);
}
generic_data_reader::operator=(rhs);
copy_members(rhs);
return (*this);
}
data_reader_jag_conduit::~data_reader_jag_conduit() {
}
void data_reader_jag_conduit::set_defaults() {
m_jag_store = nullptr;
m_independent.clear();
m_independent_groups.clear();
m_dependent.clear();
m_dependent_groups.clear();
m_image_width = 0;
m_image_height = 0;
m_image_num_channels = 1;
set_linearized_image_size();
m_num_img_srcs = 1u;
m_split_channels = false;
m_is_data_loaded = false;
m_num_labels = 0;
m_emi_image_keys.clear();
m_scalar_keys.clear();
m_input_keys.clear();
m_uniform_input_type = false;
m_output_scalar_prefix = "";
m_output_image_prefix = "";
m_input_prefix = "";
m_scalar_filter.clear();
m_scalar_prefix_filter.clear();
m_input_filter.clear();
m_input_prefix_filter.clear();
m_io_buffer_type = "";
m_local_reader_id = 0;
m_leading_reader = this;
m_cached_data_mb_size = 0;
m_cached_response_mb_size = 0;
m_cached_label_mb_size = 0;
m_image_normalization_params.clear();
m_scalar_normalization_params.clear();
m_input_normalization_params.clear();
m_sample_list.clear();
m_list_per_trainer = false;
m_list_per_model = false;
}
void data_reader_jag_conduit::setup(int num_io_threads, std::shared_ptr<thread_pool> io_thread_pool) {
generic_data_reader::setup(num_io_threads, io_thread_pool);
replicate_processor(*m_master_pps, num_io_threads);
}
/// Replicate image processor for each I/O thread
bool data_reader_jag_conduit::replicate_processor(const cv_process& pp, const int nthreads) {
m_pps.resize(nthreads);
// Construct thread private preprocessing objects out of a shared pointer
for (int i = 0; i < nthreads; ++i) {
m_pps[i] = lbann::make_unique<cv_process>(pp);
}
bool ok = true;
for (int i = 0; ok && (i < nthreads); ++i) {
if (!m_pps[i]) ok = false;
}
if (!ok || (nthreads <= 0)) {
_THROW_LBANN_EXCEPTION_(get_type(), " cannot replicate image processor");
return false;
}
const std::vector<unsigned int> dims = pp.get_data_dims();
if ((dims.size() == 2u) && (dims[0] != 0u) && (dims[1] != 0u)) {
m_image_width = static_cast<int>(dims[0]);
m_image_height = static_cast<int>(dims[1]);
}
return true;
}
const conduit::Node& data_reader_jag_conduit::get_conduit_node(const conduit::Node& n_base, const std::string key) {
return n_base[key];
}
bool data_reader_jag_conduit::load_conduit_node(const size_t i, const std::string& key, conduit::Node& node) const {
const sample_t& s = m_sample_list[i];
const std::string& sample_name = s.second;
const std::string path = sample_name + key;
sample_file_id_t id = s.first;
hid_t h = m_sample_list.get_samples_hdf5_handle(id);
if (h <= static_cast<hid_t>(0) || !conduit::relay::io::hdf5_has_path(h, path)) {
const std::string& file_name = m_sample_list.get_samples_filename(id);
LBANN_ERROR(get_type() + ":: Cannot open file " + file_name + \
" for sample "+ sample_name);
return false;
}
conduit::relay::io::hdf5_read(h, path, node);
return true;
}
bool data_reader_jag_conduit::has_conduit_path(const size_t i, const std::string& key) const {
const sample_t& s = m_sample_list[i];
sample_file_id_t id = s.first;
const std::string& file_name = m_sample_list.get_samples_filename(id);
const std::string& sample_name = s.second;
const hid_t h = m_sample_list.get_samples_hdf5_handle(id);
const std::string path = sample_name + key;
if (h <= static_cast<hid_t>(0) || !conduit::relay::io::hdf5_has_path(h, path)) {
_THROW_LBANN_EXCEPTION_(get_type(), "Cannot open file " + file_name + \
" for sample "+ sample_name);
return false;
}
return conduit::relay::io::hdf5_has_path(h, std::string("/") + sample_name + key);
}
void data_reader_jag_conduit::set_independent_variable_type(
const std::vector< std::vector<data_reader_jag_conduit::variable_t> >& independent) {
m_independent_groups = independent;
m_independent.clear();
for (const auto& group: independent) {
for (const auto type: group) {
add_independent_variable_type(type);
}
}
}
void data_reader_jag_conduit::add_independent_variable_type(
const data_reader_jag_conduit::variable_t independent) {
if (!(independent == JAG_Image || independent == JAG_Scalar || independent == JAG_Input)) {
_THROW_LBANN_EXCEPTION_(_CN_, "unrecognized independent variable type ");
}
m_independent.push_back(independent);
}
void data_reader_jag_conduit::set_dependent_variable_type(
const std::vector< std::vector<data_reader_jag_conduit::variable_t> >& dependent) {
m_dependent_groups = dependent;
m_dependent.clear();
for (const auto& group: dependent) {
for (const auto type: group) {
add_dependent_variable_type(type);
}
}
}
void data_reader_jag_conduit::add_dependent_variable_type(
const data_reader_jag_conduit::variable_t dependent) {
if (!(dependent == JAG_Image || dependent == JAG_Scalar || dependent == JAG_Input)) {
_THROW_LBANN_EXCEPTION_(_CN_, "unrecognized dependent variable type ");
}
m_dependent.push_back(dependent);
}
std::vector<data_reader_jag_conduit::variable_t>
data_reader_jag_conduit::get_independent_variable_type() const {
return m_independent;
}
std::vector<data_reader_jag_conduit::variable_t>
data_reader_jag_conduit::get_dependent_variable_type() const {
return m_dependent;
}
void data_reader_jag_conduit::set_image_dims(const int width, const int height, const int ch) {
if ((width > 0) && (height > 0) && (ch > 0)) { // set and valid
m_image_width = width;
m_image_height = height;
m_image_num_channels = ch;
} else if (!((width == 0) && (height == 0) && (ch == 1))) { // set but not valid
_THROW_LBANN_EXCEPTION_(_CN_, "set_image_dims() : invalid image dims");
}
set_linearized_image_size();
}
void data_reader_jag_conduit::set_image_choices(const std::vector<std::string> image_keys) {
m_emi_image_keys = image_keys;
// For example, in the data reader prototext file, have a line similar to the one below
// image_keys: ["(0.0, 0.0)/0.0","(90.0, 0.0)/0.0","(90.0, 78.0)/0.0"];
m_num_img_srcs = m_emi_image_keys.size();
}
const std::vector<std::string>& data_reader_jag_conduit::get_image_choices() const {
return m_emi_image_keys;
}
void data_reader_jag_conduit::add_scalar_filter(const std::string& key) {
m_scalar_filter.insert(key);
}
void data_reader_jag_conduit::add_scalar_prefix_filter(const prefix_t& p) {
m_scalar_prefix_filter.push_back((p.first.length() > p.second)? prefix_t(p.first, p.first.length()) : p);
}
void data_reader_jag_conduit::add_input_filter(const std::string& key) {
m_input_filter.insert(key);
}
void data_reader_jag_conduit::add_input_prefix_filter(const prefix_t& p) {
m_input_prefix_filter.push_back((p.first.length() > p.second)? prefix_t(p.first, p.first.length()) : p);
}
/**
* First, it checks if the key is in the list of keys to filter.
* Then, it checks if the key contains any prefix string to filter
* while satisfying the minimum length requirement.
*/
bool data_reader_jag_conduit::filter(const std::set<std::string>& key_filter,
const std::vector<data_reader_jag_conduit::prefix_t>& prefix_filter, const std::string& key) const {
if (key_filter.find(key) != key_filter.cend()) {
return true;
}
for (const auto& pf: prefix_filter) {
if (key.length() < pf.second) { // minimum length requirement
continue;
}
if (key.compare(0, pf.first.length(), pf.first) == 0) { // match
return true;
}
}
return false;
}
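// Editorial sketch (not part of the original file): the two-step filtering described
// above, with made-up keys. A key is dropped either because it matches the key filter
// exactly, or because it starts with a registered prefix and meets that entry's
// minimum length requirement.
//
// reader.add_scalar_filter("iBT");                        // exact-match filter
// reader.add_scalar_prefix_filter(prefix_t("image_", 6)); // prefix + minimum length
// // filter(...) would then return true for "iBT" and "image_emission",
// // but false for "tMAXt" or for the short key "img".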
void data_reader_jag_conduit::set_scalar_choices(const std::vector<std::string>& keys) {
m_scalar_keys = keys;
check_scalar_keys();
}
void data_reader_jag_conduit::set_all_scalar_choices() {
if (m_sample_list.empty()) {
return;
}
conduit::Node n_scalar;
load_conduit_node(0, m_output_scalar_prefix, n_scalar);
m_scalar_keys.reserve(n_scalar.number_of_children());
const std::vector<std::string>& child_names = n_scalar.child_names();
for (const auto& key: child_names) {
if (filter(m_scalar_filter, m_scalar_prefix_filter, key)) {
continue;
}
m_scalar_keys.push_back(key);
}
}
const std::vector<std::string>& data_reader_jag_conduit::get_scalar_choices() const {
return m_scalar_keys;
}
/**
* To use no keys, set the corresponding variable type to 'Undefined',
* or call this with an empty vector argument after loading data.
*/
void data_reader_jag_conduit::set_input_choices(const std::vector<std::string>& keys) {
m_input_keys = keys;
check_input_keys();
}
void data_reader_jag_conduit::set_all_input_choices() {
//@TODO revisit later -- don't know how to handle this yet
if (m_data_store != nullptr) {
return;
}
if (m_sample_list.empty()) {
return;
}
conduit::Node n_input;
load_conduit_node(0, "/inputs", n_input);
m_input_keys.reserve(n_input.number_of_children());
const std::vector<std::string>& child_names = n_input.child_names();
for (const auto& key: child_names) {
if (filter(m_input_filter, m_input_prefix_filter, key)) {
continue;
}
m_input_keys.push_back(key);
}
}
const std::vector<std::string>& data_reader_jag_conduit::get_input_choices() const {
return m_input_keys;
}
void data_reader_jag_conduit::set_linearized_image_size() {
m_image_linearized_size = m_image_width * m_image_height * m_image_num_channels;
m_1ch_image_linearized_size = m_image_width * m_image_height;
}
void data_reader_jag_conduit::check_image_data() {
//@TODO revisit later -- don't know how to handle this yet
if (m_data_store != nullptr) {
return;
}
if (m_sample_list.empty()) {
return;
}
size_t first_idx = (m_sample_list[0]).first;
if (!has_conduit_path(first_idx, "")) {
_THROW_LBANN_EXCEPTION_(_CN_, "check_image_data() : no sample by " + m_sample_list[first_idx].second);
return;
}
conduit::Node n_imageset;
load_conduit_node(first_idx, m_output_image_prefix, n_imageset);
if (static_cast<size_t>(n_imageset.number_of_children()) == 0u) {
return;
}
if (m_emi_image_keys.size() == 0u) {
return;
}
for (const auto& emi_tag: m_emi_image_keys) {
if (!has_conduit_path(first_idx, m_output_image_prefix + emi_tag)) {
_THROW_LBANN_EXCEPTION_(_CN_, "check_image_data() : no emi image by " + emi_tag);
return;
}
}
conduit::Node n_image;
load_conduit_node(first_idx, m_output_image_prefix + m_emi_image_keys[0], n_image);
conduit_ch_t emi = n_image.value();
if (m_image_linearized_size != static_cast<size_t>(emi.number_of_elements())) {
if ((m_image_width == 0) && (m_image_height == 0)) {
m_image_height = 1;
m_image_width = static_cast<int>(emi.number_of_elements());
m_image_num_channels = 1;
set_linearized_image_size();
} else {
std::string msg = "expected linearized emi image size: "
+ std::to_string(emi.number_of_elements()) + '\n';
_THROW_LBANN_EXCEPTION_(_CN_, msg + get_description());
}
}
if (m_image_normalization_params.empty()) {
m_image_normalization_params.assign(m_emi_image_keys.size()*m_image_num_channels, linear_transform_t(1.0, 0.0));
} else if (m_image_normalization_params.size() != static_cast<size_t>(m_image_num_channels)) {
_THROW_LBANN_EXCEPTION_(_CN_, "Incorrect number of image normalization parameter sets!" \
+ std::to_string(m_image_normalization_params.size()) + " != " \
+ std::to_string(m_image_num_channels));
}
#if defined(LBANN_DEBUG)
std::cout << "image normalization parameters: " << std::endl;
for (size_t i = 0u, s = 0u; s < m_emi_image_keys.size(); ++s) {
for (int c = 0; c < m_image_num_channels; ++c) {
const auto& param = m_image_normalization_params[i*m_image_num_channels + c];
std::cout << " scale: \t" << param.first << " \tbias: \t" << param.second
<< " \t" << m_emi_image_keys[s] << ":C" << c << std::endl;
}
}
#endif
}
void data_reader_jag_conduit::check_scalar_keys() {
//@TODO revisit later -- don't know how to handle this yet
if (m_data_store != nullptr) {
return;
}
if (m_scalar_keys.empty()) {
return;
}
if (!m_is_data_loaded) {
return;
}
if (m_sample_list.empty()) {
//m_scalar_keys.clear();
return;
}
// If this call is made after loading data, check if the keys are in data
size_t num_found = 0u;
std::vector<bool> found(m_scalar_keys.size(), false);
std::set<std::string> keys_conduit;
conduit::Node n_scalar;
size_t first_idx = (m_sample_list[0]).first;
load_conduit_node(first_idx, m_output_scalar_prefix, n_scalar);
const std::vector<std::string>& child_names = n_scalar.child_names();
for (const auto& key: child_names) {
keys_conduit.insert(key);
}
for (size_t i=0u; i < m_scalar_keys.size(); ++i) {
std::set<std::string>::const_iterator it = keys_conduit.find(m_scalar_keys[i]);
if (it != keys_conduit.cend()) {
num_found ++;
found[i] = true;
}
}
if (num_found != m_scalar_keys.size()) {
std::string msg = "keys not found:";
for (size_t i=0u; i < m_scalar_keys.size(); ++i) {
if (!found[i]) {
msg += ' ' + m_scalar_keys[i];
}
}
_THROW_LBANN_EXCEPTION_(_CN_, "check_scalar_keys() : " + msg);
}
if (m_scalar_normalization_params.empty()) {
m_scalar_normalization_params.assign(m_scalar_keys.size(), linear_transform_t(1.0, 0.0));
} else if (m_scalar_normalization_params.size() != m_scalar_keys.size()) {
_THROW_LBANN_EXCEPTION_(_CN_, "Incorrect number of scalar normalization parameter sets! " \
+ std::to_string(m_scalar_normalization_params.size()) + " != " \
+ std::to_string(m_scalar_keys.size()));
}
#if defined(LBANN_DEBUG)
std::cout << "scalar normalization parameters: " << std::endl;
for (size_t i = 0u; i < m_scalar_normalization_params.size(); ++i) {
const auto& param = m_scalar_normalization_params[i];
std::cout << " scale: \t" << param.first << " \tbias: \t" << param.second << "\t " << m_scalar_keys[i] << std::endl;
}
#endif
}
void data_reader_jag_conduit::check_input_keys() {
//@TODO revisit later -- don't know how to handle this yet
if (m_data_store != nullptr) {
return;
}
if (m_input_keys.empty()) {
return;
}
if (!m_is_data_loaded) {
return;
}
if (m_sample_list.empty()) {
//m_input_keys.clear();
return;
}
// If this call is made after loading data, check if the keys
size_t num_found = 0u;
std::vector<bool> found(m_input_keys.size(), false);
std::map<std::string, TypeID> keys_conduit;
conduit::Node n_input;
size_t first_idx = (m_sample_list[0]).first;
load_conduit_node(first_idx, "/inputs", n_input);
conduit::NodeConstIterator itr = n_input.children();
while (itr.has_next()) {
const conduit::Node & n = itr.next();
keys_conduit.insert(std::pair<std::string, TypeID>(itr.name(), static_cast<TypeID>(n.dtype().id())));
}
bool is_input_t = true;
for (size_t i=0u; i < m_input_keys.size(); ++i) {
std::map<std::string, TypeID>::const_iterator it = keys_conduit.find(m_input_keys[i]);
if (it != keys_conduit.cend()) {
num_found ++;
found[i] = true;
is_input_t = is_input_t && is_same_type<input_t>(it->second);
}
}
if (num_found != m_input_keys.size()) {
std::string msg = "keys not found:";
for (size_t i=0u; i < m_input_keys.size(); ++i) {
if (!found[i]) {
msg += ' ' + m_input_keys[i];
}
}
_THROW_LBANN_EXCEPTION_(_CN_, "check_input_keys() : " + msg);
}
m_uniform_input_type = (m_input_keys.size() == 0u)? false : is_input_t;
if (m_input_normalization_params.empty()) {
m_input_normalization_params.assign(m_input_keys.size(), linear_transform_t(1.0, 0.0));
} else if (m_input_normalization_params.size() != m_input_keys.size()) {
_THROW_LBANN_EXCEPTION_(_CN_, "Incorrect number of input normalization parameter sets! " \
+ std::to_string(m_input_normalization_params.size()) + " != " \
+ std::to_string(m_input_keys.size()));
}
#if defined(LBANN_DEBUG)
std::cout << "input normalization parameters: " << std::endl;
for (size_t i = 0u; i < m_input_normalization_params.size(); ++i) {
const auto& param = m_input_normalization_params[i];
std::cout << " scale: \t" << param.first << " \tbias: \t" << param.second << " \t" << m_input_keys[i] << std::endl;
}
#endif
}
void data_reader_jag_conduit::load() {
if(m_gan_labelling) {
m_num_labels=2;
}
if (is_master()) {
std::cout << "JAG load GAN m_gan_labelling : label_value "
<< m_gan_labelling <<" : " << m_gan_label_value << std::endl;
}
if ((m_leading_reader != this) && (m_leading_reader != nullptr)) {
// The following member variables of the leading reader should have been
// copied when this was copy-constructed: m_sample_list, and m_open_hdf5_files
return;
}
m_shuffled_indices.clear();
const std::string data_dir = add_delimiter(get_file_dir());
const std::string sample_list_file = data_dir + get_data_index_list();
/// The use of these flags needs to be updated to properly separate
/// how index lists are used between trainers and models
/// @todo m_list_per_trainer || m_list_per_model
load_list_of_samples(sample_list_file, m_comm->get_procs_per_trainer(), m_comm->get_rank_in_trainer());
/// Check the data that each rank loaded
if (!m_is_data_loaded) {
m_is_data_loaded = true;
/// Open the first sample to make sure that all of the fields are correct
size_t data_id = (m_sample_list[0]).first;
m_sample_list.open_samples_hdf5_handle(data_id, true);
if (m_scalar_keys.size() == 0u) {
set_all_scalar_choices(); // use all by default if none is specified
}
check_scalar_keys();
if (m_input_keys.size() == 0u) {
set_all_input_choices(); // use all by default if none is specified
}
check_input_keys();
check_image_data();
m_sample_list.close_if_done_samples_hdf5_handle(data_id);
}
/// Merge all of the sample lists
m_sample_list.all_gather_packed_lists(*m_comm);
std::stringstream s;
std::string basename = get_basename_without_ext(sample_list_file);
std::string ext = get_ext_name(sample_list_file);
s << "r" << m_comm->get_rank_in_trainer() << "_per_rank_" << basename << "." << ext;
m_sample_list.write(s.str());
m_shuffled_indices.resize(m_sample_list.size());
std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0);
select_subset_of_data();
}
void data_reader_jag_conduit::load_list_of_samples(const std::string sample_list_file, size_t stride, size_t offset) {
// load the sample list
double tm1 = get_time();
m_sample_list.load(sample_list_file, stride, offset);
double tm2 = get_time();
if (is_master()) {
std::cout << "Time to load sample list: " << tm2 - tm1 << std::endl;
}
}
void data_reader_jag_conduit::load_list_of_samples_from_archive(const std::string& sample_list_archive) {
// load the sample list
double tm1 = get_time();
std::stringstream ss(sample_list_archive); // any stream can be used
cereal::BinaryInputArchive iarchive(ss); // Create an input archive
iarchive(m_sample_list); // Read the data from the archive
double tm2 = get_time();
if (is_master()) {
std::cout << "Time to load sample list from archive: " << tm2 - tm1 << std::endl;
}
}
unsigned int data_reader_jag_conduit::get_num_img_srcs() const {
return m_num_img_srcs;
}
size_t data_reader_jag_conduit::get_linearized_image_size() const {
return m_image_linearized_size;
}
size_t data_reader_jag_conduit::get_linearized_1ch_image_size() const {
return m_1ch_image_linearized_size;
}
size_t data_reader_jag_conduit::get_linearized_scalar_size() const {
return m_scalar_keys.size();
}
size_t data_reader_jag_conduit::get_linearized_input_size() const {
return m_input_keys.size();
}
size_t data_reader_jag_conduit::get_linearized_size(const data_reader_jag_conduit::variable_t t) const {
switch (t) {
case JAG_Image:
return get_linearized_image_size() * get_num_img_srcs();
case JAG_Scalar:
return get_linearized_scalar_size();
case JAG_Input:
return get_linearized_input_size();
default: { // includes Undefined case
_THROW_LBANN_EXCEPTION2_(_CN_, "get_linearized_size() : ", \
"unknown or undefined variable type");
}
}
return 0u;
}
int data_reader_jag_conduit::get_linearized_data_size() const {
size_t sz = 0u;
for (const auto t: m_independent) {
sz += get_linearized_size(t);
}
return static_cast<int>(sz);
}
int data_reader_jag_conduit::get_linearized_response_size() const {
size_t sz = 0u;
for (const auto t: m_dependent) {
sz += get_linearized_size(t);
}
return static_cast<int>(sz);
}
std::vector<size_t> data_reader_jag_conduit::get_linearized_data_sizes() const {
std::vector<size_t> all_dim;
all_dim.reserve(m_independent.size());
for (const auto t: m_independent) {
all_dim.push_back(get_linearized_size(t));
}
if (all_dim.empty()) {
return {0u};
}
return all_dim;
}
std::vector<size_t> data_reader_jag_conduit::get_linearized_response_sizes() const {
std::vector<size_t> all_dim;
all_dim.reserve(m_dependent.size());
for (const auto t: m_dependent) {
all_dim.push_back(get_linearized_size(t));
}
if (all_dim.empty()) {
return {0u};
}
return all_dim;
}
const std::vector<int> data_reader_jag_conduit::get_dims(const data_reader_jag_conduit::variable_t t) const {
switch (t) {
case JAG_Image:
return {static_cast<int>(get_num_img_srcs()), m_image_height, m_image_width};
//return {static_cast<int>(get_linearized_image_size())};
case JAG_Scalar:
return {static_cast<int>(get_linearized_scalar_size())};
case JAG_Input:
return {static_cast<int>(get_linearized_input_size())};
default: { // includes Undefined case
_THROW_LBANN_EXCEPTION2_(_CN_, "get_dims() : ", \
"unknown or undefined variable type");
}
}
return {};
}
const std::vector<int> data_reader_jag_conduit::get_data_dims() const {
#if 1
return {get_linearized_data_size()};
#else
std::vector<int> all_dim;
for (const auto t: m_independent) {
const std::vector<int> ld = get_dims(t);
all_dim.insert(all_dim.end(), ld.begin(), ld.end());
}
if (all_dim.empty()) {
return {0u};
}
return all_dim;
#endif
}
std::vector<El::Int> data_reader_jag_conduit::get_slice_points(const std::vector< std::vector<data_reader_jag_conduit::variable_t> >& var) const {
std::vector<El::Int> points(var.size()+1u, static_cast<El::Int>(0));
for (size_t i = 0u; i < var.size(); ++i) {
const auto& group = var[i];
size_t size = 0u;
for (const auto type: group) {
size += get_linearized_size(type);
}
points[i+1] = points[i] + static_cast<El::Int>(size);
}
return points;
}
std::vector<El::Int> data_reader_jag_conduit::get_slice_points_independent() const {
return get_slice_points(m_independent_groups);
}
std::vector<El::Int> data_reader_jag_conduit::get_slice_points_dependent() const {
return get_slice_points(m_dependent_groups);
}
int data_reader_jag_conduit::get_num_data() const {
return (int)m_shuffled_indices.size();
}
int data_reader_jag_conduit::get_num_labels() const {
return m_num_labels;
}
int data_reader_jag_conduit::get_linearized_label_size() const {
return m_num_labels;
}
int data_reader_jag_conduit::get_linearized_size(const std::string& desc) const {
if (desc == "JAG_Image") {
return get_linearized_size(JAG_Image);
} else if (desc == "JAG_Scalar") {
return get_linearized_size(JAG_Scalar);
} else if (desc == "JAG_Input") {
return get_linearized_size(JAG_Input);
} else {
_THROW_LBANN_EXCEPTION_(_CN_, "get_linearized_size() : unknown key " + desc);
}
return generic_data_reader::get_linearized_size(desc);
}
void data_reader_jag_conduit::set_split_image_channels() {
m_split_channels = true;
}
void data_reader_jag_conduit::unset_split_image_channels() {
m_split_channels = false;
}
bool data_reader_jag_conduit::check_split_image_channels() const {
return m_split_channels;
}
std::string data_reader_jag_conduit::to_string(const variable_t t) {
switch (t) {
case Undefined: return "Undefined";
case JAG_Image: return "JAG_Image";
case JAG_Scalar: return "JAG_Scalar";
case JAG_Input: return "JAG_Input";
}
return "Undefined";
}
std::string data_reader_jag_conduit::to_string(const std::vector<data_reader_jag_conduit::variable_t>& vec) {
std::string str("[");
for (const auto& el: vec) {
str += ' ' + data_reader_jag_conduit::to_string(el);
}
str += " ]";
return str;
}
std::string data_reader_jag_conduit::to_string(const std::vector< std::vector<data_reader_jag_conduit::variable_t> >& vec) {
std::string str("[");
for (const auto& el: vec) {
str += ' ' + data_reader_jag_conduit::to_string(el);
}
str += " ]";
return str;
}
std::string data_reader_jag_conduit::get_description() const {
std::stringstream leading_reader;
leading_reader << m_leading_reader;
std::string ret = std::string("data_reader_jag_conduit:\n")
+ " - independent: " + data_reader_jag_conduit::to_string(m_independent_groups) + "\n"
+ " - dependent: " + data_reader_jag_conduit::to_string(m_dependent_groups) + "\n"
+ " - images: " + std::to_string(m_num_img_srcs) + " of "
+ std::to_string(m_image_num_channels) + 'x'
+ std::to_string(m_image_width) + 'x'
+ std::to_string(m_image_height) + "\n"
+ " - scalars: " + std::to_string(get_linearized_scalar_size()) + "\n"
+ " - inputs: " + std::to_string(get_linearized_input_size()) + "\n"
+ " - linearized data size: " + std::to_string(get_linearized_data_size()) + "\n"
+ " - uniform_input_type: " + (m_uniform_input_type? "true" : "false") + "\n"
+ " - leading DR: " + (m_leading_reader == this ? "true" : "false")
+ " (ptr=" + leading_reader.str() + ")\n";
if (!m_scalar_filter.empty()) {
ret += " - scalar filter:";
for (const auto& f: m_scalar_filter) {
ret += " \"" + f + '"';
}
ret += '\n';
}
if (!m_scalar_prefix_filter.empty()) {
ret += " - scalar prefix filter:";
for (const auto& f: m_scalar_prefix_filter) {
ret += " [\"" + f.first + "\" " + std::to_string(f.second) + ']';
}
ret += '\n';
}
if (!m_input_filter.empty()) {
ret += " - input filter:";
for (const auto& f: m_input_filter) {
ret += " \"" + f + '"';
}
ret += '\n';
}
if (!m_input_prefix_filter.empty()) {
ret += " - input prefix filter:";
for (const auto& f: m_input_prefix_filter) {
ret += " [\"" + f.first + "\" " + std::to_string(f.second) + ']';
}
ret += '\n';
}
return ret;
}
bool data_reader_jag_conduit::check_non_numeric(const std::string key) {
std::set<std::string>::const_iterator kit = non_numeric_vars.find(key);
if (kit != non_numeric_vars.cend()) {
std::string err = "data_reader_jag_conduit::add_val() : non-numeric '" + key
+ "' requires a conversion method.";
#if 1
std::cerr << err << " Skipping for now." << std::endl;
#else
throw lbann_exception(err);
#endif
return true;
}
return false;
}
std::vector< std::vector<data_reader_jag_conduit::ch_t> >
data_reader_jag_conduit::get_image_data(const size_t sample_id, conduit::Node& sample) const {
std::vector< std::vector<ch_t> > image_ptrs;
image_ptrs.reserve(m_emi_image_keys.size());
for (const auto& emi_tag : m_emi_image_keys) {
const std::string conduit_field = m_output_image_prefix + emi_tag;
const std::string conduit_obj = '/' + pad(std::to_string(sample_id), SAMPLE_ID_PAD, '0') + '/' + conduit_field;
if(sample[conduit_obj].schema().dtype().is_empty()) {
if (data_store_active()) {
LBANN_ERROR("Unable to find field " + conduit_obj
+ " in conduit node: " + std::to_string(sample_id));
}
conduit::Node n_image;
load_conduit_node(sample_id, conduit_field, n_image);
sample[conduit_obj].set(n_image);
}
conduit_ch_t emi = sample[conduit_obj].value();
const size_t num_vals = emi.number_of_elements();
const ch_t* emi_data = sample[conduit_obj].value();
image_ptrs.emplace_back(emi_data, emi_data + num_vals);
}
return image_ptrs;
}
cv::Mat data_reader_jag_conduit::cast_to_cvMat(
const std::pair<size_t, const ch_t*> img, const int height, const int num_ch) {
const int num_pixels = static_cast<int>(img.first);
const ch_t* ptr = img.second;
// add a zero copying view to data
using InputBuf_T = cv_image_type<ch_t>;
const cv::Mat image(num_pixels, 1, InputBuf_T::T(1u),
reinterpret_cast<void*>(const_cast<ch_t*>(ptr)));
// reshape the image. Further, we need to clone (deep-copy) the image
// to preserve the constness of the original data
return (image.reshape(num_ch, height));
}
/// Assumes the same parameters for the same channel from different views
void data_reader_jag_conduit::image_normalization(cv::Mat& img, size_t i, size_t ch) const {
const auto& tr = m_image_normalization_params.at(ch);
img.convertTo(img, -1, tr.first, tr.second);
}
std::vector<cv::Mat> data_reader_jag_conduit::get_cv_images(const size_t sample_id, conduit::Node& sample) const {
const std::vector< std::vector<ch_t> > img_data(get_image_data(sample_id, sample));
std::vector<cv::Mat> images;
if (m_split_channels) {
images.reserve(img_data.size()*m_image_num_channels);
for (size_t i = 0u; i < img_data.size(); ++i) {
const auto& img = img_data[i];
cv::Mat ch[m_image_num_channels];
cv::split(cast_to_cvMat(std::make_pair(img.size(), img.data()), m_image_height, m_image_num_channels), ch);
for(int c = 0; c < m_image_num_channels; ++c) {
#if 1 // with normalization
image_normalization(ch[c], i, static_cast<size_t>(c));
#endif
images.emplace_back(ch[c].clone());
}
}
} else {
images.reserve(img_data.size());
for (size_t i = 0u; i < img_data.size(); ++i) {
const auto& img = img_data[i];
#if 1 // with normalization
cv::Mat ch[m_image_num_channels];
cv::split(cast_to_cvMat(std::make_pair(img.size(), img.data()), m_image_height, m_image_num_channels), ch);
for(int c = 0; c < m_image_num_channels; ++c) {
image_normalization(ch[c], i, static_cast<size_t>(c));
}
cv::Mat img_normalized;
cv::merge(ch, m_image_num_channels, img_normalized);
images.emplace_back(img_normalized);
#else
images.emplace_back(cast_to_cvMat(std::make_pair(img.size(), img.data()), m_image_height, m_image_num_channels).clone());
#endif
}
}
return images;
}
std::vector<data_reader_jag_conduit::ch_t> data_reader_jag_conduit::get_images(const size_t sample_id, conduit::Node& sample) const {
std::vector< std::vector<ch_t> > img_data(get_image_data(sample_id, sample));
std::vector<ch_t> images;
if (m_split_channels) {
images.resize(get_linearized_size(JAG_Image));
size_t i = 0u;
size_t j = 0u;
for (const auto& img: img_data) {
const ch_t * const ptr_end = img.data() + img.size();
for (int c=0; c < m_image_num_channels; ++c) {
const auto& tr = m_image_normalization_params.at(c);
for (const ch_t* ptr = img.data() + c; ptr < ptr_end; ptr += m_image_num_channels) {
#if 1 // with normalization
images[i++] = cv::saturate_cast<ch_t>(*ptr * tr.first + tr.second);
#else
images[i++] = *ptr;
#endif
}
}
j ++;
}
} else {
images.reserve(get_linearized_size(JAG_Image));
for (const auto& img: img_data) {
#if 1 // with normalization
// TODO: normalization needed
_THROW_LBANN_EXCEPTION_(_CN_, "get_images() : normalization not implemented yet");
(void) img;
#else
images.insert(images.end(), img.cbegin(), img.cend());
#endif
}
}
return images;
}
std::vector<data_reader_jag_conduit::scalar_t> data_reader_jag_conduit::get_scalars(const size_t sample_id, conduit::Node& sample) const {
std::vector<scalar_t> scalars;
scalars.reserve(m_scalar_keys.size());
auto tr = m_scalar_normalization_params.cbegin();
for(const auto key: m_scalar_keys) {
std::string conduit_field = m_output_scalar_prefix + key;
std::string conduit_obj = '/' + pad(std::to_string(sample_id), SAMPLE_ID_PAD, '0') + '/' + conduit_field;
if(sample[conduit_obj].schema().dtype().is_empty()) {
if (data_store_active()) {
LBANN_ERROR("Unable to find field " + conduit_obj
+ " in conduit node: " + std::to_string(sample_id));
}
conduit::Node n_scalar;
load_conduit_node(sample_id, conduit_field, n_scalar);
sample[conduit_obj].set(n_scalar);
}
const scalar_t val_raw = static_cast<scalar_t>(sample[conduit_obj].to_value());
const scalar_t val = static_cast<scalar_t>(val_raw * tr->first + tr->second);
scalars.push_back(val);
tr ++;
}
return scalars;
}
std::vector<data_reader_jag_conduit::input_t> data_reader_jag_conduit::get_inputs(const size_t sample_id, conduit::Node& sample) const {
std::vector<input_t> inputs;
inputs.reserve(m_input_keys.size());
// The sequence of normalization parameters should follow the same order as
// that of the variable keys.
auto tr = m_input_normalization_params.cbegin();
// automatically determine which method to use based on if all the variables are of input_t
if (m_uniform_input_type) {
// avoid some overhead by taking advantage of the fact that all the variables are of the same type
for(const auto key: m_input_keys) {
const std::string conduit_field = m_input_prefix + key;
const std::string conduit_obj = '/' + pad(std::to_string(sample_id), SAMPLE_ID_PAD, '0') + '/' + conduit_field;
if(sample[conduit_obj].schema().dtype().is_empty()) {
if (data_store_active()) {
LBANN_ERROR("Unable to find field " + conduit_obj
+ " in conduit node: " + std::to_string(sample_id));
}
conduit::Node n_input;
load_conduit_node(sample_id, conduit_field, n_input);
sample[conduit_obj].set(n_input);
}
const input_t val_raw = static_cast<input_t>(sample[conduit_obj].value());
const input_t val = static_cast<input_t>(val_raw * tr->first + tr->second);
inputs.push_back(val);
tr ++;
}
} else {
for(const auto key: m_input_keys) {
const std::string conduit_field = m_input_prefix + key;
const std::string conduit_obj = '/' + pad(std::to_string(sample_id), SAMPLE_ID_PAD, '0') + '/' + conduit_field;
if(sample[conduit_obj].schema().dtype().is_empty()) {
if (data_store_active()) {
LBANN_ERROR("Unable to find field " + conduit_obj
+ " in conduit node: " + std::to_string(sample_id));
}
conduit::Node n_input;
load_conduit_node(sample_id, conduit_field, n_input);
sample[conduit_obj].set(n_input);
}
add_val(key, sample[conduit_obj], inputs); // more overhead but general
input_t& val = inputs.back();
val = static_cast<input_t>(val * tr->first + tr->second);
tr ++;
}
}
return inputs;
}
std::vector<CPUMat>
data_reader_jag_conduit::create_datum_views(CPUMat& X, const std::vector<size_t>& sizes, const int mb_idx) const {
std::vector<CPUMat> X_v(sizes.size());
El::Int h = 0;
for(size_t i=0u; i < sizes.size(); ++i) {
const El::Int h_end = h + static_cast<El::Int>(sizes[i]);
El::View(X_v[i], X, El::IR(h, h_end), El::IR(mb_idx, mb_idx + 1));
h = h_end;
}
return X_v;
}
bool data_reader_jag_conduit::fetch(CPUMat& X, int data_id, conduit::Node& sample, int mb_idx, int tid,
const data_reader_jag_conduit::variable_t vt, const std::string tag) {
switch (vt) {
case JAG_Image: {
const size_t num_images = get_num_img_srcs()
* static_cast<size_t>(m_split_channels? m_image_num_channels : 1u);
const size_t image_size = m_split_channels? get_linearized_1ch_image_size() : get_linearized_image_size();
const std::vector<size_t> sizes(num_images, image_size);
std::vector<CPUMat> X_v = create_datum_views(X, sizes, mb_idx);
std::vector<cv::Mat> images = get_cv_images(data_id, sample);
if (images.size() != num_images) {
_THROW_LBANN_EXCEPTION2_(_CN_, "fetch() : the number of images is not as expected", \
std::to_string(images.size()) + "!=" + std::to_string(num_images));
}
for(size_t i=0u; i < num_images; ++i) {
int width, height, img_type;
image_utils::process_image(images[i], width, height, img_type, *(m_pps[tid]), X_v[i]);
}
break;
}
case JAG_Scalar: {
const std::vector<scalar_t> scalars(get_scalars(data_id, sample));
set_minibatch_item<scalar_t>(X, mb_idx, scalars.data(), get_linearized_scalar_size());
break;
}
case JAG_Input: {
const std::vector<input_t> inputs(get_inputs(data_id, sample));
set_minibatch_item<input_t>(X, mb_idx, inputs.data(), get_linearized_input_size());
break;
}
default: { // includes Undefined case
_THROW_LBANN_EXCEPTION_(_CN_, "fetch_" + tag + "() : unknown or undefined variable type");
}
}
return true;
}
int data_reader_jag_conduit::reuse_data(CPUMat& X) {
El::Copy(m_data_cache, X);
return m_cached_data_mb_size;
}
int data_reader_jag_conduit::reuse_responses(CPUMat& Y) {
El::Copy(m_response_cache, Y);
return m_cached_response_mb_size;
}
int data_reader_jag_conduit::reuse_labels(CPUMat& Y) {
El::Copy(m_label_cache, Y);
return m_cached_label_mb_size;
}
int data_reader_jag_conduit::fetch_data(CPUMat& X, El::Matrix<El::Int>& indices_fetched) {
if ((m_leading_reader != this) && (m_leading_reader != nullptr)) {
return m_leading_reader->reuse_data(X);
}
m_cached_data_mb_size = generic_data_reader::fetch_data(X, indices_fetched);
El::Copy(X, m_data_cache);
return m_cached_data_mb_size;
}
int data_reader_jag_conduit::fetch_responses(CPUMat& Y) {
if ((m_leading_reader != this) && (m_leading_reader != nullptr)) {
return m_leading_reader->reuse_responses(Y);
}
m_cached_response_mb_size = generic_data_reader::fetch_responses(Y);
El::Copy(Y, m_response_cache);
return m_cached_response_mb_size;
}
int data_reader_jag_conduit::fetch_labels(CPUMat& Y) {
if ((m_leading_reader != this) && (m_leading_reader != nullptr)) {
return m_leading_reader->reuse_labels(Y);
}
m_cached_label_mb_size = generic_data_reader::fetch_labels(Y);
El::Copy(Y, m_label_cache);
return m_cached_label_mb_size;
}
bool data_reader_jag_conduit::fetch_datum(CPUMat& X, int data_id, int mb_idx) {
int tid = m_io_thread_pool->get_local_thread_id();
std::vector<size_t> sizes = get_linearized_data_sizes();
std::vector<CPUMat> X_v = create_datum_views(X, sizes, mb_idx);
bool ok = true;
// Create a node to hold all of the data
conduit::Node node;
if (data_store_active()) {
const conduit::Node& ds_node = m_jag_store->get_conduit_node(data_id);
node.set_external(ds_node);
}else {
m_sample_list.open_samples_hdf5_handle(data_id);
}
for(size_t i = 0u; ok && (i < X_v.size()); ++i) {
// The third argument mb_idx below is 0 because it is for the view of X not X itself
ok = fetch(X_v[i], data_id, node, 0, tid, m_independent[i], "datum");
}
if (priming_data_store()) {
// Once the node has been populated save it in the data store
m_jag_store->set_conduit_node(data_id, node);
}
m_sample_list.close_if_done_samples_hdf5_handle(data_id);
return ok;
}
bool data_reader_jag_conduit::fetch_response(CPUMat& X, int data_id, int mb_idx) {
int tid = m_io_thread_pool->get_local_thread_id();
std::vector<size_t> sizes = get_linearized_response_sizes();
std::vector<CPUMat> X_v = create_datum_views(X, sizes, mb_idx);
bool ok = true;
// Create a node to hold all of the data
conduit::Node node;
if (m_jag_store != nullptr && m_model->get_epoch() > 0) {
const conduit::Node& ds_node = m_jag_store->get_conduit_node(data_id);
node.set_external(ds_node);
}
for(size_t i = 0u; ok && (i < X_v.size()); ++i) {
ok = fetch(X_v[i], data_id, node, 0, tid, m_dependent[i], "response");
}
if (m_jag_store != nullptr && m_model->get_epoch() == 0) {
// Once the node has been populated save it in the data store
if (m_jag_store != nullptr) {
m_jag_store->set_conduit_node(data_id, node);
}
}
return ok;
}
bool data_reader_jag_conduit::fetch_label(CPUMat& Y, int data_id, int mb_idx) {
if(m_gan_label_value) Y.Set(m_gan_label_value,mb_idx,1); //fake sample is set to 1; adversarial model
else { // fake sample (second half of minibatch) is set to 0; discriminator model
//mb_idx < (m_mb_size/2) ? Y.Set(1,mb_idx,1) : Y.Set(m_gan_label_value,mb_idx,1);
mb_idx < (get_current_mini_batch_size()/2) ? Y.Set(1,mb_idx,1) : Y.Set(m_gan_label_value,mb_idx,1);
}
//Y.Set(m_gan_label_value, mb_idx, 1);
return true;
}
void data_reader_jag_conduit::setup_data_store(model *m, int mini_batch_size) {
if (m_data_store != nullptr) {
delete m_data_store;
}
m_jag_store = new data_store_jag(this, m); // *data_store_jag
m_data_store = m_jag_store; // *generic_data_store
m_data_store->setup(mini_batch_size);
}
void data_reader_jag_conduit::save_image(Mat& pixels, const std::string filename, bool do_scale) {
internal_save_image(pixels, filename, m_image_height, m_image_width, 1, do_scale);
}
void data_reader_jag_conduit::print_schema(const size_t sample_id) const {
//@TODO revisit later -- don't know how to handle this yet
if (m_data_store != nullptr) {
return;
}
conduit::Node n;
load_conduit_node(sample_id, "", n);
n.schema().print();
}
void data_reader_jag_conduit::clear_image_normalization_params() {
m_image_normalization_params.clear();
}
void data_reader_jag_conduit::clear_scalar_normalization_params() {
m_scalar_normalization_params.clear();
}
void data_reader_jag_conduit::clear_input_normalization_params() {
m_input_normalization_params.clear();
}
void data_reader_jag_conduit::add_image_normalization_param(const data_reader_jag_conduit::linear_transform_t& t) {
m_image_normalization_params.push_back(t);
}
void data_reader_jag_conduit::add_scalar_normalization_param(const data_reader_jag_conduit::linear_transform_t& t) {
m_scalar_normalization_params.push_back(t);
}
void data_reader_jag_conduit::add_input_normalization_param(const data_reader_jag_conduit::linear_transform_t& t) {
m_input_normalization_params.push_back(t);
}
} // end of namespace lbann
#undef _CN_
#endif // LBANN_HAS_CONDUIT
| 1 | 14,066 | I think that you need something like `m_using_random_node.emplace(m_io_thread_pool->get_local_thread_id());` | LLNL-lbann | cpp |
@@ -175,6 +175,9 @@ public class UnusedAssignmentRule extends AbstractJavaRule {
if (isIgnorablePrefixIncrement(entry.rhs)) {
continue;
}
+ if (isUsedLocalVarWithoutInitializer(entry, result.usedVariables)) {
+ continue;
+ }
Set<AssignmentEntry> killers = result.killRecord.get(entry);
final String reason; | 1 | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.rule.bestpractices;
import static net.sourceforge.pmd.lang.java.rule.codestyle.ConfusingTernaryRule.unwrapParentheses;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import net.sourceforge.pmd.RuleContext;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.java.ast.ASTAllocationExpression;
import net.sourceforge.pmd.lang.java.ast.ASTAnyTypeBodyDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTAnyTypeDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTAssignmentOperator;
import net.sourceforge.pmd.lang.java.ast.ASTBlock;
import net.sourceforge.pmd.lang.java.ast.ASTBlockStatement;
import net.sourceforge.pmd.lang.java.ast.ASTBreakStatement;
import net.sourceforge.pmd.lang.java.ast.ASTCatchStatement;
import net.sourceforge.pmd.lang.java.ast.ASTClassOrInterfaceBody;
import net.sourceforge.pmd.lang.java.ast.ASTCompilationUnit;
import net.sourceforge.pmd.lang.java.ast.ASTConditionalAndExpression;
import net.sourceforge.pmd.lang.java.ast.ASTConditionalExpression;
import net.sourceforge.pmd.lang.java.ast.ASTConditionalOrExpression;
import net.sourceforge.pmd.lang.java.ast.ASTConstructorDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTContinueStatement;
import net.sourceforge.pmd.lang.java.ast.ASTDoStatement;
import net.sourceforge.pmd.lang.java.ast.ASTEnumBody;
import net.sourceforge.pmd.lang.java.ast.ASTExpression;
import net.sourceforge.pmd.lang.java.ast.ASTFieldDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTFinallyStatement;
import net.sourceforge.pmd.lang.java.ast.ASTForInit;
import net.sourceforge.pmd.lang.java.ast.ASTForStatement;
import net.sourceforge.pmd.lang.java.ast.ASTForUpdate;
import net.sourceforge.pmd.lang.java.ast.ASTFormalParameter;
import net.sourceforge.pmd.lang.java.ast.ASTIfStatement;
import net.sourceforge.pmd.lang.java.ast.ASTInitializer;
import net.sourceforge.pmd.lang.java.ast.ASTLabeledStatement;
import net.sourceforge.pmd.lang.java.ast.ASTLambdaExpression;
import net.sourceforge.pmd.lang.java.ast.ASTLocalVariableDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTMethodDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTName;
import net.sourceforge.pmd.lang.java.ast.ASTPostfixExpression;
import net.sourceforge.pmd.lang.java.ast.ASTPreDecrementExpression;
import net.sourceforge.pmd.lang.java.ast.ASTPreIncrementExpression;
import net.sourceforge.pmd.lang.java.ast.ASTPrimaryExpression;
import net.sourceforge.pmd.lang.java.ast.ASTPrimaryPrefix;
import net.sourceforge.pmd.lang.java.ast.ASTPrimarySuffix;
import net.sourceforge.pmd.lang.java.ast.ASTResourceSpecification;
import net.sourceforge.pmd.lang.java.ast.ASTReturnStatement;
import net.sourceforge.pmd.lang.java.ast.ASTStatement;
import net.sourceforge.pmd.lang.java.ast.ASTStatementExpression;
import net.sourceforge.pmd.lang.java.ast.ASTSwitchExpression;
import net.sourceforge.pmd.lang.java.ast.ASTSwitchLabel;
import net.sourceforge.pmd.lang.java.ast.ASTSwitchLabeledRule;
import net.sourceforge.pmd.lang.java.ast.ASTSwitchStatement;
import net.sourceforge.pmd.lang.java.ast.ASTThrowStatement;
import net.sourceforge.pmd.lang.java.ast.ASTTryStatement;
import net.sourceforge.pmd.lang.java.ast.ASTTypeDeclaration;
import net.sourceforge.pmd.lang.java.ast.ASTVariableDeclarator;
import net.sourceforge.pmd.lang.java.ast.ASTVariableDeclaratorId;
import net.sourceforge.pmd.lang.java.ast.ASTVariableInitializer;
import net.sourceforge.pmd.lang.java.ast.ASTWhileStatement;
import net.sourceforge.pmd.lang.java.ast.ASTYieldStatement;
import net.sourceforge.pmd.lang.java.ast.JavaNode;
import net.sourceforge.pmd.lang.java.ast.JavaParserVisitorAdapter;
import net.sourceforge.pmd.lang.java.rule.AbstractJavaRule;
import net.sourceforge.pmd.lang.java.rule.internal.JavaRuleUtil;
import net.sourceforge.pmd.lang.java.symboltable.ClassScope;
import net.sourceforge.pmd.lang.java.symboltable.VariableNameDeclaration;
import net.sourceforge.pmd.lang.symboltable.Scope;
import net.sourceforge.pmd.properties.PropertyDescriptor;
import net.sourceforge.pmd.properties.PropertyFactory;
public class UnusedAssignmentRule extends AbstractJavaRule {
/*
Detects unused assignments. This performs a reaching definition
analysis. This makes the assumption that there is no dead code.
Since we have the reaching definitions at each variable usage, we
could also use that to detect other kinds of bugs, e.g. conditions
that are always true, or dereferences that will always NPE. In
the general case though, this is complicated and better left to
a DFA library, eg google Z3.
This analysis may be used as-is to detect switch labels that
fall-through, which could be useful to improve accuracy of other
rules.
TODO
* labels on arbitrary statements (currently only loops)
* explicit ctor call (hard to impossible without type res,
or at least proper graph algorithms like toposort)
-> this is pretty invisible as it causes false negatives, not FPs
* test ternary expr
DONE
* conditionals
* loops
* switch
* loop labels
* try/catch/finally
* lambdas
* constructors + initializers
* anon class
* test this.field in ctors
* foreach var should be reassigned from one iter to another
* test local class/anonymous class
* shortcut conditionals have their own control-flow
* parenthesized expressions
* conditional exprs in loops
* ignore variables that start with 'ignore'
* ignore params of native methods
* ignore params of abstract methods
*/
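    // A minimal sketch (hypothetical snippet, not taken from this codebase) of the
    // two kinds of anomaly the analysis reports:
    //
    //   int x = computeA();   // "DD": overwritten on the next line before any read
    //   x = computeB();
    //   int y = computeC();   // "DU": never read before going out of scope
    //   return x;
    //
    // computeA, computeB and computeC are placeholder method names.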
private static final PropertyDescriptor<Boolean> CHECK_PREFIX_INCREMENT =
PropertyFactory.booleanProperty("checkUnusedPrefixIncrement")
.desc("Report expressions like ++i that may be replaced with (i + 1)")
.defaultValue(false)
.build();
private static final PropertyDescriptor<Boolean> REPORT_UNUSED_VARS =
PropertyFactory.booleanProperty("reportUnusedVariables")
.desc("Report variables that are only initialized, and never read at all. "
+ "The rule UnusedVariable already cares for that, but you can enable it if needed")
.defaultValue(false)
.build();
public UnusedAssignmentRule() {
definePropertyDescriptor(CHECK_PREFIX_INCREMENT);
definePropertyDescriptor(REPORT_UNUSED_VARS);
addRuleChainVisit(ASTCompilationUnit.class);
}
@Override
public Object visit(ASTCompilationUnit node, Object data) {
for (JavaNode child : node.children()) {
if (child instanceof ASTTypeDeclaration) {
ASTAnyTypeDeclaration typeDecl = (ASTAnyTypeDeclaration) child.getChild(child.getNumChildren() - 1);
GlobalAlgoState result = new GlobalAlgoState();
typeDecl.jjtAccept(ReachingDefsVisitor.ONLY_LOCALS, new SpanInfo(result));
reportFinished(result, (RuleContext) data);
}
}
return data;
}
private void reportFinished(GlobalAlgoState result, RuleContext ruleCtx) {
if (result.usedAssignments.size() < result.allAssignments.size()) {
Set<AssignmentEntry> unused = result.allAssignments;
// note that this mutates allAssignments, so the global
// state is unusable after this
unused.removeAll(result.usedAssignments);
for (AssignmentEntry entry : unused) {
if (isIgnorablePrefixIncrement(entry.rhs)) {
continue;
}
Set<AssignmentEntry> killers = result.killRecord.get(entry);
final String reason;
if (killers == null || killers.isEmpty()) {
// var went out of scope before being used (no assignment kills it, yet it's unused)
if (entry.var.isField()) {
// assignments to fields don't really go out of scope
continue;
} else if (suppressUnusedVariableRuleOverlap(entry)) {
// see REPORT_UNUSED_VARS property
continue;
}
// This is a "DU" anomaly, the others are "DD"
reason = null;
} else if (killers.size() == 1) {
AssignmentEntry k = killers.iterator().next();
if (k.rhs.equals(entry.rhs)) {
// assignment reassigns itself, only possible in a loop
if (suppressUnusedVariableRuleOverlap(entry)) {
continue;
} else if (entry.rhs instanceof ASTVariableDeclaratorId) {
reason = null; // unused foreach variable
} else {
reason = "reassigned every iteration";
}
} else {
reason = "overwritten on line " + k.rhs.getBeginLine();
}
} else {
reason = joinLines("overwritten on lines ", killers);
}
if (reason == null && JavaRuleUtil.isExplicitUnusedVarName(entry.var.getName())) {
// Then the variable is never used (cf UnusedVariable)
// We ignore those that start with "ignored", as that is standard
// practice for exceptions, and may be useful for resources/foreach vars
continue;
}
addViolationWithMessage(ruleCtx, entry.rhs, makeMessage(entry, reason, entry.var.isField()));
}
}
}
private boolean suppressUnusedVariableRuleOverlap(AssignmentEntry entry) {
return !getProperty(REPORT_UNUSED_VARS) && (entry.rhs instanceof ASTVariableInitializer
|| entry.rhs instanceof ASTVariableDeclaratorId);
}
private static String getKind(ASTVariableDeclaratorId id) {
if (id.isField()) {
return "field";
} else if (id.isResourceDeclaration()) {
return "resource";
} else if (id.isExceptionBlockParameter()) {
return "exception parameter";
} else if (id.getNthParent(3) instanceof ASTForStatement) {
return "loop variable";
} else if (id.isFormalParameter()) {
return "parameter";
}
return "variable";
}
private boolean isIgnorablePrefixIncrement(JavaNode assignment) {
if (assignment instanceof ASTPreIncrementExpression
|| assignment instanceof ASTPreDecrementExpression) {
// the variable value is used if it was found somewhere else
// than in statement position
return !getProperty(CHECK_PREFIX_INCREMENT) && !(assignment.getParent() instanceof ASTStatementExpression);
}
return false;
}
private static String makeMessage(AssignmentEntry assignment, /* Nullable */ String reason, boolean isField) {
// if reason is null, then the variable is unused (at most assigned to)
String varName = assignment.var.getName();
StringBuilder result = new StringBuilder(64);
if (assignment.rhs instanceof ASTVariableInitializer) {
result.append(isField ? "the field initializer for"
: "the initializer for variable");
} else if (assignment.rhs instanceof ASTVariableDeclaratorId) {
if (reason != null) {
result.append("the initial value of ");
}
result.append(getKind(assignment.var));
} else {
if (assignment.rhs instanceof ASTPreIncrementExpression
|| assignment.rhs instanceof ASTPreDecrementExpression
|| assignment.rhs instanceof ASTPostfixExpression) {
result.append("the updated value of ");
} else {
result.append("the value assigned to ");
}
result.append(isField ? "field" : "variable");
}
result.append(" ''").append(varName).append("''");
result.append(" is never used");
if (reason != null) {
result.append(" (").append(reason).append(")");
}
result.setCharAt(0, Character.toUpperCase(result.charAt(0)));
return result.toString();
}
private static String joinLines(String prefix, Set<AssignmentEntry> killers) {
StringBuilder sb = new StringBuilder(prefix);
ArrayList<AssignmentEntry> sorted = new ArrayList<>(killers);
Collections.sort(sorted, new Comparator<AssignmentEntry>() {
@Override
public int compare(AssignmentEntry o1, AssignmentEntry o2) {
int lineRes = Integer.compare(o1.rhs.getBeginLine(), o2.rhs.getBeginLine());
return lineRes != 0 ? lineRes
: Integer.compare(o1.rhs.getBeginColumn(), o2.rhs.getBeginColumn());
}
});
sb.append(sorted.get(0).rhs.getBeginLine());
for (int i = 1; i < sorted.size() - 1; i++) {
sb.append(", ").append(sorted.get(i).rhs.getBeginLine());
}
sb.append(" and ").append(sorted.get(sorted.size() - 1).rhs.getBeginLine());
return sb.toString();
}
private static class ReachingDefsVisitor extends JavaParserVisitorAdapter {
static final ReachingDefsVisitor ONLY_LOCALS = new ReachingDefsVisitor(null);
// The class scope for the "this" reference, used to find fields
// of this class
// null if we're not processing instance/static initializers,
// so in methods we don't care about fields
// If not null, fields are effectively treated as locals
private final ClassScope enclosingClassScope;
private ReachingDefsVisitor(ClassScope scope) {
this.enclosingClassScope = scope;
}
// following deals with control flow structures
@Override
public Object visit(JavaNode node, Object data) {
for (JavaNode child : node.children()) {
// each output is passed as input to the next (most relevant for blocks)
data = child.jjtAccept(this, data);
}
return data;
}
@Override
public Object visit(ASTBlock node, final Object data) {
// variables local to a loop iteration must be killed before the
// next iteration
SpanInfo state = (SpanInfo) data;
Set<ASTVariableDeclaratorId> localsToKill = new HashSet<>();
for (JavaNode child : node.children()) {
// each output is passed as input to the next (most relevant for blocks)
state = acceptOpt(child, state);
if (child instanceof ASTBlockStatement
&& child.getChild(0) instanceof ASTLocalVariableDeclaration) {
ASTLocalVariableDeclaration local = (ASTLocalVariableDeclaration) child.getChild(0);
for (ASTVariableDeclaratorId id : local) {
localsToKill.add(id);
}
}
}
for (ASTVariableDeclaratorId var : localsToKill) {
state.deleteVar(var);
}
return state;
}
@Override
public Object visit(ASTSwitchStatement node, Object data) {
return processSwitch(node, (SpanInfo) data, node.getTestedExpression());
}
@Override
public Object visit(ASTSwitchExpression node, Object data) {
return processSwitch(node, (SpanInfo) data, node.getChild(0));
}
private SpanInfo processSwitch(JavaNode switchLike, SpanInfo data, JavaNode testedExpr) {
GlobalAlgoState global = data.global;
SpanInfo before = acceptOpt(testedExpr, data);
global.breakTargets.push(before.fork());
SpanInfo current = before;
for (int i = 1; i < switchLike.getNumChildren(); i++) {
JavaNode child = switchLike.getChild(i);
if (child instanceof ASTSwitchLabel) {
current = before.fork().absorb(current);
} else if (child instanceof ASTSwitchLabeledRule) {
current = acceptOpt(child.getChild(1), before.fork());
current = global.breakTargets.doBreak(current, null); // process this as if it was followed by a break
} else {
// statement in a regular fallthrough switch block
current = acceptOpt(child, current);
}
}
before = global.breakTargets.pop();
// join with the last state, which is the exit point of the
// switch, if it's not closed by a break;
return before.absorb(current);
}
@Override
public Object visit(ASTIfStatement node, Object data) {
SpanInfo before = (SpanInfo) data;
return makeConditional(before, node.getCondition(), node.getThenBranch(), node.getElseBranch());
}
@Override
public Object visit(ASTConditionalExpression node, Object data) {
SpanInfo before = (SpanInfo) data;
return makeConditional(before, node.getCondition(), node.getChild(1), node.getChild(2));
}
// This will be much easier with the 7.0 grammar.....
SpanInfo makeConditional(SpanInfo before, JavaNode condition, JavaNode thenBranch, JavaNode elseBranch) {
SpanInfo thenState = before.fork();
SpanInfo elseState = elseBranch != null ? before.fork() : before;
linkConditional(before, condition, thenState, elseState, true);
thenState = acceptOpt(thenBranch, thenState);
elseState = acceptOpt(elseBranch, elseState);
return elseState.absorb(thenState);
}
/*
* This recursive procedure translates shortcut conditionals
* that occur in condition position in the following way:
*
* if (a || b) <then> if (a) <then>
* else <else> ~> else
* if (b) <then>
* else <else>
*
*
* if (a && b) <then> if (a)
* else <else> ~> if (b) <then>
* else <else>
* else <else>
*
* The new conditions are recursively processed to translate
* bigger conditions, like `a || b && c`
*
         * This is how it works, but the <then> and <else> branches are
* visited only once, because it's not done in this method, but
* in makeConditional.
*
* @return the state in which all expressions have been evaluated
* Eg for `a || b`, this is the `else` state (all evaluated to false)
* Eg for `a && b`, this is the `then` state (all evaluated to true)
*
*/
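        // Worked illustration (hypothetical condition, not from the sources): for
        //   if (a || b && c) <then> else <else>
        // the translation above is applied recursively, first on `||`, then on `&&`:
        //   if (a) <then>
        //   else if (b)
        //            if (c) <then> else <else>
        //        else <else>
        // Each sub-condition is evaluated once; the <then>/<else> states are merged
        // with SpanInfo::absorb instead of visiting the branches again.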
private SpanInfo linkConditional(SpanInfo before, JavaNode condition, SpanInfo thenState, SpanInfo elseState, boolean isTopLevel) {
if (condition == null) {
return before;
}
condition = unwrapParentheses(condition);
if (condition instanceof ASTConditionalOrExpression) {
return visitShortcutOrExpr(condition, before, thenState, elseState);
} else if (condition instanceof ASTConditionalAndExpression) {
// To mimic a shortcut AND expr, swap the thenState and the elseState
// See explanations in method
return visitShortcutOrExpr(condition, before, elseState, thenState);
} else if (condition instanceof ASTExpression && condition.getNumChildren() == 1) {
return linkConditional(before, condition.getChild(0), thenState, elseState, isTopLevel);
} else {
SpanInfo state = acceptOpt(condition, before);
if (isTopLevel) {
thenState.absorb(state);
elseState.absorb(state);
}
return state;
}
}
SpanInfo visitShortcutOrExpr(JavaNode orExpr,
SpanInfo before,
SpanInfo thenState,
SpanInfo elseState) {
// if (<a> || <b> || ... || <n>) <then>
// else <else>
//
// in <then>, we are sure that at least <a> was evaluated,
// but really any prefix of <a> ... <n> is possible so they're all merged
// in <else>, we are sure that all of <a> ... <n> were evaluated (to false)
// If you replace || with &&, then the above holds if you swap <then> and <else>
// So this method handles the OR expr, the caller can swap the arguments to make an AND
// ---
            // This method has side effects on thenState and elseState to
// set the variables.
Iterator<? extends JavaNode> iterator = orExpr.children().iterator();
SpanInfo cur = before;
do {
JavaNode cond = iterator.next();
cur = linkConditional(cur, cond, thenState, elseState, false);
thenState.absorb(cur);
} while (iterator.hasNext());
elseState.absorb(cur);
return cur;
}
@Override
public Object visit(ASTTryStatement node, Object data) {
final SpanInfo before = (SpanInfo) data;
ASTFinallyStatement finallyClause = node.getFinallyClause();
/*
<before>
try (<resources>) {
<body>
} catch (IOException e) {
<catch>
} finally {
<finally>
}
<end>
There is a path <before> -> <resources> -> <body> -> <finally> -> <end>
and for each catch, <before> -> <catch> -> <finally> -> <end>
Except that abrupt completion before the <finally> jumps
to the <finally> and completes abruptly for the same
reason (if the <finally> completes normally), which
means it doesn't go to <end>
*/
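            // Sketch (hypothetical snippet, placeholder methods read/use/log) of why
            // the catch spans absorb the body state at every call inside the try:
            //
            //   String s = "start";
            //   try {
            //       s = read();       // the call may throw before the assignment
            //       use(s);           // this call may throw after it
            //   } catch (IOException e) {
            //       log(s);           // may observe "start" or the value from read()
            //   }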
if (finallyClause != null) {
before.myFinally = before.forkEmpty();
}
final List<ASTCatchStatement> catchClauses = node.getCatchClauses();
final List<SpanInfo> catchSpans = catchClauses.isEmpty() ? Collections.<SpanInfo>emptyList()
: new ArrayList<SpanInfo>();
// pre-fill catch spans
for (int i = 0; i < catchClauses.size(); i++) {
catchSpans.add(before.forkEmpty());
}
ASTResourceSpecification resources = node.getFirstChildOfType(ASTResourceSpecification.class);
SpanInfo bodyState = before.fork();
bodyState = bodyState.withCatchBlocks(catchSpans);
bodyState = acceptOpt(resources, bodyState);
bodyState = acceptOpt(node.getBody(), bodyState);
bodyState = bodyState.withCatchBlocks(Collections.<SpanInfo>emptyList());
SpanInfo exceptionalState = null;
for (int i = 0; i < catchClauses.size(); i++) {
ASTCatchStatement catchClause = catchClauses.get(i);
SpanInfo current = acceptOpt(catchClause, catchSpans.get(i));
exceptionalState = current.absorb(exceptionalState);
}
SpanInfo finalState;
finalState = bodyState.absorb(exceptionalState);
if (finallyClause != null) {
// this represents the finally clause when it was entered
// because of abrupt completion
// since we don't know when it terminated we must join it with before
SpanInfo abruptFinally = before.myFinally.absorb(before);
acceptOpt(finallyClause, abruptFinally);
before.myFinally = null;
abruptFinally.abruptCompletionByThrow(false); // propagate to enclosing catch/finallies
// this is the normal finally
finalState = acceptOpt(finallyClause, finalState);
}
// In the 7.0 grammar, the resources should be explicitly
// used here. For now they don't trigger anything as their
// node is not a VariableDeclaratorId. There's a test to
// check that.
return finalState;
}
@Override
public Object visit(ASTCatchStatement node, Object data) {
SpanInfo result = (SpanInfo) visit((JavaNode) node, data);
result.deleteVar(node.getExceptionId());
return result;
}
@Override
public Object visit(ASTLambdaExpression node, Object data) {
            // Lambda expressions have control flow that is separate from the method
// So we fork the context, but don't join it
// Reaching definitions of the enclosing context still reach in the lambda
// Since those definitions are [effectively] final, they actually can't be
// killed, but they can be used in the lambda
SpanInfo before = (SpanInfo) data;
JavaNode lambdaBody = node.getChild(node.getNumChildren() - 1);
// if it's an expression, then no assignments may occur in it,
// but it can still use some variables of the context
acceptOpt(lambdaBody, before.forkCapturingNonLocal());
return before;
}
@Override
public Object visit(ASTWhileStatement node, Object data) {
return handleLoop(node, (SpanInfo) data, null, node.getCondition(), null, node.getBody(), true, null);
}
@Override
public Object visit(ASTDoStatement node, Object data) {
return handleLoop(node, (SpanInfo) data, null, node.getCondition(), null, node.getBody(), false, null);
}
@Override
public Object visit(ASTForStatement node, Object data) {
ASTStatement body = node.getBody();
if (node.isForeach()) {
// the iterable expression
JavaNode init = node.getChild(1);
ASTVariableDeclaratorId foreachVar = ((ASTLocalVariableDeclaration) node.getChild(0)).iterator().next();
return handleLoop(node, (SpanInfo) data, init, null, null, body, true, foreachVar);
} else {
ASTForInit init = node.getFirstChildOfType(ASTForInit.class);
ASTExpression cond = node.getCondition();
ASTForUpdate update = node.getFirstChildOfType(ASTForUpdate.class);
return handleLoop(node, (SpanInfo) data, init, cond, update, body, true, null);
}
}
private SpanInfo handleLoop(JavaNode loop,
SpanInfo before,
JavaNode init,
JavaNode cond,
JavaNode update,
JavaNode body,
boolean checkFirstIter,
ASTVariableDeclaratorId foreachVar) {
final GlobalAlgoState globalState = before.global;
SpanInfo breakTarget = before.forkEmpty();
SpanInfo continueTarget = before.forkEmpty();
pushTargets(loop, breakTarget, continueTarget);
// perform a few "iterations", to make sure that assignments in
// the body can affect themselves in the next iteration, and
// that they affect the condition, etc
before = acceptOpt(init, before);
if (checkFirstIter && cond != null) { // false for do-while
SpanInfo ifcondTrue = before.forkEmpty();
linkConditional(before, cond, ifcondTrue, breakTarget, true);
before = ifcondTrue;
}
if (foreachVar != null) {
// in foreach loops, the loop variable is assigned before the first iteration
before.assign(foreachVar, foreachVar);
}
// make the defs of the body reach the other parts of the loop,
// including itself
SpanInfo iter = acceptOpt(body, before.fork());
if (foreachVar != null && iter.hasVar(foreachVar)) {
// in foreach loops, the loop variable is reassigned on each update
iter.assign(foreachVar, foreachVar);
} else {
iter = acceptOpt(update, iter);
}
linkConditional(iter, cond, iter, breakTarget, true);
iter = acceptOpt(body, iter);
breakTarget = globalState.breakTargets.peek();
continueTarget = globalState.continueTargets.peek();
if (!continueTarget.symtable.isEmpty()) {
// make assignments before a continue reach the other parts of the loop
linkConditional(continueTarget, cond, continueTarget, breakTarget, true);
continueTarget = acceptOpt(body, continueTarget);
continueTarget = acceptOpt(update, continueTarget);
}
SpanInfo result = popTargets(loop, breakTarget, continueTarget);
result = result.absorb(iter);
if (checkFirstIter) {
// if the first iteration is checked,
// then it could be false on the first try, meaning
// the definitions before the loop reach after too
result = result.absorb(before);
}
if (foreachVar != null) {
result.deleteVar(foreachVar);
}
return result;
}
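        // Sketch of why handleLoop runs the body more than once (hypothetical loop,
        // `use` and `n` are placeholders):
        //
        //   int prev = -1;
        //   for (int i = 0; i < n; i++) {
        //       use(prev);   // reads the value assigned in the previous iteration
        //       prev = i;
        //   }
        //
        // The second pass over the body lets `prev = i` reach `use(prev)` of the
        // next iteration, so that assignment is not flagged as unused.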
private void pushTargets(JavaNode loop, SpanInfo breakTarget, SpanInfo continueTarget) {
GlobalAlgoState globalState = breakTarget.global;
globalState.breakTargets.unnamedTargets.push(breakTarget);
globalState.continueTargets.unnamedTargets.push(continueTarget);
Node parent = loop.getNthParent(2);
while (parent instanceof ASTLabeledStatement) {
String label = parent.getImage();
globalState.breakTargets.namedTargets.put(label, breakTarget);
globalState.continueTargets.namedTargets.put(label, continueTarget);
parent = parent.getNthParent(2);
}
}
private SpanInfo popTargets(JavaNode loop, SpanInfo breakTarget, SpanInfo continueTarget) {
GlobalAlgoState globalState = breakTarget.global;
globalState.breakTargets.unnamedTargets.pop();
globalState.continueTargets.unnamedTargets.pop();
SpanInfo total = breakTarget.absorb(continueTarget);
Node parent = loop.getNthParent(2);
while (parent instanceof ASTLabeledStatement) {
String label = parent.getImage();
total = total.absorb(globalState.breakTargets.namedTargets.remove(label));
total = total.absorb(globalState.continueTargets.namedTargets.remove(label));
parent = parent.getNthParent(2);
}
return total;
}
private SpanInfo acceptOpt(JavaNode node, SpanInfo before) {
return node == null ? before : (SpanInfo) node.jjtAccept(this, before);
}
@Override
public Object visit(ASTContinueStatement node, Object data) {
SpanInfo state = (SpanInfo) data;
return state.global.continueTargets.doBreak(state, node.getImage());
}
@Override
public Object visit(ASTBreakStatement node, Object data) {
SpanInfo state = (SpanInfo) data;
return state.global.breakTargets.doBreak(state, node.getImage());
}
@Override
public Object visit(ASTYieldStatement node, Object data) {
super.visit(node, data); // visit expression
SpanInfo state = (SpanInfo) data;
// treat as break, ie abrupt completion + link reaching defs to outer context
return state.global.breakTargets.doBreak(state, null);
}
// both of those exit the scope of the method/ctor, so their assignments go dead
@Override
public Object visit(ASTThrowStatement node, Object data) {
super.visit(node, data);
return ((SpanInfo) data).abruptCompletionByThrow(false);
}
@Override
public Object visit(ASTReturnStatement node, Object data) {
super.visit(node, data);
return ((SpanInfo) data).abruptCompletion(null);
}
// following deals with assignment
@Override
public Object visit(ASTFormalParameter node, Object data) {
if (!node.isExplicitReceiverParameter()) {
ASTVariableDeclaratorId id = node.getVariableDeclaratorId();
((SpanInfo) data).assign(id, id);
}
return data;
}
@Override
public Object visit(ASTVariableDeclarator node, Object data) {
ASTVariableDeclaratorId var = node.getVariableId();
ASTVariableInitializer rhs = node.getInitializer();
if (rhs != null) {
rhs.jjtAccept(this, data);
((SpanInfo) data).assign(var, rhs);
} else {
((SpanInfo) data).assign(var, node.getVariableId());
}
return data;
}
@Override
public Object visit(ASTExpression node, Object data) {
return checkAssignment(node, data);
}
@Override
public Object visit(ASTStatementExpression node, Object data) {
return checkAssignment(node, data);
}
public Object checkAssignment(JavaNode node, Object data) {
SpanInfo result = (SpanInfo) data;
if (node.getNumChildren() == 3) {
// assignment
assert node.getChild(1) instanceof ASTAssignmentOperator;
// visit the rhs as it is evaluated before
JavaNode rhs = node.getChild(2);
result = acceptOpt(rhs, result);
ASTVariableDeclaratorId lhsVar = getVarFromExpression(node.getChild(0), true, result);
if (lhsVar != null) {
// in that case lhs is a normal variable (array access not supported)
if (node.getChild(1).getImage().length() >= 2) {
// compound assignment, to use BEFORE assigning
result.use(lhsVar);
}
result.assign(lhsVar, rhs);
} else {
result = acceptOpt(node.getChild(0), result);
}
return result;
} else {
return visit(node, data);
}
}
@Override
public Object visit(ASTPreDecrementExpression node, Object data) {
return checkIncOrDecrement(node, (SpanInfo) data);
}
@Override
public Object visit(ASTPreIncrementExpression node, Object data) {
return checkIncOrDecrement(node, (SpanInfo) data);
}
@Override
public Object visit(ASTPostfixExpression node, Object data) {
return checkIncOrDecrement(node, (SpanInfo) data);
}
private SpanInfo checkIncOrDecrement(JavaNode unary, SpanInfo data) {
ASTVariableDeclaratorId var = getVarFromExpression(unary.getChild(0), true, data);
if (var != null) {
data.use(var);
data.assign(var, unary);
}
return data;
}
// variable usage
@Override
public Object visit(ASTPrimaryExpression node, Object data) {
SpanInfo state = (SpanInfo) visit((JavaNode) node, data); // visit subexpressions
ASTVariableDeclaratorId var = getVarFromExpression(node, false, state);
if (var != null) {
state.use(var);
}
maybeThrowUncheckedExceptions(node, state);
return state;
}
private void maybeThrowUncheckedExceptions(ASTPrimaryExpression e, SpanInfo state) {
// Note that this doesn't really respect the order of evaluation of subexpressions
// This can be easily fixed in the 7.0 tree, but this is rare enough to not deserve
// the effort on master.
// For the record this has problems with call chains with side effects, like
// a.foo(a = 2).bar(a = 3);
// In 7.0, with the precise type/overload resolution, we
// could only target methods that throw checked exceptions
            // (unless some catch block catches an unchecked exception)
for (JavaNode child : e.children()) {
if (child instanceof ASTPrimarySuffix && ((ASTPrimarySuffix) child).isArguments()
|| child instanceof ASTPrimarySuffix && child.getNumChildren() > 0 && child.getChild(0) instanceof ASTAllocationExpression
|| child instanceof ASTPrimaryPrefix && child.getNumChildren() > 0 && child.getChild(0) instanceof ASTAllocationExpression) {
state.abruptCompletionByThrow(true); // this is a noop if we're outside a try block that has catch/finally
}
}
}
/**
* Get the variable accessed from a primary.
*/
private ASTVariableDeclaratorId getVarFromExpression(JavaNode primary, boolean inLhs, SpanInfo state) {
if (primary instanceof ASTPrimaryExpression) {
ASTPrimaryPrefix prefix = (ASTPrimaryPrefix) primary.getChild(0);
// this.x = 2;
if (prefix.usesThisModifier() && this.enclosingClassScope != null) {
int numChildren = primary.getNumChildren();
if (numChildren < 2 || numChildren > 2 && inLhs) {
if (numChildren == 3 || numChildren == 1) {
// method call on this, or just bare `this` reference
state.recordThisLeak(true, enclosingClassScope);
}
return null;
}
ASTPrimarySuffix suffix = (ASTPrimarySuffix) primary.getChild(1);
if (suffix.getImage() == null) {
return null;
} else if (primary.getNumChildren() > 2 && ((ASTPrimarySuffix) primary.getChild(2)).isArguments()) {
// this.foo()
// first suffix is the name, second is the arguments
state.recordThisLeak(true, enclosingClassScope);
return null;
}
return findVar(primary.getScope(), true, suffix.getImage());
} else {
if (prefix.getNumChildren() > 0 && prefix.getChild(0) instanceof ASTName) {
String prefixImage = prefix.getChild(0).getImage();
String varname = identOf(inLhs, prefixImage);
if (primary.getNumChildren() > 1) {
if (primary.getNumChildren() > 2 && inLhs) {
// this is for chains like `foo.m().field = 3`
return null;
}
ASTPrimarySuffix suffix = (ASTPrimarySuffix) primary.getChild(1);
if (suffix.isArguments()) {
// then the prefix has the method name
varname = methodLhsName(prefixImage);
} else if (suffix.isArrayDereference() && inLhs) {
return null;
}
}
return findVar(prefix.getScope(), false, varname);
}
}
}
return null;
}
private static String identOf(boolean inLhs, String str) {
int i = str.indexOf('.');
if (i < 0) {
return str;
} else if (inLhs) {
// a qualified name in LHS, so
// the assignment doesn't assign the variable but one of its fields
return null;
}
return str.substring(0, i);
}
private static String methodLhsName(String name) {
int i = name.indexOf('.');
return i < 0 ? null // no lhs, the name is just the method name
: name.substring(0, i);
}
private ASTVariableDeclaratorId findVar(Scope scope, boolean isField, String name) {
if (name == null) {
return null;
}
if (isField) {
return getFromSingleScope(enclosingClassScope, name);
}
while (scope != null) {
ASTVariableDeclaratorId result = getFromSingleScope(scope, name);
if (result != null) {
if (scope instanceof ClassScope && scope != enclosingClassScope) { // NOPMD CompareObjectsWithEqual this is what we want
// don't handle fields
return null;
}
return result;
}
scope = scope.getParent();
}
return null;
}
private ASTVariableDeclaratorId getFromSingleScope(Scope scope, String name) {
if (scope != null) {
for (VariableNameDeclaration decl : scope.getDeclarations(VariableNameDeclaration.class).keySet()) {
if (decl.getImage().equals(name)) {
return (ASTVariableDeclaratorId) decl.getNode();
}
}
}
return null;
}
// ctor/initializer handling
// this is the common denominator between anonymous class & astAnyTypeDeclaration on master
@Override
public Object visit(ASTClassOrInterfaceBody node, Object data) {
visitTypeBody(node, (SpanInfo) data);
return data; // type doesn't contribute anything to the enclosing control flow
}
@Override
public Object visit(ASTEnumBody node, Object data) {
visitTypeBody(node, (SpanInfo) data);
return data; // type doesn't contribute anything to the enclosing control flow
}
private void visitTypeBody(JavaNode typeBody, SpanInfo data) {
List<ASTAnyTypeBodyDeclaration> declarations = typeBody.findChildrenOfType(ASTAnyTypeBodyDeclaration.class);
processInitializers(declarations, data, (ClassScope) typeBody.getScope());
for (ASTAnyTypeBodyDeclaration decl : declarations) {
JavaNode d = decl.getDeclarationNode();
if (d instanceof ASTMethodDeclaration) {
ASTMethodDeclaration method = (ASTMethodDeclaration) d;
if (!method.isAbstract() && !method.isNative()) {
ONLY_LOCALS.acceptOpt(d, data.forkCapturingNonLocal());
}
} else if (d instanceof ASTAnyTypeDeclaration) {
JavaNode body = d.getChild(d.getNumChildren() - 1);
visitTypeBody(body, data.forkEmptyNonLocal());
}
}
}
private static void processInitializers(List<ASTAnyTypeBodyDeclaration> declarations,
SpanInfo beforeLocal,
ClassScope scope) {
ReachingDefsVisitor visitor = new ReachingDefsVisitor(scope);
// All field initializers + instance initializers
SpanInfo ctorHeader = beforeLocal.forkCapturingNonLocal();
// All static field initializers + static initializers
SpanInfo staticInit = beforeLocal.forkEmptyNonLocal();
List<ASTConstructorDeclaration> ctors = new ArrayList<>();
for (ASTAnyTypeBodyDeclaration declaration : declarations) {
JavaNode node = declaration.getDeclarationNode();
final boolean isStatic;
if (node instanceof ASTFieldDeclaration) {
isStatic = ((ASTFieldDeclaration) node).isStatic();
} else if (node instanceof ASTInitializer) {
isStatic = ((ASTInitializer) node).isStatic();
} else if (node instanceof ASTConstructorDeclaration) {
ctors.add((ASTConstructorDeclaration) node);
continue;
} else {
continue;
}
if (isStatic) {
staticInit = visitor.acceptOpt(node, staticInit);
} else {
ctorHeader = visitor.acceptOpt(node, ctorHeader);
}
}
SpanInfo ctorEndState = ctors.isEmpty() ? ctorHeader : null;
for (ASTConstructorDeclaration ctor : ctors) {
SpanInfo state = visitor.acceptOpt(ctor, ctorHeader.forkCapturingNonLocal());
ctorEndState = ctorEndState == null ? state : ctorEndState.absorb(state);
}
// assignments that reach the end of any constructor must
// be considered used
useAllSelfFields(staticInit, ctorEndState, visitor.enclosingClassScope);
}
static void useAllSelfFields(/*nullable*/SpanInfo staticState, SpanInfo instanceState, ClassScope classScope) {
for (VariableNameDeclaration field : classScope.getVariableDeclarations().keySet()) {
ASTVariableDeclaratorId var = field.getDeclaratorId();
if (!field.isRecordComponent() && field.getAccessNodeParent().isStatic()) {
if (staticState != null) {
staticState.use(var);
}
} else {
instanceState.use(var);
}
}
}
}
/**
* The shared state for all {@link SpanInfo} instances in the same
* toplevel class.
*/
private static class GlobalAlgoState {
final Set<AssignmentEntry> allAssignments;
final Set<AssignmentEntry> usedAssignments;
// track which assignments kill which
// assignment -> killers(assignment)
final Map<AssignmentEntry, Set<AssignmentEntry>> killRecord;
final TargetStack breakTargets = new TargetStack();
// continue jumps to the condition check, while break jumps to after the loop
final TargetStack continueTargets = new TargetStack();
private GlobalAlgoState(Set<AssignmentEntry> allAssignments,
Set<AssignmentEntry> usedAssignments,
Map<AssignmentEntry, Set<AssignmentEntry>> killRecord) {
this.allAssignments = allAssignments;
this.usedAssignments = usedAssignments;
this.killRecord = killRecord;
}
private GlobalAlgoState() {
this(new HashSet<AssignmentEntry>(),
new HashSet<AssignmentEntry>(),
new HashMap<AssignmentEntry, Set<AssignmentEntry>>());
}
}
// Information about a variable in a code span.
static class VarLocalInfo {
Set<AssignmentEntry> reachingDefs;
VarLocalInfo(Set<AssignmentEntry> reachingDefs) {
this.reachingDefs = reachingDefs;
}
VarLocalInfo absorb(VarLocalInfo other) {
if (this.equals(other)) {
return this;
}
Set<AssignmentEntry> merged = new HashSet<>(reachingDefs.size() + other.reachingDefs.size());
merged.addAll(reachingDefs);
merged.addAll(other.reachingDefs);
return new VarLocalInfo(merged);
}
@Override
public String toString() {
return "VarLocalInfo{reachingDefs=" + reachingDefs + '}';
}
public VarLocalInfo copy() {
return new VarLocalInfo(this.reachingDefs);
}
}
/**
* Information about a span of code.
*/
private static class SpanInfo {
// spans are arranged in a tree, to look for enclosing finallies
// when abrupt completion occurs. Blocks that have non-local
// control-flow (lambda bodies, anonymous classes, etc) aren't
// linked to the outer parents.
final SpanInfo parent;
// If != null, then abrupt completion in this span of code (and any descendant)
// needs to go through the finally span (the finally must absorb it)
SpanInfo myFinally = null;
/**
* Inside a try block, we assume that any method/ctor call may
* throw, which means, any assignment reaching such a method call
* may reach the catch blocks if there are any.
*/
List<SpanInfo> myCatches;
final GlobalAlgoState global;
final Map<ASTVariableDeclaratorId, VarLocalInfo> symtable;
private SpanInfo(GlobalAlgoState global) {
this(null, global, new HashMap<ASTVariableDeclaratorId, VarLocalInfo>());
}
private SpanInfo(SpanInfo parent,
GlobalAlgoState global,
Map<ASTVariableDeclaratorId, VarLocalInfo> symtable) {
this.parent = parent;
this.global = global;
this.symtable = symtable;
this.myCatches = Collections.emptyList();
}
boolean hasVar(ASTVariableDeclaratorId var) {
return symtable.containsKey(var);
}
void assign(ASTVariableDeclaratorId var, JavaNode rhs) {
AssignmentEntry entry = new AssignmentEntry(var, rhs);
VarLocalInfo previous = symtable.put(var, new VarLocalInfo(Collections.singleton(entry)));
if (previous != null) {
// those assignments were overwritten ("killed")
for (AssignmentEntry killed : previous.reachingDefs) {
if (killed.rhs instanceof ASTVariableDeclaratorId
&& killed.rhs.getParent() instanceof ASTVariableDeclarator
&& killed.rhs != rhs) {
continue;
}
// java8: computeIfAbsent
Set<AssignmentEntry> killers = global.killRecord.get(killed);
if (killers == null) {
killers = new HashSet<>(1);
global.killRecord.put(killed, killers);
}
killers.add(entry);
}
}
global.allAssignments.add(entry);
}
void use(ASTVariableDeclaratorId var) {
VarLocalInfo info = symtable.get(var);
// may be null for implicit assignments, like method parameter
if (info != null) {
global.usedAssignments.addAll(info.reachingDefs);
}
}
void deleteVar(ASTVariableDeclaratorId var) {
symtable.remove(var);
}
/**
* Record a leak of the `this` reference in a ctor (including field initializers).
*
* <p>This means, all defs reaching this point, for all fields
* of `this`, may be used in the expression. We assume that the
* ctor finishes its execution atomically, that is, following
* definitions are not observable at an arbitrary point (that
* would be too conservative).
*
* <p>Constructs that are considered to leak the `this` reference
* (only processed if they occur in a ctor):
* - using `this` as a method/ctor argument
* - using `this` as the receiver of a method/ctor invocation
*
* <p>Because `this` may be aliased (eg in a field, a local var,
* inside an anon class or capturing lambda, etc), any method
* call, on any receiver, may actually observe field definitions
* of `this`. So the analysis may show some false positives, which
* hopefully should be rare enough.
*/
public void recordThisLeak(boolean thisIsLeaking, ClassScope enclosingClassScope) {
if (thisIsLeaking && enclosingClassScope != null) {
// all reaching defs to fields until now may be observed
ReachingDefsVisitor.useAllSelfFields(null, this, enclosingClassScope);
}
}
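        // Hypothetical illustration (not from the sources) of a leak that
        // recordThisLeak accounts for; Registry and register are placeholder names:
        //
        //   class Foo {
        //       int x = 1;
        //       Foo(Registry r) {
        //           r.register(this); // `this` escapes: the field def x = 1 may be observed
        //           x = 2;
        //       }
        //   }
        //
        // Without recording the leak, `int x = 1` would look overwritten by `x = 2`
        // and be reported.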
// Forks duplicate this context, to preserve the reaching defs
// of the current context while analysing a sub-block
// Forks must be merged later if control flow merges again, see ::absorb
SpanInfo fork() {
return doFork(this, copyTable());
}
SpanInfo forkEmpty() {
return doFork(this, new HashMap<ASTVariableDeclaratorId, VarLocalInfo>());
}
SpanInfo forkEmptyNonLocal() {
return doFork(null, new HashMap<ASTVariableDeclaratorId, VarLocalInfo>());
}
SpanInfo forkCapturingNonLocal() {
return doFork(null, copyTable());
}
private Map<ASTVariableDeclaratorId, VarLocalInfo> copyTable() {
HashMap<ASTVariableDeclaratorId, VarLocalInfo> copy = new HashMap<>(this.symtable.size());
for (ASTVariableDeclaratorId var : this.symtable.keySet()) {
copy.put(var, this.symtable.get(var).copy());
}
return copy;
}
private SpanInfo doFork(/*nullable*/ SpanInfo parent, Map<ASTVariableDeclaratorId, VarLocalInfo> reaching) {
return new SpanInfo(parent, this.global, reaching);
}
/** Abrupt completion for return, continue, break. */
SpanInfo abruptCompletion(SpanInfo target) {
// if target == null then this will unwind all the parents
SpanInfo parent = this;
while (parent != target && parent != null) { // NOPMD CompareObjectsWithEqual this is what we want
if (parent.myFinally != null) {
parent.myFinally.absorb(this);
// stop on the first finally, its own end state will
// be merged into the nearest enclosing finally
return this;
}
parent = parent.parent;
}
this.symtable.clear();
return this;
}
/**
* Record an abrupt completion occurring because of a thrown
* exception.
*
* @param byMethodCall If true, a method/ctor call threw the exception
* (we conservatively consider they do inside try blocks).
* Otherwise, a throw statement threw.
*/
SpanInfo abruptCompletionByThrow(boolean byMethodCall) {
// Find the first block that has a finally
// Be absorbed into every catch block on the way.
// In 7.0, with the precise type/overload resolution, we
// can target the specific catch block that would catch the
// exception.
SpanInfo parent = this;
while (parent != null) {
if (!parent.myCatches.isEmpty()) {
for (SpanInfo c : parent.myCatches) {
c.absorb(this);
}
}
if (parent.myFinally != null) {
// stop on the first finally, its own end state will
// be merged into the nearest enclosing finally
parent.myFinally.absorb(this);
return this;
}
parent = parent.parent;
}
if (!byMethodCall) {
this.symtable.clear(); // following is dead code
}
return this;
}
SpanInfo withCatchBlocks(List<SpanInfo> catchStmts) {
assert myCatches.isEmpty() || catchStmts.isEmpty() : "Cannot set catch blocks twice";
myCatches = Collections.unmodifiableList(catchStmts); // we own the list now, to avoid copying
return this;
}
SpanInfo absorb(SpanInfo other) {
// Merge reaching defs of the other scope into this
// This is used to join paths after the control flow has forked
// a spanInfo may be absorbed several times so this method should not
// destroy the parameter
if (this.equals(other) || other == null || other.symtable.isEmpty()) {
return this;
}
// we don't have to double the capacity since they're normally of the same size
// (vars are deleted when exiting a block)
Set<ASTVariableDeclaratorId> keysUnion = new HashSet<>(this.symtable.keySet());
keysUnion.addAll(other.symtable.keySet());
for (ASTVariableDeclaratorId var : keysUnion) {
VarLocalInfo thisInfo = this.symtable.get(var);
VarLocalInfo otherInfo = other.symtable.get(var);
if (thisInfo == otherInfo) { // NOPMD CompareObjectsWithEqual this is what we want
continue;
}
if (otherInfo != null && thisInfo != null) {
this.symtable.put(var, thisInfo.absorb(otherInfo));
} else if (otherInfo != null) {
this.symtable.put(var, otherInfo.copy());
}
}
return this;
}
@Override
public String toString() {
return symtable.toString();
}
}
static class TargetStack {
final Deque<SpanInfo> unnamedTargets = new ArrayDeque<>();
final Map<String, SpanInfo> namedTargets = new HashMap<>();
void push(SpanInfo state) {
unnamedTargets.push(state);
}
SpanInfo pop() {
return unnamedTargets.pop();
}
SpanInfo peek() {
return unnamedTargets.getFirst();
}
SpanInfo doBreak(SpanInfo data, /* nullable */ String label) {
// basically, reaching defs at the point of the break
// also reach after the break (wherever it lands)
SpanInfo target;
if (label == null) {
target = unnamedTargets.getFirst();
} else {
target = namedTargets.get(label);
}
if (target != null) { // otherwise CT error
target.absorb(data);
}
return data.abruptCompletion(target);
}
}
static class AssignmentEntry {
final ASTVariableDeclaratorId var;
// this is not necessarily an expression, it may be also the
// variable declarator of a foreach loop
final JavaNode rhs;
AssignmentEntry(ASTVariableDeclaratorId var, JavaNode rhs) {
this.var = var;
this.rhs = rhs;
}
@Override
public String toString() {
return var.getName() + " := " + rhs;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
AssignmentEntry that = (AssignmentEntry) o;
return Objects.equals(rhs, that.rhs);
}
@Override
public int hashCode() {
return rhs.hashCode();
}
}
}
| 1 | 18,664 | I don't think we need to maintain a separate set. The problem here is that the "assignment" that is killed for this variable is not really an assignment. If we just don't `assign` the variable with the non-existent value, it will not be reported. I pushed a fix. | pmd-pmd | java |
@@ -209,4 +209,9 @@ public interface TableScan {
*/
boolean isCaseSensitive();
+ /**
+ * Returns the target split size for this scan.
+ */
+ long targetSplitSize();
+
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.Collection;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
/**
* API for configuring a table scan.
* <p>
* TableScan objects are immutable and can be shared between threads. Refinement methods, like
* {@link #select(Collection)} and {@link #filter(Expression)}, create new TableScan instances.
*/
public interface TableScan {
/**
* Returns the {@link Table} from which this scan loads data.
*
* @return this scan's table
*/
Table table();
/**
* Create a new {@link TableScan} from this scan's configuration that will use the given snapshot
* by ID.
*
* @param snapshotId a snapshot ID
* @return a new scan based on this with the given snapshot ID
* @throws IllegalArgumentException if the snapshot cannot be found
*/
TableScan useSnapshot(long snapshotId);
/**
* Create a new {@link TableScan} from this scan's configuration that will use the most recent
* snapshot as of the given time in milliseconds.
*
* @param timestampMillis a timestamp in milliseconds.
* @return a new scan based on this with the current snapshot at the given time
* @throws IllegalArgumentException if the snapshot cannot be found
*/
TableScan asOfTime(long timestampMillis);
/**
* Create a new {@link TableScan} from this scan's configuration that will override the {@link Table}'s behavior based
* on the incoming pair. Unknown properties will be ignored.
*
* @param property name of the table property to be overridden
* @param value value to override with
* @return a new scan based on this with overridden behavior
*/
TableScan option(String property, String value);
/**
* Create a new {@link TableScan} from this with the schema as its projection.
*
* @param schema a projection schema
* @return a new scan based on this with the given projection
*/
TableScan project(Schema schema);
/**
   * Create a new {@link TableScan} from this that, if data columns were selected
* via {@link #select(java.util.Collection)}, controls whether the match to the schema will be done
* with case sensitivity.
*
* @return a new scan based on this with case sensitivity as stated
*/
TableScan caseSensitive(boolean caseSensitive);
/**
* Create a new {@link TableScan} from this that loads the column stats with each data file.
* <p>
* Column stats include: value count, null value count, lower bounds, and upper bounds.
*
* @return a new scan based on this that loads column stats.
*/
TableScan includeColumnStats();
/**
* Create a new {@link TableScan} from this that will read the given data columns. This produces
* an expected schema that includes all fields that are either selected or used by this scan's
* filter expression.
*
* @param columns column names from the table's schema
* @return a new scan based on this with the given projection columns
*/
default TableScan select(String... columns) {
return select(Lists.newArrayList(columns));
}
/**
* Create a new {@link TableScan} from this that will read the given data columns. This produces
* an expected schema that includes all fields that are either selected or used by this scan's
* filter expression.
*
* @param columns column names from the table's schema
* @return a new scan based on this with the given projection columns
*/
TableScan select(Collection<String> columns);
/**
* Create a new {@link TableScan} from the results of this filtered by the {@link Expression}.
*
* @param expr a filter expression
* @return a new scan based on this with results filtered by the expression
*/
TableScan filter(Expression expr);
/**
* Returns this scan's filter {@link Expression}.
*
* @return this scan's filter expression
*/
Expression filter();
/**
* Create a new {@link TableScan} from this that applies data filtering to files but not to rows in those files.
*
* @return a new scan based on this that does not filter rows in files.
*/
TableScan ignoreResiduals();
/**
* Create a new {@link TableScan} to read appended data from {@code fromSnapshotId} exclusive to {@code toSnapshotId}
* inclusive.
*
* @param fromSnapshotId the last snapshot id read by the user, exclusive
* @param toSnapshotId read append data up to this snapshot id
* @return a table scan which can read append data from {@code fromSnapshotId}
* exclusive and up to {@code toSnapshotId} inclusive
*/
TableScan appendsBetween(long fromSnapshotId, long toSnapshotId);
/**
* Create a new {@link TableScan} to read appended data from {@code fromSnapshotId} exclusive to the current snapshot
* inclusive.
*
   * @param fromSnapshotId the last snapshot id read by the user, exclusive
* @return a table scan which can read append data from {@code fromSnapshotId}
* exclusive and up to current snapshot inclusive
*/
TableScan appendsAfter(long fromSnapshotId);
/**
* Plan the {@link FileScanTask files} that will be read by this scan.
* <p>
* Each file has a residual expression that should be applied to filter the file's rows.
* <p>
* This simple plan returns file scans for each file from position 0 to the file's length. For
* planning that will combine small files, split large files, and attempt to balance work, use
* {@link #planTasks()} instead.
*
* @return an Iterable of file tasks that are required by this scan
*/
CloseableIterable<FileScanTask> planFiles();
/**
* Plan the {@link CombinedScanTask tasks} for this scan.
* <p>
* Tasks created by this method may read partial input files, multiple input files, or both.
*
* @return an Iterable of tasks for this scan
*/
CloseableIterable<CombinedScanTask> planTasks();
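  // Usage sketch: only the calls declared on this interface and on
  // org.apache.iceberg.expressions.Expressions are real; `table`, `process` and the
  // column names are placeholders.
  //
  //   TableScan scan = table.newScan()
  //       .caseSensitive(false)
  //       .select("id", "event_time")
  //       .filter(Expressions.greaterThan("id", 100));
  //   try (CloseableIterable<CombinedScanTask> tasks = scan.planTasks()) {
  //       tasks.forEach(task -> process(task));
  //   }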
/**
* Returns this scan's projection {@link Schema}.
* <p>
* If the projection schema was set directly using {@link #project(Schema)}, returns that schema.
* <p>
* If the projection schema was set by calling {@link #select(Collection)}, returns a projection
* schema that includes the selected data fields and any fields used in the filter expression.
*
* @return this scan's projection schema
*/
Schema schema();
/**
* Returns the {@link Snapshot} that will be used by this scan.
* <p>
* If the snapshot was not configured using {@link #asOfTime(long)} or {@link #useSnapshot(long)}, the current table
* snapshot will be used.
*
* @return the Snapshot this scan will use
*/
Snapshot snapshot();
/**
   * Returns whether this scan should apply column name case sensitivity as per {@link #caseSensitive(boolean)}.
* @return true if case sensitive, false otherwise.
*/
boolean isCaseSensitive();
}
| 1 | 34,208 | Let me know if this is too pervasive. It is currently a private method in `BaseTableScan`. It seems both `SparkBatchQueryScan` and `SparkMergeScan` need to know the scan-specific split size when planning tasks. Therefore, I made it open. Another approach is to move all the `planTasks` logic to scan implementations, but for the combine tasks by partition feature, it requires grouping scan tasks by partition first, instead of returning them in an iterator fashion. I'm not sure if this is OK. `SparkMergeScan` also seems to re-implemented its own plan tasks logic. | apache-iceberg | java |
@@ -1550,7 +1550,8 @@ func (c *client) flushSignal() {
func (c *client) traceMsg(msg []byte) {
maxTrace := c.srv.getOpts().MaxTracedMsgLen
if maxTrace > 0 && (len(msg)-LEN_CR_LF) > maxTrace {
- c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", msg[:maxTrace])
+ tm := fmt.Sprintf("%q", msg[:maxTrace])
+ c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", tm[1:maxTrace+1])
} else {
c.Tracef("<<- MSG_PAYLOAD: [%q]", msg[:len(msg)-LEN_CR_LF])
} | 1 | // Copyright 2012-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"net/url"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
)
// Type of client connection.
const (
// CLIENT is an end user.
CLIENT = iota
// ROUTER represents another server in the cluster.
ROUTER
// GATEWAY is a link between 2 clusters.
GATEWAY
// SYSTEM is an internal system client.
SYSTEM
// LEAF is for leaf node connections.
LEAF
// JETSTREAM is an internal jetstream client.
JETSTREAM
// ACCOUNT is for the internal client for accounts.
ACCOUNT
)
// Extended type of a CLIENT connection. This is returned by c.clientType()
// and indicate what type of client connection we are dealing with.
// If invoked on a non CLIENT connection, NON_CLIENT type is returned.
const (
// If the connection is not a CLIENT connection.
NON_CLIENT = iota
// Regular NATS client.
NATS
// MQTT client.
MQTT
// Websocket client.
WS
)
const (
// ClientProtoZero is the original Client protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
ClientProtoZero = iota
	// ClientProtoInfo signals a client can receive more than the original INFO block.
// This can be used to update clients on other cluster members, etc.
ClientProtoInfo
)
const (
pingProto = "PING" + _CRLF_
pongProto = "PONG" + _CRLF_
errProto = "-ERR '%s'" + _CRLF_
okProto = "+OK" + _CRLF_
)
func init() {
rand.Seed(time.Now().UnixNano())
}
const (
// Scratch buffer size for the processMsg() calls.
msgScratchSize = 1024
msgHeadProto = "RMSG "
msgHeadProtoLen = len(msgHeadProto)
// For controlling dynamic buffer sizes.
startBufSize = 512 // For INFO/CONNECT block
minBufSize = 64 // Smallest to shrink to for PING/PONG
maxBufSize = 65536 // 64k
shortsToShrink = 2 // Trigger to shrink dynamic buffers
maxFlushPending = 10 // Max fsps to have in order to wait for writeLoop
readLoopReport = 2 * time.Second
// Server should not send a PING (for RTT) before the first PONG has
// been sent to the client. However, in case some client libs don't
// send CONNECT+PING, cap the maximum time before server can send
// the RTT PING.
maxNoRTTPingBeforeFirstPong = 2 * time.Second
// For stalling fast producers
stallClientMinDuration = 100 * time.Millisecond
stallClientMaxDuration = time.Second
)
var readLoopReportThreshold = readLoopReport
// Represent client booleans with a bitmask
type clientFlag uint16
const (
hdrLine = "NATS/1.0\r\n"
emptyHdrLine = "NATS/1.0\r\n\r\n"
)
// Some client state represented as flags
const (
connectReceived clientFlag = 1 << iota // The CONNECT proto has been received
infoReceived // The INFO protocol has been received
firstPongSent // The first PONG has been sent
handshakeComplete // For TLS clients, indicate that the handshake is complete
flushOutbound // Marks client as having a flushOutbound call in progress.
noReconnect // Indicate that on close, this connection should not attempt a reconnect
closeConnection // Marks that closeConnection has already been called.
connMarkedClosed // Marks that markConnAsClosed has already been called.
writeLoopStarted // Marks that the writeLoop has been started.
skipFlushOnClose // Marks that flushOutbound() should not be called on connection close.
expectConnect // Marks if this connection is expected to send a CONNECT
connectProcessFinished // Marks if this connection has finished the connect process.
)
// set the flag (would be equivalent to set the boolean to true)
func (cf *clientFlag) set(c clientFlag) {
*cf |= c
}
// clear the flag (would be equivalent to set the boolean to false)
func (cf *clientFlag) clear(c clientFlag) {
*cf &= ^c
}
// isSet returns true if the flag is set, false otherwise
func (cf clientFlag) isSet(c clientFlag) bool {
return cf&c != 0
}
// setIfNotSet will set the flag `c` only if that flag was not already
// set and return true to indicate that the flag has been set. Returns
// false otherwise.
func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
if *cf&c == 0 {
*cf |= c
return true
}
return false
}
// ClosedState is the reason client was closed. This will
// be passed into calls to clearConnection, but will only
// be stored in ConnInfo for monitoring.
type ClosedState int
const (
ClientClosed = ClosedState(iota + 1)
AuthenticationTimeout
AuthenticationViolation
TLSHandshakeError
SlowConsumerPendingBytes
SlowConsumerWriteDeadline
WriteError
ReadError
ParseError
StaleConnection
ProtocolViolation
BadClientProtocolVersion
WrongPort
MaxAccountConnectionsExceeded
MaxConnectionsExceeded
MaxPayloadExceeded
MaxControlLineExceeded
MaxSubscriptionsExceeded
DuplicateRoute
RouteRemoved
ServerShutdown
AuthenticationExpired
WrongGateway
MissingAccount
Revocation
InternalClient
MsgHeaderViolation
NoRespondersRequiresHeaders
ClusterNameConflict
DuplicateRemoteLeafnodeConnection
DuplicateClientID
)
// Some flags passed to processMsgResults
const pmrNoFlag int = 0
const (
pmrCollectQueueNames int = 1 << iota
pmrIgnoreEmptyQueueFilter
pmrAllowSendFromRouteToRoute
pmrMsgImportedFromService
)
type client struct {
// Here first because of use of atomics, and memory alignment.
stats
gwReplyMapping
kind int
srv *Server
acc *Account
perms *permissions
in readCache
parseState
opts ClientOpts
rrTracking *rrTracking
mpay int32
msubs int32
mcl int32
mu sync.Mutex
cid uint64
start time.Time
nonce []byte
pubKey string
nc net.Conn
ncs atomic.Value
out outbound
user *NkeyUser
host string
port uint16
subs map[string]*subscription
replies map[string]*resp
mperms *msgDeny
darray []string
pcd map[*client]struct{}
atmr *time.Timer
ping pinfo
msgb [msgScratchSize]byte
last time.Time
headers bool
rtt time.Duration
rttStart time.Time
route *route
gw *gateway
leaf *leaf
ws *websocket
mqtt *mqtt
flags clientFlag // Compact booleans into a single field. Size will be increased when needed.
rref byte
trace bool
echo bool
noIcb bool
tags jwt.TagList
nameTag string
}
type rrTracking struct {
rmap map[string]*remoteLatency
ptmr *time.Timer
lrt time.Duration
}
// Struct for PING initiation from the server.
type pinfo struct {
tmr *time.Timer
last time.Time
out int
}
// outbound holds pending data for a socket.
type outbound struct {
p []byte // Primary write buffer
s []byte // Secondary for use post flush
nb net.Buffers // net.Buffers for writev IO
sz int32 // limit size per []byte, uses variable BufSize constants, start, min, max.
sws int32 // Number of short writes, used for dynamic resizing.
pb int64 // Total pending/queued bytes.
pm int32 // Total pending/queued messages.
fsp int32 // Flush signals that are pending per producer from readLoop's pcd.
sg *sync.Cond // To signal writeLoop that there is data to flush.
wdl time.Duration // Snapshot of write deadline.
mp int64 // Snapshot of max pending for client.
lft time.Duration // Last flush time for Write.
stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in.
}
type perm struct {
allow *Sublist
deny *Sublist
}
type permissions struct {
// Have these 2 first for memory alignment due to the use of atomic.
pcsz int32
prun int32
sub perm
pub perm
resp *ResponsePermission
pcache sync.Map
}
// This is used to dynamically track responses and reply subjects
// for dynamic permissioning.
type resp struct {
t time.Time
n int
}
// msgDeny is used when a user permission for subscriptions has a deny
// clause but a subscription could be made that is of broader scope.
// e.g. deny = "foo", but user subscribes to "*". That subscription should
// succeed but no message sent on foo should be delivered.
type msgDeny struct {
deny *Sublist
dcache map[string]bool
}
// routeTarget collects information regarding routes and queue groups for
// sending information to a remote.
type routeTarget struct {
sub *subscription
qs []byte
_qs [32]byte
}
const (
maxResultCacheSize = 512
maxDenyPermCacheSize = 256
maxPermCacheSize = 128
pruneSize = 32
routeTargetInit = 8
replyPermLimit = 4096
)
// Represent read cache booleans with a bitmask
type readCacheFlag uint16
const (
hasMappings readCacheFlag = 1 << iota // For account subject mappings.
)
// Used in readloop to cache hot subject lookups and group statistics.
type readCache struct {
// These are for clients who are bound to a single account.
genid uint64
results map[string]*SublistResult
// This is for routes and gateways to have their own L1 as well that is account aware.
pacache map[string]*perAccountCache
// This is for when we deliver messages across a route. We use this structure
// to make sure to only send one message and properly scope to queues as needed.
rts []routeTarget
prand *rand.Rand
// These are all temporary totals for an invocation of a read in readloop.
msgs int32
bytes int32
subs int32
rsz int32 // Read buffer size
srs int32 // Short reads, used for dynamic buffer resizing.
// These are for readcache flags to avoid locks.
flags readCacheFlag
}
// set the flag (equivalent to setting the boolean to true)
func (rcf *readCacheFlag) set(c readCacheFlag) {
*rcf |= c
}
// clear the flag (equivalent to setting the boolean to false)
func (rcf *readCacheFlag) clear(c readCacheFlag) {
*rcf &= ^c
}
// isSet returns true if the flag is set, false otherwise
func (rcf readCacheFlag) isSet(c readCacheFlag) bool {
return rcf&c != 0
}
const (
defaultMaxPerAccountCacheSize = 4096
defaultPrunePerAccountCacheSize = 256
defaultClosedSubsCheckInterval = 5 * time.Minute
)
var (
maxPerAccountCacheSize = defaultMaxPerAccountCacheSize
prunePerAccountCacheSize = defaultPrunePerAccountCacheSize
closedSubsCheckInterval = defaultClosedSubsCheckInterval
)
// perAccountCache is for L1 semantics for inbound messages from a route or gateway to mimic the performance of clients.
type perAccountCache struct {
acc *Account
results *SublistResult
genid uint64
}
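// String returns the cached string representation of this connection (stored
// in c.ncs during initClient and processConnect), or the empty string if it
// has not been set yet.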
func (c *client) String() (id string) {
loaded := c.ncs.Load()
if loaded != nil {
return loaded.(string)
}
return _EMPTY_
}
// GetName returns the application supplied name for the connection.
func (c *client) GetName() string {
c.mu.Lock()
name := c.opts.Name
c.mu.Unlock()
return name
}
// GetOpts returns the client options provided by the application.
func (c *client) GetOpts() *ClientOpts {
return &c.opts
}
// GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
// otherwise. Implements the ClientAuth interface.
func (c *client) GetTLSConnectionState() *tls.ConnectionState {
c.mu.Lock()
defer c.mu.Unlock()
if c.nc == nil {
return nil
}
tc, ok := c.nc.(*tls.Conn)
if !ok {
return nil
}
state := tc.ConnectionState()
return &state
}
// For CLIENT connections, this function returns the client type, that is,
// NATS (for regular clients), MQTT or WS for websocket.
// If this is invoked for a non CLIENT connection, NON_CLIENT is returned.
//
// This function does not lock the client and accesses fields that are supposed
// to be immutable and therefore it can be invoked outside of the client's lock.
func (c *client) clientType() int {
switch c.kind {
case CLIENT:
if c.isMqtt() {
return MQTT
} else if c.isWebsocket() {
return WS
}
return NATS
default:
return NON_CLIENT
}
}
var clientTypeStringMap = map[int]string{
NON_CLIENT: _EMPTY_,
NATS: "nats",
WS: "websocket",
MQTT: "mqtt",
}
func (c *client) clientTypeString() string {
if typeStringVal, ok := clientTypeStringMap[c.clientType()]; ok {
return typeStringVal
}
return _EMPTY_
}
// This is the main subscription struct that indicates
// interest in published messages.
// FIXME(dlc) - This is getting bloated for normal subs, need
// to optionally have an opts section for non-normal stuff.
type subscription struct {
client *client
im *streamImport // This is for import stream support.
rsi bool
si bool
shadow []*subscription // This is to track shadowed accounts.
icb msgHandler
subject []byte
queue []byte
sid []byte
origin []byte
nm int64
max int64
qw int32
closed int32
mqtt *mqttSub
}
// Indicate that this subscription is closed.
// This is used in pruning of route and gateway cache items.
func (s *subscription) close() {
atomic.StoreInt32(&s.closed, 1)
}
// Return true if this subscription was unsubscribed
// or its connection has been closed.
func (s *subscription) isClosed() bool {
return atomic.LoadInt32(&s.closed) == 1
}
type ClientOpts struct {
Echo bool `json:"echo"`
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
TLSRequired bool `json:"tls_required"`
Nkey string `json:"nkey,omitempty"`
JWT string `json:"jwt,omitempty"`
Sig string `json:"sig,omitempty"`
Token string `json:"auth_token,omitempty"`
Username string `json:"user,omitempty"`
Password string `json:"pass,omitempty"`
Name string `json:"name"`
Lang string `json:"lang"`
Version string `json:"version"`
Protocol int `json:"protocol"`
Account string `json:"account,omitempty"`
AccountNew bool `json:"new_account,omitempty"`
Headers bool `json:"headers,omitempty"`
NoResponders bool `json:"no_responders,omitempty"`
// Routes and Leafnodes only
Import *SubjectPermission `json:"import,omitempty"`
Export *SubjectPermission `json:"export,omitempty"`
}
var defaultOpts = ClientOpts{Verbose: true, Pedantic: true, Echo: true}
var internalOpts = ClientOpts{Verbose: false, Pedantic: false, Echo: false}
func (c *client) setTraceLevel() {
if c.kind == SYSTEM && !(atomic.LoadInt32(&c.srv.logging.traceSysAcc) != 0) {
c.trace = false
} else {
c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)
}
}
// Lock should be held
func (c *client) initClient() {
s := c.srv
c.cid = atomic.AddUint64(&s.gcid, 1)
// Outbound data structure setup
c.out.sz = startBufSize
c.out.sg = sync.NewCond(&(c.mu))
opts := s.getOpts()
// Snapshots to avoid mutex access in fast paths.
c.out.wdl = opts.WriteDeadline
c.out.mp = opts.MaxPending
// Snapshot max control line since currently can not be changed on reload and we
// were checking it on each call to parse. If this changes and we allow MaxControlLine
// to be reloaded without restart, this code will need to change.
c.mcl = int32(opts.MaxControlLine)
if c.mcl == 0 {
c.mcl = MAX_CONTROL_LINE_SIZE
}
c.subs = make(map[string]*subscription)
c.echo = true
c.setTraceLevel()
// This is a scratch buffer used for processMsg().
// The msg header starts with "RMSG ", which can be used
// for both local delivery and routes.
// In bytes that is [82 77 83 71 32].
c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32}
// This is to track pending clients that have data to be flushed
// after we process inbound msgs from our own connection.
c.pcd = make(map[*client]struct{})
// snapshot the string version of the connection
var conn string
if c.nc != nil {
if addr := c.nc.RemoteAddr(); addr != nil {
if conn = addr.String(); conn != _EMPTY_ {
host, port, _ := net.SplitHostPort(conn)
iPort, _ := strconv.Atoi(port)
c.host, c.port = host, uint16(iPort)
// Now that we have extracted host and port, escape
// the string because it is going to be used in Sprintf
conn = strings.ReplaceAll(conn, "%", "%%")
}
}
}
switch c.kind {
case CLIENT:
switch c.clientType() {
case NATS:
c.ncs.Store(fmt.Sprintf("%s - cid:%d", conn, c.cid))
case WS:
c.ncs.Store(fmt.Sprintf("%s - wid:%d", conn, c.cid))
case MQTT:
c.ncs.Store(fmt.Sprintf("%s - mid:%d", conn, c.cid))
}
case ROUTER:
c.ncs.Store(fmt.Sprintf("%s - rid:%d", conn, c.cid))
case GATEWAY:
c.ncs.Store(fmt.Sprintf("%s - gid:%d", conn, c.cid))
case LEAF:
var ws string
if c.isWebsocket() {
ws = "_ws"
}
c.ncs.Store(fmt.Sprintf("%s - lid%s:%d", conn, ws, c.cid))
case SYSTEM:
c.ncs.Store("SYSTEM")
case JETSTREAM:
c.ncs.Store("JETSTREAM")
case ACCOUNT:
c.ncs.Store("ACCOUNT")
}
}
// RemoteAddress exposes the address of the client connection,
// or nil when not connected or unknown.
func (c *client) RemoteAddress() net.Addr {
c.mu.Lock()
defer c.mu.Unlock()
if c.nc == nil {
return nil
}
return c.nc.RemoteAddr()
}
// Helper function to report errors.
func (c *client) reportErrRegisterAccount(acc *Account, err error) {
if err == ErrTooManyAccountConnections {
c.maxAccountConnExceeded()
return
}
c.Errorf("Problem registering with account %q: %s", acc.Name, err)
c.sendErr("Failed Account Registration")
}
// Kind returns the client kind and will be one of the defined constants like CLIENT, ROUTER, GATEWAY, LEAF
func (c *client) Kind() int {
c.mu.Lock()
kind := c.kind
c.mu.Unlock()
return kind
}
// registerWithAccount will register the given user with a specific
// account. This will change the subject namespace.
func (c *client) registerWithAccount(acc *Account) error {
if acc == nil || acc.sl == nil {
return ErrBadAccount
}
// If we were previously registered, usually to $G, do accounting here to remove.
if c.acc != nil {
if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
c.srv.decActiveAccounts()
}
}
c.mu.Lock()
kind := c.kind
srv := c.srv
c.acc = acc
c.applyAccountLimits()
c.mu.Unlock()
// Check if we have a max connections violation
if kind == CLIENT && acc.MaxTotalConnectionsReached() {
return ErrTooManyAccountConnections
} else if kind == LEAF && acc.MaxTotalLeafNodesReached() {
return ErrTooManyAccountConnections
}
// Add in new one.
if prev := acc.addClient(c); prev == 0 && srv != nil {
srv.incActiveAccounts()
}
return nil
}
// Helper to determine if we have met or exceeded max subs.
func (c *client) subsAtLimit() bool {
return c.msubs != jwt.NoLimit && len(c.subs) >= int(c.msubs)
}
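// minLimit lowers *value to limit when the limit is more restrictive, treating
// jwt.NoLimit as unlimited, and returns true if *value was changed.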
func minLimit(value *int32, limit int32) bool {
if *value != jwt.NoLimit {
if limit != jwt.NoLimit {
if limit < *value {
*value = limit
return true
}
}
} else if limit != jwt.NoLimit {
*value = limit
return true
}
return false
}
// Apply account limits
// Lock is held on entry.
// FIXME(dlc) - Should server be able to override here?
func (c *client) applyAccountLimits() {
if c.acc == nil || (c.kind != CLIENT && c.kind != LEAF) {
return
}
c.mpay = jwt.NoLimit
c.msubs = jwt.NoLimit
if c.opts.JWT != _EMPTY_ { // user jwt implies account
if uc, _ := jwt.DecodeUserClaims(c.opts.JWT); uc != nil {
c.mpay = int32(uc.Limits.Payload)
c.msubs = int32(uc.Limits.Subs)
if uc.IssuerAccount != _EMPTY_ && uc.IssuerAccount != uc.Issuer {
if scope, ok := c.acc.signingKeys[uc.Issuer]; ok {
if userScope, ok := scope.(*jwt.UserScope); ok {
// if signing key disappeared or changed and we don't get here, the client will be disconnected
c.mpay = int32(userScope.Template.Limits.Payload)
c.msubs = int32(userScope.Template.Limits.Subs)
}
}
}
}
}
minLimit(&c.mpay, c.acc.mpay)
minLimit(&c.msubs, c.acc.msubs)
s := c.srv
opts := s.getOpts()
mPay := opts.MaxPayload
// options encode unlimited differently
if mPay == 0 {
mPay = jwt.NoLimit
}
mSubs := int32(opts.MaxSubs)
if mSubs == 0 {
mSubs = jwt.NoLimit
}
wasUnlimited := c.mpay == jwt.NoLimit
if minLimit(&c.mpay, mPay) && !wasUnlimited {
c.Errorf("Max Payload set to %d from server overrides account or user config", opts.MaxPayload)
}
wasUnlimited = c.msubs == jwt.NoLimit
if minLimit(&c.msubs, mSubs) && !wasUnlimited {
c.Errorf("Max Subscriptions set to %d from server overrides account or user config", opts.MaxSubs)
}
if c.subsAtLimit() {
go func() {
c.maxSubsExceeded()
time.Sleep(20 * time.Millisecond)
c.closeConnection(MaxSubscriptionsExceeded)
}()
}
}
// RegisterUser allows auth to call back into a new client
// with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
func (c *client) RegisterUser(user *User) {
// Register with proper account and sublist.
if user.Account != nil {
if err := c.registerWithAccount(user.Account); err != nil {
c.reportErrRegisterAccount(user.Account, err)
return
}
}
c.mu.Lock()
// Assign permissions.
if user.Permissions == nil {
// Reset perms to nil in case client previously had them.
c.perms = nil
c.mperms = nil
} else {
c.setPermissions(user.Permissions)
}
// allows custom authenticators to set a username to be reported in
// server events and more
if user.Username != _EMPTY_ {
c.opts.Username = user.Username
}
c.mu.Unlock()
}
// RegisterNkeyUser allows auth to call back into a new nkey
// client with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
func (c *client) RegisterNkeyUser(user *NkeyUser) error {
// Register with proper account and sublist.
if user.Account != nil {
if err := c.registerWithAccount(user.Account); err != nil {
c.reportErrRegisterAccount(user.Account, err)
return err
}
}
c.mu.Lock()
c.user = user
// Assign permissions.
if user.Permissions == nil {
// Reset perms to nil in case client previously had them.
c.perms = nil
c.mperms = nil
} else {
c.setPermissions(user.Permissions)
}
c.mu.Unlock()
return nil
}
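// splitSubjectQueue splits a subject/queue permission entry, e.g. "foo bar",
// into its subject ("foo") and optional queue ("bar") parts.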
func splitSubjectQueue(sq string) ([]byte, []byte, error) {
vals := strings.Fields(strings.TrimSpace(sq))
s := []byte(vals[0])
var q []byte
if len(vals) == 2 {
q = []byte(vals[1])
} else if len(vals) > 2 {
return nil, nil, fmt.Errorf("invalid subject-queue %q", sq)
}
return s, q, nil
}
// Initializes client.perms structure.
// Lock is held on entry.
func (c *client) setPermissions(perms *Permissions) {
if perms == nil {
return
}
c.perms = &permissions{}
// Loop over publish permissions
if perms.Publish != nil {
if perms.Publish.Allow != nil {
c.perms.pub.allow = NewSublistWithCache()
}
for _, pubSubject := range perms.Publish.Allow {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.allow.Insert(sub)
}
if len(perms.Publish.Deny) > 0 {
c.perms.pub.deny = NewSublistWithCache()
}
for _, pubSubject := range perms.Publish.Deny {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.deny.Insert(sub)
}
}
// Check if we are allowed to send responses.
if perms.Response != nil {
rp := *perms.Response
c.perms.resp = &rp
c.replies = make(map[string]*resp)
}
// Loop over subscribe permissions
if perms.Subscribe != nil {
var err error
if len(perms.Subscribe.Allow) > 0 {
c.perms.sub.allow = NewSublistWithCache()
}
for _, subSubject := range perms.Subscribe.Allow {
sub := &subscription{}
sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
if err != nil {
c.Errorf("%s", err.Error())
continue
}
c.perms.sub.allow.Insert(sub)
}
if len(perms.Subscribe.Deny) > 0 {
c.perms.sub.deny = NewSublistWithCache()
// Also hold onto this array for later.
c.darray = perms.Subscribe.Deny
}
for _, subSubject := range perms.Subscribe.Deny {
sub := &subscription{}
sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
if err != nil {
c.Errorf("%s", err.Error())
continue
}
c.perms.sub.deny.Insert(sub)
}
}
// If we are a leafnode and we are the hub copy the extracted perms
// to resend back to soliciting server. These are reversed from the
// way routes interpret them since this is how the soliciting server
// will receive these back in an update INFO.
if c.isHubLeafNode() {
c.opts.Import = perms.Subscribe
c.opts.Export = perms.Publish
}
}
// Merge client.perms structure with additional pub deny permissions
// Lock is held on entry.
func (c *client) mergePubDenyPermissions(denyPubs []string) {
if len(denyPubs) == 0 {
return
}
if c.perms == nil {
c.perms = &permissions{}
}
if c.perms.pub.deny == nil {
c.perms.pub.deny = NewSublistWithCache()
}
for _, pubSubject := range denyPubs {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.deny.Insert(sub)
}
}
// Check to see if we have an expiration for the user JWT via base claims.
// FIXME(dlc) - Clear on connect with new JWT.
func (c *client) setExpiration(claims *jwt.ClaimsData, validFor time.Duration) {
if claims.Expires == 0 {
if validFor != 0 {
c.setExpirationTimer(validFor)
}
return
}
expiresAt := time.Duration(0)
tn := time.Now().Unix()
if claims.Expires > tn {
expiresAt = time.Duration(claims.Expires-tn) * time.Second
}
if validFor != 0 && validFor < expiresAt {
c.setExpirationTimer(validFor)
} else {
c.setExpirationTimer(expiresAt)
}
}
// This will load up the deny structure used for filtering delivered
// messages based on a deny clause for subscriptions.
// Lock should be held.
func (c *client) loadMsgDenyFilter() {
c.mperms = &msgDeny{NewSublistWithCache(), make(map[string]bool)}
for _, sub := range c.darray {
c.mperms.deny.Insert(&subscription{subject: []byte(sub)})
}
}
// writeLoop is the main socket write functionality.
// Runs in its own Go routine.
func (c *client) writeLoop() {
defer c.srv.grWG.Done()
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return
}
c.flags.set(writeLoopStarted)
c.mu.Unlock()
// Used to check that we did flush from last wake up.
waitOk := true
var closed bool
// Main loop. Will wait to be signaled and then will use
// buffered outbound structure for efficient writev to the underlying socket.
for {
c.mu.Lock()
if closed = c.isClosed(); !closed {
owtf := c.out.fsp > 0 && c.out.pb < maxBufSize && c.out.fsp < maxFlushPending
if waitOk && (c.out.pb == 0 || owtf) {
c.out.sg.Wait()
// Check that connection has not been closed while lock was released
// in the conditional wait.
closed = c.isClosed()
}
}
if closed {
c.flushAndClose(false)
c.mu.Unlock()
// We should always call closeConnection() to ensure that state is
// properly cleaned-up. It will be a no-op if already done.
c.closeConnection(WriteError)
// Now explicitly call reconnect(). Thanks to ref counting, we know
// that the reconnect will execute only after connection has been
// removed from the server state.
c.reconnect()
return
}
// Flush data
waitOk = c.flushOutbound()
c.mu.Unlock()
}
}
// flushClients will make sure to flush any clients we may have
// sent to during processing. We pass in a budget as a time.Duration
// for how much time to spend in place flushing for this client.
func (c *client) flushClients(budget time.Duration) time.Time {
return c.flushClientsWithCheck(budget, false)
}
// flushClientsWithCheck will make sure to flush any clients we may have
// sent to during processing. We pass in a budget as a time.Duration
// for how much time to spend in place flushing for this client.
// The 'clientsKindOnly' boolean indicates that flushOutbound should be run
// in place only when both this client and the pending client are of kind CLIENT.
// flushOutbound() could block the caller up to the write deadline when
// the receiving client cannot drain data from the socket fast enough.
func (c *client) flushClientsWithCheck(budget time.Duration, clientsKindOnly bool) time.Time {
last := time.Now().UTC()
// Check pending clients for flush.
for cp := range c.pcd {
// TODO(dlc) - Wonder if it makes more sense to create a new map?
delete(c.pcd, cp)
// Queue up a flush for those in the set
cp.mu.Lock()
// Update last activity for message delivery
cp.last = last
// Remove ourselves from the pending list.
cp.out.fsp--
// Just ignore if this was closed.
if cp.isClosed() {
cp.mu.Unlock()
continue
}
if budget > 0 && (!clientsKindOnly || c.kind == CLIENT && cp.kind == CLIENT) && cp.out.lft < 2*budget && cp.flushOutbound() {
budget -= cp.out.lft
} else {
cp.flushSignal()
}
cp.mu.Unlock()
}
return last
}
// readLoop is the main socket read functionality.
// Runs in its own Go routine.
func (c *client) readLoop(pre []byte) {
// Grab the connection off the client, it will be cleared on a close.
// We check for that after the loop, but want to avoid a nil dereference
c.mu.Lock()
s := c.srv
defer s.grWG.Done()
if c.isClosed() {
c.mu.Unlock()
return
}
nc := c.nc
ws := c.isWebsocket()
if c.isMqtt() {
c.mqtt.r = &mqttReader{reader: nc}
}
c.in.rsz = startBufSize
// Check the per-account-cache for closed subscriptions
cpacc := c.kind == ROUTER || c.kind == GATEWAY
// Last per-account-cache check for closed subscriptions
lpacc := time.Now()
acc := c.acc
var masking bool
if ws {
masking = c.ws.maskread
}
c.mu.Unlock()
defer func() {
if c.isMqtt() {
s.mqttHandleClosedClient(c)
}
// These are used only in the readloop, so we can set them to nil
// on exit of the readLoop.
c.in.results, c.in.pacache = nil, nil
}()
// Start read buffer.
b := make([]byte, c.in.rsz)
// Websocket clients will return several slices if there are multiple
// websocket frames in the blind read. For non WS clients though, we
// will always have 1 slice per loop iteration. So we define this here
// so non WS clients will use bufs[0] = b[:n].
var _bufs [1][]byte
bufs := _bufs[:1]
var wsr *wsReadInfo
if ws {
wsr = &wsReadInfo{mask: masking}
wsr.init()
}
for {
var n int
var err error
// If we have a pre buffer parse that first.
if len(pre) > 0 {
b = pre
n = len(pre)
pre = nil
} else {
n, err = nc.Read(b)
// If we have any data we will try to parse and exit at the end.
if n == 0 && err != nil {
c.closeConnection(closedStateForErr(err))
return
}
}
if ws {
bufs, err = c.wsRead(wsr, nc, b[:n])
if bufs == nil && err != nil {
if err != io.EOF {
c.Errorf("read error: %v", err)
}
c.closeConnection(closedStateForErr(err))
} else if bufs == nil {
continue
}
} else {
bufs[0] = b[:n]
}
start := time.Now()
// Check if the account has mappings and if so set the local readcache flag.
// We check here to make sure any changes such as config reload are reflected here.
if c.kind == CLIENT || c.kind == LEAF {
if acc.hasMappings() {
c.in.flags.set(hasMappings)
} else {
c.in.flags.clear(hasMappings)
}
}
// Clear inbound stats cache
c.in.msgs = 0
c.in.bytes = 0
c.in.subs = 0
// Main call into parser for inbound data. This will generate callouts
// to process messages, etc.
for i := 0; i < len(bufs); i++ {
if err := c.parse(bufs[i]); err != nil {
if dur := time.Since(start); dur >= readLoopReportThreshold {
c.Warnf("Readloop processing time: %v", dur)
}
// Need to call flushClients because some of the clients have been
// assigned messages and their "fsp" incremented, and need now to be
// decremented and their writeLoop signaled.
c.flushClients(0)
// handled inline
if err != ErrMaxPayload && err != ErrAuthentication {
c.Error(err)
c.closeConnection(ProtocolViolation)
}
return
}
}
// Updates stats for client and server that were collected
// from parsing through the buffer.
if c.in.msgs > 0 {
atomic.AddInt64(&c.inMsgs, int64(c.in.msgs))
atomic.AddInt64(&c.inBytes, int64(c.in.bytes))
atomic.AddInt64(&s.inMsgs, int64(c.in.msgs))
atomic.AddInt64(&s.inBytes, int64(c.in.bytes))
}
// Budget to spend in place flushing outbound data.
// Client will be checked on several fronts to see
// if applicable. Routes and Gateways will never
// spend time flushing outbound in place.
var budget time.Duration
if c.kind == CLIENT {
budget = time.Millisecond
}
// Flush, or signal to writeLoop to flush to socket.
last := c.flushClientsWithCheck(budget, true)
// Update activity, check read buffer size.
c.mu.Lock()
// Activity based on interest changes or data/msgs.
if c.in.msgs > 0 || c.in.subs > 0 {
c.last = last
}
if n >= cap(b) {
c.in.srs = 0
} else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to.
c.in.srs++
}
// Update read buffer size as/if needed.
if n >= cap(b) && cap(b) < maxBufSize {
// Grow
c.in.rsz = int32(cap(b) * 2)
b = make([]byte, c.in.rsz)
} else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink {
// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
c.in.rsz = int32(cap(b) / 2)
b = make([]byte, c.in.rsz)
}
// re-snapshot the account since it can change during reload, etc.
acc = c.acc
// Refresh nc because in some cases, we have upgraded c.nc to TLS.
nc = c.nc
c.mu.Unlock()
// Connection was closed
if nc == nil {
return
}
if dur := time.Since(start); dur >= readLoopReportThreshold {
c.Warnf("Readloop processing time: %v", dur)
}
// We could have had a read error from above but still read some data.
// If so do the close here unconditionally.
if err != nil {
c.closeConnection(closedStateForErr(err))
return
}
if cpacc && (start.Sub(lpacc)) >= closedSubsCheckInterval {
c.pruneClosedSubFromPerAccountCache()
lpacc = time.Now()
}
}
}
// Returns the appropriate closed state for a given read error.
func closedStateForErr(err error) ClosedState {
if err == io.EOF {
return ClientClosed
}
return ReadError
}
// collapsePtoNB will place primary onto nb buffer as needed in prep for WriteTo.
// This will return a copy on purpose.
func (c *client) collapsePtoNB() (net.Buffers, int64) {
if c.isWebsocket() {
return c.wsCollapsePtoNB()
}
if c.out.p != nil {
p := c.out.p
c.out.p = nil
return append(c.out.nb, p), c.out.pb
}
return c.out.nb, c.out.pb
}
// This will handle the fixup needed on a partial write.
// Assume pending has been already calculated correctly.
func (c *client) handlePartialWrite(pnb net.Buffers) {
if c.isWebsocket() {
c.ws.frames = append(pnb, c.ws.frames...)
return
}
nb, _ := c.collapsePtoNB()
// The partial needs to be first, so append nb to pnb
c.out.nb = append(pnb, nb...)
}
// flushOutbound will flush outbound buffer to a client.
// Will return true if data was attempted to be written.
// Lock must be held
func (c *client) flushOutbound() bool {
if c.flags.isSet(flushOutbound) {
// For CLIENT connections, it is possible that the readLoop calls
// flushOutbound(). If writeLoop and readLoop compete and we are
// here we should release the lock to reduce the risk of spinning.
c.mu.Unlock()
runtime.Gosched()
c.mu.Lock()
return false
}
c.flags.set(flushOutbound)
defer c.flags.clear(flushOutbound)
// Check for nothing to do.
if c.nc == nil || c.srv == nil || c.out.pb == 0 {
return true // true because no need to queue a signal.
}
// Place primary on nb, assign primary to secondary, nil out nb and secondary.
nb, attempted := c.collapsePtoNB()
c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil
if nb == nil {
return true
}
// For selecting primary replacement.
cnb := nb
var lfs int
if len(cnb) > 0 {
lfs = len(cnb[0])
}
// In case it goes away after releasing the lock.
nc := c.nc
apm := c.out.pm
// Capture this (we change the value in some tests)
wdl := c.out.wdl
// Do NOT hold lock during actual IO.
c.mu.Unlock()
// flush here
start := time.Now()
// FIXME(dlc) - writev will do multiple IOs past 1024 on
// most platforms, need to account for that with deadline?
nc.SetWriteDeadline(start.Add(wdl))
// Actual write to the socket.
n, err := nb.WriteTo(nc)
nc.SetWriteDeadline(time.Time{})
lft := time.Since(start)
// Re-acquire client lock.
c.mu.Lock()
// Ignore ErrShortWrite errors, they will be handled as partials.
if err != nil && err != io.ErrShortWrite {
// Handle timeout error (slow consumer) differently
if ne, ok := err.(net.Error); ok && ne.Timeout() {
if closed := c.handleWriteTimeout(n, attempted, len(cnb)); closed {
return true
}
} else {
// Other errors will cause connection to be closed.
// For clients, report as debug but for others report as error.
report := c.Debugf
if c.kind != CLIENT {
report = c.Errorf
}
report("Error flushing: %v", err)
c.markConnAsClosed(WriteError)
return true
}
}
// Update flush time statistics.
c.out.lft = lft
// Subtract from pending bytes and messages.
c.out.pb -= n
if c.isWebsocket() {
c.ws.fs -= n
}
c.out.pm -= apm // FIXME(dlc) - this will not be totally accurate on partials.
// Check for partial writes
// TODO(dlc) - zero write with no error will cause lost message and the writeloop to spin.
if n != attempted && n > 0 {
c.handlePartialWrite(nb)
} else if int32(n) >= c.out.sz {
c.out.sws = 0
}
// Adjust based on what we wrote plus any pending.
pt := n + c.out.pb
// Adjust sz as needed downward, keeping power of 2.
// We do this at a slower rate.
if pt < int64(c.out.sz) && c.out.sz > minBufSize {
c.out.sws++
if c.out.sws > shortsToShrink {
c.out.sz >>= 1
}
}
// Adjust sz as needed upward, keeping power of 2.
if pt > int64(c.out.sz) && c.out.sz < maxBufSize {
c.out.sz <<= 1
}
// Check to see if we can reuse buffers.
if lfs != 0 && n >= int64(lfs) {
oldp := cnb[0][:0]
if cap(oldp) >= int(c.out.sz) {
// Replace primary or secondary if they are nil, reusing same buffer.
if c.out.p == nil {
c.out.p = oldp
} else if c.out.s == nil || cap(c.out.s) < int(c.out.sz) {
c.out.s = oldp
}
}
}
// Check that if there is still data to send and writeLoop is in wait,
// then we need to signal.
if c.out.pb > 0 {
c.flushSignal()
}
// Check if we have a stalled gate and if so and we are recovering release
// any stalled producers. Only kind==CLIENT will stall.
if c.out.stc != nil && (n == attempted || c.out.pb < c.out.mp/2) {
close(c.out.stc)
c.out.stc = nil
}
return true
}
// This is invoked from flushOutbound() for io/timeout error (slow consumer).
// Returns a boolean to indicate if the connection has been closed or not.
// Lock is held on entry.
func (c *client) handleWriteTimeout(written, attempted int64, numChunks int) bool {
if tlsConn, ok := c.nc.(*tls.Conn); ok {
if !tlsConn.ConnectionState().HandshakeComplete {
// Likely a TLSTimeout error instead...
c.markConnAsClosed(TLSHandshakeError)
// Would need to coordinate with tlstimeout()
// to avoid double logging, so skip logging
// here, and don't report a slow consumer error.
return true
}
} else if c.flags.isSet(expectConnect) && !c.flags.isSet(connectReceived) {
// Under some conditions, a connection may hit a slow consumer write deadline
// before the authorization timeout. If that is the case, then we handle
// as slow consumer though we do not increase the counter as that can be
// misleading.
c.markConnAsClosed(SlowConsumerWriteDeadline)
return true
}
// Slow consumer here..
atomic.AddInt64(&c.srv.slowConsumers, 1)
c.Noticef("Slow Consumer Detected: WriteDeadline of %v exceeded with %d chunks of %d total bytes.",
c.out.wdl, numChunks, attempted)
// We always close CLIENT connections, or when nothing was written at all...
if c.kind == CLIENT || written == 0 {
c.markConnAsClosed(SlowConsumerWriteDeadline)
return true
}
return false
}
// Marks this connection as closed with the given reason.
// Sets the connMarkedClosed flag and skipFlushOnClose depending on the reason.
// Depending on the kind of connection, the connection will be saved.
// If a writeLoop has been started, the final flush will be done there, otherwise
// flush and close of the TCP connection is done here in place.
// Lock is held on entry.
func (c *client) markConnAsClosed(reason ClosedState) {
// Possibly set the skipFlushOnClose flag even if the connection has already been
// marked as closed. The rationale is that a connection may be closed with
// a reason that justifies a flush (say after sending an -ERR), but then
// the flushOutbound() gets a write error. If that happens, connection
// being lost, there is no reason to attempt to flush again during the
// teardown when the writeLoop exits.
var skipFlush bool
switch reason {
case ReadError, WriteError, SlowConsumerPendingBytes, SlowConsumerWriteDeadline, TLSHandshakeError:
c.flags.set(skipFlushOnClose)
skipFlush = true
}
if c.flags.isSet(connMarkedClosed) {
return
}
c.flags.set(connMarkedClosed)
// For a websocket client, unless we are told not to flush, enqueue
// a websocket CloseMessage based on the reason.
if !skipFlush && c.isWebsocket() && !c.ws.closeSent {
c.wsEnqueueCloseMessage(reason)
}
// Be consistent with the creation: for routes, gateways and leaf,
// we use Noticef on create, so use that too for delete.
if c.srv != nil {
if c.kind == LEAF {
c.Noticef("%s connection closed: %s account: %s", c.kindString(), reason, c.acc.traceLabel())
} else if c.kind == ROUTER || c.kind == GATEWAY {
c.Noticef("%s connection closed: %s", c.kindString(), reason)
} else { // Client, System, Jetstream, and Account connections.
c.Debugf("%s connection closed: %s", c.kindString(), reason)
}
}
// Save off the connection if its a client or leafnode.
if c.kind == CLIENT || c.kind == LEAF {
if nc := c.nc; nc != nil && c.srv != nil {
// TODO: May want to send events to single go routine instead
// of creating a new go routine for each save.
go c.srv.saveClosedClient(c, nc, reason)
}
}
// If writeLoop exists, let it do the final flush, close and teardown.
if c.flags.isSet(writeLoopStarted) {
// Since we want the writeLoop to do the final flush and tcp close,
// we want the reconnect to be done there too. However, it shouldn't
// happen before the connection has been removed from the server
// state (end of closeConnection()). This ref count allows us to
// guarantee that.
c.rref++
c.flushSignal()
return
}
// Flush (if skipFlushOnClose is not set) and close in place. If flushing,
// use a small WriteDeadline.
c.flushAndClose(true)
}
// flushSignal will use server to queue the flush IO operation to a pool of flushers.
// Lock must be held.
func (c *client) flushSignal() {
c.out.sg.Signal()
}
// Traces a message.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceMsg(msg []byte) {
maxTrace := c.srv.getOpts().MaxTracedMsgLen
if maxTrace > 0 && (len(msg)-LEN_CR_LF) > maxTrace {
c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", msg[:maxTrace])
} else {
c.Tracef("<<- MSG_PAYLOAD: [%q]", msg[:len(msg)-LEN_CR_LF])
}
}
// Traces an incoming operation.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceInOp(op string, arg []byte) {
c.traceOp("<<- %s", op, arg)
}
// Traces an outgoing operation.
// Will NOT check if tracing is enabled, does NOT need the client lock.
func (c *client) traceOutOp(op string, arg []byte) {
c.traceOp("->> %s", op, arg)
}
func (c *client) traceOp(format, op string, arg []byte) {
opa := []interface{}{}
if op != "" {
opa = append(opa, op)
}
if arg != nil {
opa = append(opa, string(arg))
}
c.Tracef(format, opa)
}
// Process the information messages from Clients and other Routes.
func (c *client) processInfo(arg []byte) error {
info := Info{}
if err := json.Unmarshal(arg, &info); err != nil {
return err
}
switch c.kind {
case ROUTER:
c.processRouteInfo(&info)
case GATEWAY:
c.processGatewayInfo(&info)
case LEAF:
c.processLeafnodeInfo(&info)
}
return nil
}
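// processErr handles an -ERR protocol from the remote side. The error is logged
// according to the connection kind and the connection is closed, except for
// leafnodes which handle the error without closing the connection here.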
func (c *client) processErr(errStr string) {
close := true
switch c.kind {
case CLIENT:
c.Errorf("Client Error %s", errStr)
case ROUTER:
c.Errorf("Route Error %s", errStr)
case GATEWAY:
c.Errorf("Gateway Error %s", errStr)
case LEAF:
c.Errorf("Leafnode Error %s", errStr)
c.leafProcessErr(errStr)
close = false
case JETSTREAM:
c.Errorf("JetStream Error %s", errStr)
}
if close {
c.closeConnection(ParseError)
}
}
// Password pattern matcher.
var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`)
// removePassFromTrace removes any notion of passwords from trace
// messages for logging.
func removePassFromTrace(arg []byte) []byte {
if !bytes.Contains(arg, []byte(`pass`)) {
return arg
}
// Take a copy of the connect proto just for the trace message.
var _arg [4096]byte
buf := append(_arg[:0], arg...)
m := passPat.FindAllSubmatchIndex(buf, -1)
if len(m) == 0 {
return arg
}
redactedPass := []byte("[REDACTED]")
for _, i := range m {
if len(i) < 4 {
continue
}
start := i[2]
end := i[3]
// Replace password substring.
buf = append(buf[:start], append(redactedPass, buf[end:]...)...)
break
}
return buf
}
// Returns the RTT by computing the elapsed time since now and `start`.
// On Windows VM where I (IK) run tests, time.Since() will return 0
// (I suspect some time granularity issues). So return at minimum 1ns.
func computeRTT(start time.Time) time.Duration {
rtt := time.Since(start)
if rtt <= 0 {
rtt = time.Nanosecond
}
return rtt
}
// processConnect will process a client connect op.
func (c *client) processConnect(arg []byte) error {
supportsHeaders := c.srv.supportsHeaders()
c.mu.Lock()
// If we can't stop the timer because the callback is in progress...
if !c.clearAuthTimer() {
// wait for it to finish and handle sending the failure back to
// the client.
for !c.isClosed() {
c.mu.Unlock()
time.Sleep(25 * time.Millisecond)
c.mu.Lock()
}
c.mu.Unlock()
return nil
}
c.last = time.Now().UTC()
// Estimate RTT to start.
if c.kind == CLIENT {
c.rtt = computeRTT(c.start)
if c.srv != nil {
c.clearPingTimer()
c.srv.setFirstPingTimer(c)
}
}
kind := c.kind
srv := c.srv
// Moved unmarshalling of clients' Options under the lock.
// The client has already been added to the server map, so it is possible
// that other routines lookup the client, and access its options under
// the client's lock, so unmarshalling the options outside of the lock
// would cause data RACEs.
if err := json.Unmarshal(arg, &c.opts); err != nil {
c.mu.Unlock()
return err
}
// Indicate that the CONNECT protocol has been received, and that the
// server now knows which protocol this client supports.
c.flags.set(connectReceived)
// Capture these under lock
c.echo = c.opts.Echo
proto := c.opts.Protocol
verbose := c.opts.Verbose
lang := c.opts.Lang
account := c.opts.Account
accountNew := c.opts.AccountNew
if c.kind == CLIENT {
var ncs string
if c.opts.Version != _EMPTY_ {
ncs = fmt.Sprintf("v%s", c.opts.Version)
}
if c.opts.Lang != _EMPTY_ {
if c.opts.Version == _EMPTY_ {
ncs = c.opts.Lang
} else {
ncs = fmt.Sprintf("%s:%s", ncs, c.opts.Lang)
}
}
if c.opts.Name != _EMPTY_ {
if c.opts.Version == _EMPTY_ && c.opts.Lang == _EMPTY_ {
ncs = c.opts.Name
} else {
ncs = fmt.Sprintf("%s:%s", ncs, c.opts.Name)
}
}
if ncs != _EMPTY_ {
c.ncs.Store(fmt.Sprintf("%s - %q", c, ncs))
}
}
// If websocket client and JWT not in the CONNECT, use the cookie JWT (possibly empty).
if ws := c.ws; ws != nil && c.opts.JWT == "" {
c.opts.JWT = ws.cookieJwt
}
// when not in operator mode, discard the jwt
if srv != nil && srv.trustedKeys == nil {
c.opts.JWT = _EMPTY_
}
ujwt := c.opts.JWT
// For headers both client and server need to support.
c.headers = supportsHeaders && c.opts.Headers
c.mu.Unlock()
if srv != nil {
// Applicable to clients only:
// As soon as c.opts is unmarshalled and if the proto is at
// least ClientProtoInfo, we need to increment the following counter.
// This is decremented when client is removed from the server's
// clients map.
if kind == CLIENT && proto >= ClientProtoInfo {
srv.mu.Lock()
srv.cproto++
srv.mu.Unlock()
}
// Check for Auth
if ok := srv.checkAuthentication(c); !ok {
// We may fail here because we reached max limits on an account.
if ujwt != _EMPTY_ {
c.mu.Lock()
acc := c.acc
c.mu.Unlock()
srv.mu.Lock()
tooManyAccCons := acc != nil && acc != srv.gacc
srv.mu.Unlock()
if tooManyAccCons {
return ErrTooManyAccountConnections
}
}
c.authViolation()
return ErrAuthentication
}
// Check for Account designation; this section should only be used when there is no JWT.
if account != _EMPTY_ {
var acc *Account
var wasNew bool
var err error
if !srv.NewAccountsAllowed() {
acc, err = srv.LookupAccount(account)
if err != nil {
c.Errorf(err.Error())
c.sendErr(ErrMissingAccount.Error())
return err
} else if accountNew && acc != nil {
c.sendErrAndErr(ErrAccountExists.Error())
return ErrAccountExists
}
} else {
// We can create this one on the fly.
acc, wasNew = srv.LookupOrRegisterAccount(account)
if accountNew && !wasNew {
c.sendErrAndErr(ErrAccountExists.Error())
return ErrAccountExists
}
}
// If we are here we can register ourselves with the new account.
if err := c.registerWithAccount(acc); err != nil {
c.reportErrRegisterAccount(acc, err)
return ErrBadAccount
}
} else if c.acc == nil {
// By default register with the global account.
c.registerWithAccount(srv.globalAccount())
}
}
switch kind {
case CLIENT:
// Check client protocol request if it exists.
if proto < ClientProtoZero || proto > ClientProtoInfo {
c.sendErr(ErrBadClientProtocol.Error())
c.closeConnection(BadClientProtocolVersion)
return ErrBadClientProtocol
}
// Check to see that if no_responders is requested
// they have header support on as well.
c.mu.Lock()
misMatch := c.opts.NoResponders && !c.headers
c.mu.Unlock()
if misMatch {
c.sendErr(ErrNoRespondersRequiresHeaders.Error())
c.closeConnection(NoRespondersRequiresHeaders)
return ErrNoRespondersRequiresHeaders
}
if verbose {
c.sendOK()
}
case ROUTER:
// Delegate the rest of processing to the route
return c.processRouteConnect(srv, arg, lang)
case GATEWAY:
// Delegate the rest of processing to the gateway
return c.processGatewayConnect(arg)
case LEAF:
// Delegate the rest of processing to the leaf node
return c.processLeafNodeConnect(srv, arg, lang)
}
return nil
}
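// sendErrAndErr sends the error to the remote side and logs it at error level.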
func (c *client) sendErrAndErr(err string) {
c.sendErr(err)
c.Errorf(err)
}
func (c *client) sendErrAndDebug(err string) {
c.sendErr(err)
c.Debugf(err)
}
func (c *client) authTimeout() {
c.sendErrAndDebug("Authentication Timeout")
c.closeConnection(AuthenticationTimeout)
}
func (c *client) authExpired() {
c.sendErrAndDebug("User Authentication Expired")
c.closeConnection(AuthenticationExpired)
}
func (c *client) accountAuthExpired() {
c.sendErrAndDebug("Account Authentication Expired")
c.closeConnection(AuthenticationExpired)
}
func (c *client) authViolation() {
var s *Server
var hasTrustedNkeys, hasNkeys, hasUsers bool
if s = c.srv; s != nil {
s.mu.Lock()
hasTrustedNkeys = s.trustedKeys != nil
hasNkeys = s.nkeys != nil
hasUsers = s.users != nil
s.mu.Unlock()
defer s.sendAuthErrorEvent(c)
}
if hasTrustedNkeys {
c.Errorf("%v", ErrAuthentication)
} else if hasNkeys {
c.Errorf("%s - Nkey %q",
ErrAuthentication.Error(),
c.opts.Nkey)
} else if hasUsers {
c.Errorf("%s - User %q",
ErrAuthentication.Error(),
c.opts.Username)
} else {
c.Errorf(ErrAuthentication.Error())
}
if c.isMqtt() {
c.mqttEnqueueConnAck(mqttConnAckRCNotAuthorized, false)
} else {
c.sendErr("Authorization Violation")
}
c.closeConnection(AuthenticationViolation)
}
func (c *client) maxAccountConnExceeded() {
c.sendErrAndErr(ErrTooManyAccountConnections.Error())
c.closeConnection(MaxAccountConnectionsExceeded)
}
func (c *client) maxConnExceeded() {
c.sendErrAndErr(ErrTooManyConnections.Error())
c.closeConnection(MaxConnectionsExceeded)
}
func (c *client) maxSubsExceeded() {
if c.acc.shouldLogMaxSubErr() {
c.Errorf(ErrTooManySubs.Error())
}
c.sendErr(ErrTooManySubs.Error())
}
func (c *client) maxPayloadViolation(sz int, max int32) {
c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max)
c.sendErr("Maximum Payload Violation")
c.closeConnection(MaxPayloadExceeded)
}
// queueOutbound queues data for a client connection.
// Lock should be held.
func (c *client) queueOutbound(data []byte) {
// Do not keep going if closed
if c.isClosed() {
return
}
// Add to pending bytes total.
c.out.pb += int64(len(data))
// Check for slow consumer via pending bytes limit.
// ok to return here, client is going away.
if c.kind == CLIENT && c.out.pb > c.out.mp {
// Perf-wise, it looks like it is faster to optimistically add than
// to check current pb+len(data) and then add to pb.
c.out.pb -= int64(len(data))
atomic.AddInt64(&c.srv.slowConsumers, 1)
c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp)
c.markConnAsClosed(SlowConsumerPendingBytes)
return
}
if c.out.p == nil && len(data) < maxBufSize {
if c.out.sz == 0 {
c.out.sz = startBufSize
}
if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) {
c.out.p = c.out.s
c.out.s = nil
} else {
// FIXME(dlc) - make power of 2 if less than maxBufSize?
c.out.p = make([]byte, 0, c.out.sz)
}
}
// Determine if we copy or reference
available := cap(c.out.p) - len(c.out.p)
if len(data) > available {
// We can't fit everything into existing primary, but message will
// fit in next one we allocate or utilize from the secondary.
// So copy what we can.
if available > 0 && len(data) < int(c.out.sz) {
c.out.p = append(c.out.p, data[:available]...)
data = data[available:]
}
// Put the primary on the nb if it has a payload
if len(c.out.p) > 0 {
c.out.nb = append(c.out.nb, c.out.p)
c.out.p = nil
}
// TODO: It was found with LeafNode and Websocket that referencing
// the data buffer when > maxBufSize would cause corruption
// (reproduced with small maxBufSize=10 and TestLeafNodeWSNoBufferCorruption).
// So always make a copy for now.
// We will copy to primary.
if c.out.p == nil {
// Grow here
if (c.out.sz << 1) <= maxBufSize {
c.out.sz <<= 1
}
if len(data) > int(c.out.sz) {
c.out.p = make([]byte, 0, len(data))
} else {
if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { // TODO(dlc) - Size mismatch?
c.out.p = c.out.s
c.out.s = nil
} else {
c.out.p = make([]byte, 0, c.out.sz)
}
}
}
}
c.out.p = append(c.out.p, data...)
// Check here if we should create a stall channel if we are falling behind.
// We do this here since if we wait for consumer's writeLoop it could be
// too late with large number of fan in producers.
if c.out.pb > c.out.mp/2 && c.out.stc == nil {
c.out.stc = make(chan struct{})
}
}
// Assume the lock is held upon entry.
func (c *client) enqueueProtoAndFlush(proto []byte, doFlush bool) {
if c.isClosed() {
return
}
c.queueOutbound(proto)
if !(doFlush && c.flushOutbound()) {
c.flushSignal()
}
}
// Queues and then flushes the connection. This should only be called when
// the writeLoop cannot be started yet. Use enqueueProto() otherwise.
// Lock is held on entry.
func (c *client) sendProtoNow(proto []byte) {
c.enqueueProtoAndFlush(proto, true)
}
// Enqueues the given protocol and signal the writeLoop if necessary.
// Lock is held on entry.
func (c *client) enqueueProto(proto []byte) {
c.enqueueProtoAndFlush(proto, false)
}
// Assume the lock is held upon entry.
func (c *client) sendPong() {
if c.trace {
c.traceOutOp("PONG", nil)
}
c.enqueueProto([]byte(pongProto))
}
// Used to kick off a RTT measurement for latency tracking.
func (c *client) sendRTTPing() bool {
c.mu.Lock()
sent := c.sendRTTPingLocked()
c.mu.Unlock()
return sent
}
// Used to kick off a RTT measurement for latency tracking.
// This is normally called only when the caller has checked that
// the c.rtt is 0 and wants to force an update by sending a PING.
// Client lock held on entry.
func (c *client) sendRTTPingLocked() bool {
if c.isMqtt() {
return false
}
// Most client libs send a CONNECT+PING and wait for a PONG from the
// server. So if firstPongSent flag is set, it is ok for server to
// send the PING. But in case we have client libs that don't do that,
// allow the send of the PING if more than 2 secs have elapsed since
// the client TCP connection was accepted.
if !c.isClosed() &&
(c.flags.isSet(firstPongSent) || time.Since(c.start) > maxNoRTTPingBeforeFirstPong) {
c.sendPing()
return true
}
return false
}
// Assume the lock is held upon entry.
func (c *client) sendPing() {
c.rttStart = time.Now().UTC()
c.ping.out++
if c.trace {
c.traceOutOp("PING", nil)
}
c.enqueueProto([]byte(pingProto))
}
// Generates the INFO to be sent to the client with the client ID included.
// info arg will be copied since passed by value.
// Assume lock is held.
func (c *client) generateClientInfoJSON(info Info) []byte {
info.CID = c.cid
info.ClientIP = c.host
info.MaxPayload = c.mpay
if c.isWebsocket() {
info.ClientConnectURLs = info.WSConnectURLs
}
info.WSConnectURLs = nil
// Generate the info json
b, _ := json.Marshal(info)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
return bytes.Join(pcs, []byte(" "))
}
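// sendErr enqueues an -ERR protocol with the given error message. Nothing is
// enqueued for MQTT clients since the -ERR protocol does not apply to them.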
func (c *client) sendErr(err string) {
c.mu.Lock()
if c.trace {
c.traceOutOp("-ERR", []byte(err))
}
if !c.isMqtt() {
c.enqueueProto([]byte(fmt.Sprintf(errProto, err)))
}
c.mu.Unlock()
}
func (c *client) sendOK() {
c.mu.Lock()
if c.trace {
c.traceOutOp("OK", nil)
}
c.enqueueProto([]byte(okProto))
c.mu.Unlock()
}
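// processPing handles an inbound PING: it responds with a PONG and, for clients
// that support asynchronous INFO, may also send an updated INFO protocol if the
// cluster topology changed since the client connected or the max payload differs
// from the server's current setting.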
func (c *client) processPing() {
c.mu.Lock()
if c.isClosed() {
c.mu.Unlock()
return
}
c.sendPong()
// Record this to suppress us sending one if this
// is within a given time interval for activity.
c.ping.last = time.Now()
// If not a CLIENT, we are done. Also the CONNECT should
// have been received, but make sure it is so before proceeding
if c.kind != CLIENT || !c.flags.isSet(connectReceived) {
c.mu.Unlock()
return
}
// If we are here, the CONNECT has been received so we know
// if this client supports async INFO or not.
var (
checkInfoChange bool
srv = c.srv
)
// For older clients, just flip the firstPongSent flag if not already
// set and we are done.
if c.opts.Protocol < ClientProtoInfo || srv == nil {
c.flags.setIfNotSet(firstPongSent)
} else {
// This is a client that supports async INFO protocols.
// If this is the first PING (so firstPongSent is not set yet),
// we will need to check if there was a change in cluster topology
// or we have a different max payload. We will send this first before
// pong since most clients do flush after connect call.
checkInfoChange = !c.flags.isSet(firstPongSent)
}
c.mu.Unlock()
if checkInfoChange {
opts := srv.getOpts()
srv.mu.Lock()
c.mu.Lock()
// Now that we are under both locks, we can flip the flag.
// This prevents sendAsyncInfoToClients() and code here to
// send a double INFO protocol.
c.flags.set(firstPongSent)
// If there was a cluster update since this client was created,
// send an updated INFO protocol now.
if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) {
c.enqueueProto(c.generateClientInfoJSON(srv.copyInfo()))
}
c.mu.Unlock()
srv.mu.Unlock()
}
}
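// processPong handles an inbound PONG: it clears the outstanding ping count,
// updates the measured RTT and, for outbound gateway connections, triggers a
// reordering of the outbound gateway connections.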
func (c *client) processPong() {
c.mu.Lock()
c.ping.out = 0
c.rtt = computeRTT(c.rttStart)
srv := c.srv
reorderGWs := c.kind == GATEWAY && c.gw.outbound
c.mu.Unlock()
if reorderGWs {
srv.gateway.orderOutboundConnections()
}
}
// Will return the parts from the raw wire msg.
func (c *client) msgParts(data []byte) (hdr []byte, msg []byte) {
if c != nil && c.pa.hdr > 0 {
return data[:c.pa.hdr], data[c.pa.hdr:]
}
return nil, data
}
// Header pubs take form HPUB <subject> [reply] <hdr_len> <total_len>\r\n
func (c *client) processHeaderPub(arg []byte) error {
if !c.headers {
return ErrMsgHeadersNotSupported
}
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_HPUB_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 3:
c.pa.subject = args[0]
c.pa.reply = nil
c.pa.hdr = parseSize(args[1])
c.pa.size = parseSize(args[2])
c.pa.hdb = args[1]
c.pa.szb = args[2]
case 4:
c.pa.subject = args[0]
c.pa.reply = args[1]
c.pa.hdr = parseSize(args[2])
c.pa.size = parseSize(args[3])
c.pa.hdb = args[2]
c.pa.szb = args[3]
default:
return fmt.Errorf("processHeaderPub Parse Error: '%s'", arg)
}
if c.pa.hdr < 0 {
return fmt.Errorf("processHeaderPub Bad or Missing Header Size: '%s'", arg)
}
// If number overruns an int64, parseSize() will have returned a negative value
if c.pa.size < 0 {
return fmt.Errorf("processHeaderPub Bad or Missing Total Size: '%s'", arg)
}
if c.pa.hdr > c.pa.size {
return fmt.Errorf("processHeaderPub Header Size larger then TotalSize: '%s'", arg)
}
maxPayload := atomic.LoadInt32(&c.mpay)
// Use int64() to avoid int32 overrun...
if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) {
c.maxPayloadViolation(c.pa.size, maxPayload)
return ErrMaxPayload
}
if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
c.sendErr("Invalid Publish Subject")
}
return nil
}
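// processPub parses the publish arguments, PUB <subject> [reply] <size>\r\n,
// and enforces the maximum payload limit.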
func (c *client) processPub(arg []byte) error {
// Unroll splitArgs to avoid runtime/heap issues
a := [MAX_PUB_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
c.pa.arg = arg
switch len(args) {
case 2:
c.pa.subject = args[0]
c.pa.reply = nil
c.pa.size = parseSize(args[1])
c.pa.szb = args[1]
case 3:
c.pa.subject = args[0]
c.pa.reply = args[1]
c.pa.size = parseSize(args[2])
c.pa.szb = args[2]
default:
return fmt.Errorf("processPub Parse Error: '%s'", arg)
}
// If number overruns an int64, parseSize() will have returned a negative value
if c.pa.size < 0 {
return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg)
}
maxPayload := atomic.LoadInt32(&c.mpay)
// Use int64() to avoid int32 overrun...
if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) {
c.maxPayloadViolation(c.pa.size, maxPayload)
return ErrMaxPayload
}
if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
c.sendErr("Invalid Publish Subject")
}
return nil
}
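// splitArg splits a protocol argument on whitespace, using a stack-based array
// of MAX_MSG_ARGS entries to avoid allocations in the common case.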
func splitArg(arg []byte) [][]byte {
a := [MAX_MSG_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
return args
}
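// parseSub parses the subscribe arguments, SUB <subject> [queue] <sid>\r\n, and
// hands them to processSub. The noForward flag indicates that the subscription
// should not be propagated to routes, gateways or leafnodes.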
func (c *client) parseSub(argo []byte, noForward bool) error {
// Copy so we do not reference a potentially large buffer
// FIXME(dlc) - make more efficient.
arg := make([]byte, len(argo))
copy(arg, argo)
args := splitArg(arg)
var (
subject []byte
queue []byte
sid []byte
)
switch len(args) {
case 2:
subject = args[0]
queue = nil
sid = args[1]
case 3:
subject = args[0]
queue = args[1]
sid = args[2]
default:
return fmt.Errorf("processSub Parse Error: '%s'", arg)
}
// If there was an error, it has been sent to the client. We don't return an
// error here so that the connection is not closed as if it were a parsing error.
c.processSub(subject, queue, sid, nil, noForward)
return nil
}
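// processSub registers a subscription for this client, checking permissions and
// subscription limits. It delegates to processSubEx with si and rsi unset.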
func (c *client) processSub(subject, queue, bsid []byte, cb msgHandler, noForward bool) (*subscription, error) {
return c.processSubEx(subject, queue, bsid, cb, noForward, false, false)
}
func (c *client) processSubEx(subject, queue, bsid []byte, cb msgHandler, noForward, si, rsi bool) (*subscription, error) {
// Create the subscription
sub := &subscription{client: c, subject: subject, queue: queue, sid: bsid, icb: cb, si: si, rsi: rsi}
c.mu.Lock()
// Indicate activity.
c.in.subs++
// Grab connection type, account and server info.
kind := c.kind
acc := c.acc
srv := c.srv
sid := string(sub.sid)
// This check does not apply to SYSTEM or JETSTREAM or ACCOUNT clients (because they don't have a `nc`...)
if c.isClosed() && (kind != SYSTEM && kind != JETSTREAM && kind != ACCOUNT) {
c.mu.Unlock()
return nil, ErrConnectionClosed
}
// Check permissions if applicable.
if kind == CLIENT {
// First do a pass whether queue subscription is valid. This does not necessarily
// mean that it will not be able to plain subscribe.
//
// allow = ["foo"] -> can subscribe or queue subscribe to foo using any queue
// allow = ["foo v1"] -> can only queue subscribe to 'foo v1', no plain subs allowed.
// allow = ["foo", "foo v1"] -> can subscribe to 'foo' but can only queue subscribe to 'foo v1'
//
if sub.queue != nil {
if !c.canQueueSubscribe(string(sub.subject), string(sub.queue)) {
c.mu.Unlock()
c.subPermissionViolation(sub)
return nil, ErrSubscribePermissionViolation
}
} else if !c.canSubscribe(string(sub.subject)) {
c.mu.Unlock()
c.subPermissionViolation(sub)
return nil, ErrSubscribePermissionViolation
}
}
// Check if we have a maximum on the number of subscriptions.
if c.subsAtLimit() {
c.mu.Unlock()
c.maxSubsExceeded()
return nil, ErrTooManySubs
}
var updateGWs bool
var err error
// Subscribe here.
es := c.subs[sid]
if es == nil {
c.subs[sid] = sub
if acc != nil && acc.sl != nil {
err = acc.sl.Insert(sub)
if err != nil {
delete(c.subs, sid)
} else {
updateGWs = c.srv.gateway.enabled
}
}
}
// Unlocked from here onward
c.mu.Unlock()
if err != nil {
c.sendErr("Invalid Subject")
return nil, ErrMalformedSubject
} else if c.opts.Verbose && kind != SYSTEM {
c.sendOK()
}
// If it was already registered, return it.
if es != nil {
return es, nil
}
// No account just return.
if acc == nil {
return sub, nil
}
if err := c.addShadowSubscriptions(acc, sub); err != nil {
c.Errorf(err.Error())
}
if noForward {
return sub, nil
}
// If we are routing and this is a local sub, add to the route map for the associated account.
if kind == CLIENT || kind == SYSTEM || kind == JETSTREAM || kind == ACCOUNT {
srv.updateRouteSubscriptionMap(acc, sub, 1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
}
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, 1)
return sub, nil
}
// Used to pass stream import matches to addShadowSub
type ime struct {
im *streamImport
overlapSubj string
dyn bool
}
// If the client's account has stream imports and there are matches for
// this subscription's subject, then add shadow subscriptions in the
// other accounts that export this subject.
func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error {
if acc == nil {
return ErrMissingAccount
}
var (
_ims [16]ime
ims = _ims[:0]
imTsa [32]string
tokens []string
tsa [32]string
hasWC bool
tokensModified bool
)
acc.mu.RLock()
subj := string(sub.subject)
if len(acc.imports.streams) > 0 {
tokens = tokenizeSubjectIntoSlice(tsa[:0], subj)
for _, tk := range tokens {
if tk == pwcs {
hasWC = true
break
}
}
if !hasWC && tokens[len(tokens)-1] == fwcs {
hasWC = true
}
}
// Loop over the import subjects. We have 4 scenarios. If we have an
// exact match or a superset match we should use the from field from
// the import. If we are a subset or overlap, we have to dynamically calculate
// the subject. On overlap, ime requires the overlap subject.
for _, im := range acc.imports.streams {
if im.invalid {
continue
}
if subj == im.to {
ims = append(ims, ime{im, _EMPTY_, false})
continue
}
if tokensModified {
// re-tokenize subj to overwrite modifications from a previous iteration
tokens = tokenizeSubjectIntoSlice(tsa[:0], subj)
tokensModified = false
}
imTokens := tokenizeSubjectIntoSlice(imTsa[:0], im.to)
if isSubsetMatchTokenized(tokens, imTokens) {
ims = append(ims, ime{im, _EMPTY_, true})
} else if hasWC {
if isSubsetMatchTokenized(imTokens, tokens) {
ims = append(ims, ime{im, _EMPTY_, false})
} else {
imTokensLen := len(imTokens)
for i, t := range tokens {
if i >= imTokensLen {
break
}
if t == pwcs && imTokens[i] != fwcs {
tokens[i] = imTokens[i]
tokensModified = true
}
}
tokensLen := len(tokens)
lastIdx := tokensLen - 1
if tokens[lastIdx] == fwcs {
if imTokensLen >= tokensLen {
// rewrite ">" in tokens to be more specific
tokens[lastIdx] = imTokens[lastIdx]
tokensModified = true
if imTokensLen > tokensLen {
// copy even more specific parts from import
tokens = append(tokens, imTokens[tokensLen:]...)
}
}
}
if isSubsetMatchTokenized(tokens, imTokens) {
// As isSubsetMatchTokenized was already called with tokens and imTokens,
// we wouldn't be here if it were not for tokens being modified.
// Hence, Join to recompute the subject string.
ims = append(ims, ime{im, strings.Join(tokens, tsep), true})
}
}
}
}
acc.mu.RUnlock()
var shadow []*subscription
if len(ims) > 0 {
shadow = make([]*subscription, 0, len(ims))
}
// Now walk through collected stream imports that matched.
for i := 0; i < len(ims); i++ {
ime := &ims[i]
// We will create a shadow subscription.
nsub, err := c.addShadowSub(sub, ime)
if err != nil {
return err
}
shadow = append(shadow, nsub)
}
if shadow != nil {
c.mu.Lock()
sub.shadow = shadow
c.mu.Unlock()
}
return nil
}
// Add in the shadow subscription.
func (c *client) addShadowSub(sub *subscription, ime *ime) (*subscription, error) {
im := ime.im
nsub := *sub // copy
nsub.im = im
if !im.usePub && ime.dyn {
if im.rtr == nil {
im.rtr = im.tr.reverse()
}
s := string(nsub.subject)
if ime.overlapSubj != _EMPTY_ {
s = ime.overlapSubj
}
subj, err := im.rtr.transformSubject(s)
if err != nil {
return nil, err
}
nsub.subject = []byte(subj)
} else if !im.usePub || !ime.dyn {
if ime.overlapSubj != _EMPTY_ {
nsub.subject = []byte(ime.overlapSubj)
} else {
nsub.subject = []byte(im.from)
}
}
// Else use original subject
c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name)
if err := im.acc.sl.Insert(&nsub); err != nil {
errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name)
c.Debugf(errs)
return nil, fmt.Errorf(errs)
}
// Update our route map here.
c.srv.updateRemoteSubscription(im.acc, &nsub, 1)
return &nsub, nil
}
// canSubscribe determines if the client is authorized to subscribe to the
// given subject. Assumes caller is holding lock.
func (c *client) canSubscribe(subject string) bool {
if c.perms == nil {
return true
}
allowed := true
// Check allow list. If no allow list that means all are allowed. Deny can overrule.
if c.perms.sub.allow != nil {
r := c.perms.sub.allow.Match(subject)
allowed = len(r.psubs) != 0
// Leafnodes operate slightly differently in that they allow broader scoped subjects.
// They will prune based on publish perms before sending to a leafnode client.
if !allowed && c.kind == LEAF && subjectHasWildcard(subject) {
r := c.perms.sub.allow.ReverseMatch(subject)
allowed = len(r.psubs) != 0
}
}
// If we have a deny list and we think we are allowed, check that as well.
if allowed && c.perms.sub.deny != nil {
r := c.perms.sub.deny.Match(subject)
allowed = len(r.psubs) == 0
// We use the actual subscription to signal us to spin up the deny mperms
// and cache. We check if the subject is a wildcard that contains any of
// the deny clauses.
// FIXME(dlc) - We could be smarter and track when these go away and remove.
if allowed && c.mperms == nil && subjectHasWildcard(subject) {
// Whip through the deny array and check if this wildcard subject is within scope.
for _, sub := range c.darray {
if subjectIsSubsetMatch(sub, subject) {
c.loadMsgDenyFilter()
break
}
}
}
}
return allowed
}
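// queueMatches reports whether the given queue name is covered by the matched
// queue subscriptions. An empty result set matches any queue name.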
func queueMatches(queue string, qsubs [][]*subscription) bool {
if len(qsubs) == 0 {
return true
}
for _, qsub := range qsubs {
qs := qsub[0]
qname := string(qs.queue)
// NOTE: '*' and '>' tokens can also be valid
// queue names so we first check against the
// literal name. e.g. v1.* == v1.*
if queue == qname || (subjectHasWildcard(qname) && subjectIsSubsetMatch(queue, qname)) {
return true
}
}
return false
}
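// canQueueSubscribe determines if the client is authorized to subscribe to the
// given subject as part of the given queue group. As with canSubscribe, the
// caller is expected to hold the client lock.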
func (c *client) canQueueSubscribe(subject, queue string) bool {
if c.perms == nil {
return true
}
allowed := true
if c.perms.sub.allow != nil {
r := c.perms.sub.allow.Match(subject)
// If perms DO NOT have queue name, then psubs will be greater than
// zero. If perms DO have queue name, then qsubs will be greater than
// zero.
allowed = len(r.psubs) > 0
if len(r.qsubs) > 0 {
// If the queue appears in the allow list, then DO allow.
allowed = queueMatches(queue, r.qsubs)
}
}
if allowed && c.perms.sub.deny != nil {
r := c.perms.sub.deny.Match(subject)
// If perms DO NOT have queue name, then psubs will be greater than
// zero. If perms DO have queue name, then qsubs will be greater than
// zero.
allowed = len(r.psubs) == 0
if len(r.qsubs) > 0 {
// If the queue appears in the deny list, then DO NOT allow.
allowed = !queueMatches(queue, r.qsubs)
}
}
return allowed
}
// Low level unsubscribe for a given client.
func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool) {
c.mu.Lock()
if !force && sub.max > 0 && sub.nm < sub.max {
c.Debugf(
"Deferring actual UNSUB(%s): %d max, %d received",
string(sub.subject), sub.max, sub.nm)
c.mu.Unlock()
return
}
if c.trace {
c.traceOp("<-> %s", "DELSUB", sub.sid)
}
if c.kind != CLIENT && c.kind != SYSTEM {
c.removeReplySubTimeout(sub)
}
// Remove accounting if requested. This will be false when we close a connection
// with open subscriptions.
if remove {
delete(c.subs, string(sub.sid))
if acc != nil {
acc.sl.Remove(sub)
}
}
// Check to see if we have shadow subscriptions.
var updateRoute bool
var updateGWs bool
shadowSubs := sub.shadow
sub.shadow = nil
if len(shadowSubs) > 0 {
updateRoute = (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil
if updateRoute {
updateGWs = c.srv.gateway.enabled
}
}
sub.close()
c.mu.Unlock()
// Process shadow subs if we have them.
for _, nsub := range shadowSubs {
if err := nsub.im.acc.sl.Remove(nsub); err != nil {
c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name)
} else {
if updateRoute {
c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1)
}
if updateGWs {
c.srv.gatewayUpdateSubInterest(nsub.im.acc.Name, nsub, -1)
}
}
// Now check on leafnode updates.
c.srv.updateLeafNodes(nsub.im.acc, nsub, -1)
}
// Now check to see if this was part of a respMap entry for service imports.
if acc != nil {
acc.checkForReverseEntry(string(sub.subject), nil, true)
}
}
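// processUnsub handles an UNSUB protocol line. It parses the sid and optional
// max-messages count, then either arms auto-unsubscribe or removes the
// subscription and propagates the interest change to routes, gateways and leafnodes.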
func (c *client) processUnsub(arg []byte) error {
args := splitArg(arg)
var sid []byte
max := int64(-1)
switch len(args) {
case 1:
sid = args[0]
case 2:
sid = args[0]
max = int64(parseSize(args[1]))
default:
return fmt.Errorf("processUnsub Parse Error: '%s'", arg)
}
var sub *subscription
var ok, unsub bool
c.mu.Lock()
// Indicate activity.
c.in.subs++
// Grab connection type.
kind := c.kind
srv := c.srv
var acc *Account
updateGWs := false
if sub, ok = c.subs[string(sid)]; ok {
acc = c.acc
if max > 0 && max > sub.nm {
sub.max = max
} else {
// Clear it here to override
sub.max = 0
unsub = true
}
updateGWs = srv.gateway.enabled
}
c.mu.Unlock()
if c.opts.Verbose {
c.sendOK()
}
if unsub {
c.unsubscribe(acc, sub, false, true)
if acc != nil && (kind == CLIENT || kind == SYSTEM || kind == ACCOUNT || kind == JETSTREAM) {
srv.updateRouteSubscriptionMap(acc, sub, -1)
if updateGWs {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
}
// Now check on leafnode updates.
srv.updateLeafNodes(acc, sub, -1)
}
return nil
}
// checkDenySub will check if we are allowed to deliver this message in the
// presence of deny clauses for subscriptions. Deny clauses will not prevent
// larger scoped wildcard subscriptions, so we need to check at delivery time.
// Lock should be held.
func (c *client) checkDenySub(subject string) bool {
if denied, ok := c.mperms.dcache[subject]; ok {
return denied
} else if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 {
c.mperms.dcache[subject] = true
return true
} else {
c.mperms.dcache[subject] = false
}
if len(c.mperms.dcache) > maxDenyPermCacheSize {
c.pruneDenyCache()
}
return false
}
// Create a message header for routes or leafnodes. Header and origin cluster aware.
func (c *client) msgHeaderForRouteOrLeaf(subj, reply []byte, rt *routeTarget, acc *Account) []byte {
hasHeader := c.pa.hdr > 0
canReceiveHeader := rt.sub.client.headers
mh := c.msgb[:msgHeadProtoLen]
kind := rt.sub.client.kind
var lnoc bool
if kind == ROUTER {
// If we are coming from a leaf with an origin cluster we need to handle differently
// if we can. We will send a route based LMSG which has origin cluster and headers
// by default.
if c.kind == LEAF && c.remoteCluster() != _EMPTY_ && rt.sub.client.route.lnoc {
mh[0] = 'L'
mh = append(mh, c.remoteCluster()...)
mh = append(mh, ' ')
lnoc = true
} else {
// Router (and Gateway) nodes are RMSG. Set here since leafnodes may rewrite.
mh[0] = 'R'
}
mh = append(mh, acc.Name...)
mh = append(mh, ' ')
} else {
// Leaf nodes are LMSG
mh[0] = 'L'
// Remap subject if it's a shadow subscription, treat like a normal client.
if rt.sub.im != nil {
if rt.sub.im.tr != nil {
to, _ := rt.sub.im.tr.transformSubject(string(subj))
subj = []byte(to)
} else if !rt.sub.im.usePub {
subj = []byte(rt.sub.im.to)
}
}
}
mh = append(mh, subj...)
mh = append(mh, ' ')
if len(rt.qs) > 0 {
if reply != nil {
mh = append(mh, "+ "...) // Signal that there is a reply.
mh = append(mh, reply...)
mh = append(mh, ' ')
} else {
mh = append(mh, "| "...) // Only queues
}
mh = append(mh, rt.qs...)
} else if reply != nil {
mh = append(mh, reply...)
mh = append(mh, ' ')
}
if lnoc {
// leafnode origin LMSG always have a header entry even if zero.
if c.pa.hdr <= 0 {
mh = append(mh, '0')
} else {
mh = append(mh, c.pa.hdb...)
}
mh = append(mh, ' ')
mh = append(mh, c.pa.szb...)
} else if hasHeader {
if canReceiveHeader {
mh[0] = 'H'
mh = append(mh, c.pa.hdb...)
mh = append(mh, ' ')
mh = append(mh, c.pa.szb...)
} else {
// If we are here we need to truncate the payload size
nsz := strconv.Itoa(c.pa.size - c.pa.hdr)
mh = append(mh, nsz...)
}
} else {
mh = append(mh, c.pa.szb...)
}
return append(mh, _CRLF_...)
}
// Create a message header for clients. Header aware.
func (c *client) msgHeader(subj, reply []byte, sub *subscription) []byte {
// See if we should do headers. We have to have a headers msg and
// the client we are going to deliver to needs to support headers as well.
hasHeader := c.pa.hdr > 0
canReceiveHeader := sub.client != nil && sub.client.headers
var mh []byte
if hasHeader && canReceiveHeader {
mh = c.msgb[:msgHeadProtoLen]
mh[0] = 'H'
} else {
mh = c.msgb[1:msgHeadProtoLen]
}
mh = append(mh, subj...)
mh = append(mh, ' ')
if len(sub.sid) > 0 {
mh = append(mh, sub.sid...)
mh = append(mh, ' ')
}
if reply != nil {
mh = append(mh, reply...)
mh = append(mh, ' ')
}
if hasHeader {
if canReceiveHeader {
mh = append(mh, c.pa.hdb...)
mh = append(mh, ' ')
mh = append(mh, c.pa.szb...)
} else {
// If we are here we need to truncate the payload size
nsz := strconv.Itoa(c.pa.size - c.pa.hdr)
mh = append(mh, nsz...)
}
} else {
mh = append(mh, c.pa.szb...)
}
mh = append(mh, _CRLF_...)
return mh
}
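// stalledWait blocks the producing client for a bounded amount of time while
// this (consumer) client's outbound buffer is stalled. The receiver's lock is
// released while waiting and re-acquired before returning.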
func (c *client) stalledWait(producer *client) {
stall := c.out.stc
ttl := stallDuration(c.out.pb, c.out.mp)
c.mu.Unlock()
defer c.mu.Lock()
select {
case <-stall:
case <-time.After(ttl):
producer.Debugf("Timed out of fast producer stall (%v)", ttl)
}
}
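// stallDuration computes how long a producer should wait for a stalled consumer
// based on the pending bytes (pb) relative to the maximum pending allowed (mp).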
func stallDuration(pb, mp int64) time.Duration {
ttl := stallClientMinDuration
if pb >= mp {
ttl = stallClientMaxDuration
} else if hmp := mp / 2; pb > hmp {
bsz := hmp / 10
additional := int64(ttl) * ((pb - hmp) / bsz)
ttl += time.Duration(additional)
}
return ttl
}
// Used to treat maps as efficient set
var needFlush = struct{}{}
// deliverMsg will deliver a message to a matching subscription and its underlying client.
// We process all connection/client types. mh is the part that will be protocol/client specific.
func (c *client) deliverMsg(sub *subscription, acc *Account, subject, reply, mh, msg []byte, gwrply bool) bool {
if sub.client == nil {
return false
}
client := sub.client
client.mu.Lock()
// Check echo
if c == client && !client.echo {
client.mu.Unlock()
return false
}
// Check if we have a subscribe deny clause. This will trigger us to check the subject
// for a match against the denied subjects.
if client.mperms != nil && client.checkDenySub(string(subject)) {
client.mu.Unlock()
return false
}
// New race detector forces this now.
if sub.isClosed() {
client.mu.Unlock()
return false
}
// Check if we are a leafnode and have perms to check.
if client.kind == LEAF && client.perms != nil {
if !client.pubAllowedFullCheck(string(subject), true, true) {
client.mu.Unlock()
client.Debugf("Not permitted to deliver to %q", subject)
return false
}
}
srv := client.srv
sub.nm++
// Check if we should auto-unsubscribe.
if sub.max > 0 {
if client.kind == ROUTER && sub.nm >= sub.max {
// The only router based messages that we will see here are remoteReplies.
// We handle these slightly differently.
defer client.removeReplySub(sub)
} else {
// For routing..
shouldForward := client.kind == CLIENT || client.kind == SYSTEM && client.srv != nil
// If we are at the exact number, unsubscribe but
// still process the message in hand, otherwise
// unsubscribe and drop message on the floor.
if sub.nm == sub.max {
client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'", sub.max, string(sub.sid))
// Due to defer, reverse the code order so that execution
// is consistent with other cases where we unsubscribe.
if shouldForward {
defer srv.updateRemoteSubscription(client.acc, sub, -1)
}
defer client.unsubscribe(client.acc, sub, true, true)
} else if sub.nm > sub.max {
client.Debugf("Auto-unsubscribe limit [%d] exceeded", sub.max)
client.mu.Unlock()
client.unsubscribe(client.acc, sub, true, true)
if shouldForward {
srv.updateRemoteSubscription(client.acc, sub, -1)
}
return false
}
}
}
// Check here if we have a header with our message. If this client cannot
// support headers we need to strip them from the payload.
// The actual header would have been processed correctly for us, so just
// need to update payload.
if c.pa.hdr > 0 && !sub.client.headers {
msg = msg[c.pa.hdr:]
}
// Update statistics
// The msg includes the CR_LF, so pull back out for accounting.
msgSize := int64(len(msg))
prodIsMQTT := c.isMqtt()
// MQTT producers send messages without CR_LF, so don't remove it for them.
if !prodIsMQTT {
msgSize -= int64(LEN_CR_LF)
}
// No atomic needed since accessed under client lock.
// Monitor is reading those also under client's lock.
client.outMsgs++
client.outBytes += msgSize
// Check for internal subscriptions.
if sub.icb != nil && !c.noIcb {
if gwrply {
// We will store in the account, not the client since it will likely
// be a different client that will send the reply.
srv.trackGWReply(nil, client.acc, reply, c.pa.reply)
}
client.mu.Unlock()
// Internal account clients are for service imports and need the '\r\n'.
if client.kind == ACCOUNT {
sub.icb(sub, c, acc, string(subject), string(reply), msg)
} else {
sub.icb(sub, c, acc, string(subject), string(reply), msg[:msgSize])
}
return true
}
// We don't count internal deliveries so we update server statistics here.
atomic.AddInt64(&srv.outMsgs, 1)
atomic.AddInt64(&srv.outBytes, msgSize)
// If we are a client and we detect that the consumer we are
// sending to is in a stalled state, go ahead and wait here
// with a limit.
if c.kind == CLIENT && client.out.stc != nil {
client.stalledWait(c)
}
// Check for closed connection
if client.isClosed() {
client.mu.Unlock()
return false
}
// Do a fast check here to see if we should be tracking this from a latency
// perspective. This will be for a request being received for an exported service.
// This needs to be from a non-client (otherwise tracking happens at requestor).
//
// Also this check captures if the original reply (c.pa.reply) is a GW routed
// reply (since it is known to be > minReplyLen). If that is the case, we need to
// track the binding between the routed reply and the reply set in the message
// header (which is c.pa.reply without the GNR routing prefix).
if client.kind == CLIENT && len(c.pa.reply) > minReplyLen {
if gwrply {
// Note that we keep track of the GW routed reply in the destination
// connection (`client`). The routed reply subject is in `c.pa.reply`,
// should that change, we would have to pass the GW routed reply as
// a parameter of deliverMsg().
srv.trackGWReply(client, nil, reply, c.pa.reply)
}
// If we do not have a registered RTT, queue that up now.
if client.rtt == 0 {
client.sendRTTPingLocked()
}
// FIXME(dlc) - We may need to optimize this.
// We will have tagged this with a suffix ('.T') if we are tracking. This is
// needed for sampling. Not all will be tracked.
if c.kind != CLIENT && isTrackedReply(c.pa.reply) {
client.trackRemoteReply(string(subject), string(c.pa.reply))
}
}
// Queue to outbound buffer
client.queueOutbound(mh)
client.queueOutbound(msg)
if prodIsMQTT {
// Need to add CR_LF since MQTT producers don't send CR_LF
client.queueOutbound([]byte(CR_LF))
}
client.out.pm++
// If we are tracking dynamic publish permissions that track reply subjects,
// do that accounting here. We only look at client.replies which will be non-nil.
if client.replies != nil && len(reply) > 0 {
client.replies[string(reply)] = &resp{time.Now(), 0}
if len(client.replies) > replyPermLimit {
client.pruneReplyPerms()
}
}
// Check outbound threshold and queue IO flush if needed.
// This is specifically looking at situations where we are getting behind and may want
// to intervene before this producer goes back to top of readloop. We are in the producer's
// readloop go routine at this point.
// FIXME(dlc) - We may call this a lot, maybe suppress after first call?
if client.out.pm > 1 && client.out.pb > maxBufSize*2 {
client.flushSignal()
}
// Add the data size we are responsible for here. This will be processed when we
// return to the top of the readLoop.
c.addToPCD(client)
if client.trace {
client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil)
}
client.mu.Unlock()
return true
}
// Add the given sub's client to the list of clients that need flushing.
// This must be invoked from `c`'s readLoop. No lock for c is required,
// however, `client` lock must be held on entry. This holds true even
// if `client` is the same as `c`.
func (c *client) addToPCD(client *client) {
if _, ok := c.pcd[client]; !ok {
client.out.fsp++
c.pcd[client] = needFlush
}
}
// This will track a remote reply for an exported service that has requested
// latency tracking.
// Lock assumed to be held.
func (c *client) trackRemoteReply(subject, reply string) {
a := c.acc
if a == nil {
return
}
var lrt time.Duration
var respThresh time.Duration
a.mu.RLock()
se := a.getServiceExport(subject)
if se != nil {
lrt = a.lowestServiceExportResponseTime()
respThresh = se.respThresh
}
a.mu.RUnlock()
if se == nil {
return
}
if c.rrTracking == nil {
c.rrTracking = &rrTracking{
rmap: make(map[string]*remoteLatency),
ptmr: time.AfterFunc(lrt, c.pruneRemoteTracking),
lrt: lrt,
}
}
rl := remoteLatency{
Account: a.Name,
ReqId: reply,
respThresh: respThresh,
}
rl.M2.RequestStart = time.Now().UTC()
c.rrTracking.rmap[reply] = &rl
}
// pruneRemoteTracking will prune any remote tracking objects
// that are too old. These are orphaned when a service is not
// sending responses, etc.
// Lock should not be held upon entry; it is acquired here.
func (c *client) pruneRemoteTracking() {
c.mu.Lock()
if c.rrTracking == nil {
c.mu.Unlock()
return
}
now := time.Now()
for subject, rl := range c.rrTracking.rmap {
if now.After(rl.M2.RequestStart.Add(rl.respThresh)) {
delete(c.rrTracking.rmap, subject)
}
}
if len(c.rrTracking.rmap) > 0 {
t := c.rrTracking.ptmr
t.Stop()
t.Reset(c.rrTracking.lrt)
} else {
c.rrTracking.ptmr.Stop()
c.rrTracking = nil
}
c.mu.Unlock()
}
// pruneReplyPerms will remove any stale or expired entries
// in our reply cache. We make sure to not check too often.
func (c *client) pruneReplyPerms() {
// Nothing to do if response permissions are not configured.
if c.perms.resp == nil {
return
}
mm := c.perms.resp.MaxMsgs
ttl := c.perms.resp.Expires
now := time.Now()
for k, resp := range c.replies {
if mm > 0 && resp.n >= mm {
delete(c.replies, k)
} else if ttl > 0 && now.Sub(resp.t) > ttl {
delete(c.replies, k)
}
}
}
// pruneDenyCache will prune the deny cache by randomly
// deleting items, pruneSize items at a time.
// Lock must be held for this one since it is shared under
// deliverMsg.
func (c *client) pruneDenyCache() {
r := 0
for subject := range c.mperms.dcache {
delete(c.mperms.dcache, subject)
if r++; r > pruneSize {
break
}
}
}
// prunePubPermsCache will prune the cache by randomly
// deleting items, pruneSize items at a time.
func (c *client) prunePubPermsCache() {
// There is a case where we can invoke this from multiple go routines,
// (in deliverMsg() if sub.client is a LEAF), so we make sure to prune
// from only one go routine at a time.
if !atomic.CompareAndSwapInt32(&c.perms.prun, 0, 1) {
return
}
const maxPruneAtOnce = 1000
r := 0
c.perms.pcache.Range(func(k, _ interface{}) bool {
c.perms.pcache.Delete(k)
if r++; (r > pruneSize && atomic.LoadInt32(&c.perms.pcsz) < int32(maxPermCacheSize)) ||
(r > maxPruneAtOnce) {
return false
}
return true
})
atomic.AddInt32(&c.perms.pcsz, -int32(r))
atomic.StoreInt32(&c.perms.prun, 0)
}
// pubAllowed checks on publish permissioning.
// Lock should not be held.
func (c *client) pubAllowed(subject string) bool {
return c.pubAllowedFullCheck(subject, true, false)
}
// pubAllowedFullCheck checks on all publish permissioning depending
// on the flag for dynamic reply permissions.
func (c *client) pubAllowedFullCheck(subject string, fullCheck, hasLock bool) bool {
if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) {
return true
}
// Check if published subject is allowed if we have permissions in place.
v, ok := c.perms.pcache.Load(subject)
if ok {
return v.(bool)
}
allowed := true
// Cache miss, check allow then deny as needed.
if c.perms.pub.allow != nil {
r := c.perms.pub.allow.Match(subject)
allowed = len(r.psubs) != 0
}
// If we have a deny list and are currently allowed, check that as well.
if allowed && c.perms.pub.deny != nil {
r := c.perms.pub.deny.Match(subject)
allowed = len(r.psubs) == 0
}
// If we are currently not allowed but we are tracking reply subjects
// dynamically, check to see if we are allowed here but avoid pcache.
// We need to acquire the lock though.
if !allowed && fullCheck && c.perms.resp != nil {
if !hasLock {
c.mu.Lock()
}
if resp := c.replies[subject]; resp != nil {
resp.n++
// Check if we have sent too many responses.
if c.perms.resp.MaxMsgs > 0 && resp.n > c.perms.resp.MaxMsgs {
delete(c.replies, subject)
} else if c.perms.resp.Expires > 0 && time.Since(resp.t) > c.perms.resp.Expires {
delete(c.replies, subject)
} else {
allowed = true
}
}
if !hasLock {
c.mu.Unlock()
}
} else {
// Update our cache here.
c.perms.pcache.Store(string(subject), allowed)
if n := atomic.AddInt32(&c.perms.pcsz, 1); n > maxPermCacheSize {
c.prunePubPermsCache()
}
}
return allowed
}
// Test whether a reply subject is a service import reply.
func isServiceReply(reply []byte) bool {
// This function is inlined and checking this way is actually faster
// than byte-by-byte comparison.
return len(reply) > 3 && string(reply[:4]) == replyPrefix
}
// Test whether a reply subject is a service import or a gateway routed reply.
func isReservedReply(reply []byte) bool {
if isServiceReply(reply) {
return true
}
// Faster to check with string([:]) than byte-by-byte
if len(reply) > gwReplyPrefixLen && string(reply[:gwReplyPrefixLen]) == gwReplyPrefix {
return true
}
return false
}
// This will decide to call the client code or router code.
func (c *client) processInboundMsg(msg []byte) {
switch c.kind {
case CLIENT:
c.processInboundClientMsg(msg)
case ROUTER:
c.processInboundRoutedMsg(msg)
case GATEWAY:
c.processInboundGatewayMsg(msg)
case LEAF:
c.processInboundLeafMsg(msg)
}
}
// selectMappedSubject will choose the mapped subject based on the client's inbound subject.
func (c *client) selectMappedSubject() bool {
nsubj, changed := c.acc.selectMappedSubject(string(c.pa.subject))
if changed {
c.pa.mapped = c.pa.subject
c.pa.subject = []byte(nsubj)
}
return changed
}
// processInboundClientMsg is called to process an inbound msg from a client.
// Return if the message was delivered, and if the message was not delivered
// due to a permission issue.
func (c *client) processInboundClientMsg(msg []byte) (bool, bool) {
// Update statistics
// The msg includes the CR_LF, so pull back out for accounting.
c.in.msgs++
c.in.bytes += int32(len(msg) - LEN_CR_LF)
// Check that client (could be here with SYSTEM) is not publishing on reserved "$GNR" prefix.
if c.kind == CLIENT && hasGWRoutedReplyPrefix(c.pa.subject) {
c.pubPermissionViolation(c.pa.subject)
return false, true
}
// Mostly under testing scenarios.
if c.srv == nil || c.acc == nil {
return false, false
}
// Check pub permissions
if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
c.pubPermissionViolation(c.pa.subject)
return false, true
}
// Now check for reserved replies. These are used for service imports.
if c.kind == CLIENT && len(c.pa.reply) > 0 && isReservedReply(c.pa.reply) {
c.replySubjectViolation(c.pa.reply)
return false, true
}
if c.opts.Verbose {
c.sendOK()
}
// If MQTT client, check for retain flag now that we have passed permissions check
if c.isMqtt() {
c.mqttHandlePubRetain()
}
// Doing this inline as opposed to creating a function (which otherwise has a measured
// performance impact reported in our bench)
var isGWRouted bool
if c.kind != CLIENT {
if atomic.LoadInt32(&c.acc.gwReplyMapping.check) > 0 {
c.acc.mu.RLock()
c.pa.subject, isGWRouted = c.acc.gwReplyMapping.get(c.pa.subject)
c.acc.mu.RUnlock()
}
} else if atomic.LoadInt32(&c.gwReplyMapping.check) > 0 {
c.mu.Lock()
c.pa.subject, isGWRouted = c.gwReplyMapping.get(c.pa.subject)
c.mu.Unlock()
}
// If we have an exported service and we are doing remote tracking, check this subject
// to see if we need to report the latency.
if c.rrTracking != nil {
c.mu.Lock()
rl := c.rrTracking.rmap[string(c.pa.subject)]
if rl != nil {
delete(c.rrTracking.rmap, string(c.pa.subject))
}
c.mu.Unlock()
if rl != nil {
sl := &rl.M2
// Fill this in and send it off to the other side.
sl.Status = 200
sl.Responder = c.getClientInfo(true)
sl.ServiceLatency = time.Since(sl.RequestStart) - sl.Responder.RTT
sl.TotalLatency = sl.ServiceLatency + sl.Responder.RTT
sanitizeLatencyMetric(sl)
lsub := remoteLatencySubjectForResponse(c.pa.subject)
c.srv.sendInternalAccountMsg(nil, lsub, rl) // Send to SYS account
}
}
// If the subject was converted to the gateway routed subject, then handle it now
// and be done with the rest of this function.
if isGWRouted {
c.handleGWReplyMap(msg)
return true, false
}
// Match the subscriptions. We will use our own L1 map if
// it's still valid, avoiding contention on the shared sublist.
var r *SublistResult
var ok bool
genid := atomic.LoadUint64(&c.acc.sl.genid)
if genid == c.in.genid && c.in.results != nil {
r, ok = c.in.results[string(c.pa.subject)]
} else {
// Reset our L1 completely.
c.in.results = make(map[string]*SublistResult)
c.in.genid = genid
}
// Go back to the sublist data structure.
if !ok {
r = c.acc.sl.Match(string(c.pa.subject))
c.in.results[string(c.pa.subject)] = r
// Prune the results cache. Keeps us from unbounded growth. Random delete.
if len(c.in.results) > maxResultCacheSize {
n := 0
for subject := range c.in.results {
delete(c.in.results, subject)
if n++; n > pruneSize {
break
}
}
}
}
// Indication if we attempted to deliver the message to anyone.
var didDeliver bool
var qnames [][]byte
// Check for no interest, short circuit if so.
// This is the fanout scale.
if len(r.psubs)+len(r.qsubs) > 0 {
flag := pmrNoFlag
// If there are matching queue subs and we are in gateway mode,
// we need to keep track of the queue names the messages are
// delivered to. When sending to the GWs, the RMSG will include
// those names so that the remote clusters do not deliver messages
// to their queue subs of the same names.
if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
flag |= pmrCollectQueueNames
}
didDeliver, qnames = c.processMsgResults(c.acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, flag)
}
// Now deal with gateways
if c.srv.gateway.enabled {
reply := c.pa.reply
if len(c.pa.deliver) > 0 && c.kind == JETSTREAM && len(c.pa.reply) > 0 {
reply = append(reply, '@')
reply = append(reply, c.pa.deliver...)
}
didDeliver = c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, qnames) || didDeliver
}
// Check to see if we did not deliver to anyone and the client has a reply subject set
// and wants notification of no_responders.
if !didDeliver && len(c.pa.reply) > 0 {
c.mu.Lock()
if c.opts.NoResponders {
if sub := c.subForReply(c.pa.reply); sub != nil {
proto := fmt.Sprintf("HMSG %s %s 16 16\r\nNATS/1.0 503\r\n\r\n\r\n", c.pa.reply, sub.sid)
c.queueOutbound([]byte(proto))
c.addToPCD(c)
}
}
c.mu.Unlock()
}
return didDeliver, false
}
// Return the subscription for this reply subject. Only look at normal subs for this client.
func (c *client) subForReply(reply []byte) *subscription {
r := c.acc.sl.Match(string(reply))
for _, sub := range r.psubs {
if sub.client == c {
return sub
}
}
return nil
}
// This is invoked knowing that c.pa.subject has been set to the gateway routed subject.
// This function will send the message to possibly LEAFs and directly back to the origin
// gateway.
func (c *client) handleGWReplyMap(msg []byte) bool {
// Check for leaf nodes
if c.srv.gwLeafSubs.Count() > 0 {
if r := c.srv.gwLeafSubs.Match(string(c.pa.subject)); len(r.psubs) > 0 {
c.processMsgResults(c.acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, pmrNoFlag)
}
}
if c.srv.gateway.enabled {
reply := c.pa.reply
if len(c.pa.deliver) > 0 && c.kind == JETSTREAM && len(c.pa.reply) > 0 {
reply = append(reply, '@')
reply = append(reply, c.pa.deliver...)
}
c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, nil)
}
return true
}
// Used to setup the response map for a service import request that has a reply subject.
func (c *client) setupResponseServiceImport(acc *Account, si *serviceImport, tracking bool, header http.Header) *serviceImport {
rsi := si.acc.addRespServiceImport(acc, string(c.pa.reply), si, tracking, header)
if si.latency != nil {
if c.rtt == 0 {
// We have a service import that we are tracking but have not established RTT.
c.sendRTTPing()
}
si.acc.mu.Lock()
rsi.rc = c
si.acc.mu.Unlock()
}
return rsi
}
// Will remove a header if present.
func removeHeaderIfPresent(hdr []byte, key string) []byte {
start := bytes.Index(hdr, []byte(key))
// key can't be first and we want to check that it is preceded by a '\n'
if start < 1 || hdr[start-1] != '\n' {
return hdr
}
index := start + len(key)
if index >= len(hdr) || hdr[index] != ':' {
return hdr
}
end := bytes.Index(hdr[start:], []byte(_CRLF_))
if end < 0 {
return hdr
}
hdr = append(hdr[:start], hdr[start+end+len(_CRLF_):]...)
if len(hdr) <= len(emptyHdrLine) {
return nil
}
return hdr
}
// Generate a new header based on optional original header and key value.
// More used in JetStream layers.
func genHeader(hdr []byte, key, value string) []byte {
var bb bytes.Buffer
if len(hdr) > LEN_CR_LF {
bb.Write(hdr[:len(hdr)-LEN_CR_LF])
} else {
bb.WriteString(hdrLine)
}
http.Header{key: []string{value}}.Write(&bb)
bb.WriteString(CR_LF)
return bb.Bytes()
}
// This will set a header for the message.
// Lock does not need to be held but this should only be called
// from the inbound go routine. We will update the pubArgs.
// This will replace any previously set header and not add to it per normal spec.
func (c *client) setHeader(key, value string, msg []byte) []byte {
var bb bytes.Buffer
var omi int
// Write original header if present.
if c.pa.hdr > LEN_CR_LF {
omi = c.pa.hdr
hdr := removeHeaderIfPresent(msg[:c.pa.hdr-LEN_CR_LF], key)
if len(hdr) == 0 {
bb.WriteString(hdrLine)
} else {
bb.Write(hdr)
}
} else {
bb.WriteString(hdrLine)
}
http.Header{key: []string{value}}.Write(&bb)
bb.WriteString(CR_LF)
nhdr := bb.Len()
// Put the original message back.
// FIXME(dlc) - This is inefficient.
bb.Write(msg[omi:])
nsize := bb.Len() - LEN_CR_LF
// MQTT producers don't have CRLF, so add it back.
if c.isMqtt() {
nsize += LEN_CR_LF
}
// Update pubArgs
// If others will use this later we need to save and restore original.
c.pa.hdr = nhdr
c.pa.size = nsize
c.pa.hdb = []byte(strconv.Itoa(nhdr))
c.pa.szb = []byte(strconv.Itoa(nsize))
return bb.Bytes()
}
// Will return the value for the header denoted by key, or nil if it does not exist.
// This function ignores errors and tries to achieve speed and no additional allocations.
func getHeader(key string, hdr []byte) []byte {
if len(hdr) == 0 {
return nil
}
index := bytes.Index(hdr, []byte(key))
if index < 0 {
return nil
}
index += len(key)
if index >= len(hdr) {
return nil
}
if hdr[index] != ':' {
return nil
}
index++
var value []byte
hdrLen := len(hdr)
// Skip any leading spaces, checking bounds before indexing.
for index < hdrLen && hdr[index] == ' ' {
index++
}
for index < hdrLen {
if hdr[index] == '\r' && index < hdrLen-1 && hdr[index+1] == '\n' {
break
}
value = append(value, hdr[index])
index++
}
return value
}
// processServiceImport is an internal callback when a subscription matches an imported service
// from another account. This includes response mappings as well.
func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byte) {
// If we are a GW and this is not a direct serviceImport ignore.
isResponse := si.isRespServiceImport()
if (c.kind == GATEWAY || c.kind == ROUTER) && !isResponse {
return
}
// If we are here and we are a serviceImport response make sure we are not matching back
// to the import/export pair that started the request. If so ignore.
if isResponse && c.pa.psi != nil && c.pa.psi.se == si.se {
return
}
acc.mu.RLock()
shouldReturn := si.invalid || acc.sl == nil
checkJSGetNext := !isResponse && si.to == jsAllAPI && strings.HasPrefix(string(c.pa.subject), jsRequestNextPre)
acc.mu.RUnlock()
// We have a special case where JetStream pulls in all service imports through one export.
// However the GetNext for consumers is a no-op and causes buildups of service imports,
// response service imports and rrMap entries which all will need to simply expire.
// TODO(dlc) - Come up with something better.
if checkJSGetNext && si.se != nil && si.se.acc == c.srv.SystemAccount() {
shouldReturn = true
}
// Check for short circuit return.
if shouldReturn {
return
}
var nrr []byte
var rsi *serviceImport
// Check if there is a reply present and set up a response.
tracking, headers := shouldSample(si.latency, c)
if len(c.pa.reply) > 0 {
// Special case for now, need to formalize.
// TODO(dlc) - Formalize as a service import option for reply rewrite.
// For now we can't do $JS.ACK since that breaks pull consumers across accounts.
if !bytes.HasPrefix(c.pa.reply, []byte(jsAckPre)) {
if rsi = c.setupResponseServiceImport(acc, si, tracking, headers); rsi != nil {
nrr = []byte(rsi.from)
}
} else {
// This only happens when we do a pull subscriber that trampolines through another account.
// Normally this code is not called.
nrr = c.pa.reply
}
} else if !isResponse && si.latency != nil && tracking {
// Check to see if this was a bad request with no reply and we were supposed to be tracking.
si.acc.sendBadRequestTrackingLatency(si, c, headers)
}
// Send tracking info here if we are tracking this response.
// This is always a response.
var didSendTL bool
if si.tracking {
// Stamp that we attempted delivery.
si.didDeliver = true
didSendTL = acc.sendTrackingLatency(si, c)
}
// Pick correct "to" subject. If we matched on a wildcard use the literal publish subject.
to, subject := si.to, string(c.pa.subject)
if si.tr != nil {
// FIXME(dlc) - This could be slow, may want to look at adding cache to bare transforms?
to, _ = si.tr.transformSubject(subject)
} else if si.usePub {
to = subject
}
// Copy our pubArg since this gets modified as we process the service import itself.
pacopy := c.pa
// Now check to see if this account has mappings that could affect the service import.
// Can't use non-locked trick like in processInboundClientMsg, so just call into selectMappedSubject
// so we only lock once.
if nsubj, changed := si.acc.selectMappedSubject(to); changed {
c.pa.mapped = []byte(to)
to = nsubj
}
// Set previous service import to detect chaining.
hadPrevSi, share := c.pa.psi != nil, si.share
if hadPrevSi {
share = c.pa.psi.share
}
c.pa.psi = si
// Place our client info for the request in the original message.
// This will survive going across routes, etc.
if !isResponse {
var ci *ClientInfo
if hadPrevSi && c.pa.hdr >= 0 {
var cis ClientInfo
if err := json.Unmarshal(getHeader(ClientInfoHdr, msg[:c.pa.hdr]), &cis); err == nil {
ci = &cis
ci.Service = acc.Name
}
} else if c.kind != LEAF || c.pa.hdr < 0 || len(getHeader(ClientInfoHdr, msg[:c.pa.hdr])) == 0 {
ci = c.getClientInfo(share)
}
if ci != nil {
if b, _ := json.Marshal(ci); b != nil {
msg = c.setHeader(ClientInfoHdr, string(b), msg)
}
}
}
// Set our optional subject(to) and reply.
if !isResponse && to != subject {
c.pa.subject = []byte(to)
}
c.pa.reply = nrr
// FIXME(dlc) - Do L1 cache trick like normal client?
rr := si.acc.sl.Match(to)
// If we are a route or gateway or leafnode and this message is flipped to a queue subscriber we
// need to handle that since the processMsgResults will want a queue filter.
flags := pmrMsgImportedFromService
if c.kind == GATEWAY || c.kind == ROUTER || c.kind == LEAF {
flags |= pmrIgnoreEmptyQueueFilter
}
// We will be calling back into processMsgResults since we are now being called as a normal sub.
// We need to take care of the c.in.rts, so save off what is there and use a local version. We
// will put back what was there after.
orts := c.in.rts
var lrts [routeTargetInit]routeTarget
c.in.rts = lrts[:0]
var didDeliver bool
// If this is not a gateway connection but gateway is enabled,
// try to send this converted message to all gateways.
if c.srv.gateway.enabled {
flags |= pmrCollectQueueNames
var queues [][]byte
didDeliver, queues = c.processMsgResults(si.acc, rr, msg, c.pa.deliver, []byte(to), nrr, flags)
didDeliver = c.sendMsgToGateways(si.acc, msg, []byte(to), nrr, queues) || didDeliver
} else {
didDeliver, _ = c.processMsgResults(si.acc, rr, msg, c.pa.deliver, []byte(to), nrr, flags)
}
// Restore to original values.
c.in.rts = orts
c.pa = pacopy
// Determine if we should remove this service import. This is for response service imports.
// We will remove if we did not deliver, or if we are a response service import and we are
// a singleton, or we have an EOF message.
shouldRemove := !didDeliver || (isResponse && (si.rt == Singleton || len(msg) == LEN_CR_LF))
// If we are tracking and we did not actually send the latency info we need to suppress the removal.
if si.tracking && !didSendTL {
shouldRemove = false
}
// If we are streamed or chunked we need to update our timestamp to avoid cleanup.
if si.rt != Singleton && didDeliver {
acc.mu.Lock()
si.ts = time.Now().UnixNano()
acc.mu.Unlock()
}
// Cleanup of a response service import
if shouldRemove {
reason := rsiOk
if !didDeliver {
reason = rsiNoDelivery
}
if isResponse {
acc.removeRespServiceImport(si, reason)
} else {
// This is a main import and since we could not even deliver to the exporting account
// go ahead and remove the respServiceImport we created above.
si.acc.removeRespServiceImport(rsi, reason)
}
}
}
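// addSubToRouteTargets collects a route/leaf destination for the message being
// processed, merging queue names for destination clients that are already present.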
func (c *client) addSubToRouteTargets(sub *subscription) {
if c.in.rts == nil {
c.in.rts = make([]routeTarget, 0, routeTargetInit)
}
for i := range c.in.rts {
rt := &c.in.rts[i]
if rt.sub.client == sub.client {
if sub.queue != nil {
rt.qs = append(rt.qs, sub.queue...)
rt.qs = append(rt.qs, ' ')
}
return
}
}
var rt *routeTarget
lrts := len(c.in.rts)
// If we are here we do not have the sub yet in our list
// If we have to grow do so here.
if lrts == cap(c.in.rts) {
c.in.rts = append(c.in.rts, routeTarget{})
}
c.in.rts = c.in.rts[:lrts+1]
rt = &c.in.rts[lrts]
rt.sub = sub
rt.qs = rt._qs[:0]
if sub.queue != nil {
rt.qs = append(rt.qs, sub.queue...)
rt.qs = append(rt.qs, ' ')
}
}
// This processes the sublist results for a given message.
// Returns whether the message was delivered to at least one target, along with
// the queue names it was delivered to when queue name collection is requested.
func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, subject, reply []byte, flags int) (bool, [][]byte) {
// For sending messages across routes and leafnodes.
// Reset if we have one since we reuse this data structure.
if c.in.rts != nil {
c.in.rts = c.in.rts[:0]
}
var rplyHasGWPrefix bool
var creply = reply
// If the reply subject is a GW routed reply, we will perform some
// tracking in deliverMsg(). We also want to send to the user the
// reply without the prefix. `creply` will be set to that and be
// used to create the message header for client connections.
if rplyHasGWPrefix = isGWRoutedReply(reply); rplyHasGWPrefix {
creply = reply[gwSubjectOffset:]
}
// With JetStream we now have times where we want to match a subscription
// on one subject, but deliver it with another. e.g. JetStream deliverables.
// This only works for last mile, meaning to a client. For other types we need
// to use the original subject.
subj := subject
if len(deliver) > 0 {
subj = deliver
}
// Check for JetStream encoded reply subjects.
// For now these will only be on $JS.ACK prefixed reply subjects.
if len(creply) > 0 &&
c.kind != CLIENT && c.kind != SYSTEM && c.kind != JETSTREAM && c.kind != ACCOUNT &&
bytes.HasPrefix(creply, []byte(jsAckPre)) {
// We need to rewrite the subject and the reply.
if li := bytes.LastIndex(creply, []byte("@")); li != -1 && li < len(creply)-1 {
subj, creply = creply[li+1:], creply[:li]
}
}
var didDeliver bool
// delivery subject for clients
var dsubj []byte
// Used as scratch if mapping
var _dsubj [64]byte
// Loop over all normal subscriptions that match.
for _, sub := range r.psubs {
// Check if this is a send to a ROUTER. We now process
// these after everything else.
switch sub.client.kind {
case ROUTER:
if (c.kind != ROUTER && !c.isSpokeLeafNode()) || (flags&pmrAllowSendFromRouteToRoute != 0) {
c.addSubToRouteTargets(sub)
}
continue
case GATEWAY:
// Never send to gateway from here.
continue
case LEAF:
// We handle similarly to routes and use the same data structures.
// Leaf node delivery audience is different however.
// Also leaf nodes are always no echo, so we make sure we are not
// going to send back to ourselves here. For messages from routes we want
// to suppress in general unless we know it is from the hub or it's a service reply.
if c != sub.client && (c.kind != ROUTER || sub.client.isHubLeafNode() || isServiceReply(c.pa.subject)) {
c.addSubToRouteTargets(sub)
}
continue
}
// Assume delivery subject is the normal subject to this point.
dsubj = subj
// Check for stream import mapped subs (shadow subs). These apply to local subs only.
if sub.im != nil {
// If this message was a service import do not re-export to an exported stream.
if flags&pmrMsgImportedFromService != 0 {
continue
}
if sub.im.tr != nil {
to, _ := sub.im.tr.transformSubject(string(dsubj))
dsubj = append(_dsubj[:0], to...)
} else if sub.im.usePub {
dsubj = append(_dsubj[:0], subj...)
} else {
dsubj = append(_dsubj[:0], sub.im.to...)
}
// If we are mapping for a deliver subject we will reverse roles.
// The original subj we set from above is correct for the msg header,
// but we need to transform the deliver subject to properly route.
if len(deliver) > 0 {
dsubj, subj = subj, dsubj
}
}
// Remap to the original subject if internal.
if sub.icb != nil && sub.rsi {
dsubj = subject
}
// Normal delivery
mh := c.msgHeader(dsubj, creply, sub)
didDeliver = c.deliverMsg(sub, acc, dsubj, creply, mh, msg, rplyHasGWPrefix) || didDeliver
}
// Set these up to optionally filter based on the queue lists.
// This is for messages received from routes which will have directed
// guidance on which queue groups we should deliver to.
qf := c.pa.queues
// Declared here because of goto.
var queues [][]byte
// For all routes/leaf/gateway connections, we may still want to send messages to
// leaf nodes or routes even if there are no queue filters since we collect
// them above and do not process inline like normal clients.
// However, do select queue subs if asked to ignore empty queue filter.
if (c.kind == LEAF || c.kind == ROUTER || c.kind == GATEWAY) && qf == nil && flags&pmrIgnoreEmptyQueueFilter == 0 {
goto sendToRoutesOrLeafs
}
// Check to see if we have our own rand yet. Global rand
// has contention with lots of clients, etc.
if c.in.prand == nil {
c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Process queue subs
for i := 0; i < len(r.qsubs); i++ {
qsubs := r.qsubs[i]
// If we have a filter check that here. We could make this a map or something more
// complex but we use a linear search since we expect queues to be small. Should be
// faster and more cache friendly.
if qf != nil && len(qsubs) > 0 {
tqn := qsubs[0].queue
for _, qn := range qf {
if bytes.Equal(qn, tqn) {
goto selectQSub
}
}
continue
}
selectQSub:
// We will hold onto remote or leaf qsubs when we are coming from
// a route or a leaf node just in case we can no longer do local delivery.
var rsub, sub *subscription
var _ql [32]*subscription
src := c.kind
// If we just came from a route we want to prefer local subs.
// So only select from local subs but remember the first rsub
// in case all else fails.
if src == ROUTER {
ql := _ql[:0]
for i := 0; i < len(qsubs); i++ {
sub = qsubs[i]
if sub.client.kind == LEAF || sub.client.kind == ROUTER {
// If we have assigned an rsub already, replace if the destination is a LEAF
// since we want to favor that compared to a ROUTER. We could make sure that
// we override only if previous was a ROUTE and not a LEAF, but we don't have to.
if rsub == nil || sub.client.kind == LEAF {
rsub = sub
}
} else {
ql = append(ql, sub)
}
}
qsubs = ql
}
sindex := 0
lqs := len(qsubs)
if lqs > 1 {
sindex = c.in.prand.Int() % lqs
}
// Find a subscription that is able to deliver this message starting at a random index.
for i := 0; i < lqs; i++ {
if sindex+i < lqs {
sub = qsubs[sindex+i]
} else {
sub = qsubs[(sindex+i)%lqs]
}
if sub == nil {
continue
}
// We have taken care of preferring local subs for a message from a route above.
// Here we just care about a client or leaf and skipping a leaf and preferring locals.
if dst := sub.client.kind; dst == ROUTER || dst == LEAF {
if (src == LEAF || src == CLIENT) && dst == LEAF {
if rsub == nil {
rsub = sub
}
continue
} else {
c.addSubToRouteTargets(sub)
// Clear rsub since we added a sub.
rsub = nil
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, sub.queue)
}
}
break
}
// Assume delivery subject is the normal subject to this point.
dsubj = subj
// Check for stream import mapped subs. These apply to local subs only.
if sub.im != nil {
// If this message was a service import do not re-export to an exported stream.
if flags&pmrMsgImportedFromService != 0 {
continue
}
if sub.im.tr != nil {
to, _ := sub.im.tr.transformSubject(string(subj))
dsubj = append(_dsubj[:0], to...)
} else if sub.im.usePub {
dsubj = append(_dsubj[:0], subj...)
} else {
dsubj = append(_dsubj[:0], sub.im.to...)
}
}
mh := c.msgHeader(dsubj, creply, sub)
if c.deliverMsg(sub, acc, subject, creply, mh, msg, rplyHasGWPrefix) {
didDeliver = true
// Clear rsub
rsub = nil
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, sub.queue)
}
break
}
}
if rsub != nil {
// If we are here we tried to deliver to a local qsub
// but failed. So we will send it to a remote or leaf node.
c.addSubToRouteTargets(rsub)
if flags&pmrCollectQueueNames != 0 {
queues = append(queues, rsub.queue)
}
}
}
sendToRoutesOrLeafs:
// If no messages for routes or leafnodes return here.
if len(c.in.rts) == 0 {
return didDeliver, queues
}
// If we do have a deliver subject we need to do something with it.
// Again this is when JetStream (but possibly others) wants the system
// to rewrite the delivered subject. The way we will do that is place it
// at the end of the reply subject if it exists.
if len(deliver) > 0 && len(reply) > 0 {
reply = append(reply, '@')
reply = append(reply, deliver...)
}
// Copy off original pa in case it changes.
pa := c.pa
// We address by index to avoid struct copy.
// We have inline structs for memory layout and cache coherency.
for i := range c.in.rts {
rt := &c.in.rts[i]
dc := rt.sub.client
dmsg, hset := msg, false
// Check if we have an origin cluster set from a leafnode message.
// If so make sure we do not send it back to the same cluster for a different
// leafnode. Cluster wide no echo.
if dc.kind == LEAF {
// Check two scenarios. One is inbound from a route (c.pa.origin)
if c.kind == ROUTER && len(c.pa.origin) > 0 {
if string(c.pa.origin) == dc.remoteCluster() {
continue
}
}
// The other is leaf to leaf.
if c.kind == LEAF {
src, dest := c.remoteCluster(), dc.remoteCluster()
if src != _EMPTY_ && src == dest {
continue
}
}
// We need to check if this is a request that has a stamped client information header.
// This will contain an account but will represent the account from the leafnode. If
// they are not named the same this would cause an account lookup failure trying to
// process the request for something like JetStream or other system services that rely
// on the client info header. We can just check for reply and the presence of a header
// to avoid slow downs for all traffic.
if len(c.pa.reply) > 0 && c.pa.hdr >= 0 {
dmsg, hset = c.checkLeafClientInfoHeader(msg)
}
}
mh := c.msgHeaderForRouteOrLeaf(subject, reply, rt, acc)
didDeliver = c.deliverMsg(rt.sub, acc, subject, reply, mh, dmsg, false) || didDeliver
// If we set the header reset the origin pub args.
if hset {
c.pa = pa
}
}
return didDeliver, queues
}
// Check and swap accounts on a client info header destined across a leafnode.
func (c *client) checkLeafClientInfoHeader(msg []byte) (dmsg []byte, setHdr bool) {
if c.pa.hdr < 0 || len(msg) < c.pa.hdr {
return msg, false
}
cir := getHeader(ClientInfoHdr, msg[:c.pa.hdr])
if len(cir) == 0 {
return msg, false
}
dmsg = msg
var ci ClientInfo
if err := json.Unmarshal(cir, &ci); err == nil {
if v, _ := c.srv.leafRemoteAccounts.Load(ci.Account); v != nil {
remoteAcc := v.(string)
if ci.Account != remoteAcc {
ci.Account = remoteAcc
if b, _ := json.Marshal(ci); b != nil {
dmsg, setHdr = c.setHeader(ClientInfoHdr, string(b), msg), true
}
}
}
}
return dmsg, setHdr
}
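// Notify the client and log that a publish was denied by permissions.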
func (c *client) pubPermissionViolation(subject []byte) {
c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject))
c.Errorf("Publish Violation - %s, Subject %q", c.getAuthUser(), subject)
}
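// Notify the client and log that a subscription (or queue subscription) was denied by permissions.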
func (c *client) subPermissionViolation(sub *subscription) {
errTxt := fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject)
logTxt := fmt.Sprintf("Subscription Violation - %s, Subject %q, SID %s",
c.getAuthUser(), sub.subject, sub.sid)
if sub.queue != nil {
errTxt = fmt.Sprintf("Permissions Violation for Subscription to %q using queue %q", sub.subject, sub.queue)
logTxt = fmt.Sprintf("Subscription Violation - %s, Subject %q, Queue: %q, SID %s",
c.getAuthUser(), sub.subject, sub.queue, sub.sid)
}
c.sendErr(errTxt)
c.Errorf(logTxt)
}
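// Notify the client and log that a publish used a reserved reply subject.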
func (c *client) replySubjectViolation(reply []byte) {
c.sendErr(fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply))
c.Errorf("Publish Violation - %s, Reply %q", c.getAuthUser(), reply)
}
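// processPingTimer is invoked when the ping timer fires. It sends a PING when
// needed, closes stale connections that exceeded MaxPingsOut, and resets the timer.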
func (c *client) processPingTimer() {
c.mu.Lock()
c.ping.tmr = nil
// Check if connection is still opened
if c.isClosed() {
c.mu.Unlock()
return
}
c.Debugf("%s Ping Timer", c.kindString())
var sendPing bool
// If we have had activity within the PingInterval then
// there is no need to send a ping. This can be client data
// or if we received a ping from the other side.
pingInterval := c.srv.getOpts().PingInterval
if c.kind == GATEWAY {
pingInterval = adjustPingIntervalForGateway(pingInterval)
sendPing = true
}
now := time.Now()
needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL
// Do not delay PINGs for GATEWAY connections.
if c.kind != GATEWAY {
if delta := now.Sub(c.last); delta < pingInterval && !needRTT {
c.Debugf("Delaying PING due to client activity %v ago", delta.Round(time.Second))
} else if delta := now.Sub(c.ping.last); delta < pingInterval && !needRTT {
c.Debugf("Delaying PING due to remote ping %v ago", delta.Round(time.Second))
} else {
sendPing = true
}
}
if sendPing {
// Check for violation
if c.ping.out+1 > c.srv.getOpts().MaxPingsOut {
c.Debugf("Stale Client Connection - Closing")
c.enqueueProto([]byte(fmt.Sprintf(errProto, "Stale Connection")))
c.mu.Unlock()
c.closeConnection(StaleConnection)
return
}
// Send PING
c.sendPing()
}
// Reset to fire again.
c.setPingTimer()
c.mu.Unlock()
}
// Returns the smaller of the given `d` and `gatewayMaxPingInterval` durations.
// Invoked for connections known to be of GATEWAY type.
func adjustPingIntervalForGateway(d time.Duration) time.Duration {
if d > gatewayMaxPingInterval {
return gatewayMaxPingInterval
}
return d
}
// Lock should be held
func (c *client) setPingTimer() {
if c.srv == nil {
return
}
d := c.srv.getOpts().PingInterval
if c.kind == GATEWAY {
d = adjustPingIntervalForGateway(d)
}
c.ping.tmr = time.AfterFunc(d, c.processPingTimer)
}
// Lock should be held
func (c *client) clearPingTimer() {
if c.ping.tmr == nil {
return
}
c.ping.tmr.Stop()
c.ping.tmr = nil
}
// Lock should be held
func (c *client) setAuthTimer(d time.Duration) {
c.atmr = time.AfterFunc(d, c.authTimeout)
}
// Lock should be held
func (c *client) clearAuthTimer() bool {
if c.atmr == nil {
return true
}
stopped := c.atmr.Stop()
c.atmr = nil
return stopped
}
// We may reuse atmr for expiring user jwts,
// so check connectReceived.
// Lock assumed held on entry.
func (c *client) awaitingAuth() bool {
return !c.flags.isSet(connectReceived) && c.atmr != nil
}
// This will set the atmr for the JWT expiration time.
// We will lock on entry.
func (c *client) setExpirationTimer(d time.Duration) {
c.mu.Lock()
c.atmr = time.AfterFunc(d, c.authExpired)
c.mu.Unlock()
}
// Possibly flush the connection and then close the low level connection.
// The boolean `minimalFlush` indicates if the flush operation should have a
// minimal write deadline.
// Lock is held on entry.
func (c *client) flushAndClose(minimalFlush bool) {
if !c.flags.isSet(skipFlushOnClose) && c.out.pb > 0 {
if minimalFlush {
const lowWriteDeadline = 100 * time.Millisecond
// Reduce the write deadline if needed.
if c.out.wdl > lowWriteDeadline {
c.out.wdl = lowWriteDeadline
}
}
c.flushOutbound()
}
c.out.p, c.out.s = nil, nil
// Close the low level connection.
if c.nc != nil {
// Starting with Go 1.16, the low level close will set its own deadline
// of 5 seconds, so setting our own deadline does not work. Instead,
// we will close the TLS connection in separate go routine.
nc := c.nc
c.nc = nil
if _, ok := nc.(*tls.Conn); ok {
go func() { nc.Close() }()
} else {
nc.Close()
}
}
}
var kindStringMap = map[int]string{
CLIENT: "Client",
ROUTER: "Router",
GATEWAY: "Gateway",
LEAF: "Leafnode",
JETSTREAM: "JetStream",
ACCOUNT: "Account",
SYSTEM: "System",
}
func (c *client) kindString() string {
if kindStringVal, ok := kindStringMap[c.kind]; ok {
return kindStringVal
}
return "Unknown Type"
}
// swapAccountAfterReload will check to make sure the bound account for this client
// is current. Under certain circumstances after a reload we could be pointing to
// an older one.
func (c *client) swapAccountAfterReload() {
c.mu.Lock()
defer c.mu.Unlock()
if c.srv == nil {
return
}
acc, _ := c.srv.LookupAccount(c.acc.Name)
c.acc = acc
}
// processSubsOnConfigReload removes any subscriptions the client has that are no
// longer authorized, and checks for imports (accounts) due to a config reload.
func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) {
c.mu.Lock()
var (
checkPerms = c.perms != nil
checkAcc = c.acc != nil
acc = c.acc
)
if !checkPerms && !checkAcc {
c.mu.Unlock()
return
}
var (
_subs [32]*subscription
subs = _subs[:0]
_removed [32]*subscription
removed = _removed[:0]
srv = c.srv
)
if checkAcc {
// We actually only want to check if stream imports have changed.
if _, ok := awcsti[acc.Name]; !ok {
checkAcc = false
}
}
// We will clear any mperms we have here. It will rebuild on the fly with canSubscribe,
// so we do that here as we collect them. We will check result down below.
c.mperms = nil
// Collect client's subs under the lock
for _, sub := range c.subs {
// Just checking to rebuild mperms under the lock; we collect removed subs here.
// Only collect into the subs array when the sub is still allowed and checkAcc is true.
canSub := c.canSubscribe(string(sub.subject))
canQSub := sub.queue != nil && c.canQueueSubscribe(string(sub.subject), string(sub.queue))
if !canSub && !canQSub {
removed = append(removed, sub)
} else if checkAcc {
subs = append(subs, sub)
}
}
c.mu.Unlock()
// This list is all subs who are allowed and we need to check accounts.
for _, sub := range subs {
c.mu.Lock()
oldShadows := sub.shadow
sub.shadow = nil
c.mu.Unlock()
c.addShadowSubscriptions(acc, sub)
for _, nsub := range oldShadows {
nsub.im.acc.sl.Remove(nsub)
}
}
// Unsubscribe all that need to be removed and report back to client and logs.
for _, sub := range removed {
c.unsubscribe(acc, sub, true, true)
c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)",
sub.subject, sub.sid))
srv.Noticef("Removed sub %q (sid %q) for %s - not authorized",
sub.subject, sub.sid, c.getAuthUser())
}
}
// Allows us to count up all the queue subscribers during close.
type qsub struct {
sub *subscription
n int32
}
func (c *client) closeConnection(reason ClosedState) {
c.mu.Lock()
if c.flags.isSet(closeConnection) {
c.mu.Unlock()
return
}
// Note that we may have markConnAsClosed() invoked before closeConnection(),
// so don't set this to 1, instead bump the count.
c.rref++
c.flags.set(closeConnection)
c.clearAuthTimer()
c.clearPingTimer()
c.markConnAsClosed(reason)
// Unblock anyone who is potentially stalled waiting on us.
if c.out.stc != nil {
close(c.out.stc)
c.out.stc = nil
}
var (
connectURLs []string
wsConnectURLs []string
kind = c.kind
srv = c.srv
noReconnect = c.flags.isSet(noReconnect)
acc = c.acc
spoke bool
)
// Snapshot for use if we are a client connection.
// FIXME(dlc) - we can just stub in a new one for client
// and reference existing one.
var subs []*subscription
if kind == CLIENT || kind == LEAF || kind == JETSTREAM {
var _subs [32]*subscription
subs = _subs[:0]
for _, sub := range c.subs {
// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
sub.max = 0
sub.close()
subs = append(subs, sub)
}
spoke = c.isSpokeLeafNode()
}
if c.route != nil {
connectURLs = c.route.connectURLs
wsConnectURLs = c.route.wsConnURLs
}
// If we have remote latency tracking running shut that down.
if c.rrTracking != nil {
c.rrTracking.ptmr.Stop()
c.rrTracking = nil
}
c.mu.Unlock()
// Remove client's or leaf node or jetstream subscriptions.
if acc != nil && (kind == CLIENT || kind == LEAF || kind == JETSTREAM) {
acc.sl.RemoveBatch(subs)
} else if kind == ROUTER {
go c.removeRemoteSubs()
}
if srv != nil {
// If this is a route that disconnected, possibly send an INFO with
// the updated list of connect URLs to clients that know how to
// handle async INFOs.
if (len(connectURLs) > 0 || len(wsConnectURLs) > 0) && !srv.getOpts().Cluster.NoAdvertise {
srv.removeConnectURLsAndSendINFOToClients(connectURLs, wsConnectURLs)
}
// Unregister
srv.removeClient(c)
// Update remote subscriptions.
if acc != nil && (kind == CLIENT || kind == LEAF || kind == JETSTREAM) {
qsubs := map[string]*qsub{}
for _, sub := range subs {
// Call unsubscribe here to cleanup shadow subscriptions and such.
c.unsubscribe(acc, sub, true, false)
// Update route as normal for a normal subscriber.
if sub.queue == nil {
if !spoke {
srv.updateRouteSubscriptionMap(acc, sub, -1)
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
}
}
srv.updateLeafNodes(acc, sub, -1)
} else {
// We handle queue subscribers specially in case we
// have a bunch, so we can just send one update to the
// connected routes.
num := int32(1)
if kind == LEAF {
num = sub.qw
}
key := string(sub.subject) + " " + string(sub.queue)
if esub, ok := qsubs[key]; ok {
esub.n += num
} else {
qsubs[key] = &qsub{sub, num}
}
}
}
// Process any qsubs here.
for _, esub := range qsubs {
if !spoke {
srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n))
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(acc.Name, esub.sub, -(esub.n))
}
}
srv.updateLeafNodes(acc, esub.sub, -(esub.n))
}
if prev := acc.removeClient(c); prev == 1 {
srv.decActiveAccounts()
}
}
}
// Don't reconnect connections that have been marked with
// the no reconnect flag.
if noReconnect {
return
}
c.reconnect()
}
// Depending on the kind of connections, this may attempt to recreate a connection.
// The actual reconnect attempt will be started in a go routine.
func (c *client) reconnect() {
var (
retryImplicit bool
gwName string
gwIsOutbound bool
gwCfg *gatewayCfg
)
c.mu.Lock()
// Decrease the ref count and perform the reconnect only if == 0.
c.rref--
if c.flags.isSet(noReconnect) || c.rref > 0 {
c.mu.Unlock()
return
}
if c.route != nil {
retryImplicit = c.route.retry
}
kind := c.kind
if kind == GATEWAY {
gwName = c.gw.name
gwIsOutbound = c.gw.outbound
gwCfg = c.gw.cfg
}
srv := c.srv
c.mu.Unlock()
// Check for a solicited route. If it was, start up a reconnect unless
// we are already connected to the other end.
if c.isSolicitedRoute() || retryImplicit {
// Capture these under lock
c.mu.Lock()
rid := c.route.remoteID
rtype := c.route.routeType
rurl := c.route.url
c.mu.Unlock()
srv.mu.Lock()
defer srv.mu.Unlock()
// It is possible that the server is being shutdown.
// If so, don't try to reconnect
if !srv.running {
return
}
if rid != "" && srv.remotes[rid] != nil {
srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid)
return
} else if rid == srv.info.ID {
srv.Debugf("Detected route to self, ignoring %q", rurl)
return
} else if rtype != Implicit || retryImplicit {
srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl)
// Keep track of this go-routine so we can wait for it on
// server shutdown.
srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) })
}
} else if srv != nil && kind == GATEWAY && gwIsOutbound {
if gwCfg != nil {
srv.Debugf("Attempting reconnect for gateway %q", gwName)
// Run this as a go routine since we may be called within
// the solicitGateway itself if there was an error during
// the creation of the gateway connection.
srv.startGoRoutine(func() { srv.reconnectGateway(gwCfg) })
} else {
srv.Debugf("Gateway %q not in configuration, not attempting reconnect", gwName)
}
} else if c.isSolicitedLeafNode() {
// Check if this is a solicited leaf node. Start up a reconnect.
srv.startGoRoutine(func() { srv.reConnectToRemoteLeafNode(c.leaf.remote) })
}
}
// Set the noReconnect flag. This is used before a call to closeConnection()
// to prevent the connection to reconnect (routes, gateways).
func (c *client) setNoReconnect() {
c.mu.Lock()
c.flags.set(noReconnect)
c.mu.Unlock()
}
// Returns the client's RTT value with the protection of the client's lock.
func (c *client) getRTTValue() time.Duration {
c.mu.Lock()
rtt := c.rtt
c.mu.Unlock()
return rtt
}
// This function is used by ROUTER and GATEWAY connections to
// look for a subject on a given account (since these type of
// connections are not bound to a specific account).
// If the c.pa.subject is found in the cache, the cached result
// is returned, otherwise we match the account's sublist and update
// the cache. The cache is pruned if reaching a certain size.
func (c *client) getAccAndResultFromCache() (*Account, *SublistResult) {
var (
acc *Account
pac *perAccountCache
r *SublistResult
ok bool
)
// Check our cache.
if pac, ok = c.in.pacache[string(c.pa.pacache)]; ok {
// Check the genid to see if it's still valid.
// sl could be swapped out on reload so need to lock.
pac.acc.mu.RLock()
sl := pac.acc.sl
pac.acc.mu.RUnlock()
if genid := atomic.LoadUint64(&sl.genid); genid != pac.genid {
ok = false
delete(c.in.pacache, string(c.pa.pacache))
} else {
acc = pac.acc
r = pac.results
}
}
if !ok {
// Match correct account and sublist.
if acc, _ = c.srv.LookupAccount(string(c.pa.account)); acc == nil {
return nil, nil
}
// sl could be swapped out on reload so need to lock.
acc.mu.RLock()
sl := acc.sl
acc.mu.RUnlock()
// Match against the account sublist.
r = sl.Match(string(c.pa.subject))
// Store in our cache
c.in.pacache[string(c.pa.pacache)] = &perAccountCache{acc, r, atomic.LoadUint64(&sl.genid)}
// Check if we need to prune.
if len(c.in.pacache) > maxPerAccountCacheSize {
c.prunePerAccountCache()
}
}
return acc, r
}
// Account will return the associated account for this client.
func (c *client) Account() *Account {
if c == nil {
return nil
}
c.mu.Lock()
acc := c.acc
c.mu.Unlock()
return acc
}
// prunePerAccountCache will prune off a random number of cache entries.
func (c *client) prunePerAccountCache() {
n := 0
for cacheKey := range c.in.pacache {
delete(c.in.pacache, cacheKey)
if n++; n > prunePerAccountCacheSize {
break
}
}
}
// pruneClosedSubFromPerAccountCache removes entries that contain subscriptions
// that have been closed.
func (c *client) pruneClosedSubFromPerAccountCache() {
for cacheKey, pac := range c.in.pacache {
for _, sub := range pac.results.psubs {
if sub.isClosed() {
goto REMOVE
}
}
for _, qsub := range pac.results.qsubs {
for _, sub := range qsub {
if sub.isClosed() {
goto REMOVE
}
}
}
continue
REMOVE:
delete(c.in.pacache, cacheKey)
}
}
// Returns our service account for this request.
func (ci *ClientInfo) serviceAccount() string {
if ci == nil {
return _EMPTY_
}
if ci.Service != _EMPTY_ {
return ci.Service
}
return ci.Account
}
// Grabs the information for this client.
func (c *client) getClientInfo(detailed bool) *ClientInfo {
if c == nil || (c.kind != CLIENT && c.kind != LEAF && c.kind != JETSTREAM) {
return nil
}
// Server name. Defaults to server ID if not set explicitly.
var cn, sn string
if detailed {
if c.kind != LEAF {
sn = c.srv.Name()
}
cn = c.srv.cachedClusterName()
}
c.mu.Lock()
var ci ClientInfo
// RTT and Account are always added.
ci.Account = accForClient(c)
ci.RTT = c.rtt
// Detailed signals additional opt in.
if detailed {
if c.kind == LEAF {
sn = c.leaf.remoteServer
}
ci.Start = &c.start
ci.Host = c.host
ci.ID = c.cid
ci.Name = c.opts.Name
ci.User = c.getRawAuthUser()
ci.Lang = c.opts.Lang
ci.Version = c.opts.Version
ci.Server = sn
ci.Cluster = cn
ci.Jwt = c.opts.JWT
ci.IssuerKey = issuerForClient(c)
ci.NameTag = c.nameTag
ci.Tags = c.tags
ci.Kind = c.kindString()
ci.ClientType = c.clientTypeString()
}
c.mu.Unlock()
return &ci
}
func (c *client) doTLSServerHandshake(typ string, tlsConfig *tls.Config, timeout float64, pCerts PinnedCertSet) error {
_, err := c.doTLSHandshake(typ, false, nil, tlsConfig, _EMPTY_, timeout, pCerts)
return err
}
func (c *client) doTLSClientHandshake(typ string, url *url.URL, tlsConfig *tls.Config, tlsName string, timeout float64, pCerts PinnedCertSet) (bool, error) {
return c.doTLSHandshake(typ, true, url, tlsConfig, tlsName, timeout, pCerts)
}
// Performs either server or client side (if solicit is true) TLS Handshake.
// On error, the TLS handshake error has been logged and the connection
// has been closed.
//
// Lock is held on entry.
func (c *client) doTLSHandshake(typ string, solicit bool, url *url.URL, tlsConfig *tls.Config, tlsName string, timeout float64, pCerts PinnedCertSet) (bool, error) {
var host string
var resetTLSName bool
var err error
// Capture kind for some debug/error statements.
kind := c.kind
// If we solicited, we will act like the client, otherwise the server.
if solicit {
c.Debugf("Starting TLS %s client handshake", typ)
if tlsConfig.ServerName == _EMPTY_ {
// If the given url is a hostname, use this hostname for the
// ServerName. If it is an IP, use the cfg's tlsName. If none
// is available, resort to current IP.
host = url.Hostname()
if tlsName != _EMPTY_ && net.ParseIP(host) != nil {
host = tlsName
}
tlsConfig.ServerName = host
}
c.nc = tls.Client(c.nc, tlsConfig)
} else {
if kind == CLIENT {
c.Debugf("Starting TLS client connection handshake")
} else {
c.Debugf("Starting TLS %s server handshake", typ)
}
c.nc = tls.Server(c.nc, tlsConfig)
}
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(timeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
c.mu.Unlock()
if err = conn.Handshake(); err != nil {
if solicit {
// Based on type of error, possibly clear the saved tlsName
// See: https://github.com/nats-io/nats-server/issues/1256
if _, ok := err.(x509.HostnameError); ok {
if host == tlsName {
resetTLSName = true
}
}
}
} else if !c.matchesPinnedCert(pCerts) {
err = ErrCertNotPinned
}
if err != nil {
if kind == CLIENT {
c.Errorf("TLS handshake error: %v", err)
} else {
c.Errorf("TLS %s handshake error: %v", typ, err)
}
c.closeConnection(TLSHandshakeError)
// Grab the lock before returning since the caller was holding the lock on entry
c.mu.Lock()
// Returning any error is fine. Since the connection is closed ErrConnectionClosed
// is appropriate.
return resetTLSName, ErrConnectionClosed
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// To be consistent with client, set this flag to indicate that handshake is done
c.flags.set(handshakeComplete)
// The connection may still have been closed on a successful handshake due
// to a race with the TLS timeout. If that is the case, return an error
// indicating that the connection is closed.
if c.isClosed() {
err = ErrConnectionClosed
}
return false, err
}
// getRawAuthUser returns the raw auth user for the client.
// Lock should be held.
func (c *client) getRawAuthUser() string {
switch {
case c.opts.Nkey != "":
return c.opts.Nkey
case c.opts.Username != "":
return c.opts.Username
case c.opts.JWT != "":
return c.pubKey
case c.opts.Token != "":
return c.opts.Token
default:
return ""
}
}
// getAuthUser returns the auth user for the client.
// Lock should be held.
func (c *client) getAuthUser() string {
switch {
case c.opts.Nkey != "":
return fmt.Sprintf("Nkey %q", c.opts.Nkey)
case c.opts.Username != "":
return fmt.Sprintf("User %q", c.opts.Username)
case c.opts.JWT != "":
return fmt.Sprintf("JWT User %q", c.pubKey)
default:
return `User "N/A"`
}
}
// Converts the given array of strings to a map of strings.
// The strings are converted to upper-case and added to the map only
// if the server recognizes them as valid connection types.
// If there are unknown connection types, the map of valid ones is returned
// along with an error that contains the name of the unknown.
func convertAllowedConnectionTypes(cts []string) (map[string]struct{}, error) {
var unknown []string
m := make(map[string]struct{}, len(cts))
for _, i := range cts {
i = strings.ToUpper(i)
switch i {
case jwt.ConnectionTypeStandard, jwt.ConnectionTypeWebsocket, jwt.ConnectionTypeLeafnode, jwt.ConnectionTypeMqtt:
m[i] = struct{}{}
default:
unknown = append(unknown, i)
}
}
var err error
// We will still return the map of valid ones.
if len(unknown) != 0 {
err = fmt.Errorf("invalid connection types %q", unknown)
}
return m, err
}
// This will return true if the connection is of a type present in the given `acts` map.
// Note that so far this is used only for CLIENT or LEAF connections.
// But a CLIENT can be standard or websocket (and other types in the future).
func (c *client) connectionTypeAllowed(acts map[string]struct{}) bool {
// Empty means all type of clients are allowed
if len(acts) == 0 {
return true
}
var want string
switch c.kind {
case CLIENT:
switch c.clientType() {
case NATS:
want = jwt.ConnectionTypeStandard
case WS:
want = jwt.ConnectionTypeWebsocket
case MQTT:
want = jwt.ConnectionTypeMqtt
}
case LEAF:
want = jwt.ConnectionTypeLeafnode
}
_, ok := acts[want]
return ok
}
// isClosed returns true if either closeConnection or connMarkedClosed
// flag has been set, or if `nc` is nil, which may happen in tests.
func (c *client) isClosed() bool {
return c.flags.isSet(closeConnection) || c.flags.isSet(connMarkedClosed) || c.nc == nil
}
// Logging functionality scoped to a client or route.
func (c *client) Error(err error) {
c.srv.Errors(c, err)
}
func (c *client) Errorf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Errorf(format, v...)
}
func (c *client) Debugf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Debugf(format, v...)
}
func (c *client) Noticef(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Noticef(format, v...)
}
func (c *client) Tracef(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Tracef(format, v...)
}
func (c *client) Warnf(format string, v ...interface{}) {
format = fmt.Sprintf("%s - %s", c, format)
c.srv.Warnf(format, v...)
}
| 1 | 14,116 | Not sure what is this change doing? | nats-io-nats-server | go |
@@ -272,8 +272,9 @@ def revoke_user_code_tokens(user):
db.session.delete(code)
revoke_tokens(user)
-def get_exp(mins=30):
- return datetime.utcnow() + timedelta(minutes=mins)
+def get_exp(mins=None):
+ delta = timedelta(minutes=mins) if mins is not None else timedelta(days=90)
+ return datetime.utcnow() + delta
def issue_token(user, exp=None):
uuid = generate_uuid()
| 1 |
import base64
from datetime import datetime, timedelta
import json
import uuid
from flask import redirect, request
import itsdangerous
import jwt
from passlib.context import CryptContext
from sqlalchemy import func
from . import app, db
from .const import (VALID_EMAIL_RE, VALID_USERNAME_RE, blacklisted_name,
ACTIVATE_SALT, PASSWORD_RESET_SALT, MAX_LINK_AGE,
CODE_EXP_MINUTES)
from .mail import (send_activation_email, send_reset_email, send_new_user_email,
send_welcome_email)
from .models import ActivationToken, Code, PasswordResetToken, Token, User
CATALOG_URL = app.config['CATALOG_URL']
pwd_context = CryptContext(
schemes=['pbkdf2_sha512', 'django_pbkdf2_sha256'],
pbkdf2_sha512__default_rounds=500000
)
# Each round should take about half a second,
# 500000 rounds experimentally determined
class AuthException(Exception):
"""
Base class for Auth exceptions.
"""
def __init__(self, msg):
super().__init__()
self.message = msg
class ValidationException(AuthException):
"""
Represents a failure to deserialize a signed link,
a password that is too short, etc.
"""
pass
class ConflictException(AuthException):
"""
Represents an exception involving an attempt to register a
username that already exists, etc.
"""
pass
class NotFoundException(AuthException):
"""
Represents an exception involving an attempted operation on an entity
that could not be located.
"""
pass
class CredentialException(AuthException):
"""
Represents an exception involving things like an incorrect token,
an incorrect password, etc.
"""
pass
def generate_uuid():
return str(uuid.uuid4())
def hash_password(password):
return pwd_context.hash(password)
def get_admins():
return [user.email for user in User.query.filter_by(is_admin=True).all()]
def activate_response(link):
payload = verify_activation_link(link)
if payload:
_activate_user(User.query.filter_by(id=payload['id']).with_for_update().one_or_none())
db.session.commit()
return redirect("{CATALOG_URL}/signin".format(CATALOG_URL=CATALOG_URL), code=302)
return redirect("{CATALOG_URL}/activation_error".format(CATALOG_URL=CATALOG_URL), code=302)
def validate_password(password):
if len(password) < 8:
raise ValidationException("Password must be at least 8 characters long.")
def reset_password_from_email(email):
user = User.query.filter_by(email=email).with_for_update().one_or_none()
if user:
reset_password(user)
def change_password(raw_password, link):
validate_password(raw_password)
payload = verify_reset_link(link)
if not payload:
raise CredentialException("Reset token invalid")
user_id = payload['id']
user = User.query.filter_by(id=user_id).with_for_update().one_or_none()
if not user:
raise NotFoundException("User not found")
user.password = hash_password(raw_password)
db.session.add(user)
def _create_user(username, password='', email=None, is_admin=False,
requires_activation=True, requires_reset=False):
def check_conflicts(username, email):
if not VALID_USERNAME_RE.match(username):
raise ValidationException("Unacceptable username.")
if blacklisted_name(username):
raise ValidationException("Unacceptable username.")
if email is None:
raise ValidationException("Must provide email.")
if not VALID_EMAIL_RE.match(email):
raise ValidationException("Unacceptable email.")
if User.query.filter_by(name=username).one_or_none():
raise ConflictException("Username already taken.")
if User.query.filter_by(email=email).one_or_none():
raise ConflictException("Email already taken.")
check_conflicts(username, email)
validate_password(password)
new_password = "" if requires_reset else hash_password(password)
if requires_activation:
is_active = False
else:
is_active = True
user = User(
id=generate_uuid(),
name=username,
password=new_password,
email=email,
is_active=is_active,
is_admin=is_admin
)
db.session.add(user)
if requires_activation:
db.session.flush() # necessary due to link token foreign key relationship with User
send_activation_email(user, generate_activation_link(user.id))
if requires_reset:
db.session.flush() # necessary due to link token foreign key relationship with User
send_welcome_email(user, user.email, generate_reset_link(user.id))
def _update_user(username, password=None, email=None, is_admin=None, is_active=None):
existing_user = User.query.filter_by(name=username).with_for_update().one_or_none()
if not existing_user:
raise NotFoundException("User to update not found")
if password is not None:
new_password = hash_password(password)
existing_user.password = new_password
if email is not None:
existing_user.email = email
if is_admin is not None:
existing_user.is_admin = is_admin
if is_active is not None:
existing_user.is_active = is_active
db.session.add(existing_user)
def _activate_user(user):
if user is None:
raise NotFoundException("User not found")
user.is_active = True
db.session.add(user)
admins = get_admins()
if admins:
send_new_user_email(user.name, user.email, admins)
def update_last_login(user):
user.last_login = func.now()
db.session.add(user)
def _delete_user(user):
if user:
revoke_user_code_tokens(user)
db.session.delete(user)
else:
raise NotFoundException("User to delete not found")
return user
def _enable_user(user):
if user:
user.is_active = True
db.session.add(user)
else:
raise NotFoundException("User to enable not found")
def _disable_user(user):
if user:
revoke_user_code_tokens(user)
user.is_active = False
db.session.add(user)
else:
raise NotFoundException("User to disable not found")
def issue_code(user):
user_id = user.id
expires = datetime.utcnow() + timedelta(minutes=CODE_EXP_MINUTES)
code = Code(user_id=user_id, code=generate_uuid(), expires=expires)
db.session.add(code)
return encode_code({'id': user_id, 'code': code.code})
def encode_code(code_dict):
return base64.b64encode(bytes(json.dumps(code_dict), 'utf-8')).decode('utf8')
def decode_code(code_str):
try:
return json.loads(base64.b64decode(code_str).decode('utf8'))
except Exception:
raise ValidationException("Decoding code failed")
def decode_token(token_str):
try:
return jwt.decode(token_str, app.secret_key, algorithm='HS256')
except jwt.exceptions.InvalidTokenError:
raise ValidationException("Token could not be deserialized")
def check_token(user_id, token):
return Token.query.filter_by(user_id=user_id, token=token).one_or_none() is not None
def _verify(payload):
user_id = payload['id']
uuid = payload['uuid']
user = User.query.filter_by(id=user_id).one_or_none()
if user is None:
raise CredentialException('User ID invalid')
if not check_token(user_id, uuid):
raise CredentialException('Token invalid')
return user
def verify_token_string(token_string):
token = decode_token(token_string)
user = _verify(token)
return user
def exp_from_token(token):
token = decode_token(token)
return token['exp']
def revoke_token_string(token_str):
token = decode_token(token_str)
user_id = token['id']
uuid = token['uuid']
return revoke_token(user_id, uuid)
def revoke_token(user_id, token):
found = Token.query.filter_by(user_id=user_id, token=token).with_for_update().one_or_none()
if found is None:
return False
db.session.delete(found)
return True
def revoke_tokens(user):
tokens = Token.query.filter_by(user_id=user.id).with_for_update().all()
for token in tokens:
db.session.delete(token)
def revoke_user_code_tokens(user):
codes = Code.query.filter_by(user_id=user.id).with_for_update().all()
for code in codes:
db.session.delete(code)
revoke_tokens(user)
def get_exp(mins=30):
return datetime.utcnow() + timedelta(minutes=mins)
def issue_token(user, exp=None):
uuid = generate_uuid()
token = Token(user_id=user.id, token=uuid)
db.session.add(token)
exp = exp or get_exp()
payload = {'id': user.id, 'uuid': uuid, 'exp': exp}
token = jwt.encode(payload, app.secret_key, algorithm='HS256')
return token.decode('utf-8')
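# Editor's sketch (not part of the original module): a minimal illustration of the
# token round trip, assuming an application and database session context is active.
def _issue_and_verify_token_example(user):
    # issue_token() stores a new Token row and returns a signed JWT whose payload
    # carries the user id, the token uuid and the expiration produced by get_exp().
    token_str = issue_token(user)
    # verify_token_string() decodes the JWT and checks the uuid against the Token table.
    verified_user = verify_token_string(token_str)
    assert verified_user.id == user.id
    # exp_from_token() exposes the expiration timestamp embedded in the JWT.
    return exp_from_token(token_str)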
def consume_code_string(code_str):
code = decode_code(code_str)
return consume_code(code['id'], code['code'])
def consume_code(user_id, code):
found = Code.query.filter_by(user_id=user_id, code=code).with_for_update().one_or_none()
if found is None:
raise ValidationException("Code not found")
if found.expires.timetuple() < datetime.utcnow().timetuple():
db.session.delete(found)
raise CredentialException("Code expired")
db.session.delete(found)
return User.query.filter_by(id=user_id).one_or_none()
def verify_hash(password, pw_hash):
try:
if not pwd_context.verify(password, pw_hash):
raise CredentialException('Password verification failed')
except ValueError:
raise CredentialException('Password verification failed')
def try_login(user, password):
if not user.is_active:
return False
try:
verify_hash(password, user.password)
except CredentialException:
return False
update_last_login(user)
return True
linkgenerator = itsdangerous.URLSafeTimedSerializer(
app.secret_key,
salt='quilt'
)
def dump_link(payload, salt=None):
link = linkgenerator.dumps(payload, salt=salt)
return link.replace('.', '~')
def load_link(link, max_age, salt=None):
payload = link.replace('~', '.')
return linkgenerator.loads(payload, max_age=max_age, salt=salt)
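# Editor's sketch (not part of the original module): shows the dump_link / load_link
# round trip used by the activation and reset flows below; the token value here is a
# made-up placeholder.
def _link_round_trip_example(user_id):
    payload = {'id': user_id, 'token': 'example-token'}
    link = dump_link(payload, ACTIVATE_SALT)
    # load_link() rejects links older than max_age or signed with a different salt.
    return load_link(link, max_age=MAX_LINK_AGE, salt=ACTIVATE_SALT)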
def generate_activation_token(user_id):
new_token = ActivationToken(user_id=user_id, token=generate_uuid())
db.session.add(new_token)
return new_token.token
def consume_activation_token(user_id, token):
found = (
ActivationToken.query
.filter_by(user_id=user_id, token=token)
.with_for_update()
.one_or_none()
)
if not found:
return False
db.session.delete(found)
return True
def generate_reset_token(user_id):
reset_token = generate_uuid()
PasswordResetToken.upsert(user_id, reset_token)
return reset_token
def consume_reset_token(user_id, token):
found = (
PasswordResetToken
.query
.filter_by(user_id=user_id, token=token)
.with_for_update()
.one_or_none()
)
if not found:
return False
db.session.delete(found)
return True
def generate_activation_link(user_id):
token = generate_activation_token(user_id)
payload = {'id': user_id, 'token': token}
return dump_link(payload, ACTIVATE_SALT)
def generate_reset_link(user_id):
token = generate_reset_token(user_id)
payload = {'id': user_id, 'token': token}
return dump_link(payload, PASSWORD_RESET_SALT)
def verify_activation_link(link, max_age=None):
max_age = max_age if max_age is not None else MAX_LINK_AGE
try:
payload = load_link(link, max_age=max_age, salt=ACTIVATE_SALT)
if not consume_activation_token(payload['id'], payload['token']):
return None
return payload
except (TypeError, KeyError, ValueError, itsdangerous.BadData):
return None
def verify_reset_link(link, max_age=None):
max_age = max_age if max_age is not None else MAX_LINK_AGE
try:
payload = load_link(link, max_age=max_age, salt=PASSWORD_RESET_SALT)
if not consume_reset_token(payload['id'], payload['token']):
return None
return payload
except (TypeError, KeyError, ValueError, itsdangerous.BadData):
return None
def reset_password(user, set_unusable=False):
if set_unusable:
user.password = ''
db.session.add(user)
link = generate_reset_link(user.id)
send_reset_email(user, link)
| 1 | 16,857 | this is funky. either don't take` minutes` as keyword arg or take both `minutes` and `days` and pass all of them on to `timedelta`. i'm guessing you're aiming for backwards compatibility, but i don't think it's worth it given how confusing this is. atlernatively, make `mins=60*24*30` the default. and that brings me to another issue, `mins=None` is not a very useful default. | quiltdata-quilt | py |
@@ -267,6 +267,11 @@ public final class RememberMeConfigurer<H extends HttpSecurityBuilder<H>>
validateInput();
String key = getKey();
RememberMeServices rememberMeServices = getRememberMeServices(http, key);
+ if (key == null) {
+ if (rememberMeServices instanceof AbstractRememberMeServices) {
+ key = ((AbstractRememberMeServices) rememberMeServices).getKey();
+ }
+ }
http.setSharedObject(RememberMeServices.class, rememberMeServices);
LogoutConfigurer<H> logoutConfigurer = http.getConfigurer(LogoutConfigurer.class);
if (logoutConfigurer != null && this.logoutHandler != null) {
| 1 |
/*
* Copyright 2002-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.config.annotation.web.configurers;
import java.util.UUID;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.RememberMeAuthenticationProvider;
import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
import org.springframework.security.config.annotation.web.HttpSecurityBuilder;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.web.authentication.AuthenticationSuccessHandler;
import org.springframework.security.web.authentication.RememberMeServices;
import org.springframework.security.web.authentication.logout.LogoutHandler;
import org.springframework.security.web.authentication.rememberme.AbstractRememberMeServices;
import org.springframework.security.web.authentication.rememberme.PersistentTokenBasedRememberMeServices;
import org.springframework.security.web.authentication.rememberme.PersistentTokenRepository;
import org.springframework.security.web.authentication.rememberme.RememberMeAuthenticationFilter;
import org.springframework.security.web.authentication.rememberme.TokenBasedRememberMeServices;
import org.springframework.security.web.authentication.ui.DefaultLoginPageGeneratingFilter;
/**
* Configures Remember Me authentication. This typically involves the user checking a box
* when they enter their username and password that states to "Remember Me".
*
* <h2>Security Filters</h2>
*
* The following Filters are populated
*
* <ul>
* <li>{@link RememberMeAuthenticationFilter}</li>
* </ul>
*
* <h2>Shared Objects Created</h2>
*
* The following shared objects are populated
*
* <ul>
* <li>
* {@link HttpSecurity#authenticationProvider(org.springframework.security.authentication.AuthenticationProvider)}
* is populated with a {@link RememberMeAuthenticationProvider}</li>
* <li>{@link RememberMeServices} is populated as a shared object and available on
* {@link HttpSecurity#getSharedObject(Class)}</li>
* <li>{@link LogoutConfigurer#addLogoutHandler(LogoutHandler)} is used to add a logout
* handler to clean up the remember me authentication.</li>
* </ul>
*
* <h2>Shared Objects Used</h2>
*
* The following shared objects are used:
*
* <ul>
* <li>{@link AuthenticationManager}</li>
* <li>{@link UserDetailsService} if no {@link #userDetailsService(UserDetailsService)}
* was specified.</li>
* <li>{@link DefaultLoginPageGeneratingFilter} - if present will be populated with
* information from the configuration</li>
* </ul>
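*
* <h2>Example Configuration</h2>
*
* A hypothetical configuration sketch (editor's illustration, not taken from the
* original Javadoc); {@code myUserDetailsService} is an assumed bean reference:
*
* <pre>
* protected void configure(HttpSecurity http) throws Exception {
* 	http.rememberMe()
* 		.key("uniqueAndSecret")
* 		.tokenValiditySeconds(86400)
* 		.userDetailsService(myUserDetailsService);
* }
* </pre>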
*
* @author Rob Winch
* @author Eddú Meléndez
* @since 3.2
*/
public final class RememberMeConfigurer<H extends HttpSecurityBuilder<H>>
extends AbstractHttpConfigurer<RememberMeConfigurer<H>, H> {
/**
* The default name for remember me parameter name and remember me cookie name
*/
private static final String DEFAULT_REMEMBER_ME_NAME = "remember-me";
private AuthenticationSuccessHandler authenticationSuccessHandler;
private String key;
private RememberMeServices rememberMeServices;
private LogoutHandler logoutHandler;
private String rememberMeParameter = DEFAULT_REMEMBER_ME_NAME;
private String rememberMeCookieName = DEFAULT_REMEMBER_ME_NAME;
private String rememberMeCookieDomain;
private PersistentTokenRepository tokenRepository;
private UserDetailsService userDetailsService;
private Integer tokenValiditySeconds;
private Boolean useSecureCookie;
private Boolean alwaysRemember;
/**
* Creates a new instance
*/
public RememberMeConfigurer() {
}
/**
* Allows specifying how long (in seconds) a token is valid for
*
* @param tokenValiditySeconds
* @return {@link RememberMeConfigurer} for further customization
* @see AbstractRememberMeServices#setTokenValiditySeconds(int)
*/
public RememberMeConfigurer<H> tokenValiditySeconds(int tokenValiditySeconds) {
this.tokenValiditySeconds = tokenValiditySeconds;
return this;
}
/**
* Whether the cookie should be flagged as secure or not. Secure cookies can only be
* sent over an HTTPS connection and thus cannot be accidentally submitted over HTTP
* where they could be intercepted.
* <p>
* By default the cookie will be secure if the request is secure. If you only want to
* use remember-me over HTTPS (recommended) you should set this property to
* {@code true}.
*
* @param useSecureCookie set to {@code true} to always use secure cookies,
* {@code false} to disable their use.
* @return the {@link RememberMeConfigurer} for further customization
* @see AbstractRememberMeServices#setUseSecureCookie(boolean)
*/
public RememberMeConfigurer<H> useSecureCookie(boolean useSecureCookie) {
this.useSecureCookie = useSecureCookie;
return this;
}
/**
* Specifies the {@link UserDetailsService} used to look up the {@link UserDetails}
* when a remember me token is valid. The default is to use the
* {@link UserDetailsService} found by invoking
* {@link HttpSecurity#getSharedObject(Class)} which is set when using
* {@link WebSecurityConfigurerAdapter#configure(AuthenticationManagerBuilder)}.
* Alternatively, one can populate {@link #rememberMeServices(RememberMeServices)}.
*
* @param userDetailsService the {@link UserDetailsService} to configure
* @return the {@link RememberMeConfigurer} for further customization
* @see AbstractRememberMeServices
*/
public RememberMeConfigurer<H> userDetailsService(
UserDetailsService userDetailsService) {
this.userDetailsService = userDetailsService;
return this;
}
/**
* Specifies the {@link PersistentTokenRepository} to use. The default is to use
* {@link TokenBasedRememberMeServices} instead.
*
* @param tokenRepository the {@link PersistentTokenRepository} to use
* @return the {@link RememberMeConfigurer} for further customization
*/
public RememberMeConfigurer<H> tokenRepository(
PersistentTokenRepository tokenRepository) {
this.tokenRepository = tokenRepository;
return this;
}
/**
* Sets the key to identify tokens created for remember me authentication. Default is
* a secure randomly generated key.
*
* @param key the key to identify tokens created for remember me authentication
* @return the {@link RememberMeConfigurer} for further customization
*/
public RememberMeConfigurer<H> key(String key) {
this.key = key;
return this;
}
/**
* The HTTP parameter used to indicate to remember the user at time of login.
*
* @param rememberMeParameter the HTTP parameter used to indicate to remember the user
* @return the {@link RememberMeConfigurer} for further customization
*/
public RememberMeConfigurer<H> rememberMeParameter(String rememberMeParameter) {
this.rememberMeParameter = rememberMeParameter;
return this;
}
/**
* The name of the cookie which stores the token for remember me authentication. Defaults
* to 'remember-me'.
*
* @param rememberMeCookieName the name of the cookie which stores the token for remember
* me authentication
* @return the {@link RememberMeConfigurer} for further customization
* @since 4.0.1
*/
public RememberMeConfigurer<H> rememberMeCookieName(String rememberMeCookieName) {
this.rememberMeCookieName = rememberMeCookieName;
return this;
}
/**
* The domain name within which the remember me cookie is visible.
*
* @param rememberMeCookieDomain the domain name within which the remember me cookie
* is visible.
* @return the {@link RememberMeConfigurer} for further customization
* @since 4.1.0
*/
public RememberMeConfigurer<H> rememberMeCookieDomain(String rememberMeCookieDomain) {
this.rememberMeCookieDomain = rememberMeCookieDomain;
return this;
}
/**
* Allows control over the destination a remembered user is sent to when they are
* successfully authenticated. By default, the filter will just allow the current
* request to proceed, but if an {@code AuthenticationSuccessHandler} is set, it will
* be invoked and the {@code doFilter()} method will return immediately, thus allowing
* the application to redirect the user to a specific URL, regardless of what the
* original request was for.
*
* @param authenticationSuccessHandler the strategy to invoke immediately before
* returning from {@code doFilter()}.
* @return {@link RememberMeConfigurer} for further customization
* @see RememberMeAuthenticationFilter#setAuthenticationSuccessHandler(AuthenticationSuccessHandler)
*/
public RememberMeConfigurer<H> authenticationSuccessHandler(
AuthenticationSuccessHandler authenticationSuccessHandler) {
this.authenticationSuccessHandler = authenticationSuccessHandler;
return this;
}
/**
* Specify the {@link RememberMeServices} to use.
* @param rememberMeServices the {@link RememberMeServices} to use
* @return the {@link RememberMeConfigurer} for further customizations
* @see RememberMeServices
*/
public RememberMeConfigurer<H> rememberMeServices(
RememberMeServices rememberMeServices) {
this.rememberMeServices = rememberMeServices;
return this;
}
/**
* Whether the cookie should always be created even if the remember-me parameter is
* not set.
* <p>
* By default this will be set to {@code false}.
*
* @param alwaysRemember set to {@code true} to always trigger remember me,
* {@code false} to use the remember-me parameter.
* @return the {@link RememberMeConfigurer} for further customization
* @see AbstractRememberMeServices#setAlwaysRemember(boolean)
*/
public RememberMeConfigurer<H> alwaysRemember(boolean alwaysRemember) {
this.alwaysRemember = alwaysRemember;
return this;
}
@SuppressWarnings("unchecked")
@Override
public void init(H http) throws Exception {
validateInput();
String key = getKey();
RememberMeServices rememberMeServices = getRememberMeServices(http, key);
http.setSharedObject(RememberMeServices.class, rememberMeServices);
LogoutConfigurer<H> logoutConfigurer = http.getConfigurer(LogoutConfigurer.class);
if (logoutConfigurer != null && this.logoutHandler != null) {
logoutConfigurer.addLogoutHandler(this.logoutHandler);
}
RememberMeAuthenticationProvider authenticationProvider = new RememberMeAuthenticationProvider(
key);
authenticationProvider = postProcess(authenticationProvider);
http.authenticationProvider(authenticationProvider);
initDefaultLoginFilter(http);
}
@Override
public void configure(H http) throws Exception {
RememberMeAuthenticationFilter rememberMeFilter = new RememberMeAuthenticationFilter(
http.getSharedObject(AuthenticationManager.class),
this.rememberMeServices);
if (this.authenticationSuccessHandler != null) {
rememberMeFilter
.setAuthenticationSuccessHandler(this.authenticationSuccessHandler);
}
rememberMeFilter = postProcess(rememberMeFilter);
http.addFilter(rememberMeFilter);
}
/**
* Validates that rememberMeServices and rememberMeCookieName have not been set at
* the same time.
*/
private void validateInput() {
if (this.rememberMeServices != null && this.rememberMeCookieName != DEFAULT_REMEMBER_ME_NAME) {
throw new IllegalArgumentException("Can not set rememberMeCookieName " +
"and custom rememberMeServices.");
}
}
/**
* Returns the HTTP parameter used to indicate to remember the user at time of login.
* @return the HTTP parameter used to indicate to remember the user
*/
private String getRememberMeParameter() {
return this.rememberMeParameter;
}
/**
* If available, initializes the {@link DefaultLoginPageGeneratingFilter} shared
* object.
*
* @param http the {@link HttpSecurityBuilder} to use
*/
private void initDefaultLoginFilter(H http) {
DefaultLoginPageGeneratingFilter loginPageGeneratingFilter = http
.getSharedObject(DefaultLoginPageGeneratingFilter.class);
if (loginPageGeneratingFilter != null) {
loginPageGeneratingFilter.setRememberMeParameter(getRememberMeParameter());
}
}
/**
* Gets the {@link RememberMeServices} or creates the {@link RememberMeServices}.
* @param http the {@link HttpSecurity} to lookup shared objects
* @param key the {@link #key(String)}
* @return the {@link RememberMeServices} to use
* @throws Exception
*/
private RememberMeServices getRememberMeServices(H http, String key)
throws Exception {
if (this.rememberMeServices != null) {
if (this.rememberMeServices instanceof LogoutHandler
&& this.logoutHandler == null) {
this.logoutHandler = (LogoutHandler) this.rememberMeServices;
}
return this.rememberMeServices;
}
AbstractRememberMeServices tokenRememberMeServices = createRememberMeServices(
http, key);
tokenRememberMeServices.setParameter(this.rememberMeParameter);
tokenRememberMeServices.setCookieName(this.rememberMeCookieName);
if (this.rememberMeCookieDomain != null) {
tokenRememberMeServices.setCookieDomain(this.rememberMeCookieDomain);
}
if (this.tokenValiditySeconds != null) {
tokenRememberMeServices.setTokenValiditySeconds(this.tokenValiditySeconds);
}
if (this.useSecureCookie != null) {
tokenRememberMeServices.setUseSecureCookie(this.useSecureCookie);
}
if (this.alwaysRemember != null) {
tokenRememberMeServices.setAlwaysRemember(this.alwaysRemember);
}
tokenRememberMeServices.afterPropertiesSet();
this.logoutHandler = tokenRememberMeServices;
this.rememberMeServices = tokenRememberMeServices;
return tokenRememberMeServices;
}
/**
* Creates the {@link RememberMeServices} to use when none is provided. The result is
* either {@link PersistentTokenBasedRememberMeServices} (if a {@link PersistentTokenRepository}
* is specified) or {@link TokenBasedRememberMeServices} otherwise.
*
* @param http the {@link HttpSecurity} to lookup shared objects
* @param key the {@link #key(String)}
* @return the {@link RememberMeServices} to use
* @throws Exception
*/
private AbstractRememberMeServices createRememberMeServices(H http, String key)
throws Exception {
return this.tokenRepository == null
? createTokenBasedRememberMeServices(http, key)
: createPersistentRememberMeServices(http, key);
}
/**
* Creates {@link TokenBasedRememberMeServices}
*
* @param http the {@link HttpSecurity} to lookup shared objects
* @param key the {@link #key(String)}
* @return the {@link TokenBasedRememberMeServices}
*/
private AbstractRememberMeServices createTokenBasedRememberMeServices(H http,
String key) {
UserDetailsService userDetailsService = getUserDetailsService(http);
return new TokenBasedRememberMeServices(key, userDetailsService);
}
/**
* Creates {@link PersistentTokenBasedRememberMeServices}
*
* @param http the {@link HttpSecurity} to lookup shared objects
* @param key the {@link #key(String)}
* @return the {@link PersistentTokenBasedRememberMeServices}
*/
private AbstractRememberMeServices createPersistentRememberMeServices(H http,
String key) {
UserDetailsService userDetailsService = getUserDetailsService(http);
return new PersistentTokenBasedRememberMeServices(key, userDetailsService,
this.tokenRepository);
}
/**
* Gets the {@link UserDetailsService} to use. Either the explicitly configure
* {@link UserDetailsService} from {@link #userDetailsService(UserDetailsService)} or
* a shared object from {@link HttpSecurity#getSharedObject(Class)}.
*
* @param http {@link HttpSecurity} to get the shared {@link UserDetailsService}
* @return the {@link UserDetailsService} to use
*/
private UserDetailsService getUserDetailsService(H http) {
if (this.userDetailsService == null) {
this.userDetailsService = http.getSharedObject(UserDetailsService.class);
}
if (this.userDetailsService == null) {
throw new IllegalStateException("userDetailsService cannot be null. Invoke "
+ RememberMeConfigurer.class.getSimpleName()
+ "#userDetailsService(UserDetailsService) or see its javadoc for alternative approaches.");
}
return this.userDetailsService;
}
/**
* Gets the key to use for validating remember me tokens. Either the value passed into
* {@link #key(String)}, or a secure random string if none was specified.
*
* @return the remember me key to use
*/
private String getKey() {
if (this.key == null) {
this.key = UUID.randomUUID().toString();
}
return this.key;
}
}
| 1 | 10,621 | Please use a tab for indentation instead of spaces. | spring-projects-spring-security | java |
@@ -40,7 +40,7 @@ import java.util.stream.Collector;
* @since 2.0.0
*/
@SuppressWarnings("deprecation")
-public final class Queue<T> extends AbstractsQueue<T, Queue<T>> implements LinearSeq<T>, Kind1<Queue<T>, T> {
+public final class Queue<T> extends AbstractQueue<T, Queue<T>> implements LinearSeq<T>, Kind1<Queue<T>, T> {
private static final long serialVersionUID = 1L;
| 1 |
/* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2017 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.*;
import javaslang.control.Option;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collector;
/**
* An immutable {@code Queue} stores elements allowing a first-in-first-out (FIFO) retrieval.
* <p>
* Queue API:
*
* <ul>
* <li>{@link #dequeue()}</li>
* <li>{@link #dequeueOption()}</li>
* <li>{@link #enqueue(Object)}</li>
* <li>{@link #enqueue(Object[])}</li>
* <li>{@link #enqueueAll(Iterable)}</li>
* <li>{@link #peek()}</li>
* <li>{@link #peekOption()}</li>
* </ul>
*
* A Queue internally consists of a front List containing the front elements of the Queue in the correct order and a
* rear List containing the rear elements of the Queue in reverse order.
* <p>
* When the front list is empty, front and rear are swapped and rear is reversed. This implies the following queue
* invariant: {@code front.isEmpty() => rear.isEmpty()}.
* <p>
* See Okasaki, Chris: <em>Purely Functional Data Structures</em> (p. 42 ff.). Cambridge, 2003.
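* <p>
* A short usage sketch (editor's illustration, not part of the original Javadoc):
*
* <pre>
* <code>
* Queue&lt;Integer&gt; queue = Queue.of(1, 2, 3).enqueue(4);
* // dequeue() removes in FIFO order: the head 1 and the remaining Queue(2, 3, 4)
* queue.dequeue();
* </code>
* </pre>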
*
* @param <T> Component type of the Queue
* @author Daniel Dietrich
* @since 2.0.0
*/
@SuppressWarnings("deprecation")
public final class Queue<T> extends AbstractsQueue<T, Queue<T>> implements LinearSeq<T>, Kind1<Queue<T>, T> {
private static final long serialVersionUID = 1L;
private static final Queue<?> EMPTY = new Queue<>(List.empty(), List.empty());
private final List<T> front;
private final List<T> rear;
/**
* Creates a Queue consisting of a front List and a rear List.
* <p>
* For a {@code Queue(front, rear)} the following invariant holds: {@code Queue is empty <=> front is empty}.
* In other words: If the Queue is not empty, the front List contains at least one element.
*
* @param front A List of front elements, in correct order.
* @param rear A List of rear elements, in reverse order.
*/
private Queue(List<T> front, List<T> rear) {
final boolean frontIsEmpty = front.isEmpty();
this.front = frontIsEmpty ? rear.reverse() : front;
this.rear = frontIsEmpty ? front : rear;
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link javaslang.collection.Queue}.
*
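* A usage sketch (editor's illustration, not part of the original Javadoc):
*
* <pre>
* <code>
* Queue&lt;Integer&gt; queue = java.util.stream.Stream.of(1, 2, 3).collect(Queue.collector());
* </code>
* </pre>
*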
* @param <T> Component type of the Queue.
* @return A javaslang.collection.Queue Collector.
*/
public static <T> Collector<T, ArrayList<T>, Queue<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, Queue<T>> finisher = Queue::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Returns the empty Queue.
*
* @param <T> Component type
* @return The empty Queue.
*/
@SuppressWarnings("unchecked")
public static <T> Queue<T> empty() {
return (Queue<T>) EMPTY;
}
/**
* Narrows a widened {@code Queue<? extends T>} to {@code Queue<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param queue A {@code Queue}.
* @param <T> Component type of the {@code Queue}.
* @return the given {@code queue} instance as narrowed type {@code Queue<T>}.
*/
@SuppressWarnings("unchecked")
public static <T> Queue<T> narrow(Queue<? extends T> queue) {
return (Queue<T>) queue;
}
/**
* Returns a singleton {@code Queue}, i.e. a {@code Queue} of one element.
*
* @param element An element.
* @param <T> The component type
* @return A new Queue instance containing the given element
*/
public static <T> Queue<T> of(T element) {
return ofAll(List.of(element));
}
/**
* Creates a Queue of the given elements.
*
* @param <T> Component type of the Queue.
* @param elements Zero or more elements.
* @return A queue containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
@SuppressWarnings("varargs")
@SafeVarargs
public static <T> Queue<T> of(T... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.of(elements));
}
/**
* Creates a Queue of the given elements.
*
* @param <T> Component type of the Queue.
* @param elements An Iterable of elements.
* @return A queue containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
@SuppressWarnings("unchecked")
public static <T> Queue<T> ofAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof Queue) {
return (Queue<T>) elements;
} else if (!elements.iterator().hasNext()) {
return empty();
} else if (elements instanceof List) {
return new Queue<>((List<T>) elements, List.empty());
} else {
return new Queue<>(List.ofAll(elements), List.empty());
}
}
/**
* Creates a Queue that contains the elements of the given {@link java.util.stream.Stream}.
*
* @param javaStream A {@link java.util.stream.Stream}
* @param <T> Component type of the Stream.
* @return A Queue containing the given elements in the same order.
*/
public static <T> Queue<T> ofAll(java.util.stream.Stream<? extends T> javaStream) {
Objects.requireNonNull(javaStream, "javaStream is null");
return new Queue<>(List.ofAll(javaStream), List.empty());
}
/**
* Creates a Queue from boolean values.
*
* @param elements boolean values
* @return A new Queue of Boolean values
* @throws NullPointerException if elements is null
*/
public static Queue<Boolean> ofAll(boolean... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.ofAll(elements));
}
/**
* Creates a Queue from byte values.
*
* @param elements byte values
* @return A new Queue of Byte values
* @throws NullPointerException if elements is null
*/
public static Queue<Byte> ofAll(byte... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.ofAll(elements));
}
/**
* Creates a Queue from char values.
*
* @param elements char values
* @return A new Queue of Character values
* @throws NullPointerException if elements is null
*/
public static Queue<Character> ofAll(char... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.ofAll(elements));
}
/**
* Creates a Queue from double values.
*
* @param elements double values
* @return A new Queue of Double values
* @throws NullPointerException if elements is null
*/
public static Queue<Double> ofAll(double... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.ofAll(elements));
}
/**
* Creates a Queue from float values.
*
* @param elements float values
* @return A new Queue of Float values
* @throws NullPointerException if elements is null
*/
public static Queue<Float> ofAll(float... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.ofAll(elements));
}
/**
* Creates a Queue from int values.
*
* @param elements int values
* @return A new Queue of Integer values
* @throws NullPointerException if elements is null
*/
public static Queue<Integer> ofAll(int... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.ofAll(elements));
}
/**
* Creates a Queue from long values.
*
* @param elements long values
* @return A new Queue of Long values
* @throws NullPointerException if elements is null
*/
public static Queue<Long> ofAll(long... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.ofAll(elements));
}
/**
* Creates a Queue from short values.
*
* @param elements short values
* @return A new Queue of Short values
* @throws NullPointerException if elements is null
*/
public static Queue<Short> ofAll(short... elements) {
Objects.requireNonNull(elements, "elements is null");
return ofAll(List.ofAll(elements));
}
/**
* Returns a Queue containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
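* <p>
* For example (editor's illustration): {@code Queue.tabulate(3, i -> i * 2)} yields {@code Queue(0, 2, 4)}.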
*
* @param <T> Component type of the Queue
* @param n The number of elements in the Queue
* @param f The Function computing element values
* @return A Queue consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
public static <T> Queue<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return Collections.tabulate(n, f, empty(), Queue::of);
}
/**
* Returns a Queue containing {@code n} values supplied by a given Supplier {@code s}.
*
* @param <T> Component type of the Queue
* @param n The number of elements in the Queue
* @param s The Supplier computing element values
* @return A Queue of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
public static <T> Queue<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return Collections.fill(n, s, empty(), Queue::of);
}
public static Queue<Character> range(char from, char toExclusive) {
return ofAll(Iterator.range(from, toExclusive));
}
public static Queue<Character> rangeBy(char from, char toExclusive, int step) {
return ofAll(Iterator.rangeBy(from, toExclusive, step));
}
@GwtIncompatible
public static Queue<Double> rangeBy(double from, double toExclusive, double step) {
return ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a Queue of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.range(0, 0) // = Queue()
* Queue.range(2, 0) // = Queue()
* Queue.range(-2, 2) // = Queue(-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or {@code Nil} if {@code from >= toExclusive}
*/
public static Queue<Integer> range(int from, int toExclusive) {
return ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a Queue of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeBy(1, 3, 1) // = Queue(1, 2)
* Queue.rangeBy(1, 4, 2) // = Queue(1, 3)
* Queue.rangeBy(4, 1, -2) // = Queue(4, 2)
* Queue.rangeBy(4, 1, 2) // = Queue()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of int values as specified or {@code Nil} if<br>
* {@code from >= toExclusive} and {@code step > 0} or<br>
* {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Queue<Integer> rangeBy(int from, int toExclusive, int step) {
return ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a Queue of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.range(0L, 0L) // = Queue()
* Queue.range(2L, 0L) // = Queue()
* Queue.range(-2L, 2L) // = Queue(-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or {@code Nil} if {@code from >= toExclusive}
*/
public static Queue<Long> range(long from, long toExclusive) {
return ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a Queue of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeBy(1L, 3L, 1L) // = Queue(1L, 2L)
* Queue.rangeBy(1L, 4L, 2L) // = Queue(1L, 3L)
* Queue.rangeBy(4L, 1L, -2L) // = Queue(4L, 2L)
* Queue.rangeBy(4L, 1L, 2L) // = Queue()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or {@code Nil} if<br>
* {@code from >= toExclusive} and {@code step > 0} or<br>
* {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Queue<Long> rangeBy(long from, long toExclusive, long step) {
return ofAll(Iterator.rangeBy(from, toExclusive, step));
}
public static Queue<Character> rangeClosed(char from, char toInclusive) {
return ofAll(Iterator.rangeClosed(from, toInclusive));
}
public static Queue<Character> rangeClosedBy(char from, char toInclusive, int step) {
return ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
@GwtIncompatible
public static Queue<Double> rangeClosedBy(double from, double toInclusive, double step) {
return ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Queue of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeClosed(0, 0) // = Queue(0)
* Queue.rangeClosed(2, 0) // = Queue()
* Queue.rangeClosed(-2, 2) // = Queue(-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or {@code Nil} if {@code from > toInclusive}
*/
public static Queue<Integer> rangeClosed(int from, int toInclusive) {
return ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a Queue of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeClosedBy(1, 3, 1) // = Queue(1, 2, 3)
* Queue.rangeClosedBy(1, 4, 2) // = Queue(1, 3)
* Queue.rangeClosedBy(4, 1, -2) // = Queue(4, 2)
* Queue.rangeClosedBy(4, 1, 2) // = Queue()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or {@code Nil} if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Queue<Integer> rangeClosedBy(int from, int toInclusive, int step) {
return ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Queue of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeClosed(0L, 0L) // = Queue(0L)
* Queue.rangeClosed(2L, 0L) // = Queue()
* Queue.rangeClosed(-2L, 2L) // = Queue(-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or {@code Nil} if {@code from > toInclusive}
*/
public static Queue<Long> rangeClosed(long from, long toInclusive) {
return ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Transposes the rows and columns of a {@link Queue} matrix.
*
* @param <T> matrix element type
* @param matrix to be transposed.
* @return a transposed {@link Queue} matrix.
* @throws IllegalArgumentException if the row lengths of {@code matrix} differ.
*
* <p>
* ex: {@code
* Queue.transpose(Queue(Queue(1,2,3), Queue(4,5,6))) → Queue(Queue(1,4), Queue(2,5), Queue(3,6))
* }
*/
public static <T> Queue<Queue<T>> transpose(Queue<Queue<T>> matrix) {
return Collections.transpose(matrix, Queue::ofAll, Queue::of);
}
/**
* Creates a Queue of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Queue.rangeClosedBy(1L, 3L, 1L) // = Queue(1L, 2L, 3L)
* Queue.rangeClosedBy(1L, 4L, 2L) // = Queue(1L, 3L)
* Queue.rangeClosedBy(4L, 1L, -2L) // = Queue(4L, 2L)
* Queue.rangeClosedBy(4L, 1L, 2L) // = Queue()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of long values as specified or {@code Nil} if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static Queue<Long> rangeClosedBy(long from, long toInclusive, long step) {
return ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Queue from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Queue, otherwise {@code Some} {@code Tuple}
* of the element for the next call and the value to add to the
* resulting Queue.
* <p>
* Example:
* <pre>
* <code>
* Queue.unfoldRight(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x, x-1)));
* // Queue(10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
* </code>
* </pre>
*
* @param <T> type of seeds
* @param <U> type of unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Queue with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
public static <T, U> Queue<U> unfoldRight(T seed, Function<? super T, Option<Tuple2<? extends U, ? extends T>>> f) {
return Iterator.unfoldRight(seed, f).toQueue();
}
/**
* Creates a Queue from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Queue, otherwise {@code Some} {@code Tuple}
* of the value to add to the resulting Queue and
* the element for the next call.
* <p>
* Example:
* <pre>
* <code>
* Queue.unfoldLeft(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x-1, x)));
* // Queue(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
* </code>
* </pre>
*
* @param <T> type of seeds
* @param <U> type of unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Queue with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
public static <T, U> Queue<U> unfoldLeft(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends U>>> f) {
return Iterator.unfoldLeft(seed, f).toQueue();
}
/**
* Creates a Queue from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating the Queue, otherwise {@code Some} {@code Tuple}
* of the value to add to the resulting Queue and
* the element for the next call.
* <p>
* Example:
* <pre>
* <code>
* Queue.unfold(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x-1, x)));
* // Queue(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
* </code>
* </pre>
*
* @param <T> type of seeds and unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a Queue with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
public static <T> Queue<T> unfold(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends T>>> f) {
return Iterator.unfold(seed, f).toQueue();
}
/**
* Enqueues a new element.
*
* @param element The new element
* @return a new {@code Queue} instance, containing the new element
*/
@Override
public Queue<T> enqueue(T element) {
return new Queue<>(front, rear.prepend(element));
}
/**
* Enqueues the given elements. A queue has FIFO order, i.e. the first of the given elements is
* the first which will be retrieved.
*
* @param elements An Iterable of elements, may be empty
* @return a new {@code Queue} instance, containing the new elements
* @throws NullPointerException if elements is null
*/
@SuppressWarnings("unchecked")
public Queue<T> enqueueAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty() && elements instanceof Queue) {
return (Queue<T>) elements;
} else {
return List.ofAll(elements).foldLeft(this, Queue::enqueue);
}
}
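// Illustrative usage (sketch): Queue.of(1).enqueueAll(List.of(2, 3)) // = Queue(1, 2, 3), so 1 is retrieved first (FIFO).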
// -- Adjusted return types of Seq methods
@Override
public Queue<T> append(T element) {
return enqueue(element);
}
@Override
public Queue<T> appendAll(Iterable<? extends T> elements) {
return enqueueAll(elements);
}
@Override
public Queue<Queue<T>> combinations() {
return ofAll(toList().combinations().map(Queue::ofAll));
}
@Override
public Queue<Queue<T>> combinations(int k) {
return ofAll(toList().combinations(k).map(Queue::ofAll));
}
@Override
public Iterator<Queue<T>> crossProduct(int power) {
return Collections.crossProduct(empty(), this, power);
}
@Override
public Queue<T> distinct() {
return ofAll(toList().distinct());
}
@Override
public Queue<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return ofAll(toList().distinctBy(comparator));
}
@Override
public <U> Queue<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
return ofAll(toList().distinctBy(keyExtractor));
}
@Override
public Queue<T> drop(int n) {
if (n <= 0) {
return this;
}
if (n >= length()) {
return empty();
}
return new Queue<>(front.drop(n), rear.dropRight(n - front.length()));
}
@Override
public Queue<T> dropUntil(Predicate<? super T> predicate) {
return Collections.dropUntil(this, predicate);
}
@Override
public Queue<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropUntil(predicate.negate());
}
@Override
public Queue<T> dropRight(int n) {
if (n <= 0) {
return this;
}
if (n >= length()) {
return empty();
}
return new Queue<>(front.dropRight(n - rear.length()), rear.drop(n));
}
@Override
public Queue<T> dropRightUntil(Predicate<? super T> predicate) {
return Collections.dropUntil(reverse(), predicate).reverse();
}
@Override
public Queue<T> dropRightWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropRightUntil(predicate.negate());
}
@Override
public Queue<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final List<T> filtered = toList().filter(predicate);
if (filtered.isEmpty()) {
return empty();
} else if (filtered.length() == length()) {
return this;
} else {
return ofAll(filtered);
}
}
@Override
public <U> Queue<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return empty();
} else {
return new Queue<>(front.flatMap(mapper), rear.flatMap(mapper));
}
}
@Override
public T get(int index) {
if (isEmpty()) {
throw new IndexOutOfBoundsException("get(" + index + ") on empty Queue");
}
if (index < 0) {
throw new IndexOutOfBoundsException("get(" + index + ")");
}
final int length = front.length();
if (index < length) {
return front.get(index);
} else {
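// rear holds its elements in reverse (most recently enqueued first),
// so the index has to be mirrored before reading from it.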
final int rearIndex = index - length;
final int rearLength = rear.length();
if (rearIndex < rearLength) {
final int reverseRearIndex = rearLength - rearIndex - 1;
return rear.get(reverseRearIndex);
} else {
throw new IndexOutOfBoundsException("get(" + index + ") on Queue of length " + length());
}
}
}
@Override
public <C> Map<C, Queue<T>> groupBy(Function<? super T, ? extends C> classifier) {
return Collections.groupBy(this, classifier, Queue::ofAll);
}
@Override
public Iterator<Queue<T>> grouped(int size) {
return sliding(size, size);
}
@Override
public boolean hasDefiniteSize() {
return true;
}
@Override
public T head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty " + stringPrefix());
} else {
return front.head();
}
}
@Override
public int indexOf(T element, int from) {
final int frontIndex = front.indexOf(element, from);
if (frontIndex != -1) {
return frontIndex;
} else {
// we need to reverse because we search for the first occurrence
final int rearIndex = rear.reverse().indexOf(element, from - front.length());
return (rearIndex == -1) ? -1 : rearIndex + front.length();
}
}
@Override
public Queue<T> init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty " + stringPrefix());
} else if (rear.isEmpty()) {
return new Queue<>(front.init(), rear);
} else {
return new Queue<>(front, rear.tail());
}
}
@Override
public Queue<T> insert(int index, T element) {
if (index < 0) {
throw new IndexOutOfBoundsException("insert(" + index + ", element)");
}
final int length = front.length();
if (index <= length) {
return new Queue<>(front.insert(index, element), rear);
} else {
final int rearIndex = index - length;
final int rearLength = rear.length();
if (rearIndex <= rearLength) {
final int reverseRearIndex = rearLength - rearIndex;
return new Queue<>(front, rear.insert(reverseRearIndex, element));
} else {
throw new IndexOutOfBoundsException("insert(" + index + ", element) on Queue of length " + length());
}
}
}
@SuppressWarnings("unchecked")
@Override
public Queue<T> insertAll(int index, Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (index < 0) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements)");
}
final int length = front.length();
if (index <= length) {
if (isEmpty() && elements instanceof Queue) {
return (Queue<T>) elements;
} else {
final List<T> newFront = front.insertAll(index, elements);
return (newFront == front) ? this : new Queue<>(newFront, rear);
}
} else {
final int rearIndex = index - length;
final int rearLength = rear.length();
if (rearIndex <= rearLength) {
final int reverseRearIndex = rearLength - rearIndex;
final List<T> newRear = rear.insertAll(reverseRearIndex, List.ofAll(elements).reverse());
return (newRear == rear) ? this : new Queue<>(front, newRear);
} else {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements) on Queue of length " + length());
}
}
}
@Override
public Queue<T> intersperse(T element) {
if (isEmpty()) {
return this;
} else if (rear.isEmpty()) {
return new Queue<>(front.intersperse(element), rear);
} else {
return new Queue<>(front.intersperse(element), rear.intersperse(element).append(element));
}
}
@Override
public boolean isEmpty() {
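// Relies on the Queue invariant that front is non-empty whenever the queue is
// non-empty, so checking front alone is sufficient here.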
return front.isEmpty();
}
@Override
public boolean isTraversableAgain() {
return true;
}
@Override
public int lastIndexOf(T element, int end) {
return toList().lastIndexOf(element, end);
}
@Override
public int length() {
return front.length() + rear.length();
}
@Override
public <U> Queue<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return new Queue<>(front.map(mapper), rear.map(mapper));
}
@Override
public Queue<T> orElse(Iterable<? extends T> other) {
return isEmpty() ? ofAll(other) : this;
}
@Override
public Queue<T> orElse(Supplier<? extends Iterable<? extends T>> supplier) {
return isEmpty() ? ofAll(supplier.get()) : this;
}
@Override
public Queue<T> padTo(int length, T element) {
final int actualLength = length();
if (length <= actualLength) {
return this;
} else {
return ofAll(toList().padTo(length, element));
}
}
@Override
public Queue<T> leftPadTo(int length, T element) {
final int actualLength = length();
if (length <= actualLength) {
return this;
} else {
return ofAll(toList().leftPadTo(length, element));
}
}
@Override
public Queue<T> patch(int from, Iterable<? extends T> that, int replaced) {
from = from < 0 ? 0 : from;
replaced = replaced < 0 ? 0 : replaced;
Queue<T> result = take(from).appendAll(that);
from += replaced;
result = result.appendAll(drop(from));
return result;
}
@Override
public Tuple2<Queue<T>, Queue<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return toList().partition(predicate).map(List::toQueue, List::toQueue);
}
@Override
public Queue<Queue<T>> permutations() {
return ofAll(toList().permutations().map(List::toQueue));
}
@Override
public Queue<T> prepend(T element) {
return new Queue<>(front.prepend(element), rear);
}
@SuppressWarnings("unchecked")
@Override
public Queue<T> prependAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty() && elements instanceof Queue) {
return (Queue<T>) elements;
} else {
final List<T> newFront = front.prependAll(elements);
return (newFront == front) ? this : new Queue<>(newFront, rear);
}
}
@Override
public Queue<T> remove(T element) {
final List<T> removed = toList().remove(element);
return ofAll(removed.length() == length() ? this : removed);
}
@Override
public Queue<T> removeFirst(Predicate<T> predicate) {
final List<T> removed = toList().removeFirst(predicate);
return ofAll(removed.length() == length() ? this : removed);
}
@Override
public Queue<T> removeLast(Predicate<T> predicate) {
final List<T> removed = toList().removeLast(predicate);
return ofAll(removed.length() == length() ? this : removed);
}
@Override
public Queue<T> removeAt(int index) {
return ofAll(toList().removeAt(index));
}
@Override
public Queue<T> removeAll(T element) {
return Collections.removeAll(this, element);
}
@Override
public Queue<T> replace(T currentElement, T newElement) {
final List<T> newFront = front.replace(currentElement, newElement);
final List<T> newRear = rear.replace(currentElement, newElement);
return newFront.size() + newRear.size() == 0 ? empty()
: newFront == front && newRear == rear ? this
: new Queue<>(newFront, newRear);
}
@Override
public Queue<T> replaceAll(T currentElement, T newElement) {
final List<T> newFront = front.replaceAll(currentElement, newElement);
final List<T> newRear = rear.replaceAll(currentElement, newElement);
return newFront.size() + newRear.size() == 0 ? empty()
: newFront == front && newRear == rear ? this
: new Queue<>(newFront, newRear);
}
@Override
public Queue<T> reverse() {
return isEmpty() ? this : ofAll(toList().reverse());
}
@Override
public Queue<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
return scanLeft(zero, operation);
}
@Override
public <U> Queue<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
return Collections.scanLeft(this, zero, operation, Iterator::toQueue);
}
@Override
public <U> Queue<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
return Collections.scanRight(this, zero, operation, Iterator::toQueue);
}
@Override
public Queue<T> shuffle() {
return Collections.shuffle(this, Queue::ofAll);
}
@Override
public Queue<T> slice(int beginIndex, int endIndex) {
return ofAll(toList().slice(beginIndex, endIndex));
}
@Override
public Iterator<Queue<T>> slideBy(Function<? super T, ?> classifier) {
return iterator().slideBy(classifier).map(Queue::ofAll);
}
@Override
public Iterator<Queue<T>> sliding(int size) {
return sliding(size, 1);
}
@Override
public Iterator<Queue<T>> sliding(int size, int step) {
return iterator().sliding(size, step).map(Queue::ofAll);
}
@Override
public Queue<T> sorted() {
return ofAll(toList().sorted());
}
@Override
public Queue<T> sorted(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return ofAll(toList().sorted(comparator));
}
@Override
public <U extends Comparable<? super U>> Queue<T> sortBy(Function<? super T, ? extends U> mapper) {
return sortBy(U::compareTo, mapper);
}
@Override
public <U> Queue<T> sortBy(Comparator<? super U> comparator, Function<? super T, ? extends U> mapper) {
final Function<? super T, ? extends U> domain = Function1.of(mapper::apply).memoized();
return toJavaStream()
.sorted((e1, e2) -> comparator.compare(domain.apply(e1), domain.apply(e2)))
.collect(collector());
}
@Override
public Tuple2<Queue<T>, Queue<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return toList().span(predicate).map(List::toQueue, List::toQueue);
}
@Override
public Tuple2<Queue<T>, Queue<T>> splitAt(int n) {
return toList().splitAt(n).map(List::toQueue, List::toQueue);
}
@Override
public Tuple2<Queue<T>, Queue<T>> splitAt(Predicate<? super T> predicate) {
return toList().splitAt(predicate).map(List::toQueue, List::toQueue);
}
@Override
public Tuple2<Queue<T>, Queue<T>> splitAtInclusive(Predicate<? super T> predicate) {
return toList().splitAtInclusive(predicate).map(List::toQueue, List::toQueue);
}
@Override
public boolean startsWith(Iterable<? extends T> that, int offset) {
return toList().startsWith(that, offset);
}
@Override
public Queue<T> subSequence(int beginIndex) {
if (beginIndex < 0 || beginIndex > length()) {
throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ")");
} else {
return drop(beginIndex);
}
}
@Override
public Queue<T> subSequence(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex > endIndex || endIndex > length()) {
throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ", " + endIndex + ") on Queue of length " + length());
} else if (beginIndex == endIndex) {
return empty();
} else if (beginIndex == 0 && endIndex == length()) {
return this;
} else {
return ofAll(toList().subSequence(beginIndex, endIndex));
}
}
@Override
public Queue<T> tail() {
if (isEmpty()) {
throw new UnsupportedOperationException("tail of empty " + stringPrefix());
} else {
return new Queue<>(front.tail(), rear);
}
}
@Override
public Queue<T> take(int n) {
if (n <= 0) {
return empty();
}
if (n >= length()) {
return this;
}
final int frontLength = front.length();
if (n < frontLength) {
return new Queue<>(front.take(n), List.empty());
} else if (n == frontLength) {
return new Queue<>(front, List.empty());
} else {
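// rear is stored newest-first, so the elements that logically follow front
// sit at the end of rear; takeRight selects exactly those.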
return new Queue<>(front, rear.takeRight(n - frontLength));
}
}
@Override
public Queue<T> takeRight(int n) {
if (n <= 0) {
return empty();
}
if (n >= length()) {
return this;
}
final int rearLength = rear.length();
if (n < rearLength) {
return new Queue<>(rear.take(n).reverse(), List.empty());
} else if (n == rearLength) {
return new Queue<>(rear.reverse(), List.empty());
} else {
return new Queue<>(front.takeRight(n - rearLength), rear);
}
}
@Override
public Queue<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final List<T> taken = toList().takeUntil(predicate);
return taken.length() == length() ? this : ofAll(taken);
}
/**
* Transforms this {@code Queue}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
public <U> U transform(Function<? super Queue<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
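// Illustrative usage (sketch): queue.transform(q -> q.length()) applies the given function to this queue and returns its result.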
@SuppressWarnings("deprecation")
@Override
public <U> Queue<U> unit(Iterable<? extends U> iterable) {
return ofAll(iterable);
}
@Override
public <T1, T2> Tuple2<Queue<T1>, Queue<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return toList().unzip(unzipper).map(List::toQueue, List::toQueue);
}
@Override
public <T1, T2, T3> Tuple3<Queue<T1>, Queue<T2>, Queue<T3>> unzip3(Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
return toList().unzip3(unzipper).map(List::toQueue, List::toQueue, List::toQueue);
}
@Override
public Queue<T> update(int index, T element) {
return ofAll(toList().update(index, element));
}
@Override
public Queue<T> update(int index, Function<? super T, ? extends T> updater) {
Objects.requireNonNull(updater, "updater is null");
return update(index, updater.apply(get(index)));
}
@Override
public <U> Queue<Tuple2<T, U>> zip(Iterable<? extends U> that) {
return zipWith(that, Tuple::of);
}
@SuppressWarnings("unchecked")
@Override
public <U, R> Queue<R> zipWith(Iterable<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper) {
Objects.requireNonNull(that, "that is null");
Objects.requireNonNull(mapper, "mapper is null");
return ofAll(toList().zipWith(that, mapper));
}
@Override
public <U> Queue<Tuple2<T, U>> zipAll(Iterable<? extends U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
return ofAll(toList().zipAll(that, thisElem, thatElem));
}
@Override
public Queue<Tuple2<T, Integer>> zipWithIndex() {
return zipWithIndex(Tuple::of);
}
@Override
public <U> Queue<U> zipWithIndex(BiFunction<? super T, ? super Integer, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return ofAll(toList().zipWithIndex(mapper));
}
private Object readResolve() {
return isEmpty() ? EMPTY : this;
}
@Override
public String stringPrefix() {
return "Queue";
}
@Override
public boolean equals(Object o) {
return o == this || o instanceof Queue && Collections.areEqual(this, (Iterable) o);
}
}
| 1 | 12,180 | I can't believe we didn't see this typo before :)) | vavr-io-vavr | java |
@@ -25,6 +25,9 @@ from Queue import Empty, Queue
from google.cloud.forseti.services.inventory.base import crawler
from google.cloud.forseti.services.inventory.base import gcp
from google.cloud.forseti.services.inventory.base import resources
+from google.cloud.forseti.common.util import log_util
+
+LOGGER = log_util.get_logger(__name__)
class CrawlerConfig(crawler.CrawlerConfig): | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crawler implementation."""
# TODO: Remove this when time allows
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
# pylint: disable=missing-param-doc
import threading
import time
from Queue import Empty, Queue
from google.cloud.forseti.services.inventory.base import crawler
from google.cloud.forseti.services.inventory.base import gcp
from google.cloud.forseti.services.inventory.base import resources
class CrawlerConfig(crawler.CrawlerConfig):
"""Crawler configuration to inject dependencies."""
def __init__(self, storage, progresser, api_client, variables=None):
super(CrawlerConfig, self).__init__()
self.storage = storage
self.progresser = progresser
self.variables = {} if not variables else variables
self.client = api_client
class ParallelCrawlerConfig(crawler.CrawlerConfig):
"""Multithreaded crawler configuration, to inject dependencies."""
def __init__(self, storage, progresser, api_client, threads=10,
variables=None):
super(ParallelCrawlerConfig, self).__init__()
self.storage = storage
self.progresser = progresser
self.variables = {} if not variables else variables
self.threads = threads
self.client = api_client
class Crawler(crawler.Crawler):
"""Simple single-threaded Crawler implementation."""
def __init__(self, config):
super(Crawler, self).__init__()
self.config = config
def run(self, resource):
"""Run the crawler, given a start resource.
Args:
resource (object): Resource to start with.
"""
resource.accept(self)
return self.config.progresser
def visit(self, resource):
"""Handle a newly found resource.
Args:
resource (object): Resource to handle.
Raises:
Exception: Reraises any exception.
"""
progresser = self.config.progresser
try:
resource.get_iam_policy(self.get_client())
resource.get_gcs_policy(self.get_client())
resource.get_dataset_policy(self.get_client())
resource.get_cloudsql_policy(self.get_client())
resource.get_billing_info(self.get_client())
resource.get_enabled_apis(self.get_client())
self.write(resource)
except Exception as e:
progresser.on_error(e)
raise
else:
progresser.on_new_object(resource)
def dispatch(self, callback):
"""Dispatch crawling of a subtree.
Args:
callback (function): Callback to dispatch.
"""
callback()
def write(self, resource):
"""Save resource to storage.
Args:
resource (object): Resource to handle.
"""
self.config.storage.write(resource)
def get_client(self):
"""Get the GCP API client."""
return self.config.client
def on_child_error(self, error):
"""Process the error generated by child of a resource
Inventory does not stop for children errors but raise a warning
"""
warning_message = '{}\n'.format(error)
self.config.storage.warning(warning_message)
self.config.progresser.on_warning(error)
def update(self, resource):
"""Update the row of an existing resource
Raises:
Exception: Reraises any exception.
"""
try:
self.config.storage.update(resource)
except Exception as e:
self.config.progresser.on_error(e)
raise
class ParallelCrawler(Crawler):
"""Multi-threaded Crawler implementation."""
def __init__(self, config):
super(ParallelCrawler, self).__init__(config)
self._write_lock = threading.Lock()
self._dispatch_queue = Queue()
self._shutdown_event = threading.Event()
def _start_workers(self):
"""Start a pool of worker threads for processing the dispatch queue."""
self._shutdown_event.clear()
for _ in xrange(self.config.threads):
worker = threading.Thread(target=self._process_queue)
worker.daemon = True
worker.start()
def _process_queue(self):
"""Process items in the queue until the shutdown event is set."""
while not self._shutdown_event.is_set():
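# Use a short timeout so the worker wakes up regularly to re-check the
# shutdown event instead of blocking forever on an empty queue.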
try:
callback = self._dispatch_queue.get(timeout=1)
except Empty:
continue
callback()
self._dispatch_queue.task_done()
def run(self, resource):
"""Run the crawler, given a start resource.
Args:
resource (object): Resource to start with.
"""
try:
self._start_workers()
resource.accept(self)
self._dispatch_queue.join()
finally:
self._shutdown_event.set()
# Wait for threads to exit.
time.sleep(2)
return self.config.progresser
def dispatch(self, callback):
"""Dispatch crawling of a subtree.
Args:
callback (function): Callback to dispatch.
"""
self._dispatch_queue.put(callback)
def write(self, resource):
"""Save resource to storage.
Args:
resource (object): Resource to handle.
"""
with self._write_lock:
self.config.storage.write(resource)
def on_child_error(self, error):
"""Process the error generated by child of a resource
Inventory does not stop for children errors but raise a warning
"""
warning_message = '{}\n'.format(error)
with self._write_lock:
self.config.storage.warning(warning_message)
self.config.progresser.on_warning(error)
def update(self, resource):
"""Update the row of an existing resource
Raises:
Exception: Reraises any exception.
"""
try:
with self._write_lock:
self.config.storage.update(resource)
except Exception as e:
self.config.progresser.on_error(e)
raise
def run_crawler(storage,
progresser,
config,
parallel=True):
"""Run the crawler with a determined configuration.
Args:
storage (object): Storage implementation to use.
progresser (object): Progresser to notify status updates.
config (object): Inventory configuration.
parallel (bool): If true, use the parallel crawler implementation.
"""
client_config = {
'groups_service_account_key_file': config.get_gsuite_sa_path(),
'domain_super_admin_email': config.get_gsuite_admin_email(),
'max_admin_api_calls_per_100_seconds': 1500,
'max_appengine_api_calls_per_second': 20,
'max_bigquery_api_calls_per_100_seconds': 17000,
'max_cloudbilling_api_calls_per_60_seconds': 300,
'max_crm_api_calls_per_100_seconds': 400,
'max_sqladmin_api_calls_per_100_seconds': 100,
'max_servicemanagement_api_calls_per_100_seconds': 200,
'max_compute_api_calls_per_second': 20,
'max_iam_api_calls_per_second': 20,
}
root_id = config.get_root_resource_id()
client = gcp.ApiClientImpl(client_config)
resource = resources.from_root_id(client, root_id)
if parallel:
config = ParallelCrawlerConfig(storage, progresser, client)
crawler_impl = ParallelCrawler(config)
else:
config = CrawlerConfig(storage, progresser, client)
crawler_impl = Crawler(config)
progresser = crawler_impl.run(resource)
return progresser.get_summary()
| 1 | 28,533 | If the logger isn't used, it probably doesn't need to be added. | forseti-security-forseti-security | py |
@@ -0,0 +1,4 @@
+var script = document.createElement('script')
+script.type = 'text/javascript'
+script.src = '{}'
+document.head.appendChild(script) | 1 | 1 | 18,012 | These files should in `/javascript/brython` | SeleniumHQ-selenium | js |
|
@@ -54,11 +54,15 @@ func (ci *CreateImages) UnmarshalJSON(b []byte) error {
func imageUsesAlphaFeatures(imagesAlpha []*ImageAlpha) bool {
for _, imageAlpha := range imagesAlpha {
- if imageAlpha != nil && len(imageAlpha.RolloutOverride.DefaultRolloutTime) > 0 {
- return true
+ if imageAlpha != nil && imageAlpha.RolloutOverride != nil {
+ if len(imageAlpha.RolloutOverride.DefaultRolloutTime) > 0 {
+ return true
+ }
}
- if imageAlpha != nil && len(imageAlpha.Deprecated.StateOverride.DefaultRolloutTime) > 0 {
- return true
+ if imageAlpha != nil && imageAlpha.Deprecated != nil && imageAlpha.Deprecated.StateOverride != nil {
+ if len(imageAlpha.Deprecated.StateOverride.DefaultRolloutTime) > 0 {
+ return true
+ }
}
}
return false | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"context"
"encoding/json"
"sync"
"google.golang.org/api/googleapi"
)
// CreateImages is a Daisy CreateImages workflow step.
type CreateImages struct {
Images []*Image
ImagesAlpha []*ImageAlpha
ImagesBeta []*ImageBeta
}
// UnmarshalJSON unmarshals Image.
func (ci *CreateImages) UnmarshalJSON(b []byte) error {
var imagesAlpha []*ImageAlpha
if err := json.Unmarshal(b, &imagesAlpha); err != nil {
return err
}
ci.ImagesAlpha = imagesAlpha
var imagesBeta []*ImageBeta
if err := json.Unmarshal(b, &imagesBeta); err != nil {
return err
}
ci.ImagesBeta = imagesBeta
var images []*Image
if err := json.Unmarshal(b, &images); err != nil {
return err
}
ci.Images = images
return nil
}
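// imageUsesAlphaFeatures reports whether any of the given alpha images sets an
// alpha-only field (currently a default rollout time), in which case the alpha
// image representation is used when creating the images.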
func imageUsesAlphaFeatures(imagesAlpha []*ImageAlpha) bool {
for _, imageAlpha := range imagesAlpha {
if imageAlpha != nil && len(imageAlpha.RolloutOverride.DefaultRolloutTime) > 0 {
return true
}
if imageAlpha != nil && len(imageAlpha.Deprecated.StateOverride.DefaultRolloutTime) > 0 {
return true
}
}
return false
}
func imageUsesBetaFeatures(imagesBeta []*ImageBeta) bool {
return false
}
// populate preprocesses fields: Name, Project, Description, SourceDisk, RawDisk, and daisyName.
// - sets defaults
// - extends short partial URLs to include "projects/<project>"
func (ci *CreateImages) populate(ctx context.Context, s *Step) DError {
var errs DError
if ci.Images != nil {
for _, i := range ci.Images {
errs = addErrs(errs, (&i.ImageBase).populate(ctx, i, s))
}
}
if ci.ImagesAlpha != nil {
for _, i := range ci.ImagesAlpha {
errs = addErrs(errs, (&i.ImageBase).populate(ctx, i, s))
}
}
if ci.ImagesBeta != nil {
for _, i := range ci.ImagesBeta {
errs = addErrs(errs, (&i.ImageBase).populate(ctx, i, s))
}
}
return errs
}
func (ci *CreateImages) validate(ctx context.Context, s *Step) DError {
var errs DError
if imageUsesBetaFeatures(ci.ImagesBeta) {
for _, i := range ci.ImagesBeta {
errs = addErrs(errs, (&i.ImageBase).validate(ctx, i, i.Licenses, s))
}
} else {
for _, i := range ci.Images {
errs = addErrs(errs, (&i.ImageBase).validate(ctx, i, i.Licenses, s))
}
}
return errs
}
func (ci *CreateImages) run(ctx context.Context, s *Step) DError {
var wg sync.WaitGroup
w := s.w
e := make(chan DError)
createImage := func(ci ImageInterface, overwrite bool) {
defer wg.Done()
// Get source disk link if SourceDisk is a daisy reference to a disk.
if d, ok := w.disks.get(ci.getSourceDisk()); ok {
ci.setSourceDisk(d.link)
}
// Delete existing if OverWrite is true.
if overwrite {
// Just try to delete it, a 404 here indicates the image doesn't exist.
if err := ci.delete(w.ComputeClient); err != nil {
if apiErr, ok := err.(*googleapi.Error); !ok || apiErr.Code != 404 {
e <- Errf("error deleting existing image: %v", err)
return
}
}
}
w.LogStepInfo(s.name, "CreateImages", "Creating image %q.", ci.getName())
if err := ci.create(w.ComputeClient); err != nil {
e <- newErr("failed to create images", err)
return
}
ci.markCreatedInWorkflow()
}
if imageUsesAlphaFeatures(ci.ImagesAlpha) {
for _, i := range ci.ImagesAlpha {
wg.Add(1)
go createImage(i, i.OverWrite)
}
} else if imageUsesBetaFeatures(ci.ImagesBeta) {
for _, i := range ci.ImagesBeta {
wg.Add(1)
go createImage(i, i.OverWrite)
}
} else {
for _, i := range ci.Images {
wg.Add(1)
go createImage(i, i.OverWrite)
}
}
go func() {
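// Signal successful completion (nil error) once every createImage goroutine has finished.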
wg.Wait()
e <- nil
}()
select {
case err := <-e:
return err
case <-w.Cancel:
// Wait so Images being created now will complete before we try to clean them up.
wg.Wait()
return nil
}
}
| 1 | 13,494 | minor, you can squash these into one `if` | GoogleCloudPlatform-compute-image-tools | go |
@@ -73,7 +73,7 @@ func (s *transmissionTaskSuite) TearDownTest() {
func (s *transmissionTaskSuite) TestHandleTransmissionTask_RegisterNamespaceTask_IsGlobalNamespace() {
taskType := replicationgenpb.ReplicationTaskType_NamespaceTask
- id := primitives.NewUUID()
+ id := primitives.NewUUID().String()
name := "some random namespace test name"
status := namespacepb.NamespaceStatus_Registered
description := "some random test description" | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package namespace
import (
"testing"
"github.com/gogo/protobuf/types"
"github.com/stretchr/testify/suite"
namespacepb "go.temporal.io/temporal-proto/namespace"
replicationpb "go.temporal.io/temporal-proto/replication"
"github.com/temporalio/temporal/.gen/proto/persistenceblobs"
replicationgenpb "github.com/temporalio/temporal/.gen/proto/replication"
"github.com/temporalio/temporal/common/log/loggerimpl"
"github.com/temporalio/temporal/common/mocks"
"github.com/temporalio/temporal/common/primitives"
)
type (
transmissionTaskSuite struct {
suite.Suite
namespaceReplicator *namespaceReplicatorImpl
kafkaProducer *mocks.KafkaProducer
}
)
func TestTransmissionTaskSuite(t *testing.T) {
s := new(transmissionTaskSuite)
suite.Run(t, s)
}
func (s *transmissionTaskSuite) SetupSuite() {
}
func (s *transmissionTaskSuite) TearDownSuite() {
}
func (s *transmissionTaskSuite) SetupTest() {
s.kafkaProducer = &mocks.KafkaProducer{}
s.namespaceReplicator = NewNamespaceReplicator(
s.kafkaProducer,
loggerimpl.NewDevelopmentForTest(s.Suite),
).(*namespaceReplicatorImpl)
}
func (s *transmissionTaskSuite) TearDownTest() {
s.kafkaProducer.AssertExpectations(s.T())
}
func (s *transmissionTaskSuite) TestHandleTransmissionTask_RegisterNamespaceTask_IsGlobalNamespace() {
taskType := replicationgenpb.ReplicationTaskType_NamespaceTask
id := primitives.NewUUID()
name := "some random namespace test name"
status := namespacepb.NamespaceStatus_Registered
description := "some random test description"
ownerEmail := "some random test owner"
data := map[string]string{"k": "v"}
retention := int32(10)
emitMetric := true
historyArchivalStatus := namespacepb.ArchivalStatus_Enabled
historyArchivalURI := "some random history archival uri"
visibilityArchivalStatus := namespacepb.ArchivalStatus_Enabled
visibilityArchivalURI := "some random visibility archival uri"
clusterActive := "some random active cluster name"
clusterStandby := "some random standby cluster name"
configVersion := int64(0)
failoverVersion := int64(59)
clusters := []string{clusterActive, clusterStandby}
namespaceOperation := replicationgenpb.NamespaceOperation_Create
info := &persistenceblobs.NamespaceInfo{
Id: id,
Name: name,
Status: namespacepb.NamespaceStatus_Registered,
Description: description,
Owner: ownerEmail,
Data: data,
}
config := &persistenceblobs.NamespaceConfig{
RetentionDays: retention,
EmitMetric: emitMetric,
HistoryArchivalStatus: historyArchivalStatus,
HistoryArchivalURI: historyArchivalURI,
VisibilityArchivalStatus: visibilityArchivalStatus,
VisibilityArchivalURI: visibilityArchivalURI,
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
}
replicationConfig := &persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: clusterActive,
Clusters: clusters,
}
isGlobalNamespace := true
s.kafkaProducer.On("Publish", &replicationgenpb.ReplicationTask{
TaskType: taskType,
Attributes: &replicationgenpb.ReplicationTask_NamespaceTaskAttributes{
NamespaceTaskAttributes: &replicationgenpb.NamespaceTaskAttributes{
NamespaceOperation: namespaceOperation,
Id: id.String(),
Info: &namespacepb.NamespaceInfo{
Name: name,
Status: status,
Description: description,
OwnerEmail: ownerEmail,
Data: data,
},
Config: &namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: emitMetric},
HistoryArchivalStatus: historyArchivalStatus,
HistoryArchivalURI: historyArchivalURI,
VisibilityArchivalStatus: visibilityArchivalStatus,
VisibilityArchivalURI: visibilityArchivalURI,
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
},
ReplicationConfig: &replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: clusterActive,
Clusters: s.namespaceReplicator.convertClusterReplicationConfigToProto(clusters),
},
ConfigVersion: configVersion,
FailoverVersion: failoverVersion,
},
},
}).Return(nil).Once()
err := s.namespaceReplicator.HandleTransmissionTask(namespaceOperation, info, config, replicationConfig, configVersion, failoverVersion, isGlobalNamespace)
s.Nil(err)
}
func (s *transmissionTaskSuite) TestHandleTransmissionTask_RegisterNamespaceTask_NotGlobalNamespace() {
id := primitives.NewUUID()
name := "some random namespace test name"
description := "some random test description"
ownerEmail := "some random test owner"
data := map[string]string{"k": "v"}
retention := int32(10)
emitMetric := true
historyArchivalStatus := namespacepb.ArchivalStatus_Enabled
historyArchivalURI := "some random history archival uri"
visibilityArchivalStatus := namespacepb.ArchivalStatus_Enabled
visibilityArchivalURI := "some random visibility archival uri"
clusterActive := "some random active cluster name"
clusterStandby := "some random standby cluster name"
configVersion := int64(0)
failoverVersion := int64(59)
clusters := []string{clusterActive, clusterStandby}
namespaceOperation := replicationgenpb.NamespaceOperation_Create
info := &persistenceblobs.NamespaceInfo{
Id: id,
Name: name,
Status: namespacepb.NamespaceStatus_Registered,
Description: description,
Owner: ownerEmail,
Data: data,
}
config := &persistenceblobs.NamespaceConfig{
RetentionDays: retention,
EmitMetric: emitMetric,
HistoryArchivalStatus: historyArchivalStatus,
HistoryArchivalURI: historyArchivalURI,
VisibilityArchivalStatus: visibilityArchivalStatus,
VisibilityArchivalURI: visibilityArchivalURI,
BadBinaries: &namespacepb.BadBinaries{},
}
replicationConfig := &persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: clusterActive,
Clusters: clusters,
}
isGlobalNamespace := false
err := s.namespaceReplicator.HandleTransmissionTask(namespaceOperation, info, config, replicationConfig, configVersion, failoverVersion, isGlobalNamespace)
s.Nil(err)
}
func (s *transmissionTaskSuite) TestHandleTransmissionTask_UpdateNamespaceTask_IsGlobalNamespace() {
taskType := replicationgenpb.ReplicationTaskType_NamespaceTask
id := primitives.NewUUID()
name := "some random namespace test name"
status, _ := s.namespaceReplicator.convertNamespaceStatusToProto(int(namespacepb.NamespaceStatus_Deprecated))
description := "some random test description"
ownerEmail := "some random test owner"
data := map[string]string{"k": "v"}
retention := int32(10)
emitMetric := true
historyArchivalStatus := namespacepb.ArchivalStatus_Enabled
historyArchivalURI := "some random history archival uri"
visibilityArchivalStatus := namespacepb.ArchivalStatus_Enabled
visibilityArchivalURI := "some random visibility archival uri"
clusterActive := "some random active cluster name"
clusterStandby := "some random standby cluster name"
configVersion := int64(0)
failoverVersion := int64(59)
clusters := []string{clusterActive, clusterStandby}
namespaceOperation := replicationgenpb.NamespaceOperation_Update
info := &persistenceblobs.NamespaceInfo{
Id: id,
Name: name,
Status: namespacepb.NamespaceStatus_Deprecated,
Description: description,
Owner: ownerEmail,
Data: data,
}
config := &persistenceblobs.NamespaceConfig{
RetentionDays: retention,
EmitMetric: emitMetric,
HistoryArchivalStatus: historyArchivalStatus,
HistoryArchivalURI: historyArchivalURI,
VisibilityArchivalStatus: visibilityArchivalStatus,
VisibilityArchivalURI: visibilityArchivalURI,
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
}
replicationConfig := &persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: clusterActive,
Clusters: clusters,
}
isGlobalNamespace := true
s.kafkaProducer.On("Publish", &replicationgenpb.ReplicationTask{
TaskType: taskType,
Attributes: &replicationgenpb.ReplicationTask_NamespaceTaskAttributes{
NamespaceTaskAttributes: &replicationgenpb.NamespaceTaskAttributes{
NamespaceOperation: namespaceOperation,
Id: id.String(),
Info: &namespacepb.NamespaceInfo{
Name: name,
Status: status,
Description: description,
OwnerEmail: ownerEmail,
Data: data,
},
Config: &namespacepb.NamespaceConfiguration{
WorkflowExecutionRetentionPeriodInDays: retention,
EmitMetric: &types.BoolValue{Value: emitMetric},
HistoryArchivalStatus: historyArchivalStatus,
HistoryArchivalURI: historyArchivalURI,
VisibilityArchivalStatus: visibilityArchivalStatus,
VisibilityArchivalURI: visibilityArchivalURI,
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
},
ReplicationConfig: &replicationpb.NamespaceReplicationConfiguration{
ActiveClusterName: clusterActive,
Clusters: s.namespaceReplicator.convertClusterReplicationConfigToProto(clusters),
},
ConfigVersion: configVersion,
FailoverVersion: failoverVersion},
},
}).Return(nil).Once()
err := s.namespaceReplicator.HandleTransmissionTask(namespaceOperation, info, config, replicationConfig, configVersion, failoverVersion, isGlobalNamespace)
s.Nil(err)
}
func (s *transmissionTaskSuite) TestHandleTransmissionTask_UpdateNamespaceTask_NotGlobalNamespace() {
id := primitives.NewUUID()
name := "some random namespace test name"
description := "some random test description"
ownerEmail := "some random test owner"
data := map[string]string{"k": "v"}
retention := int32(10)
emitMetric := true
historyArchivalStatus := namespacepb.ArchivalStatus_Enabled
historyArchivalURI := "some random history archival uri"
visibilityArchivalStatus := namespacepb.ArchivalStatus_Enabled
visibilityArchivalURI := "some random visibility archival uri"
clusterActive := "some random active cluster name"
clusterStandby := "some random standby cluster name"
configVersion := int64(0)
failoverVersion := int64(59)
clusters := []string{clusterActive, clusterStandby}
namespaceOperation := replicationgenpb.NamespaceOperation_Update
info := &persistenceblobs.NamespaceInfo{
Id: id,
Name: name,
Status: namespacepb.NamespaceStatus_Deprecated,
Description: description,
Owner: ownerEmail,
Data: data,
}
config := &persistenceblobs.NamespaceConfig{
RetentionDays: retention,
EmitMetric: emitMetric,
HistoryArchivalStatus: historyArchivalStatus,
HistoryArchivalURI: historyArchivalURI,
VisibilityArchivalStatus: visibilityArchivalStatus,
VisibilityArchivalURI: visibilityArchivalURI,
}
replicationConfig := &persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: clusterActive,
Clusters: clusters,
}
isGlobalNamespace := false
err := s.namespaceReplicator.HandleTransmissionTask(namespaceOperation, info, config, replicationConfig, configVersion, failoverVersion, isGlobalNamespace)
s.Nil(err)
}
| 1 | 9,611 | Used regexes to do most of this, hence different methods of string creation of UUIDs. I plan to follow up with an additional change to remove direct references to google/pborman UUID so `uuid.New()` and `uuid.NewRandom()` will instead use our `primitives.UUID`. | temporalio-temporal | go |
@@ -581,7 +581,7 @@ namespace pwiz.Skyline.Controls.Graphs
requestContext.Settings = null;
}
- if (GraphSummary.IsHandleCreated)
+ if (GraphSummary.IsHandleCreated && !GraphSummary.Disposing)
{
try
{ | 1 | /*
* Original author: Brendan MacLean <brendanx .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2009 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Threading;
using System.Windows.Forms;
using pwiz.Common.DataAnalysis;
using pwiz.Common.SystemUtil;
using pwiz.Skyline.Controls.SeqNode;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.Irt;
using pwiz.Skyline.Model.Lib;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Model.RetentionTimes;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using ZedGraph;
using pwiz.Skyline.Util.Extensions;
namespace pwiz.Skyline.Controls.Graphs
{
public sealed class RTLinearRegressionGraphPane : SummaryGraphPane, IUpdateGraphPaneController, IDisposable, ITipDisplayer
{
public static ReplicateDisplay ShowReplicate
{
get
{
return Helpers.ParseEnum(Settings.Default.ShowRegressionReplicateEnum, ReplicateDisplay.all);
}
}
public static readonly Color COLOR_REFINED = Color.DarkBlue;
public static readonly Color COLOR_LINE_REFINED = Color.Black;
public static readonly Color COLOR_LINE_PREDICT = Color.DarkGray;
public static readonly Color COLOR_OUTLIERS = Color.BlueViolet;
public static readonly Color COLOR_LINE_ALL = Color.BlueViolet;
private GraphData _data;
private NodeTip _tip;
private CancellationTokenSource _cancellationTokenSource;
private bool _pendingUpdate;
public RTLinearRegressionGraphPane(GraphSummary graphSummary, bool runToRun)
: base(graphSummary)
{
XAxis.Title.Text = Resources.RTLinearRegressionGraphPane_RTLinearRegressionGraphPane_Score;
RunToRun = runToRun;
Settings.Default.RTScoreCalculatorList.ListChanged += RTScoreCalculatorList_ListChanged;
AllowDisplayTip = true;
_cancellationTokenSource = new CancellationTokenSource();
}
public void Dispose()
{
Cancel(false);
AllowDisplayTip = false;
Settings.Default.RTScoreCalculatorList.ListChanged -= RTScoreCalculatorList_ListChanged;
}
public override bool HasToolbar { get { return RunToRun; } }
public bool UpdateUIOnIndexChanged()
{
return true;
}
public bool UpdateUIOnLibraryChanged()
{
return ShowReplicate == ReplicateDisplay.single && !RunToRun;
}
private void RTScoreCalculatorList_ListChanged(object sender, EventArgs e)
{
// Avoid updating on every minor change to the list.
if (_pendingUpdate)
return;
// Wait for the UI thread to become available again, and then update
if (GraphSummary.IsHandleCreated)
{
GraphSummary.BeginInvoke(new Action(DelayedUpdate));
_pendingUpdate = true;
}
}
private void DelayedUpdate()
{
// Any change to the calculator list requires a full data update when in auto mode.
if (string.IsNullOrEmpty(Settings.Default.RTCalculatorName))
Data = null;
UpdateGraph(true);
_pendingUpdate = false;
}
public override bool HandleMouseMoveEvent(ZedGraphControl sender, MouseEventArgs e)
{
var peptideIndex = PeptideIndexFromPoint(new PointF(e.X, e.Y));
if (peptideIndex != null)
{
double x, y;
PointFromPeptide(peptideIndex.DocNode, out x, out y);
if (RTGraphController.PlotType == PlotTypeRT.residuals && Data != null &&
Data.ResidualsRegression != null && Data.ResidualsRegression.Conversion != null)
y = Data.GetResidual(Data.ResidualsRegression, x, y);
if (_tip == null)
_tip = new NodeTip(this);
_tip.SetTipProvider(
new PeptideRegressionTipProvider(peptideIndex.DocNode, XAxis.Title.Text, YAxis.Title.Text,
new PointD(x, y)),
new Rectangle(e.Location, new Size()),
e.Location);
GraphSummary.Cursor = Cursors.Hand;
return true;
}
else
{
if (_tip != null)
_tip.HideTip();
return base.HandleMouseMoveEvent(sender, e);
}
}
public override bool HandleMouseDownEvent(ZedGraphControl sender, MouseEventArgs e)
{
var peptideIndex = PeptideIndexFromPoint(new PointF(e.X, e.Y));
if (peptideIndex != null)
{
var document = GraphSummary.DocumentUIContainer.DocumentUI;
var pathSelect = document.GetPathTo((int) SrmDocument.Level.Molecules,
peptideIndex.IndexDoc);
SelectPeptide(pathSelect);
return true;
}
return false;
}
public bool RunToRun { get; private set; }
public void SelectPeptide(IdentityPath peptidePath)
{
GraphSummary.StateProvider.SelectedPath = peptidePath;
if (ShowReplicate == ReplicateDisplay.best && !RunToRun)
{
var document = GraphSummary.DocumentUIContainer.DocumentUI;
var nodePep = (PeptideDocNode)document.FindNode(peptidePath);
int resultsIndex = nodePep.BestResult;
if (resultsIndex != -1)
GraphSummary.StateProvider.SelectedResultsIndex = resultsIndex;
}
}
public bool AllowDeletePoint(PointF point)
{
return PeptideIndexFromPoint(point) != null;
}
private GraphData Data
{
get
{
return _data;
}
set
{
_data = value;
}
}
public bool HasOutliers
{
get
{
var data = Data;
return data != null && data.HasOutliers;
}
}
public PeptideDocNode[] Outliers
{
get
{
GraphData data = Data;
return data == null ? null : data.Outliers;
}
}
public static PeptideDocNode[] CalcOutliers(SrmDocument document, double threshold, int? precision, bool bestResult)
{
var data = new GraphData(document, null, -1, threshold, precision, true, bestResult,
RTGraphController.PointsType, RTGraphController.RegressionMethod, -1, null, CustomCancellationToken.NONE);
return data.Refine(() => false).Outliers;
}
public RetentionTimeRegression RegressionRefined
{
get
{
GraphData data = Data;
return data == null ? null : data.RegressionRefined;
}
}
public RetentionTimeStatistics StatisticsRefined
{
get
{
GraphData data = Data;
return data == null ? null : data.StatisticsRefined;
}
}
private static bool IsValidFor(GraphData data, SrmDocument document)
{
return data != null && data.IsValidFor(document);
}
public bool IsValidFor(SrmDocument document, int targetIndex, int originalIndex, bool bestResult, double threshold, bool refine, PointsTypeRT pointsType, RegressionMethodRT regressionMethod)
{
var data = Data;
return data != null && data.IsValidFor(document, targetIndex, originalIndex,bestResult, threshold, refine, pointsType, regressionMethod);
}
public void Clear()
{
Data = null;
Title.Text = string.Empty;
CurveList.Clear();
GraphObjList.Clear();
}
public void Graph(PeptideDocNode nodeSelected)
{
var data = Data;
if (data != null)
data.Graph(this, nodeSelected);
}
private GraphData Update(SrmDocument document, int targetIndex, double threshold, bool refine, PointsTypeRT pointsType, RegressionMethodRT regressionMethod, int origIndex, CancellationToken token)
{
bool bestResults = (ShowReplicate == ReplicateDisplay.best);
return new GraphData(document, Data, targetIndex, threshold, null, refine, bestResults, pointsType, regressionMethod, origIndex, this, new CustomCancellationToken(token));
}
private static bool IsDataRefined(GraphData data)
{
return data != null && data.IsRefined();
}
public bool IsRefined
{
get { return IsDataRefined(Data); }
}
public bool RegressionRefinedNull => Data.RegressionRefinedNull;
private GraphData Refine(GraphData currentData, Func<bool> isCanceled)
{
GraphData dataNew = currentData != null ? currentData.Refine(isCanceled) : null;
// No refinement happened, if data did not change
if (ReferenceEquals(dataNew, currentData))
return currentData;
return dataNew;
}
public override void Draw(Graphics g)
{
GraphObjList.Clear();
var data = Data;
if (data != null && RTGraphController.PlotType == PlotTypeRT.correlation)
{
// Force Axes to recalculate to ensure proper layout of labels
AxisChange(g);
data.AddLabels(this, g);
}
base.Draw(g);
}
public PeptideDocumentIndex PeptideIndexFromPoint(PointF point)
{
var data = Data;
return data != null ? data.PeptideIndexFromPoint(this, point) : null;
}
public bool PointFromPeptide(PeptideDocNode peptide, out double score, out double time)
{
score = time = 0;
return Data != null && Data.PointFromPeptide(peptide, out score, out time);
}
private const int OVER_THRESHOLD = 4;
public bool PointIsOver(PointF point, double score, double time)
{
float x = XAxis.Scale.Transform(score);
if (Math.Abs(x - point.X) > OVER_THRESHOLD)
return false;
float y = YAxis.Scale.Transform(time);
if (Math.Abs(y - point.Y) > OVER_THRESHOLD)
return false;
return true;
}
private void Cancel(bool createNew = true)
{
if (_cancellationTokenSource == null)
return;
_cancellationTokenSource.Cancel();
if (createNew)
_cancellationTokenSource = new CancellationTokenSource();
}
public override void UpdateGraph(bool selectionChanged)
{
GraphHelper.FormatGraphPane(this);
SrmDocument document = GraphSummary.DocumentUIContainer.DocumentUI;
PeptideDocNode nodeSelected = null;
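            // A target index of -1 means no single replicate is selected (best- or all-replicate display).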
int targetIndex = (ShowReplicate == ReplicateDisplay.single || RunToRun ? GraphSummary.TargetResultsIndex : -1);
int originalIndex = RunToRun ? GraphSummary.OriginalResultsIndex : -1;
var results = document.Settings.MeasuredResults;
bool resultsAvailable = results != null;
if (resultsAvailable)
{
if (targetIndex == -1)
resultsAvailable = results.IsLoaded;
else
resultsAvailable = results.Chromatograms.Count > targetIndex &&
results.IsChromatogramSetLoaded(targetIndex);
}
if (!resultsAvailable)
{
Clear();
}
else
{
var nodeTree = GraphSummary.StateProvider.SelectedNode as SrmTreeNode;
var nodePeptide = nodeTree as PeptideTreeNode;
while (nodePeptide == null && nodeTree != null)
{
nodeTree = nodeTree.Parent as SrmTreeNode;
nodePeptide = nodeTree as PeptideTreeNode;
}
if (nodePeptide != null)
nodeSelected = nodePeptide.DocNode;
bool shouldDrawGraph = true;
double threshold = RTGraphController.OutThreshold;
bool refine = Settings.Default.RTRefinePeptides && RTGraphController.CanDoRefinementForRegressionMethod;
bool bestResult = (ShowReplicate == ReplicateDisplay.best);
if ((RTGraphController.PointsType == PointsTypeRT.standards && !document.GetRetentionTimeStandards().Any()) ||
(RTGraphController.PointsType == PointsTypeRT.decoys &&
!document.PeptideGroups.Any(nodePepGroup => nodePepGroup.Children.Cast<PeptideDocNode>().Any(nodePep => nodePep.IsDecoy))) ||
RTGraphController.PointsType == PointsTypeRT.targets_fdr && targetIndex == -1) // Replicate display is not single and this is not a run to run regression
{
RTGraphController.PointsType = PointsTypeRT.targets;
}
PointsTypeRT pointsType = RTGraphController.PointsType;
RegressionMethodRT regressionMethod = RTGraphController.RegressionMethod;
if (!IsValidFor(document, targetIndex, originalIndex, bestResult, threshold, refine, pointsType,
regressionMethod))
{
var requested = new RequestContext(new RegressionSettings(document, targetIndex, originalIndex, bestResult,
threshold, refine, pointsType, regressionMethod, Settings.Default.RTCalculatorName, RunToRun));
if (UpdateData(requested))
{
// Calculate and refine regression on background thread
lock (_requestLock)
{
//
var ctx = _requestContext;
var token = _cancellationTokenSource.Token;
ActionUtil.RunAsync(() => UpdateAndRefine(ctx, token),
@"Update and refine regression data");
}
Title.Text = Resources.RTLinearRegressionGraphPane_UpdateGraph_Calculating___;
shouldDrawGraph = false;
}
}
else
{
lock (_requestLock)
{
_requestContext = null;
}
}
if (shouldDrawGraph)
Graph(nodeSelected);
}
lock (_requestLock)
{
if (_requestContext?.Settings == null)
Title.Text = string.Empty;
}
AxisChange();
GraphSummary.GraphControl.Invalidate();
}
// Returns true if data should be updated
bool UpdateData(RequestContext requested)
{
lock (_requestLock)
{
if (_requestContext?.Settings == null)
{
_requestContext = requested;
return true;
}
else
{
var valid = _requestContext.Settings.IsValidFor(requested.Settings);
if (!valid)
{
Cancel();
_requestContext = requested;
}
return !valid;
}
}
}
private class RequestContext
{
public RequestContext(RegressionSettings requested)
{
Settings = requested;
}
public RegressionSettings Settings { get; set; }
}
private RequestContext _requestContext;
private readonly object _requestLock = new object();
private bool _allowDisplayTip;
public bool IsCalculating
{
get
{
lock (_requestLock)
{
return _requestContext?.Settings != null;
}
}
}
private class RegressionSettings
{
public
RegressionSettings(SrmDocument document, int targetIndex, int originalIndex, bool bestResult,
double threshold, bool refine, PointsTypeRT pointsType, RegressionMethodRT regressionMethod, string calculatorName, bool isRunToRun)
{
Document = document;
TargetIndex = targetIndex;
OriginalIndex = originalIndex;
BestResult = bestResult;
Threshold = threshold;
Refine = refine;
PointsType = pointsType;
RegressionMethod = regressionMethod;
CalculatorName = calculatorName;
if (!string.IsNullOrEmpty(CalculatorName))
Calculators = new[] {Settings.Default.GetCalculatorByName(calculatorName)};
else
Calculators = Settings.Default.RTScoreCalculatorList.ToArray();
IsRunToRun = isRunToRun;
}
public bool IsValidFor(RegressionSettings other)
{
return IsValidFor(other.Document, other.TargetIndex, other.OriginalIndex, other.BestResult,
other.Threshold, other.Refine, other.PointsType, other.RegressionMethod, other.CalculatorName, other.IsRunToRun);
}
private bool IsValidFor(SrmDocument document, int targetIndex, int originalIndex, bool bestResult,
double threshold, bool refine, PointsTypeRT pointsType, RegressionMethodRT regressionMethod, string calculatorName, bool isRunToRun)
{
if(!(ReferenceEquals(Document, document) && TargetIndex == targetIndex &&
OriginalIndex == originalIndex && BestResult == bestResult && Threshold == threshold &&
Refine == refine && PointsType == pointsType && RegressionMethod == regressionMethod &&
IsRunToRun == isRunToRun))
return false;
if (!IsRunToRun)
{
if (string.IsNullOrEmpty(calculatorName))
return ArrayUtil.EqualsDeep(Calculators, Settings.Default.RTScoreCalculatorList);
else
return CalculatorName == calculatorName && Equals(Calculators[0],
Settings.Default.GetCalculatorByName(calculatorName));
}
return true;
}
public SrmDocument Document { get; private set; }
public int TargetIndex { get; private set; }
public int OriginalIndex { get; private set; }
public bool BestResult { get; private set; }
public double Threshold { get; private set; }
public bool Refine { get; private set; }
public PointsTypeRT PointsType { get; private set; }
public RegressionMethodRT RegressionMethod { get; private set; }
public string CalculatorName { get; private set; }
public RetentionScoreCalculatorSpec[] Calculators { get; private set; }
public bool IsRunToRun { get; private set; }
}
private void UpdateAndRefine(RequestContext requestContext,
CancellationToken cancellationToken)
{
try
{
var regressionSettings = requestContext.Settings;
var newData = Update(regressionSettings.Document, regressionSettings.TargetIndex,
regressionSettings.Threshold,
regressionSettings.Refine, regressionSettings.PointsType, regressionSettings.RegressionMethod,
regressionSettings.OriginalIndex,
// ReSharper disable once InconsistentlySynchronizedField
cancellationToken);
if (regressionSettings.Refine && !IsDataRefined(newData))
{
var data = newData;
newData = Refine(newData, () => cancellationToken.IsCancellationRequested ||
!IsValidFor(data, GraphSummary.DocumentUIContainer.Document));
}
ThreadingHelper.CheckCanceled(cancellationToken);
// Update the graph on the UI thread.
lock (_requestLock)
{
if (ReferenceEquals(_requestContext, requestContext))
{
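                        // Atomically swap in the newly calculated data; the exchange only happens
                        // if no other thread has replaced _data in the meantime.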
Interlocked.CompareExchange(ref _data, newData, Data);
}
// Set to null so that the next UpdateGraph call will update graph title accordingly
requestContext.Settings = null;
}
if (GraphSummary.IsHandleCreated)
{
try
{
GraphSummary.Invoke(new Action(() =>
{
try
{
if (!cancellationToken.IsCancellationRequested)
UpdateGraph(false);
}
catch (Exception ex)
{
Program.ReportException(ex);
}
}));
}
catch (ObjectDisposedException)
{
// Can happen during tests
}
}
}
catch (OperationCanceledException)
{
}
catch (Exception x)
{
Program.ReportException(x);
}
lock (_requestLock)
{
requestContext.Settings = null;
}
}
/// <summary>
/// Holds the data currently displayed in the graph.
/// </summary>
sealed class GraphData : Immutable
{
private readonly SrmDocument _document;
private readonly RTLinearRegressionGraphPane _graphPane;
private readonly RegressionMethodRT _regressionMethod;
private readonly int _targetIndex;
private readonly int _originalIndex; // set to -1 if we are using IRTs
private readonly bool _bestResult;
private readonly double _threshold;
private readonly int? _thresholdPrecision;
private readonly bool _refine;
private readonly PointsTypeRT _pointsType;
private readonly List<PeptideDocumentIndex> _peptidesIndexes;
private readonly List<MeasuredRetentionTime> _targetTimes;
private readonly IList<MeasuredRetentionTime> _originalTimes;
private readonly RetentionTimeScoreCache _scoreCache;
private readonly RetentionTimeRegression _regressionPredict;
private readonly IRegressionFunction _conversionPredict;
private readonly RetentionTimeStatistics _statisticsPredict;
private readonly RetentionTimeRegression _regressionAll;
private readonly RetentionTimeStatistics _statisticsAll;
private RetentionTimeRegression _regressionRefined;
private RetentionTimeStatistics _statisticsRefined;
private double[] _timesRefined;
private double[] _scoresRefined;
private double[] _timesOutliers;
private double[] _scoresOutliers;
private readonly string _calculatorName;
private readonly RetentionScoreCalculatorSpec _calculator;
private RetentionScoreCalculatorSpec Calculator { get { return _calculator; } }
private HashSet<int> _outlierIndexes;
private bool IsRunToRun { get { return _graphPane != null && _graphPane.RunToRun; } }
public GraphData(SrmDocument document,
GraphData dataPrevious,
int targetIndex,
double threshold,
int? thresholdPrecision,
bool refine,
bool bestResult,
PointsTypeRT pointsType,
RegressionMethodRT regressionMethod,
int originalIndex,
RTLinearRegressionGraphPane graphPane,
CustomCancellationToken token
)
{
_document = document;
_graphPane = graphPane;
_targetIndex = targetIndex;
_originalIndex = originalIndex;
if(IsRunToRun && _originalIndex < 0)
throw new ArgumentException(@"Original index cannot not be negative if we are doing run to run regression");
_bestResult = bestResult && !IsRunToRun;
_threshold = threshold;
_thresholdPrecision = thresholdPrecision;
_pointsType = pointsType;
_regressionMethod = regressionMethod;
_peptidesIndexes = new List<PeptideDocumentIndex>();
_targetTimes = new List<MeasuredRetentionTime>();
var originalTimes = IsRunToRun ? new List<MeasuredRetentionTime>() : null;
int index = -1;
var standards = new HashSet<Target>();
if (RTGraphController.PointsType == PointsTypeRT.standards)
standards = document.GetRetentionTimeStandards();
// Only used if we are comparing two runs
var origTimesDict = IsRunToRun ? new Dictionary<Target, double>() : null;
var targetTimesDict = IsRunToRun ? new Dictionary<Target, double>() : null;
foreach (var nodePeptide in document.Molecules)
{
ThreadingHelper.CheckCanceled(token);
index++;
switch (RTGraphController.PointsType)
{
case PointsTypeRT.targets:
if (nodePeptide.IsDecoy)
continue;
break;
case PointsTypeRT.targets_fdr:
{
if(nodePeptide.IsDecoy)
continue;
if (TargetIndex != -1 && GetMaxQValue(nodePeptide, TargetIndex) >= 0.01 ||
OriginalIndex != -1 && GetMaxQValue(nodePeptide, OriginalIndex) >= 0.01)
continue;
break;
}
case PointsTypeRT.standards:
if (!standards.Contains(document.Settings.GetModifiedSequence(nodePeptide))
|| nodePeptide.GlobalStandardType != StandardType.IRT) // In case of 15N labeled peptides, the unlabeled form may also show up
continue;
break;
case PointsTypeRT.decoys:
if (!nodePeptide.IsDecoy)
continue;
break;
}
float? rtTarget = null;
//Only used if we are doing run to run, otherwise we use scores
float? rtOrig = null;
if (originalIndex != -1)
rtOrig = nodePeptide.GetSchedulingTime(originalIndex);
if (!_bestResult)
rtTarget = nodePeptide.GetSchedulingTime(targetIndex);
else
{
int iBest = nodePeptide.BestResult;
if (iBest != -1)
rtTarget = nodePeptide.GetSchedulingTime(iBest);
}
var modSeq = _document.Settings.GetSourceTarget(nodePeptide);
if (!rtTarget.HasValue)
rtTarget = 0;
if (!rtOrig.HasValue)
rtOrig = 0;
_peptidesIndexes.Add(new PeptideDocumentIndex(nodePeptide, index));
_targetTimes.Add(new MeasuredRetentionTime(modSeq, rtTarget.Value));
if(IsRunToRun)
originalTimes.Add(new MeasuredRetentionTime(modSeq,rtOrig.Value));
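                    // For run-to-run regressions keep only the first occurrence of each peptide
                    // sequence; later duplicates are removed from all three lists below.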
if (IsRunToRun )
{
if (!targetTimesDict.ContainsKey(modSeq))
{
targetTimesDict.Add(modSeq, rtTarget.Value);
origTimesDict.Add(modSeq, rtOrig.Value);
}
else
{
_peptidesIndexes.RemoveAt(_peptidesIndexes.Count-1);
_targetTimes.RemoveAt(_targetTimes.Count-1);
originalTimes.RemoveAt(originalTimes.Count-1);
}
}
}
_originalTimes = originalTimes != null ? originalTimes.ToArray() : null;
_calculatorName = Settings.Default.RTCalculatorName;
if (IsRunToRun)
{
_calculator = new DictionaryRetentionScoreCalculator(XmlNamedElement.NAME_INTERNAL, origTimesDict);
var alignedRetentionTimes = AlignedRetentionTimes.AlignLibraryRetentionTimes(targetTimesDict,
origTimesDict, refine ? threshold : 0, _regressionMethod,
token);
if (alignedRetentionTimes != null)
{
_regressionAll = alignedRetentionTimes.Regression;
_statisticsAll = alignedRetentionTimes.RegressionStatistics;
}
}
else
{
var calc = !string.IsNullOrEmpty(_calculatorName)
? Settings.Default.GetCalculatorByName(Settings.Default.RTCalculatorName)
: null;
if (calc == null)
{
// Initialize all calculators
Settings.Default.RTScoreCalculatorList.Initialize(null);
var summary = RetentionTimeRegression.CalcBestRegressionBackground(XmlNamedElement.NAME_INTERNAL,
Settings.Default.RTScoreCalculatorList.ToList(), _targetTimes, _scoreCache, true,
_regressionMethod, token);
_calculator = summary.Best.Calculator;
_statisticsAll = summary.Best.Statistics;
_regressionAll = summary.Best.Regression;
}
else
{
// Initialize the one calculator
calc = Settings.Default.RTScoreCalculatorList.Initialize(null, calc);
double unused;
_regressionAll = RetentionTimeRegression.CalcSingleRegression(XmlNamedElement.NAME_INTERNAL,
calc,
_targetTimes,
_scoreCache,
true,
_regressionMethod,
out _statisticsAll,
out unused,
token);
ThreadingHelper.CheckCanceled(token);
_calculator = calc;
//If _regressionAll is null, it is safe to assume that the calculator is an iRT Calc with
//its database disconnected.
if (_regressionAll == null)
{
var tryIrtCalc = calc as RCalcIrt;
//Only show an error message if the user specifically chooses this calculator.
if (dataPrevious != null && !ReferenceEquals(calc, dataPrevious.Calculator) &&
tryIrtCalc != null)
{
throw new DatabaseNotConnectedException(tryIrtCalc);
}
}
}
}
if (_regressionAll != null)
{
_scoreCache = new RetentionTimeScoreCache(new[] { _calculator }, _targetTimes,
dataPrevious != null ? dataPrevious._scoreCache : null);
if (dataPrevious != null && !ReferenceEquals(_calculator, dataPrevious._calculator))
_scoreCache.RecalculateCalcCache(_calculator);
_scoresRefined = _statisticsAll.ListHydroScores.ToArray();
_timesRefined = _statisticsAll.ListRetentionTimes.ToArray();
}
_regressionPredict = (IsRunToRun || _regressionMethod != RegressionMethodRT.linear) ? null : document.Settings.PeptideSettings.Prediction.RetentionTime;
if (_regressionPredict != null)
{
if (!Equals(_calculator, _regressionPredict.Calculator))
_regressionPredict = null;
else
{
IDictionary<Target, double> scoreCache = null;
if (_regressionAll != null && Equals(_regressionAll.Calculator, _regressionPredict.Calculator))
scoreCache = _statisticsAll.ScoreCache;
// This is a bit of a HACK to better support the very common case of replicate graphing
// with a replicate that only has one file. More would need to be done for replicates
// composed of multiple files.
ChromFileInfoId fileId = null;
if (!bestResult && targetIndex != -1)
{
var chromatogramSet = document.Settings.MeasuredResults.Chromatograms[targetIndex];
if (chromatogramSet.FileCount > 0)
{
fileId = chromatogramSet.MSDataFileInfos[0].FileId;
_conversionPredict = _regressionPredict.GetConversion(fileId);
}
}
_statisticsPredict = _regressionPredict.CalcStatistics(_targetTimes, scoreCache, fileId);
}
}
// Only refine, if not already exceeding the threshold
_refine = refine && !IsRefined();
}
private float GetMaxQValue(PeptideDocNode node, int replicateIndex)
{
var chromInfos = node.TransitionGroups
                    .Select(tr => tr.GetSafeChromInfo(replicateIndex).FirstOrDefault(ci => ci.OptimizationStep == 0))
.Where(ci => ci?.QValue != null).ToArray();
if (chromInfos.Length == 0)
return 1.0f;
return chromInfos.Max(ci => ci.QValue.Value);
}
public bool IsValidFor(SrmDocument document)
{
return ReferenceEquals(document, _document);
}
public bool IsValidFor(SrmDocument document, int targetIndex, int originalIndex, bool bestResult, double threshold, bool refine, PointsTypeRT pointsType, RegressionMethodRT regressionMethod)
{
string calculatorName = Settings.Default.RTCalculatorName;
if (string.IsNullOrEmpty(calculatorName) && !IsRunToRun)
calculatorName = _calculator.Name;
return IsValidFor(document) &&
_targetIndex == targetIndex &&
_originalIndex == originalIndex &&
_bestResult == bestResult &&
_threshold == threshold &&
_pointsType == pointsType &&
_regressionMethod == regressionMethod &&
(IsRunToRun || (_calculatorName == Settings.Default.RTCalculatorName &&
ReferenceEquals(_calculator, Settings.Default.GetCalculatorByName(calculatorName)))) &&
// Valid if refine is true, and this data requires no further refining
(_refine == refine || (refine && IsRefined()));
}
public int TargetIndex { get { return _targetIndex; } }
public int OriginalIndex { get { return _originalIndex; } }
public RetentionTimeRegression RegressionRefined
{
get { return _regressionRefined ?? _regressionAll; }
}
public RetentionTimeStatistics StatisticsRefined
{
get { return _statisticsRefined ?? _statisticsAll; }
}
public bool RegressionRefinedNull => _regressionRefined == null;
public bool IsRefined()
{
// If refinement has been performed, or it doesn't need to be.
if (_regressionRefined != null)
return true;
if (_statisticsAll == null)
return false;
return RetentionTimeRegression.IsAboveThreshold(_statisticsAll.R, _threshold);
}
public GraphData Refine(Func<bool> isCanceled)
{
if (IsRefined())
return this;
var result = ImClone(this).RefineCloned(_threshold, _thresholdPrecision, isCanceled);
if (result == null)
return this;
return result;
}
private GraphData RefineCloned(double threshold, int? precision, Func<bool> isCanceled)
{
// Create list of deltas between predicted and measured times
_outlierIndexes = new HashSet<int>();
// Start with anything assigned a zero retention time as outliers
for (int i = 0; i < _targetTimes.Count; i++)
{
if (_targetTimes[i].RetentionTime == 0 || (_originalTimes != null && _originalTimes[i].RetentionTime == 0))
_outlierIndexes.Add(i);
}
// Now that we have added iRT calculators, RecalcRegression
                // cannot simply mark peptides as outliers at will anymore. It must know which peptides, if any,
// are required by the calculator for a regression. With iRT calcs, the standard is required.
if(!_calculator.IsUsable)
return null;
HashSet<Target> standardNames;
try
{
var names = _calculator.GetStandardPeptides(_targetTimes.Select(pep => pep.PeptideSequence));
standardNames = new HashSet<Target>(names);
}
catch (CalculatorException)
{
standardNames = new HashSet<Target>();
}
//For run to run all peptides are variables. There are no standards.
var standardPeptides = IsRunToRun ? new MeasuredRetentionTime[0] : _targetTimes.Where(pep => pep.IsStandard && standardNames.Contains(pep.PeptideSequence)).ToArray();
var variableTargetPeptides = IsRunToRun ? _targetTimes.ToArray() : _targetTimes.Where(pep => !pep.IsStandard || !standardNames.Contains(pep.PeptideSequence)).ToArray();
var variableOrigPeptides = _originalTimes;
//Throws DatabaseNotConnectedException
_regressionRefined = (_regressionAll == null
? null
: _regressionAll.FindThreshold(threshold,
precision,
0,
variableTargetPeptides.Length,
standardPeptides,
variableTargetPeptides,
variableOrigPeptides,
_statisticsAll,
_calculator,
_regressionMethod,
_scoreCache,
new CustomCancellationToken(CancellationToken.None, isCanceled),
ref _statisticsRefined,
ref _outlierIndexes));
if (ReferenceEquals(_regressionRefined, _regressionAll))
return null;
// Separate lists into acceptable and outliers
var listScoresRefined = new List<double>();
var listTimesRefined = new List<double>();
var listScoresOutliers = new List<double>();
var listTimesOutliers = new List<double>();
for (int i = 0; i < _scoresRefined.Length; i++)
{
if (_outlierIndexes.Contains(i))
{
listScoresOutliers.Add(_scoresRefined[i]);
listTimesOutliers.Add(_timesRefined[i]);
}
else
{
listScoresRefined.Add(_scoresRefined[i]);
listTimesRefined.Add(_timesRefined[i]);
}
}
_scoresRefined = listScoresRefined.ToArray();
_timesRefined = listTimesRefined.ToArray();
_scoresOutliers = listScoresOutliers.ToArray();
_timesOutliers = listTimesOutliers.ToArray();
return this;
}
public PeptideDocumentIndex PeptideIndexFromPoint(RTLinearRegressionGraphPane graphPane, PointF point)
{
var regression = ResidualsRegression;
if (RTGraphController.PlotType == PlotTypeRT.correlation)
regression = null;
if (RTGraphController.PlotType == PlotTypeRT.correlation || regression != null)
{
int iRefined = 0, iOut = 0;
for (int i = 0; i < _peptidesIndexes.Count; i++)
{
if (_outlierIndexes != null && _outlierIndexes.Contains(i))
{
if (PointIsOverEx(graphPane, point, regression, _scoresOutliers[iOut], _timesOutliers[iOut]))
return _peptidesIndexes[i];
iOut++;
}
else if (_scoresRefined != null && _timesRefined != null)
{
if (PointIsOverEx(graphPane, point, regression, _scoresRefined[iRefined], _timesRefined[iRefined]))
return _peptidesIndexes[i];
iRefined++;
}
}
}
return null;
}
private bool PointIsOverEx(RTLinearRegressionGraphPane graphPane, PointF point,
RetentionTimeRegression regression, double x, double y)
{
if (regression != null && regression.IsUsable)
y = GetResidual(regression, x, y);
return graphPane.PointIsOver(point, x, y);
}
public bool PointFromPeptide(PeptideDocNode nodePeptide, out double score, out double time)
{
if (nodePeptide != null && _regressionAll != null)
{
int iRefined = 0, iOut = 0;
for (int i = 0; i < _peptidesIndexes.Count; i++)
{
if (_outlierIndexes != null && _outlierIndexes.Contains(i))
{
if (ReferenceEquals(nodePeptide, _peptidesIndexes[i].DocNode))
{
score = _scoresOutliers[iOut];
time = _timesOutliers[iOut];
return true;
}
iOut++;
}
else
{
if (ReferenceEquals(nodePeptide, _peptidesIndexes[i].DocNode))
{
score = _scoresRefined[iRefined];
time = _timesRefined[iRefined];
return true;
}
iRefined++;
}
}
}
score = 0;
time = 0;
return false;
}
public bool HasOutliers { get { return _outlierIndexes != null && _outlierIndexes.Count > 0; } }
public PeptideDocNode[] Outliers
{
get
{
if (!HasOutliers)
return new PeptideDocNode[0];
var listOutliers = new List<PeptideDocNode>();
for (int i = 0; i < _peptidesIndexes.Count; i++)
{
if (_outlierIndexes.Contains(i))
listOutliers.Add(_peptidesIndexes[i].DocNode);
}
return listOutliers.ToArray();
}
}
public void Graph(GraphPane graphPane, PeptideDocNode nodeSelected)
{
graphPane.CurveList.Clear();
graphPane.XAxis.Title.Text = XAxisName;
graphPane.YAxis.Title.Text = YAxisName;
if (RTGraphController.PlotType == PlotTypeRT.correlation)
GraphCorrelation(graphPane, nodeSelected);
else
GraphResiduals(graphPane, nodeSelected);
}
private void GraphCorrelation(GraphPane graphPane, PeptideDocNode nodeSelected)
{
if (graphPane.YAxis.Scale.MinAuto)
{
graphPane.YAxis.Scale.MinAuto = false;
graphPane.YAxis.Scale.Min = 0;
}
double scoreSelected, timeSelected;
if (PointFromPeptide(nodeSelected, out scoreSelected, out timeSelected))
{
Color colorSelected = GraphSummary.ColorSelected;
var curveOut = graphPane.AddCurve(null, new[] { scoreSelected }, new[] { timeSelected },
colorSelected, SymbolType.Diamond);
curveOut.Line.IsVisible = false;
curveOut.Symbol.Fill = new Fill(colorSelected);
curveOut.Symbol.Size = 8f;
}
string labelPoints = Resources.GraphData_Graph_Peptides;
if (!_refine)
{
GraphRegression(graphPane, _statisticsAll, _regressionAll, Resources.GraphData_Graph_Regression, COLOR_LINE_REFINED);
}
else
{
labelPoints = Resources.GraphData_Graph_Peptides_Refined;
GraphRegression(graphPane, _statisticsRefined, _regressionAll, Resources.GraphData_Graph_Regression_Refined, COLOR_LINE_REFINED);
GraphRegression(graphPane, _statisticsAll, _regressionAll, Resources.GraphData_Graph_Regression, COLOR_LINE_ALL);
}
if (_regressionPredict != null && Settings.Default.RTPredictorVisible)
{
GraphRegression(graphPane, _statisticsPredict, _regressionAll, Resources.GraphData_Graph_Predictor, COLOR_LINE_PREDICT);
}
var curve = graphPane.AddCurve(labelPoints, _scoresRefined, _timesRefined,
Color.Black, SymbolType.Diamond);
curve.Line.IsVisible = false;
curve.Symbol.Border.IsVisible = false;
curve.Symbol.Fill = new Fill(COLOR_REFINED);
if (_scoresOutliers != null)
{
var curveOut = graphPane.AddCurve(Resources.GraphData_Graph_Outliers, _scoresOutliers, _timesOutliers,
Color.Black, SymbolType.Diamond);
curveOut.Line.IsVisible = false;
curveOut.Symbol.Border.IsVisible = false;
curveOut.Symbol.Fill = new Fill(COLOR_OUTLIERS);
}
}
private void GraphResiduals(GraphPane graphPane, PeptideDocNode nodeSelected)
{
if (!graphPane.YAxis.Scale.MinAuto && graphPane.ZoomStack.Count == 0)
{
graphPane.YAxis.Scale.MinAuto = true;
graphPane.YAxis.Scale.MaxAuto = true;
}
var regression = ResidualsRegression;
if (regression == null || regression.Conversion == null)
return;
double scoreSelected, timeSelected;
if (PointFromPeptide(nodeSelected, out scoreSelected, out timeSelected))
{
timeSelected = GetResidual(regression, scoreSelected, timeSelected);
Color colorSelected = GraphSummary.ColorSelected;
var curveOut = graphPane.AddCurve(null, new[] { scoreSelected }, new[] { timeSelected },
colorSelected, SymbolType.Diamond);
curveOut.Line.IsVisible = false;
curveOut.Symbol.Fill = new Fill(colorSelected);
curveOut.Symbol.Size = 8f;
}
string labelPoints = _refine ? Resources.GraphData_Graph_Peptides_Refined : Resources.GraphData_Graph_Peptides;
var curve = graphPane.AddCurve(labelPoints, _scoresRefined, GetResiduals(regression, _scoresRefined, _timesRefined),
Color.Black, SymbolType.Diamond);
curve.Line.IsVisible = false;
curve.Symbol.Border.IsVisible = false;
curve.Symbol.Fill = new Fill(COLOR_REFINED);
if (_scoresOutliers != null)
{
var curveOut = graphPane.AddCurve(Resources.GraphData_Graph_Outliers, _scoresOutliers,
GetResiduals(regression, _scoresOutliers, _timesOutliers),
Color.Black, SymbolType.Diamond);
curveOut.Line.IsVisible = false;
curveOut.Symbol.Border.IsVisible = false;
curveOut.Symbol.Fill = new Fill(COLOR_OUTLIERS);
}
}
public RetentionTimeRegression ResidualsRegression
{
get { return _regressionPredict ?? _regressionRefined ?? _regressionAll; }
}
private string ResidualsLabel
{
get
{
if (IsRunToRun)
{
return string.Format(Resources.GraphData_ResidualsLabel_Time_from_Regression___0__,
_document.MeasuredResults.Chromatograms[_targetIndex].Name);
}
else
{
return _regressionPredict != null
? Resources.GraphData_GraphResiduals_Time_from_Prediction
: Resources.GraphData_GraphResiduals_Time_from_Regression;
}
}
}
private string CorrelationLabel
{
get
{
if (IsRunToRun)
{
return string.Format(Resources.GraphData_CorrelationLabel_Measured_Time___0__,
_document.MeasuredResults.Chromatograms[_targetIndex].Name);
}
else
{
return Resources.RTLinearRegressionGraphPane_RTLinearRegressionGraphPane_Measured_Time;
}
}
}
private double[] GetResiduals(RetentionTimeRegression regression, double[] scores, double[] times)
{
var residualsRefined = new double[times.Length];
for (int i = 0; i < residualsRefined.Length; i++)
residualsRefined[i] = GetResidual(regression, scores[i], times[i]);
return residualsRefined;
}
public double GetResidual(RetentionTimeRegression regression, double score, double time)
{
//We round this for numerical error.
return Math.Round(time - GetConversion(regression).GetY(score), 6);
}
private IRegressionFunction GetConversion(RetentionTimeRegression regression)
{
if (regression == null)
return null;
if (ReferenceEquals(regression, _regressionPredict) && _conversionPredict != null)
return _conversionPredict;
return regression.Conversion;
}
private static void GraphRegression(GraphPane graphPane,
RetentionTimeStatistics statistics, RetentionTimeRegression regression, string name, Color color)
{
double[] lineScores, lineTimes;
if (statistics == null || regression == null)
{
lineScores = new double[0];
lineTimes = new double[0];
}
else
{
regression.Conversion.GetCurve(statistics, out lineScores, out lineTimes);
}
var curve = graphPane.AddCurve(name, lineScores, lineTimes, color, SymbolType.None);
if (lineScores.Length > 0 && lineTimes.Length > 0)
{
graphPane.AddCurve(string.Empty, new[] { lineScores[0] }, new[] { lineTimes[0] }, color, SymbolType.Square);
graphPane.AddCurve(string.Empty, new[] { lineScores.Last() }, new[] { lineTimes.Last() }, color, SymbolType.Square);
}
curve.Line.IsAntiAlias = true;
curve.Line.IsOptimizedDraw = true;
}
public void AddLabels(GraphPane graphPane, Graphics g)
{
RectangleF rectChart = graphPane.Chart.Rect;
PointF ptTop = rectChart.Location;
// Setup axes scales to enable the ReverseTransform method
var xAxis = graphPane.XAxis;
xAxis.Scale.SetupScaleData(graphPane, xAxis);
var yAxis = graphPane.YAxis;
yAxis.Scale.SetupScaleData(graphPane, yAxis);
float yNext = ptTop.Y;
double scoreLeft = xAxis.Scale.ReverseTransform(ptTop.X + 8);
double timeTop = yAxis.Scale.ReverseTransform(yNext);
if (!_refine)
{
yNext += AddRegressionLabel(graphPane, g, scoreLeft, timeTop,
_regressionAll, _statisticsAll, COLOR_LINE_REFINED);
}
else
{
yNext += AddRegressionLabel(graphPane, g, scoreLeft, timeTop,
_regressionRefined, _statisticsRefined, COLOR_LINE_REFINED);
timeTop = yAxis.Scale.ReverseTransform(yNext);
yNext += AddRegressionLabel(graphPane, g, scoreLeft, timeTop,
_regressionAll, _statisticsAll, COLOR_LINE_ALL);
}
if (_regressionPredict != null &&
_regressionPredict.Conversion != null &&
Settings.Default.RTPredictorVisible)
{
timeTop = yAxis.Scale.ReverseTransform(yNext);
AddRegressionLabel(graphPane, g, scoreLeft, timeTop,
_regressionPredict, _statisticsPredict, COLOR_LINE_PREDICT);
}
}
private float AddRegressionLabel(PaneBase graphPane, Graphics g, double score, double time,
RetentionTimeRegression regression, RetentionTimeStatistics statistics, Color color)
{
string label;
var conversion = GetConversion(regression);
if (conversion == null || statistics == null)
{
// ReSharper disable LocalizableElement
label = String.Format("{0} = ?, {1} = ?\n" + "{2} = ?\n" + "r = ?",
Resources.Regression_slope,
Resources.Regression_intercept,
Resources.GraphData_AddRegressionLabel_window);
// ReSharper restore LocalizableElement
}
else
{
label = regression.Conversion.GetRegressionDescription(statistics.R, regression.TimeWindow);
}
TextObj text = new TextObj(label, score, time,
CoordType.AxisXYScale, AlignH.Left, AlignV.Top)
{
IsClippedToChartRect = true,
ZOrder = ZOrder.E_BehindCurves,
FontSpec = GraphSummary.CreateFontSpec(color),
};
graphPane.GraphObjList.Add(text);
// Measure the text just added, and return its height
SizeF sizeLabel = text.FontSpec.MeasureString(g, label, graphPane.CalcScaleFactor());
return sizeLabel.Height + 3;
}
private string XAxisName
{
get
{
if (IsRunToRun)
{
if (_document.MeasuredResults != null && 0 <= _originalIndex && _originalIndex < _document.MeasuredResults.Chromatograms.Count)
{
return string.Format(Resources.GraphData_CorrelationLabel_Measured_Time___0__,
_document.MeasuredResults.Chromatograms[_originalIndex].Name);
}
return string.Empty;
}
return Calculator.Name;
}
}
private string YAxisName
{
get
{
if (RTGraphController.PlotType == PlotTypeRT.correlation)
return CorrelationLabel;
else
return ResidualsLabel;
}
}
}
public Rectangle ScreenRect { get { return Screen.GetBounds(GraphSummary); } }
public bool AllowDisplayTip
{
get { return !GraphSummary.IsDisposed && _allowDisplayTip; }
private set { _allowDisplayTip = value; }
}
public Rectangle RectToScreen(Rectangle r)
{
return GraphSummary.RectangleToScreen(r);
}
}
public sealed class PeptideDocumentIndex
{
public PeptideDocumentIndex(PeptideDocNode docNode, int indexDoc)
{
DocNode = docNode;
IndexDoc = indexDoc;
}
public PeptideDocNode DocNode { get; private set; }
public int IndexDoc { get; private set; }
}
}
| 1 | 13,256 | Add a TODO here to revisit if this was problematic. | ProteoWizard-pwiz | .cs |
@@ -23,7 +23,11 @@ class SSDHead(AnchorHead):
anchor_generator (dict): Config dict for anchor generator
bbox_coder (dict): Config of bounding box coder.
reg_decoded_bbox (bool): If true, the regression loss would be
- applied on decoded bounding boxes. Default: False
+ applied directly on decoded bounding boxes, meaning both the
+ predicted boxes and regression targets are with absolute
+ coordinates format. Default False. Note generally it should be
+ set to `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss`
+ in the bbox head.
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
""" # noqa: W605 | 1 | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmcv.runner import force_fp32
from mmdet.core import (build_anchor_generator, build_assigner,
build_bbox_coder, build_sampler, multi_apply)
from ..builder import HEADS
from ..losses import smooth_l1_loss
from .anchor_head import AnchorHead
# TODO: add loss evaluator for SSD
@HEADS.register_module()
class SSDHead(AnchorHead):
"""SSD head used in https://arxiv.org/abs/1512.02325.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
anchor_generator (dict): Config dict for anchor generator
bbox_coder (dict): Config of bounding box coder.
reg_decoded_bbox (bool): If true, the regression loss would be
applied on decoded bounding boxes. Default: False
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
""" # noqa: W605
def __init__(self,
num_classes=80,
in_channels=(512, 1024, 512, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
strides=[8, 16, 32, 64, 100, 300],
ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
basesize_ratio_range=(0.1, 0.9)),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=True,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
),
reg_decoded_bbox=False,
train_cfg=None,
test_cfg=None):
super(AnchorHead, self).__init__()
self.num_classes = num_classes
self.in_channels = in_channels
self.cls_out_channels = num_classes + 1 # add background class
self.anchor_generator = build_anchor_generator(anchor_generator)
num_anchors = self.anchor_generator.num_base_anchors
reg_convs = []
cls_convs = []
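        # Build one 3x3 conv per feature level: the regression branch predicts 4
        # box deltas per anchor and the classification branch predicts
        # (num_classes + 1) scores per anchor.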
for i in range(len(in_channels)):
reg_convs.append(
nn.Conv2d(
in_channels[i],
num_anchors[i] * 4,
kernel_size=3,
padding=1))
cls_convs.append(
nn.Conv2d(
in_channels[i],
num_anchors[i] * (num_classes + 1),
kernel_size=3,
padding=1))
self.reg_convs = nn.ModuleList(reg_convs)
self.cls_convs = nn.ModuleList(cls_convs)
self.bbox_coder = build_bbox_coder(bbox_coder)
self.reg_decoded_bbox = reg_decoded_bbox
self.use_sigmoid_cls = False
self.cls_focal_loss = False
self.train_cfg = train_cfg
self.test_cfg = test_cfg
        # set sampling=False for anchor_target
self.sampling = False
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# SSD sampling=False so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.fp16_enabled = False
def init_weights(self):
"""Initialize weights of the head."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform', bias=0)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple:
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
self.cls_convs):
cls_scores.append(cls_conv(feat))
bbox_preds.append(reg_conv(feat))
return cls_scores, bbox_preds
def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples):
"""Compute loss of a single image.
Args:
            cls_score (Tensor): Box scores for each image
Has shape (num_total_anchors, num_classes).
bbox_pred (Tensor): Box energies / deltas for each image
level with shape (num_total_anchors, 4).
            anchor (Tensor): Box reference for each scale level with shape
(num_total_anchors, 4).
labels (Tensor): Labels of each anchors with shape
(num_total_anchors,).
label_weights (Tensor): Label weights of each anchor with shape
(num_total_anchors,)
            bbox_targets (Tensor): BBox regression targets of each anchor with
shape (num_total_anchors, 4).
bbox_weights (Tensor): BBox regression loss weights of each anchor
with shape (num_total_anchors, 4).
num_total_samples (int): If sampling, num total samples equal to
the number of total anchors; Otherwise, it is the number of
positive anchors.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
loss_cls_all = F.cross_entropy(
cls_score, labels, reduction='none') * label_weights
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
pos_inds = ((labels >= 0) &
(labels < self.num_classes)).nonzero().reshape(-1)
neg_inds = (labels == self.num_classes).nonzero().view(-1)
num_pos_samples = pos_inds.size(0)
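        # Hard negative mining: keep at most neg_pos_ratio negatives per positive,
        # choosing the negative anchors with the highest classification loss.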
num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
if num_neg_samples > neg_inds.size(0):
num_neg_samples = neg_inds.size(0)
topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
loss_cls_pos = loss_cls_all[pos_inds].sum()
loss_cls_neg = topk_loss_cls_neg.sum()
loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
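        # With reg_decoded_bbox the regression loss is computed on decoded
        # (absolute-coordinate) boxes, so decode the raw predictions first.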
if self.reg_decoded_bbox:
bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
loss_bbox = smooth_l1_loss(
bbox_pred,
bbox_targets,
bbox_weights,
beta=self.train_cfg.smoothl1_beta,
avg_factor=num_total_samples)
return loss_cls[None], loss_bbox
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): each item is the ground-truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=1,
unmap_outputs=False)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_images = len(img_metas)
all_cls_scores = torch.cat([
s.permute(0, 2, 3, 1).reshape(
num_images, -1, self.cls_out_channels) for s in cls_scores
], 1)
all_labels = torch.cat(labels_list, -1).view(num_images, -1)
all_label_weights = torch.cat(label_weights_list,
-1).view(num_images, -1)
all_bbox_preds = torch.cat([
b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
for b in bbox_preds
], -2)
all_bbox_targets = torch.cat(bbox_targets_list,
-2).view(num_images, -1, 4)
all_bbox_weights = torch.cat(bbox_weights_list,
-2).view(num_images, -1, 4)
# concat all level anchors to a single tensor
all_anchors = []
for i in range(num_images):
all_anchors.append(torch.cat(anchor_list[i]))
# check NaN and Inf
assert torch.isfinite(all_cls_scores).all().item(), \
'classification scores become infinite or NaN!'
assert torch.isfinite(all_bbox_preds).all().item(), \
            'bbox predictions become infinite or NaN!'
losses_cls, losses_bbox = multi_apply(
self.loss_single,
all_cls_scores,
all_bbox_preds,
all_anchors,
all_labels,
all_label_weights,
all_bbox_targets,
all_bbox_weights,
num_total_samples=num_total_pos)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
| 1 | 22,288 | Note generally it -> It | open-mmlab-mmdetection | py |
@@ -51,6 +51,7 @@ model* instantiate_model(lbann_comm* comm,
if (type == "directed_acyclic_graph_model") {
return new directed_acyclic_graph_model(comm, mini_batch_size, obj, opt);
}
+#if 0
if (type == "recurrent_model") {
const auto& params = proto_model.recurrent();
return new recurrent_model(comm, | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/proto/factories.hpp"
#include "lbann/objective_functions/layer_term.hpp"
namespace lbann {
namespace proto {
namespace {
/** Instantiate a model based on prototext. */
model* instantiate_model(lbann_comm* comm,
objective_function* obj,
const lbann_data::Optimizer& proto_opt,
const lbann_data::Model& proto_model) {
std::stringstream err;
// Default optimizer
auto&& opt = construct_optimizer(comm, proto_opt);
// Construct model
const auto& type = proto_model.name();
const auto& mini_batch_size = proto_model.mini_batch_size();
if (type == "sequential_model" || type == "") {
return new sequential_model(comm, mini_batch_size, obj, opt);
}
if (type == "directed_acyclic_graph_model") {
return new directed_acyclic_graph_model(comm, mini_batch_size, obj, opt);
}
if (type == "recurrent_model") {
const auto& params = proto_model.recurrent();
return new recurrent_model(comm,
mini_batch_size,
obj,
opt,
params.unroll_depth());
}
if (type == "siamese_model") {
const auto& params = proto_model.siamese();
return new siamese_model(comm,
mini_batch_size,
obj,
opt,
params.num_heads());
}
// Throw error if model type is not supported
err << "unknown model type (" << type << ")";
LBANN_ERROR(err.str());
return nullptr;
}
void assign_layers_to_objective_function(std::vector<Layer*>& layer_list,
objective_function* obj,
const lbann_data::ObjectiveFunction& proto_obj) {
std::stringstream err;
// Construct map from layer names to layers
std::unordered_map<std::string, Layer*> names_to_layers;
for (auto&& l : layer_list) {
const auto& name = l->get_name();
if (names_to_layers.count(name) > 0) {
err << "layer name \"" << name << "\" is not unique";
LBANN_ERROR(err.str());
}
names_to_layers[name] = l;
}
// Assign evaluation layers to layer terms in objective function
auto&& obj_terms = obj->get_terms();
int num_layer_terms = 0;
for (size_t i = 0; i < obj_terms.size(); ++i) {
auto&& term = dynamic_cast<layer_term*>(obj_terms[i]);
if (term != nullptr) {
++num_layer_terms;
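      // Ignore layer terms beyond those listed in the prototext; the count
      // mismatch is reported as an error after this loop.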
if (num_layer_terms > proto_obj.layer_term_size()) { continue; }
const auto& params = proto_obj.layer_term(num_layer_terms-1);
auto&& eval = names_to_layers[params.layer()];
term->set_evaluation_layer(eval);
}
}
// Check that layer terms in objective function match prototext
if (num_layer_terms != proto_obj.layer_term_size()) {
err << "number of layer terms in objective function does not match prototext "
<< "(expected " << proto_obj.layer_term_size() << ", "
<< "found " << num_layer_terms << ")";
LBANN_ERROR(err.str());
}
}
void assign_layers_to_metrics(std::vector<Layer*>& layer_list,
std::vector<metric*>& metric_list,
const lbann_data::Model& proto_model) {
// Construct map from layer names to layers
std::unordered_map<std::string, Layer*> names_to_layers;
for (auto&& l : layer_list) {
const auto& name = l->get_name();
if (names_to_layers.count(name) > 0) {
std::stringstream err;
err << "layer name \"" << name << "\" is not unique";
LBANN_ERROR(err.str());
}
names_to_layers[name] = l;
}
// Assign evaluation layers to layer metrics
for (int i=0; i<proto_model.metric_size(); ++i) {
auto&& m = dynamic_cast<layer_metric*>(metric_list[i]);
if (m != nullptr) {
const auto& params = proto_model.metric(i).layer_metric();
auto&& eval = names_to_layers[params.layer()];
m->set_evaluation_layer(eval);
}
}
}
/** Setup pointers from layers to weights. */
void assign_weights_to_layers(std::vector<Layer*>& layer_list,
std::vector<weights*>& weights_list,
const lbann_data::Model& proto_model) {
std::stringstream err;
// Construct map from weights names to weights
std::unordered_map<std::string, weights*> names_to_weights;
for (auto&& w : weights_list) {
const auto& name = w->get_name();
if (names_to_weights.count(name) > 0) {
err << "weights name \"" << name << "\" is not unique";
LBANN_ERROR(err.str());
}
names_to_weights[name] = w;
}
// Find weights assigned to each layer
for (int i=0; i<proto_model.layer_size(); ++i) {
const auto& proto_layer = proto_model.layer(i);
auto& layer_weights = layer_list[i]->get_weights();
for (auto&& name : parse_list<std::string>(proto_layer.weights())) {
auto&& w = names_to_weights[name];
if (w == nullptr) {
err << "could not find weights named \"" << name << "\", "
<< "which are expected by layer " << layer_list[i]->get_name();
LBANN_ERROR(err.str());
}
layer_weights.push_back(w);
}
}
}
} // namespace
model* construct_model(lbann_comm* comm,
cudnn::cudnn_manager* cudnn,
std::map<execution_mode, generic_data_reader*>& data_readers,
const lbann_data::Optimizer& proto_opt,
const lbann_data::Model& proto_model) {
// Add layer graph
auto&& layer_list = construct_layer_graph(comm,
data_readers,
cudnn,
proto_model);
// Construct objective function
const auto& proto_obj = proto_model.objective_function();
auto&& obj = construct_objective_function(proto_obj);
assign_layers_to_objective_function(layer_list, obj, proto_obj);
// Instantiate model
auto&& m = instantiate_model(comm, obj, proto_opt, proto_model);
for (auto&& l : layer_list) { m->add_layer(l); }
// Add weights and assign to layers
for (int i=0; i<proto_model.weights_size(); i++) {
m->add_weights(construct_weights(comm,
cudnn,
proto_opt,
proto_model.weights(i)));
}
auto weights_list = m->get_weights();
assign_weights_to_layers(layer_list, weights_list, proto_model);
// Add metrics
for (int i=0; i<proto_model.metric_size(); ++i) {
m->add_metric(construct_metric(comm, proto_model.metric(i)));
}
auto metric_list = m->get_metrics();
assign_layers_to_metrics(layer_list, metric_list, proto_model);
// Add callbacks
auto&& summarizer = construct_summarizer(comm, proto_model);
for (int i=0; i<proto_model.callback_size(); i++) {
m->add_callback(construct_callback(comm,
proto_model.callback(i),
data_readers,
layer_list,
weights_list,
summarizer));
}
return m;
}
} // namespace proto
} // namespace lbann
| 1 | 12,965 | Is this relevant to this PR? | LLNL-lbann | cpp |
@@ -102,7 +102,7 @@ module Beaker
end
if block_hosts.is_a? Array
if block_hosts.length > 0
- if opts[:run_in_parallel]
+ if opts[:run_in_parallel] == true
# Pass caller[1] - the line that called block_on - for logging purposes.
result = block_hosts.map.each_in_parallel(caller[1]) do |h|
run_block_on h, &block | 1 | module Beaker
module Shared
#Methods for managing Hosts.
#- selecting hosts by role (Symbol or String)
#- selecting hosts by name (String)
#- adding additional method definitions for selecting by role
#- executing blocks of code against selected sets of hosts
module HostManager
#Find hosts from a given array of hosts that all have the desired role.
#@param [Array<Host>] hosts The hosts to examine
#@param [String] desired_role The hosts returned will have this role in their roles list
#@return [Array<Host>] The hosts that have the desired role in their roles list
def hosts_with_role(hosts, desired_role = nil)
hosts.select do |host|
desired_role.nil? or host['roles'].include?(desired_role.to_s)
end
end
#Find hosts from a given array of hosts that all have the desired name, match against host name,
#vmhostname and ip (the three valid ways to identify an individual host)
#@param [Array<Host>] hosts The hosts to examine
#@param [String] name The hosts returned will have this name/vmhostname/ip
#@return [Array<Host>] The hosts that have the desired name/vmhostname/ip
def hosts_with_name(hosts, name = nil)
hosts.select do |host|
name.nil? or host.name =~ /\A#{name}/ or host[:vmhostname] =~ /\A#{name}/ or host[:ip] =~ /\A#{name}/
end
end
#Find a single host with the role provided. Raise an error if more than one host is found to have the
#provided role.
#@param [Array<Host>] hosts The hosts to examine
#@param [String] role The host returned will have this role in its role list
#@return [Host] The single host with the desired role in its roles list
#@raise [ArgumentError] Raised if more than one host has the given role defined, or if no host has the
# role defined.
def only_host_with_role(hosts, role)
a_host = hosts_with_role(hosts, role)
case
when a_host.length == 0
raise ArgumentError, "There should be one host with #{role} defined!"
when a_host.length > 1
host_string = ( a_host.map { |host| host.name } ).join( ', ')
raise ArgumentError, "There should be only one host with #{role} defined, but I found #{a_host.length} (#{host_string})"
end
a_host.first
end
# Find at most a single host with the role provided. Raise an error if
# more than one host is found to have the provided role.
# @param [Array<Host>] hosts The hosts to examine
# @param [String] role The host returned will have this role in its role list
# @return [Host] The single host with the desired role in its roles list
# or nil if no host is found
# @raise [ArgumentError] Raised if more than one host has the given role defined
def find_at_most_one_host_with_role(hosts, role)
role_hosts = hosts_with_role(hosts, role)
host_with_role = nil
case role_hosts.length
when 0
when 1
host_with_role = role_hosts[0]
else
host_string = ( role_hosts.map { |host| host.name } ).join( ', ')
raise ArgumentError, "There should be only one host with #{role} defined, but I found #{role_hosts.length} (#{host_string})"
end
host_with_role
end
# Execute a block selecting the hosts that match with the provided criteria
#
# @param [Array<Host>, Host] hosts The host or hosts to run the provided block against
# @param [String, Symbol] filter Optional filter to apply to provided hosts - limits by name or role
# @param [Hash{Symbol=>String}] opts
# @option opts [Boolean] :run_in_parallel Whether to run on each host in parallel.
# @param [Block] block This method will yield to a block of code passed by the caller
#
# @todo (beaker3.0:BKR-571): simplify return types to Array<Result> only
#
# @return [Array<Result>, Result, nil] If an array of hosts has been
# passed (after filtering), then either an array of results is returned
# (if the array is non-empty), or nil is returned (if the array is empty).
# Else, a result object is returned. If filtering makes it such that only
# one host is left, then it's passed as a host object (not in an array),
# and thus a result object is returned.
def run_block_on hosts = [], filter = nil, opts = {}, &block
result = nil
block_hosts = hosts #the hosts to apply the block to after any filtering
if filter
if not hosts.empty?
block_hosts = hosts_with_role(hosts, filter) #check by role
if block_hosts.empty?
block_hosts = hosts_with_name(hosts, filter) #check by name
end
if block_hosts.length == 1 #we only found one matching host, don't need it wrapped in an array
block_hosts = block_hosts.pop
end
else
raise ArgumentError, "Unable to sort for #{filter} type hosts when provided with [] as Hosts"
end
end
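        # block_hosts is now either an Array of hosts or a single Host object
        # (when exactly one host matched the filter, or a single Host was passed in).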
if block_hosts.is_a? Array
if block_hosts.length > 0
if opts[:run_in_parallel]
# Pass caller[1] - the line that called block_on - for logging purposes.
result = block_hosts.map.each_in_parallel(caller[1]) do |h|
run_block_on h, &block
end
hosts.each{|host| host.close}# For some reason, I have to close the SSH connection
# after spawning a process and running commands on a host,
# or else it gets into a broken state for the next call.
else
result = block_hosts.map do |h|
run_block_on h, &block
end
end
else
# there are no matching hosts to execute against
# should warn here
# check if logger is defined in this context
if ( cur_logger = (logger || @logger ) )
cur_logger.info "Attempting to execute against an empty array of hosts (#{hosts}, filtered to #{block_hosts}), no execution will occur"
end
end
else
result = yield block_hosts
end
result
end
end
end
end
| 1 | 13,183 | Can we instead ensure that `opts[:run_in_parallel]` will always be a boolean? Otherwise we'll have to account for the case when it's a non-boolean value in multiple places, such as any/every other `if` statement. | voxpupuli-beaker | rb |
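
The reviewer above suggests normalizing `opts[:run_in_parallel]` to a boolean once, at the boundary, instead of handling truthy/falsy values in every later check. A minimal sketch of that idea, written in Python purely for illustration (the project code is Ruby; the helper names here are made up):

```python
# Illustrative only: coerce the option once so later checks see a real boolean
# instead of nil/None/strings/whatever the caller happened to pass.
def normalize_opts(opts):
    normalized = dict(opts or {})
    normalized["run_in_parallel"] = bool(normalized.get("run_in_parallel", False))
    return normalized

def run_block_on(hosts, opts=None):
    opts = normalize_opts(opts)
    if opts["run_in_parallel"]:  # guaranteed to be a plain bool here
        return [("parallel", h) for h in hosts]
    return [("serial", h) for h in hosts]

print(run_block_on(["host1", "host2"], {"run_in_parallel": "yes"}))
print(run_block_on(["host1", "host2"]))
```

Normalizing at the entry point keeps every downstream `if` trivially correct, which is the point the reviewer is making.
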
@@ -25,6 +25,11 @@ import (
"go.opentelemetry.io/otel/codes"
)
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
+
// NetAttributesFromHTTPRequest generates attributes of the net
// namespace as specified by the OpenTelemetry specification for a
// span. The network parameter is a string that net.Dial function | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semconv // import "go.opentelemetry.io/otel/semconv"
import (
"fmt"
"net"
"net/http"
"strconv"
"strings"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
)
// NetAttributesFromHTTPRequest generates attributes of the net
// namespace as specified by the OpenTelemetry specification for a
// span. The network parameter is a string that net.Dial function
// from standard library can understand.
func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
switch network {
case "tcp", "tcp4", "tcp6":
attrs = append(attrs, NetTransportTCP)
case "udp", "udp4", "udp6":
attrs = append(attrs, NetTransportUDP)
case "ip", "ip4", "ip6":
attrs = append(attrs, NetTransportIP)
case "unix", "unixgram", "unixpacket":
attrs = append(attrs, NetTransportUnix)
default:
attrs = append(attrs, NetTransportOther)
}
peerName, peerIP, peerPort := "", "", 0
{
hostPart := request.RemoteAddr
portPart := ""
if idx := strings.LastIndex(hostPart, ":"); idx >= 0 {
hostPart = request.RemoteAddr[:idx]
portPart = request.RemoteAddr[idx+1:]
}
if hostPart != "" {
if ip := net.ParseIP(hostPart); ip != nil {
peerIP = ip.String()
} else {
peerName = hostPart
}
if portPart != "" {
numPort, err := strconv.ParseUint(portPart, 10, 16)
if err == nil {
peerPort = (int)(numPort)
} else {
peerName, peerIP = "", ""
}
}
}
}
if peerName != "" {
attrs = append(attrs, NetPeerNameKey.String(peerName))
}
if peerIP != "" {
attrs = append(attrs, NetPeerIPKey.String(peerIP))
}
if peerPort != 0 {
attrs = append(attrs, NetPeerPortKey.Int(peerPort))
}
hostIP, hostName, hostPort := "", "", 0
for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} {
hostPart := ""
if idx := strings.LastIndex(someHost, ":"); idx >= 0 {
strPort := someHost[idx+1:]
numPort, err := strconv.ParseUint(strPort, 10, 16)
if err == nil {
hostPort = (int)(numPort)
}
hostPart = someHost[:idx]
} else {
hostPart = someHost
}
if hostPart != "" {
ip := net.ParseIP(hostPart)
if ip != nil {
hostIP = ip.String()
} else {
hostName = hostPart
}
break
} else {
hostPort = 0
}
}
if hostIP != "" {
attrs = append(attrs, NetHostIPKey.String(hostIP))
}
if hostName != "" {
attrs = append(attrs, NetHostNameKey.String(hostName))
}
if hostPort != 0 {
attrs = append(attrs, NetHostPortKey.Int(hostPort))
}
return attrs
}
// EndUserAttributesFromHTTPRequest generates attributes of the
// enduser namespace as specified by the OpenTelemetry specification
// for a span.
func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
if username, _, ok := request.BasicAuth(); ok {
return []attribute.KeyValue{EnduserIDKey.String(username)}
}
return nil
}
// HTTPClientAttributesFromHTTPRequest generates attributes of the
// http namespace as specified by the OpenTelemetry specification for
// a span on the client side.
func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
if request.Method != "" {
attrs = append(attrs, HTTPMethodKey.String(request.Method))
} else {
attrs = append(attrs, HTTPMethodKey.String(http.MethodGet))
}
attrs = append(attrs, HTTPURLKey.String(request.URL.String()))
return append(attrs, httpCommonAttributesFromHTTPRequest(request)...)
}
func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
if ua := request.UserAgent(); ua != "" {
attrs = append(attrs, HTTPUserAgentKey.String(ua))
}
if request.ContentLength > 0 {
attrs = append(attrs, HTTPRequestContentLengthKey.Int64(request.ContentLength))
}
return append(attrs, httpBasicAttributesFromHTTPRequest(request)...)
}
func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
// as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality
attrs := []attribute.KeyValue{}
if request.TLS != nil {
attrs = append(attrs, HTTPSchemeHTTPS)
} else {
attrs = append(attrs, HTTPSchemeHTTP)
}
if request.Host != "" {
attrs = append(attrs, HTTPHostKey.String(request.Host))
}
flavor := ""
if request.ProtoMajor == 1 {
flavor = fmt.Sprintf("1.%d", request.ProtoMinor)
} else if request.ProtoMajor == 2 {
flavor = "2"
}
if flavor != "" {
attrs = append(attrs, HTTPFlavorKey.String(flavor))
}
return attrs
}
// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
// to be used with server-side HTTP metrics.
func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{}
if serverName != "" {
attrs = append(attrs, HTTPServerNameKey.String(serverName))
}
return append(attrs, httpBasicAttributesFromHTTPRequest(request)...)
}
// HTTPServerAttributesFromHTTPRequest generates attributes of the
// http namespace as specified by the OpenTelemetry specification for
// a span on the server side. Currently, only basic authentication is
// supported.
func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
attrs := []attribute.KeyValue{
HTTPMethodKey.String(request.Method),
HTTPTargetKey.String(request.RequestURI),
}
if serverName != "" {
attrs = append(attrs, HTTPServerNameKey.String(serverName))
}
if route != "" {
attrs = append(attrs, HTTPRouteKey.String(route))
}
if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 {
attrs = append(attrs, HTTPClientIPKey.String(values[0]))
}
return append(attrs, httpCommonAttributesFromHTTPRequest(request)...)
}
// HTTPAttributesFromHTTPStatusCode generates attributes of the http
// namespace as specified by the OpenTelemetry specification for a
// span.
func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
attrs := []attribute.KeyValue{
HTTPStatusCodeKey.Int(code),
}
return attrs
}
type codeRange struct {
fromInclusive int
toInclusive int
}
func (r codeRange) contains(code int) bool {
return r.fromInclusive <= code && code <= r.toInclusive
}
var validRangesPerCategory = map[int][]codeRange{
1: {
{http.StatusContinue, http.StatusEarlyHints},
},
2: {
{http.StatusOK, http.StatusAlreadyReported},
{http.StatusIMUsed, http.StatusIMUsed},
},
3: {
{http.StatusMultipleChoices, http.StatusUseProxy},
{http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
},
4: {
{http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
{http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
{http.StatusPreconditionRequired, http.StatusTooManyRequests},
{http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
{http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
},
5: {
{http.StatusInternalServerError, http.StatusLoopDetected},
{http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
},
}
// SpanStatusFromHTTPStatusCode generates a status code and a message
// as specified by the OpenTelemetry specification for a span.
func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
spanCode, valid := validateHTTPStatusCode(code)
if !valid {
return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
}
return spanCode, ""
}
// Validates the HTTP status code and returns corresponding span status code.
// If the `code` is not a valid HTTP status code, returns span status Error
// and false.
func validateHTTPStatusCode(code int) (codes.Code, bool) {
category := code / 100
ranges, ok := validRangesPerCategory[category]
if !ok {
return codes.Error, false
}
ok = false
for _, crange := range ranges {
ok = crange.contains(code)
if ok {
break
}
}
if !ok {
return codes.Error, false
}
if category > 0 && category < 4 {
return codes.Unset, true
}
return codes.Error, true
}
| 1 | 15,313 | Is this just moved out of the other files? It doesn't seem like this was generated like the other files. | open-telemetry-opentelemetry-go | go |
@@ -0,0 +1,14 @@
+using System.IO;
+using System.Threading.Tasks;
+
+namespace HttpOverStream
+{
+ internal interface IHttpContent
+ {
+ long? Length { get; }
+
+ void CopyTo(Stream destination);
+
+ Task CopyToAsync(Stream destination);
+ }
+} | 1 | 1 | 18,322 | I started out doing everything `async`, but it was getting in the way of debugging, so I switched back to all synchronous until I got things working. We should probably move everything back to `async` and remove the synchronous versions. | DataDog-dd-trace-dotnet | .cs |
|
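
The reviewer's note above is about returning the content abstraction to async-only and removing the synchronous copy method. A rough sketch of what an async-only interface looks like, in Python purely for illustration (the real interface is C#; `BytesContent` is a made-up example implementation):

```python
import abc
import asyncio
import io

class HttpContent(abc.ABC):
    """Async-only content interface; there is no synchronous copy method."""

    @property
    @abc.abstractmethod
    def length(self):
        """Content length in bytes, or None when unknown."""

    @abc.abstractmethod
    async def copy_to_async(self, destination):
        """Write the whole body into the destination stream."""

class BytesContent(HttpContent):
    def __init__(self, data: bytes):
        self._data = data

    @property
    def length(self):
        return len(self._data)

    async def copy_to_async(self, destination):
        destination.write(self._data)

async def main():
    buf = io.BytesIO()
    await BytesContent(b"hello").copy_to_async(buf)
    print(buf.getvalue())  # b'hello'

asyncio.run(main())
```
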
@@ -98,7 +98,9 @@ namespace Nethermind.Mev.Test
protected override IBlockProducer CreateTestBlockProducer(TxPoolTxSource txPoolTxSource, ISealer sealer, ITransactionComparerProvider transactionComparerProvider)
{
MiningConfig miningConfig = new() {MinGasPrice = UInt256.One};
-
+
+ MevBlockProducerTransactionsExecutorFactory mevExecutorFactory = new(SpecProvider, LogManager);
+
BlockProducerEnvFactory blockProducerEnvFactory = new(
DbProvider,
BlockTree, | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection.Metadata;
using System.Threading;
using System.Threading.Tasks;
using FluentAssertions;
using Nethermind.Blockchain;
using Nethermind.Blockchain.Comparers;
using Nethermind.Blockchain.Processing;
using Nethermind.Blockchain.Producers;
using Nethermind.Blockchain.Rewards;
using Nethermind.Blockchain.Validators;
using Nethermind.Consensus;
using Nethermind.Consensus.Transactions;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Core.Specs;
using Nethermind.Core.Test;
using Nethermind.Core.Test.Blockchain;
using Nethermind.Core.Test.Builders;
using Nethermind.Facade;
using Nethermind.Int256;
using Nethermind.JsonRpc;
using Nethermind.JsonRpc.Modules.Eth;
using Nethermind.JsonRpc.Test.Modules;
using Nethermind.Logging;
using Nethermind.Merge.Plugin.Handlers;
using Nethermind.Merge.Plugin.Test;
using Nethermind.Mev.Data;
using Nethermind.Mev.Execution;
using Nethermind.Mev.Source;
using Nethermind.Runner.Ethereum;
using Nethermind.Serialization.Rlp;
using Nethermind.Specs;
using Nethermind.Specs.Forks;
using Nethermind.State;
using Newtonsoft.Json;
using NLog.Fluent;
using NSubstitute;
using Org.BouncyCastle.Asn1.Cms;
namespace Nethermind.Mev.Test
{
public partial class MevRpcModuleTests
{
public static Task<TestMevRpcBlockchain> CreateChain(int maxMergedBundles, IReleaseSpec? releaseSpec = null, UInt256? initialBaseFeePerGas = null)
{
TestMevRpcBlockchain testMevRpcBlockchain = new(maxMergedBundles, initialBaseFeePerGas);
TestSpecProvider testSpecProvider = releaseSpec is not null ? new TestSpecProvider(releaseSpec) : new TestSpecProvider(Berlin.Instance);
testSpecProvider.ChainId = 1;
return TestRpcBlockchain.ForTest(testMevRpcBlockchain).Build(testSpecProvider);
}
public class TestMevRpcBlockchain : TestRpcBlockchain
{
private readonly int _maxMergedBundles;
private ITracerFactory _tracerFactory = null!;
public TestBundlePool BundlePool { get; private set; } = null!;
public TestMevRpcBlockchain(int maxMergedBundles, UInt256? initialBaseFeePerGas)
{
_maxMergedBundles = maxMergedBundles;
Signer = new Eth2Signer(MinerAddress);
GenesisBlockBuilder = Core.Test.Builders.Build.A.Block.Genesis.Genesis
.WithTimestamp(UInt256.One)
.WithGasLimit(GasLimitCalculator.GasLimit)
.WithBaseFeePerGas(initialBaseFeePerGas ?? 0);
}
public IMevRpcModule MevRpcModule { get; set; } = Substitute.For<IMevRpcModule>();
public ManualGasLimitCalculator GasLimitCalculator = new() {GasLimit = 10_000_000};
private MevConfig _mevConfig = new MevConfig {Enabled = true};
public Address MinerAddress => TestItem.PrivateKeyD.Address;
private IBlockValidator BlockValidator { get; set; } = null!;
private ISigner Signer { get; }
public override ILogManager LogManager => NUnitLogManager.Instance;
protected override IBlockProducer CreateTestBlockProducer(TxPoolTxSource txPoolTxSource, ISealer sealer, ITransactionComparerProvider transactionComparerProvider)
{
MiningConfig miningConfig = new() {MinGasPrice = UInt256.One};
BlockProducerEnvFactory blockProducerEnvFactory = new(
DbProvider,
BlockTree,
ReadOnlyTrieStore,
SpecProvider,
BlockValidator,
NoBlockRewards.Instance,
ReceiptStorage,
BlockPreprocessorStep,
TxPool,
transactionComparerProvider,
miningConfig,
LogManager)
{
TransactionsExecutorFactory = new MevBlockProducerTransactionsExecutorFactory(SpecProvider, LogManager)
};
Eth2BlockProducer CreateEth2BlockProducer(IBlockProductionTrigger blockProductionTrigger, ITxSource? txSource = null) =>
new Eth2TestBlockProducerFactory(GasLimitCalculator, txSource).Create(
blockProducerEnvFactory,
BlockTree,
blockProductionTrigger,
SpecProvider,
Signer,
Timestamper,
miningConfig,
LogManager);
MevBlockProducer.MevBlockProducerInfo CreateProducer(int bundleLimit = 0, ITxSource? additionalTxSource = null)
{
bool BundleLimitTriggerCondition(BlockProductionEventArgs e)
{
BlockHeader? parent = BlockTree.GetProducedBlockParent(e.ParentHeader);
if (parent is not null)
{
IEnumerable<MevBundle> bundles = BundlePool.GetBundles(parent, Timestamper);
return bundles.Count() >= bundleLimit;
}
return false;
}
IManualBlockProductionTrigger manualTrigger = new BuildBlocksWhenRequested();
IBlockProductionTrigger trigger = manualTrigger;
if (bundleLimit != 0)
{
trigger = new TriggerWithCondition(manualTrigger, BundleLimitTriggerCondition);
}
IBlockProducer producer = CreateEth2BlockProducer(trigger, additionalTxSource);
return new MevBlockProducer.MevBlockProducerInfo(producer, manualTrigger, new BeneficiaryTracer());
}
List<MevBlockProducer.MevBlockProducerInfo> blockProducers =
new(_maxMergedBundles + 1);
// Add non-mev block
MevBlockProducer.MevBlockProducerInfo standardProducer = CreateProducer();
blockProducers.Add(standardProducer);
// Try blocks with all bundle numbers <= maxMergedBundles
for (int bundleLimit = 1; bundleLimit <= _maxMergedBundles; bundleLimit++)
{
BundleSelector bundleSelector = new(BundlePool, bundleLimit);
BundleTxSource bundleTxSource = new(bundleSelector, Timestamper);
MevBlockProducer.MevBlockProducerInfo bundleProducer = CreateProducer(bundleLimit, bundleTxSource);
blockProducers.Add(bundleProducer);
}
return new MevBlockProducer(BlockProductionTrigger, LogManager, blockProducers.ToArray());
}
protected override BlockProcessor CreateBlockProcessor()
{
BlockValidator = CreateBlockValidator();
BlockProcessor blockProcessor = new(
SpecProvider,
BlockValidator,
NoBlockRewards.Instance,
new BlockProcessor.BlockValidationTransactionsExecutor(TxProcessor, State),
State,
Storage,
ReceiptStorage,
NullWitnessCollector.Instance,
LogManager);
_tracerFactory = new TracerFactory(
DbProvider,
BlockTree,
ReadOnlyTrieStore,
BlockPreprocessorStep,
SpecProvider,
LogManager,
ProcessingOptions.ProducingBlock);
TxBundleSimulator txBundleSimulator = new(_tracerFactory, GasLimitCalculator, Timestamper, TxPool, SpecProvider, Signer);
BundlePool = new TestBundlePool(BlockTree, txBundleSimulator, Timestamper, new TxValidator(BlockTree.ChainId), SpecProvider, _mevConfig, LogManager);
return blockProcessor;
}
protected override async Task<TestBlockchain> Build(ISpecProvider specProvider = null, UInt256? initialValues = null)
{
TestBlockchain chain = await base.Build(specProvider, initialValues);
MevRpcModule = new MevRpcModule(new JsonRpcConfig(),
BundlePool,
BlockFinder,
StateReader,
_tracerFactory,
SpecProvider,
Signer);
return chain;
}
private IBlockValidator CreateBlockValidator()
{
HeaderValidator headerValidator = new(BlockTree, new Eth2SealEngine(Signer), SpecProvider, LogManager);
return new BlockValidator(
new TxValidator(SpecProvider.ChainId),
headerValidator,
Always.Valid,
SpecProvider,
LogManager);
}
protected override Task AddBlocksOnStart() => Task.CompletedTask;
public MevBundle SendBundle(int blockNumber, params BundleTransaction[] txs)
{
byte[][] bundleBytes = txs.Select(t => Rlp.Encode(t).Bytes).ToArray();
Keccak[] revertingTxHashes = txs.Where(t => t.CanRevert).Select(t => t.Hash!).ToArray();
MevBundleRpc mevBundleRpc = new() {BlockNumber = blockNumber, Txs = bundleBytes, RevertingTxHashes = revertingTxHashes};
ResultWrapper<bool> resultOfBundle = MevRpcModule.eth_sendBundle(mevBundleRpc);
resultOfBundle.GetResult().ResultType.Should().NotBe(ResultType.Failure);
resultOfBundle.GetData().Should().Be(true);
return new MevBundle(blockNumber, txs);
}
}
}
}
| 1 | 26,099 | this already looks too complicated... | NethermindEth-nethermind | .cs |
@@ -176,6 +176,18 @@ TEST(RocksEngineTest, OptionTest) {
engine->setDBOption("max_background_compactions", "bad_value"));
}
+TEST(RocksEngineTest, CompactTest) {
+ fs::TempDir rootPath("/tmp/rocksdb_compact_test.XXXXXX");
+ auto engine = std::make_unique<RocksEngine>(0, rootPath.path());
+ std::vector<KV> data;
+ for (int32_t i = 2; i < 8; i++) {
+ data.emplace_back(folly::stringPrintf("key_%d", i),
+ folly::stringPrintf("value_%d", i));
+ }
+ EXPECT_EQ(ResultCode::SUCCEEDED, engine->multiPut(std::move(data)));
+ EXPECT_EQ(ResultCode::SUCCEEDED, engine->compactAll());
+}
+
} // namespace kvstore
} // namespace nebula
| 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "base/Base.h"
#include <gtest/gtest.h>
#include <rocksdb/db.h>
#include <folly/lang/Bits.h>
#include "fs/TempDir.h"
#include "kvstore/RocksEngine.h"
namespace nebula {
namespace kvstore {
TEST(RocksEngineTest, SimpleTest) {
fs::TempDir rootPath("/tmp/rocksdb_engine_test.XXXXXX");
auto engine = std::make_unique<RocksEngine>(0, rootPath.path());
EXPECT_EQ(ResultCode::SUCCEEDED, engine->put("key", "val"));
std::string val;
EXPECT_EQ(ResultCode::SUCCEEDED, engine->get("key", &val));
EXPECT_EQ(val, "val");
}
TEST(RocksEngineTest, RangeTest) {
fs::TempDir rootPath("/tmp/rocksdb_engine_test.XXXXXX");
auto engine = std::make_unique<RocksEngine>(0, rootPath.path());
std::vector<KV> data;
for (int32_t i = 10; i < 20; i++) {
data.emplace_back(std::string(reinterpret_cast<const char*>(&i), sizeof(int32_t)),
folly::stringPrintf("val_%d", i));
}
EXPECT_EQ(ResultCode::SUCCEEDED, engine->multiPut(std::move(data)));
auto checkRange = [&](int32_t start, int32_t end,
int32_t expectedFrom, int32_t expectedTotal) {
LOG(INFO) << "start " << start << ", end " << end
<< ", expectedFrom " << expectedFrom << ", expectedTotal " << expectedTotal;
std::string s(reinterpret_cast<const char*>(&start), sizeof(int32_t));
std::string e(reinterpret_cast<const char*>(&end), sizeof(int32_t));
std::unique_ptr<KVIterator> iter;
EXPECT_EQ(ResultCode::SUCCEEDED, engine->range(s, e, &iter));
int num = 0;
while (iter->valid()) {
num++;
auto key = *reinterpret_cast<const int32_t*>(iter->key().data());
auto val = iter->val();
EXPECT_EQ(expectedFrom, key);
EXPECT_EQ(folly::stringPrintf("val_%d", expectedFrom), val);
expectedFrom++;
iter->next();
}
EXPECT_EQ(expectedTotal, num);
};
checkRange(10, 20, 10, 10);
checkRange(1, 50, 10, 10);
checkRange(15, 18, 15, 3);
checkRange(15, 23, 15, 5);
checkRange(1, 15, 10, 5);
}
TEST(RocksEngineTest, PrefixTest) {
fs::TempDir rootPath("/tmp/rocksdb_engine_test.XXXXXX");
auto engine = std::make_unique<RocksEngine>(0, rootPath.path());
LOG(INFO) << "Write data in batch and scan them...";
std::vector<KV> data;
for (int32_t i = 0; i < 10; i++) {
data.emplace_back(folly::stringPrintf("a_%d", i),
folly::stringPrintf("val_%d", i));
}
for (int32_t i = 10; i < 15; i++) {
data.emplace_back(folly::stringPrintf("b_%d", i),
folly::stringPrintf("val_%d", i));
}
for (int32_t i = 20; i < 40; i++) {
data.emplace_back(folly::stringPrintf("c_%d", i),
folly::stringPrintf("val_%d", i));
}
EXPECT_EQ(ResultCode::SUCCEEDED, engine->multiPut(std::move(data)));
auto checkPrefix = [&](const std::string& prefix,
int32_t expectedFrom, int32_t expectedTotal) {
LOG(INFO) << "prefix " << prefix
<< ", expectedFrom " << expectedFrom << ", expectedTotal " << expectedTotal;
std::unique_ptr<KVIterator> iter;
EXPECT_EQ(ResultCode::SUCCEEDED, engine->prefix(prefix, &iter));
int num = 0;
while (iter->valid()) {
num++;
auto key = iter->key();
auto val = iter->val();
EXPECT_EQ(folly::stringPrintf("%s_%d", prefix.c_str(), expectedFrom), key);
EXPECT_EQ(folly::stringPrintf("val_%d", expectedFrom), val);
expectedFrom++;
iter->next();
}
EXPECT_EQ(expectedTotal, num);
};
checkPrefix("a", 0, 10);
checkPrefix("b", 10, 5);
checkPrefix("c", 20, 20);
}
TEST(RocksEngineTest, RemoveTest) {
fs::TempDir rootPath("/tmp/rocksdb_engine_test.XXXXXX");
auto engine = std::make_unique<RocksEngine>(0, rootPath.path());
EXPECT_EQ(ResultCode::SUCCEEDED, engine->put("key", "val"));
std::string val;
EXPECT_EQ(ResultCode::SUCCEEDED, engine->get("key", &val));
EXPECT_EQ(val, "val");
EXPECT_EQ(ResultCode::SUCCEEDED, engine->remove("key"));
EXPECT_EQ(ResultCode::ERR_KEY_NOT_FOUND, engine->get("key", &val));
}
TEST(RocksEngineTest, RemoveRangeTest) {
fs::TempDir rootPath("/tmp/rocksdb_remove_range_test.XXXXXX");
auto engine = std::make_unique<RocksEngine>(0, rootPath.path());
for (int32_t i = 0; i < 100; i++) {
EXPECT_EQ(ResultCode::SUCCEEDED, engine->put(
std::string(reinterpret_cast<const char*>(&i), sizeof(int32_t)),
folly::stringPrintf("%d_val", i)));
std::string val;
EXPECT_EQ(ResultCode::SUCCEEDED, engine->get(
std::string(reinterpret_cast<const char*>(&i), sizeof(int32_t)),
&val));
EXPECT_EQ(val, folly::stringPrintf("%d_val", i));
}
{
int32_t s = 0, e = 50;
EXPECT_EQ(ResultCode::SUCCEEDED, engine->removeRange(
std::string(reinterpret_cast<const char*>(&s), sizeof(int32_t)),
std::string(reinterpret_cast<const char*>(&e), sizeof(int32_t))));
}
{
int32_t s = 0, e = 100;
std::unique_ptr<KVIterator> iter;
std::string start(reinterpret_cast<const char*>(&s), sizeof(int32_t));
std::string end(reinterpret_cast<const char*>(&e), sizeof(int32_t));
EXPECT_EQ(ResultCode::SUCCEEDED, engine->range(start, end, &iter));
int num = 0;
int expectedFrom = 50;
while (iter->valid()) {
num++;
auto key = *reinterpret_cast<const int32_t*>(iter->key().data());
auto val = iter->val();
EXPECT_EQ(expectedFrom, key);
EXPECT_EQ(folly::stringPrintf("%d_val", expectedFrom), val);
expectedFrom++;
iter->next();
}
EXPECT_EQ(50, num);
}
}
TEST(RocksEngineTest, OptionTest) {
fs::TempDir rootPath("/tmp/rocksdb_option_test.XXXXXX");
auto engine = std::make_unique<RocksEngine>(0, rootPath.path());
EXPECT_EQ(ResultCode::SUCCEEDED,
engine->setOption("disable_auto_compactions", "true"));
EXPECT_EQ(ResultCode::ERR_INVALID_ARGUMENT,
engine->setOption("disable_auto_compactions_", "true"));
EXPECT_EQ(ResultCode::ERR_INVALID_ARGUMENT,
engine->setOption("disable_auto_compactions", "bad_value"));
EXPECT_EQ(ResultCode::SUCCEEDED,
engine->setDBOption("max_background_compactions", "2"));
EXPECT_EQ(ResultCode::ERR_INVALID_ARGUMENT,
engine->setDBOption("max_background_compactions_", "2"));
EXPECT_EQ(ResultCode::SUCCEEDED,
engine->setDBOption("max_background_compactions", "2_"));
EXPECT_EQ(ResultCode::ERR_INVALID_ARGUMENT,
engine->setDBOption("max_background_compactions", "bad_value"));
}
} // namespace kvstore
} // namespace nebula
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
folly::init(&argc, &argv, true);
google::SetStderrLogging(google::INFO);
return RUN_ALL_TESTS();
}
| 1 | 16,183 | As for the tests, we'd better verify the actual effects of the compaction. Of course, you could do it in the future. | vesoft-inc-nebula | cpp |
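
The reviewer asks that the compaction test check observable effects rather than only the `SUCCEEDED` return codes. A sketch of the shape such a check could take, in Python purely for illustration; `FakeEngine` is a hypothetical in-memory stand-in, not the C++ `RocksEngine` under test:

```python
# Illustrative only: a fake engine that models puts, deletes with tombstones,
# and a compaction step that reclaims the tombstones.
class FakeEngine:
    def __init__(self):
        self._data = {}
        self._tombstones = set()

    def multi_put(self, pairs):
        self._data.update(pairs)

    def remove(self, key):
        self._data.pop(key, None)
        self._tombstones.add(key)   # deletions leave markers until compaction

    def compact_all(self):
        self._tombstones.clear()    # the fake models compaction dropping them

    def get(self, key):
        return self._data.get(key)

    def pending_tombstones(self):
        return len(self._tombstones)

def test_compact_reclaims_deleted_keys():
    engine = FakeEngine()
    engine.multi_put({f"key_{i}": f"value_{i}" for i in range(2, 8)})
    for i in range(2, 5):
        engine.remove(f"key_{i}")
    assert engine.pending_tombstones() == 3
    engine.compact_all()
    # Effects worth asserting: deleted keys stay gone, surviving keys stay
    # readable, and the deletion markers have actually been reclaimed.
    assert engine.get("key_2") is None
    assert engine.get("key_7") == "value_7"
    assert engine.pending_tombstones() == 0

test_compact_reclaims_deleted_keys()
```

In the real C++ test, comparable assertions could be made through the engine's get/range calls after deleting keys and calling compactAll().
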
@@ -85,7 +85,9 @@ public class Catalog implements AutoCloseable {
if (tableMap == null) {
tableMap = loadTables(db);
}
- return ImmutableList.copyOf(tableMap.values());
+ Collection<TiTableInfo> tables = tableMap.values();
+ tables.removeIf(TiTableInfo::isView);
+ return ImmutableList.copyOf(tables);
}
public TiTableInfo getTable(TiDBInfo db, String tableName) { | 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.catalog;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.pingcap.tikv.Snapshot;
import com.pingcap.tikv.meta.TiDBInfo;
import com.pingcap.tikv.meta.TiTableInfo;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.log4j.Logger;
public class Catalog implements AutoCloseable {
private Supplier<Snapshot> snapshotProvider;
private ScheduledExecutorService service;
private CatalogCache metaCache;
private final boolean showRowId;
private final String dbPrefix;
private final Logger logger = Logger.getLogger(this.getClass());
@Override
public void close() throws Exception {
if (service != null) {
service.shutdownNow();
service.awaitTermination(1, TimeUnit.SECONDS);
}
}
private static class CatalogCache {
private CatalogCache(CatalogTransaction transaction, String dbPrefix, boolean loadTables) {
this.transaction = transaction;
this.dbPrefix = dbPrefix;
this.tableCache = new ConcurrentHashMap<>();
this.dbCache = loadDatabases(loadTables);
this.currentVersion = transaction.getLatestSchemaVersion();
}
private final Map<String, TiDBInfo> dbCache;
private final ConcurrentHashMap<TiDBInfo, Map<String, TiTableInfo>> tableCache;
private CatalogTransaction transaction;
private long currentVersion;
private final String dbPrefix;
public CatalogTransaction getTransaction() {
return transaction;
}
public long getVersion() {
return currentVersion;
}
public TiDBInfo getDatabase(String name) {
Objects.requireNonNull(name, "name is null");
return dbCache.get(name.toLowerCase());
}
public List<TiDBInfo> listDatabases() {
return ImmutableList.copyOf(dbCache.values());
}
public List<TiTableInfo> listTables(TiDBInfo db) {
Map<String, TiTableInfo> tableMap = tableCache.get(db);
if (tableMap == null) {
tableMap = loadTables(db);
}
return ImmutableList.copyOf(tableMap.values());
}
public TiTableInfo getTable(TiDBInfo db, String tableName) {
Map<String, TiTableInfo> tableMap = tableCache.get(db);
if (tableMap == null) {
tableMap = loadTables(db);
}
return tableMap.get(tableName.toLowerCase());
}
private Map<String, TiTableInfo> loadTables(TiDBInfo db) {
List<TiTableInfo> tables = transaction.getTables(db.getId());
ImmutableMap.Builder<String, TiTableInfo> builder = ImmutableMap.builder();
for (TiTableInfo table : tables) {
builder.put(table.getName().toLowerCase(), table);
}
Map<String, TiTableInfo> tableMap = builder.build();
tableCache.put(db, tableMap);
return tableMap;
}
private Map<String, TiDBInfo> loadDatabases(boolean loadTables) {
HashMap<String, TiDBInfo> newDBCache = new HashMap<>();
List<TiDBInfo> databases = transaction.getDatabases();
databases.forEach(
db -> {
TiDBInfo newDBInfo = db.rename(dbPrefix + db.getName());
newDBCache.put(newDBInfo.getName().toLowerCase(), newDBInfo);
if (loadTables) {
loadTables(newDBInfo);
}
});
return newDBCache;
}
}
public Catalog(
Supplier<Snapshot> snapshotProvider,
int refreshPeriod,
TimeUnit periodUnit,
boolean showRowId,
String dbPrefix) {
this.snapshotProvider = Objects.requireNonNull(snapshotProvider, "Snapshot Provider is null");
this.showRowId = showRowId;
this.dbPrefix = dbPrefix;
metaCache = new CatalogCache(new CatalogTransaction(snapshotProvider.get()), dbPrefix, false);
service =
Executors.newSingleThreadScheduledExecutor(
new ThreadFactoryBuilder().setDaemon(true).build());
service.scheduleAtFixedRate(
() -> {
// Wrap this with a try catch block in case schedule update fails
try {
reloadCache(true);
} catch (Exception e) {
logger.warn("Reload Cache failed", e);
}
},
refreshPeriod,
refreshPeriod,
periodUnit);
}
/**
* read current row id from TiKV and write the calculated value back to TiKV. The calculation rule
* is start(read from TiKV) + step.
*/
public synchronized long getAutoTableId(long dbId, long tableId, long step) {
Snapshot snapshot = snapshotProvider.get();
CatalogTransaction newTrx = new CatalogTransaction(snapshot);
return newTrx.getAutoTableId(dbId, tableId, step);
}
/** read current row id from TiKV according to database id and table id. */
public synchronized long getAutoTableId(long dbId, long tableId) {
Snapshot snapshot = snapshotProvider.get();
CatalogTransaction newTrx = new CatalogTransaction(snapshot);
return newTrx.getAutoTableId(dbId, tableId);
}
public synchronized void reloadCache(boolean loadTables) {
Snapshot snapshot = snapshotProvider.get();
CatalogTransaction newTrx = new CatalogTransaction(snapshot);
long latestVersion = newTrx.getLatestSchemaVersion();
if (latestVersion > metaCache.getVersion()) {
metaCache = new CatalogCache(newTrx, dbPrefix, loadTables);
}
}
public void reloadCache() {
reloadCache(false);
}
public List<TiDBInfo> listDatabases() {
return metaCache.listDatabases();
}
public List<TiTableInfo> listTables(TiDBInfo database) {
Objects.requireNonNull(database, "database is null");
if (showRowId) {
return metaCache
.listTables(database)
.stream()
.map(TiTableInfo::copyTableWithRowId)
.collect(Collectors.toList());
} else {
return metaCache.listTables(database);
}
}
public TiDBInfo getDatabase(String dbName) {
Objects.requireNonNull(dbName, "dbName is null");
TiDBInfo dbInfo = metaCache.getDatabase(dbName);
if (dbInfo == null) {
// reload cache if database does not exist
reloadCache(true);
dbInfo = metaCache.getDatabase(dbName);
}
return dbInfo;
}
public TiTableInfo getTable(String dbName, String tableName) {
TiDBInfo database = getDatabase(dbName);
if (database == null) {
return null;
}
return getTable(database, tableName);
}
public TiTableInfo getTable(TiDBInfo database, String tableName) {
Objects.requireNonNull(database, "database is null");
Objects.requireNonNull(tableName, "tableName is null");
TiTableInfo table = metaCache.getTable(database, tableName);
if (table == null) {
// reload cache if table does not exist
reloadCache(true);
table = metaCache.getTable(database, tableName);
}
if (showRowId && table != null) {
return table.copyTableWithRowId();
} else {
return table;
}
}
@VisibleForTesting
public TiTableInfo getTable(TiDBInfo database, long tableId) {
Objects.requireNonNull(database, "database is null");
Collection<TiTableInfo> tables = listTables(database);
for (TiTableInfo table : tables) {
if (table.getId() == tableId) {
if (showRowId) {
return table.copyTableWithRowId();
} else {
return table;
}
}
}
return null;
}
}
| 1 | 10,736 | should we add a TODO here? | pingcap-tispark | java |
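
For context, the hunk in this row filters views out of the cached table listing before returning it. A rough sketch of that filtering idea, in Python purely for illustration; the project code is Java, and this sketch builds a filtered copy rather than mutating the cached collection, which is just one way to express the same filtering:

```python
# Illustrative only: return a filtered copy so the shared cache stays intact.
from types import SimpleNamespace

def list_tables(table_map):
    return [t for t in table_map.values() if not t.is_view]

cache = {
    "users":   SimpleNamespace(name="users",   is_view=False),
    "v_users": SimpleNamespace(name="v_users", is_view=True),
}
print([t.name for t in list_tables(cache)])   # ['users']
```
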
@@ -35,14 +35,12 @@ import { Component, render } from '@wordpress/element';
import { loadTranslations } from './util';
import './components/notifications';
import DashboardDetailsApp from './components/dashboard-details/dashboard-details-app';
-import ErrorHandler from './components/ErrorHandler';
+import Root from './components/root';
class GoogleSitekitDashboardDetails extends Component {
render() {
return (
- <ErrorHandler>
- <DashboardDetailsApp />
- </ErrorHandler>
+ <DashboardDetailsApp />
);
}
} | 1 | /**
* DashboardDetails component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint camelcase:[0] */
/**
* External dependencies
*/
import './modules';
/**
* WordPress dependencies
*/
import domReady from '@wordpress/dom-ready';
import { doAction } from '@wordpress/hooks';
import { Component, render } from '@wordpress/element';
/**
* Internal dependencies.
*/
import { loadTranslations } from './util';
import './components/notifications';
import DashboardDetailsApp from './components/dashboard-details/dashboard-details-app';
import ErrorHandler from './components/ErrorHandler';
class GoogleSitekitDashboardDetails extends Component {
render() {
return (
<ErrorHandler>
<DashboardDetailsApp />
</ErrorHandler>
);
}
}
// Initialize the app once the DOM is ready.
domReady( () => {
const renderTarget = document.getElementById( 'js-googlesitekit-dashboard-details' );
if ( renderTarget ) {
loadTranslations();
render( <GoogleSitekitDashboardDetails />, renderTarget );
/**
* Action triggered when the dashboard details App is loaded.
*/
doAction( 'googlesitekit.moduleLoaded', 'Dashboard' );
}
} );
| 1 | 28,986 | This can be inlined below as `GoogleSitekitDashboardDetails` is an unnecessary wrapper now. | google-site-kit-wp | js |
@@ -191,7 +191,7 @@ func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_CurrentWorkflow_Active_Clo
versionHistory := versionhistory.New([]byte("branch token"), []*historyspb.VersionHistoryItem{
{EventId: lastWorkflowTaskStartedEventID, Version: lastWorkflowTaskStartedVersion},
})
- histories := versionhistory.NewVHS(versionHistory)
+ histories := versionhistory.NewVersionHistories(versionHistory)
releaseCalled := false
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"context"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/pborman/uuid"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/api/serviceerror"
historyspb "go.temporal.io/server/api/history/v1"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/mocks"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/persistence/versionhistory"
"go.temporal.io/server/service/history/shard"
)
type (
nDCTransactionMgrSuite struct {
suite.Suite
*require.Assertions
controller *gomock.Controller
mockShard *shard.ContextTest
mockCreateMgr *MocknDCTransactionMgrForNewWorkflow
mockUpdateMgr *MocknDCTransactionMgrForExistingWorkflow
mockEventsReapplier *MocknDCEventsReapplier
mockWorkflowResetter *MockworkflowResetter
mockClusterMetadata *cluster.MockMetadata
mockExecutionMgr *mocks.ExecutionManager
logger log.Logger
namespaceEntry *cache.NamespaceCacheEntry
transactionMgr *nDCTransactionMgrImpl
}
)
func TestNDCTransactionMgrSuite(t *testing.T) {
s := new(nDCTransactionMgrSuite)
suite.Run(t, s)
}
func (s *nDCTransactionMgrSuite) SetupTest() {
s.Assertions = require.New(s.T())
s.controller = gomock.NewController(s.T())
s.mockCreateMgr = NewMocknDCTransactionMgrForNewWorkflow(s.controller)
s.mockUpdateMgr = NewMocknDCTransactionMgrForExistingWorkflow(s.controller)
s.mockEventsReapplier = NewMocknDCEventsReapplier(s.controller)
s.mockWorkflowResetter = NewMockworkflowResetter(s.controller)
s.mockShard = shard.NewTestContext(
s.controller,
&persistence.ShardInfoWithFailover{
ShardInfo: &persistencespb.ShardInfo{
ShardId: 10,
RangeId: 1,
TransferAckLevel: 0,
}},
NewDynamicConfigForTest(),
)
s.mockClusterMetadata = s.mockShard.Resource.ClusterMetadata
s.mockExecutionMgr = s.mockShard.Resource.ExecutionMgr
s.logger = s.mockShard.GetLogger()
s.namespaceEntry = testGlobalNamespaceEntry
s.transactionMgr = newNDCTransactionMgr(s.mockShard, newHistoryCache(s.mockShard), s.mockEventsReapplier, s.logger)
s.transactionMgr.createMgr = s.mockCreateMgr
s.transactionMgr.updateMgr = s.mockUpdateMgr
s.transactionMgr.workflowResetter = s.mockWorkflowResetter
}
func (s *nDCTransactionMgrSuite) TearDownTest() {
s.controller.Finish()
s.mockShard.Finish(s.T())
}
func (s *nDCTransactionMgrSuite) TestCreateWorkflow() {
ctx := context.Background()
now := time.Now().UTC()
targetWorkflow := NewMocknDCWorkflow(s.controller)
s.mockCreateMgr.EXPECT().dispatchForNewWorkflow(
ctx, now, targetWorkflow,
).Return(nil).Times(1)
err := s.transactionMgr.createWorkflow(ctx, now, targetWorkflow)
s.NoError(err)
}
func (s *nDCTransactionMgrSuite) TestUpdateWorkflow() {
ctx := context.Background()
now := time.Now().UTC()
isWorkflowRebuilt := true
targetWorkflow := NewMocknDCWorkflow(s.controller)
newWorkflow := NewMocknDCWorkflow(s.controller)
s.mockUpdateMgr.EXPECT().dispatchForExistingWorkflow(
ctx, now, isWorkflowRebuilt, targetWorkflow, newWorkflow,
).Return(nil).Times(1)
err := s.transactionMgr.updateWorkflow(ctx, now, isWorkflowRebuilt, targetWorkflow, newWorkflow)
s.NoError(err)
}
func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_CurrentWorkflow_Active_Open() {
ctx := context.Background()
now := time.Now().UTC()
releaseCalled := false
runID := uuid.New()
workflow := NewMocknDCWorkflow(s.controller)
weContext := NewMockworkflowExecutionContext(s.controller)
mutableState := NewMockmutableState(s.controller)
var releaseFn releaseWorkflowExecutionFunc = func(error) { releaseCalled = true }
workflowEvents := &persistence.WorkflowEvents{
Events: []*historypb.HistoryEvent{{EventId: 1}},
}
workflow.EXPECT().getContext().Return(weContext).AnyTimes()
workflow.EXPECT().getMutableState().Return(mutableState).AnyTimes()
workflow.EXPECT().getReleaseFn().Return(releaseFn).AnyTimes()
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.GetFailoverVersion()).Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockEventsReapplier.EXPECT().reapplyEvents(ctx, mutableState, workflowEvents.Events, runID).Return(workflowEvents.Events, nil).Times(1)
mutableState.EXPECT().IsCurrentWorkflowGuaranteed().Return(true).AnyTimes()
mutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
mutableState.EXPECT().GetNamespaceEntry().Return(s.namespaceEntry).AnyTimes()
mutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{RunId: runID}).Times(1)
weContext.EXPECT().persistNonFirstWorkflowEvents(workflowEvents).Return(int64(0), nil).Times(1)
weContext.EXPECT().updateWorkflowExecutionWithNew(
now, persistence.UpdateWorkflowModeUpdateCurrent, nil, nil, transactionPolicyActive, (*transactionPolicy)(nil),
).Return(nil).Times(1)
err := s.transactionMgr.backfillWorkflow(ctx, now, workflow, workflowEvents)
s.NoError(err)
s.True(releaseCalled)
}
func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_CurrentWorkflow_Active_Closed() {
ctx := context.Background()
now := time.Now().UTC()
namespaceID := "some random namespace ID"
workflowID := "some random workflow ID"
runID := "some random run ID"
lastWorkflowTaskStartedEventID := int64(9999)
nextEventID := lastWorkflowTaskStartedEventID * 2
lastWorkflowTaskStartedVersion := s.namespaceEntry.GetFailoverVersion()
versionHistory := versionhistory.New([]byte("branch token"), []*historyspb.VersionHistoryItem{
{EventId: lastWorkflowTaskStartedEventID, Version: lastWorkflowTaskStartedVersion},
})
histories := versionhistory.NewVHS(versionHistory)
releaseCalled := false
workflow := NewMocknDCWorkflow(s.controller)
weContext := NewMockworkflowExecutionContext(s.controller)
mutableState := NewMockmutableState(s.controller)
var releaseFn releaseWorkflowExecutionFunc = func(error) { releaseCalled = true }
workflowEvents := &persistence.WorkflowEvents{}
workflow.EXPECT().getContext().Return(weContext).AnyTimes()
workflow.EXPECT().getMutableState().Return(mutableState).AnyTimes()
workflow.EXPECT().getReleaseFn().Return(releaseFn).AnyTimes()
s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.GetFailoverVersion()).Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes()
mutableState.EXPECT().IsCurrentWorkflowGuaranteed().Return(false).AnyTimes()
mutableState.EXPECT().IsWorkflowExecutionRunning().Return(false).AnyTimes()
mutableState.EXPECT().GetNamespaceEntry().Return(s.namespaceEntry).AnyTimes()
mutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{
NamespaceId: namespaceID,
WorkflowId: workflowID,
VersionHistories: histories,
}).AnyTimes()
mutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{
RunId: runID,
}).AnyTimes()
mutableState.EXPECT().GetNextEventID().Return(nextEventID).AnyTimes()
mutableState.EXPECT().GetPreviousStartedEventID().Return(lastWorkflowTaskStartedEventID).Times(1)
s.mockWorkflowResetter.EXPECT().resetWorkflow(
ctx,
namespaceID,
workflowID,
runID,
versionHistory.GetBranchToken(),
lastWorkflowTaskStartedEventID,
lastWorkflowTaskStartedVersion,
nextEventID,
gomock.Any(),
gomock.Any(),
workflow,
eventsReapplicationResetWorkflowReason,
workflowEvents.Events,
).Return(nil).Times(1)
s.mockExecutionMgr.On("GetCurrentExecution", &persistence.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: workflowID,
}).Return(&persistence.GetCurrentExecutionResponse{RunID: runID}, nil).Once()
weContext.EXPECT().persistNonFirstWorkflowEvents(workflowEvents).Return(int64(0), nil).Times(1)
weContext.EXPECT().updateWorkflowExecutionWithNew(
now, persistence.UpdateWorkflowModeBypassCurrent, nil, nil, transactionPolicyPassive, (*transactionPolicy)(nil),
).Return(nil).Times(1)
err := s.transactionMgr.backfillWorkflow(ctx, now, workflow, workflowEvents)
s.NoError(err)
s.True(releaseCalled)
}
func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_CurrentWorkflow_Passive_Open() {
ctx := context.Background()
now := time.Now().UTC()
releaseCalled := false
workflow := NewMocknDCWorkflow(s.controller)
weContext := NewMockworkflowExecutionContext(s.controller)
mutableState := NewMockmutableState(s.controller)
var releaseFn releaseWorkflowExecutionFunc = func(error) { releaseCalled = true }
workflowEvents := &persistence.WorkflowEvents{
Events: []*historypb.HistoryEvent{{EventId: 1}},
}
workflow.EXPECT().getContext().Return(weContext).AnyTimes()
workflow.EXPECT().getMutableState().Return(mutableState).AnyTimes()
workflow.EXPECT().getReleaseFn().Return(releaseFn).AnyTimes()
s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.GetFailoverVersion()).Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestAlternativeClusterName).AnyTimes()
mutableState.EXPECT().IsCurrentWorkflowGuaranteed().Return(true).AnyTimes()
mutableState.EXPECT().IsWorkflowExecutionRunning().Return(true).AnyTimes()
mutableState.EXPECT().GetNamespaceEntry().Return(s.namespaceEntry).AnyTimes()
weContext.EXPECT().reapplyEvents([]*persistence.WorkflowEvents{workflowEvents}).Times(1)
weContext.EXPECT().persistNonFirstWorkflowEvents(workflowEvents).Return(int64(0), nil).Times(1)
weContext.EXPECT().updateWorkflowExecutionWithNew(
now, persistence.UpdateWorkflowModeUpdateCurrent, nil, nil, transactionPolicyPassive, (*transactionPolicy)(nil),
).Return(nil).Times(1)
err := s.transactionMgr.backfillWorkflow(ctx, now, workflow, workflowEvents)
s.NoError(err)
s.True(releaseCalled)
}
func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_CurrentWorkflow_Passive_Closed() {
ctx := context.Background()
now := time.Now().UTC()
namespaceID := "some random namespace ID"
workflowID := "some random workflow ID"
runID := "some random run ID"
releaseCalled := false
workflow := NewMocknDCWorkflow(s.controller)
weContext := NewMockworkflowExecutionContext(s.controller)
mutableState := NewMockmutableState(s.controller)
var releaseFn releaseWorkflowExecutionFunc = func(error) { releaseCalled = true }
workflowEvents := &persistence.WorkflowEvents{}
workflow.EXPECT().getContext().Return(weContext).AnyTimes()
workflow.EXPECT().getMutableState().Return(mutableState).AnyTimes()
workflow.EXPECT().getReleaseFn().Return(releaseFn).AnyTimes()
s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.GetFailoverVersion()).Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestAlternativeClusterName).AnyTimes()
mutableState.EXPECT().IsCurrentWorkflowGuaranteed().Return(false).AnyTimes()
mutableState.EXPECT().IsWorkflowExecutionRunning().Return(false).AnyTimes()
mutableState.EXPECT().GetNamespaceEntry().Return(s.namespaceEntry).AnyTimes()
mutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{
NamespaceId: namespaceID,
WorkflowId: workflowID,
}).AnyTimes()
mutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{
RunId: runID,
}).AnyTimes()
s.mockExecutionMgr.On("GetCurrentExecution", &persistence.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: workflowID,
}).Return(&persistence.GetCurrentExecutionResponse{RunID: runID}, nil).Once()
weContext.EXPECT().reapplyEvents([]*persistence.WorkflowEvents{workflowEvents}).Times(1)
weContext.EXPECT().persistNonFirstWorkflowEvents(workflowEvents).Return(int64(0), nil).Times(1)
weContext.EXPECT().updateWorkflowExecutionWithNew(
now, persistence.UpdateWorkflowModeUpdateCurrent, nil, nil, transactionPolicyPassive, (*transactionPolicy)(nil),
).Return(nil).Times(1)
err := s.transactionMgr.backfillWorkflow(ctx, now, workflow, workflowEvents)
s.NoError(err)
s.True(releaseCalled)
}
func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_NotCurrentWorkflow_Active() {
ctx := context.Background()
now := time.Now().UTC()
namespaceID := "some random namespace ID"
workflowID := "some random workflow ID"
runID := "some random run ID"
currentRunID := "other random run ID"
releaseCalled := false
workflow := NewMocknDCWorkflow(s.controller)
weContext := NewMockworkflowExecutionContext(s.controller)
mutableState := NewMockmutableState(s.controller)
var releaseFn releaseWorkflowExecutionFunc = func(error) { releaseCalled = true }
workflowEvents := &persistence.WorkflowEvents{
Events: []*historypb.HistoryEvent{{
EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED,
}},
NamespaceID: namespaceID,
WorkflowID: workflowID,
}
workflow.EXPECT().getContext().Return(weContext).AnyTimes()
workflow.EXPECT().getMutableState().Return(mutableState).AnyTimes()
workflow.EXPECT().getReleaseFn().Return(releaseFn).AnyTimes()
s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.GetFailoverVersion()).Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestCurrentClusterName).AnyTimes()
mutableState.EXPECT().IsCurrentWorkflowGuaranteed().Return(false).AnyTimes()
mutableState.EXPECT().IsWorkflowExecutionRunning().Return(false).AnyTimes()
mutableState.EXPECT().GetNamespaceEntry().Return(s.namespaceEntry).AnyTimes()
mutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{
NamespaceId: namespaceID,
WorkflowId: workflowID,
}).AnyTimes()
mutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{
RunId: runID,
}).AnyTimes()
s.mockExecutionMgr.On("GetCurrentExecution", &persistence.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: workflowID,
}).Return(&persistence.GetCurrentExecutionResponse{RunID: currentRunID}, nil).Once()
weContext.EXPECT().reapplyEvents([]*persistence.WorkflowEvents{workflowEvents}).Times(1)
weContext.EXPECT().persistNonFirstWorkflowEvents(workflowEvents).Return(int64(0), nil).Times(1)
weContext.EXPECT().updateWorkflowExecutionWithNew(
now, persistence.UpdateWorkflowModeBypassCurrent, nil, nil, transactionPolicyPassive, (*transactionPolicy)(nil),
).Return(nil).Times(1)
err := s.transactionMgr.backfillWorkflow(ctx, now, workflow, workflowEvents)
s.NoError(err)
s.True(releaseCalled)
}
func (s *nDCTransactionMgrSuite) TestBackfillWorkflow_NotCurrentWorkflow_Passive() {
ctx := context.Background()
now := time.Now().UTC()
namespaceID := "some random namespace ID"
workflowID := "some random workflow ID"
runID := "some random run ID"
currentRunID := "other random run ID"
releaseCalled := false
workflow := NewMocknDCWorkflow(s.controller)
weContext := NewMockworkflowExecutionContext(s.controller)
mutableState := NewMockmutableState(s.controller)
var releaseFn releaseWorkflowExecutionFunc = func(error) { releaseCalled = true }
workflowEvents := &persistence.WorkflowEvents{
Events: []*historypb.HistoryEvent{{
EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED,
}},
NamespaceID: namespaceID,
WorkflowID: workflowID,
}
workflow.EXPECT().getContext().Return(weContext).AnyTimes()
workflow.EXPECT().getMutableState().Return(mutableState).AnyTimes()
workflow.EXPECT().getReleaseFn().Return(releaseFn).AnyTimes()
s.mockClusterMetadata.EXPECT().ClusterNameForFailoverVersion(s.namespaceEntry.GetFailoverVersion()).Return(cluster.TestCurrentClusterName).AnyTimes()
s.mockClusterMetadata.EXPECT().GetCurrentClusterName().Return(cluster.TestAlternativeClusterName).AnyTimes()
mutableState.EXPECT().IsCurrentWorkflowGuaranteed().Return(false).AnyTimes()
mutableState.EXPECT().IsWorkflowExecutionRunning().Return(false).AnyTimes()
mutableState.EXPECT().GetNamespaceEntry().Return(s.namespaceEntry).AnyTimes()
mutableState.EXPECT().GetExecutionInfo().Return(&persistencespb.WorkflowExecutionInfo{
NamespaceId: namespaceID,
WorkflowId: workflowID,
}).AnyTimes()
mutableState.EXPECT().GetExecutionState().Return(&persistencespb.WorkflowExecutionState{
RunId: runID,
}).AnyTimes()
s.mockExecutionMgr.On("GetCurrentExecution", &persistence.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: workflowID,
}).Return(&persistence.GetCurrentExecutionResponse{RunID: currentRunID}, nil).Once()
weContext.EXPECT().reapplyEvents([]*persistence.WorkflowEvents{workflowEvents}).Times(1)
weContext.EXPECT().persistNonFirstWorkflowEvents(workflowEvents).Return(int64(0), nil).Times(1)
weContext.EXPECT().updateWorkflowExecutionWithNew(
now, persistence.UpdateWorkflowModeBypassCurrent, nil, nil, transactionPolicyPassive, (*transactionPolicy)(nil),
).Return(nil).Times(1)
err := s.transactionMgr.backfillWorkflow(ctx, now, workflow, workflowEvents)
s.NoError(err)
s.True(releaseCalled)
}
func (s *nDCTransactionMgrSuite) TestCheckWorkflowExists_DoesNotExists() {
ctx := context.Background()
namespaceID := "some random namespace ID"
workflowID := "some random workflow ID"
runID := "some random run ID"
s.mockExecutionMgr.On("GetWorkflowExecution", &persistence.GetWorkflowExecutionRequest{
NamespaceID: namespaceID,
Execution: commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: runID,
},
}).Return(nil, serviceerror.NewNotFound("")).Once()
exists, err := s.transactionMgr.checkWorkflowExists(ctx, namespaceID, workflowID, runID)
s.NoError(err)
s.False(exists)
}
func (s *nDCTransactionMgrSuite) TestCheckWorkflowExists_DoesExists() {
ctx := context.Background()
namespaceID := "some random namespace ID"
workflowID := "some random workflow ID"
runID := "some random run ID"
s.mockExecutionMgr.On("GetWorkflowExecution", &persistence.GetWorkflowExecutionRequest{
NamespaceID: namespaceID,
Execution: commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: runID,
},
}).Return(&persistence.GetWorkflowExecutionResponse{}, nil).Once()
exists, err := s.transactionMgr.checkWorkflowExists(ctx, namespaceID, workflowID, runID)
s.NoError(err)
s.True(exists)
}
func (s *nDCTransactionMgrSuite) TestGetWorkflowCurrentRunID_Missing() {
ctx := context.Background()
namespaceID := "some random namespace ID"
workflowID := "some random workflow ID"
s.mockExecutionMgr.On("GetCurrentExecution", &persistence.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: workflowID,
}).Return(nil, serviceerror.NewNotFound("")).Once()
currentRunID, err := s.transactionMgr.getCurrentWorkflowRunID(ctx, namespaceID, workflowID)
s.NoError(err)
s.Equal("", currentRunID)
}
func (s *nDCTransactionMgrSuite) TestGetWorkflowCurrentRunID_Exists() {
ctx := context.Background()
namespaceID := "some random namespace ID"
workflowID := "some random workflow ID"
runID := "some random run ID"
s.mockExecutionMgr.On("GetCurrentExecution", &persistence.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: workflowID,
}).Return(&persistence.GetCurrentExecutionResponse{RunID: runID}, nil).Once()
currentRunID, err := s.transactionMgr.getCurrentWorkflowRunID(ctx, namespaceID, workflowID)
s.NoError(err)
s.Equal(runID, currentRunID)
}
| 1 | 10,609 | NewVHS -> NewVersionHistories; this one looks better | temporalio-temporal | go |
@@ -36,10 +36,17 @@ module Selenium
SOCKET_LOCK_TIMEOUT = 45
STOP_TIMEOUT = 20
+ @executable = nil
+ @missing = ''
+
+ class << self
+ attr_accessor :executable, :missing_text
+ end
+
attr_accessor :host
def initialize(executable_path, port, *extra_args)
- @executable_path = executable_path
+ @executable_path = binary_path(executable_path)
@host = Platform.localhost
@port = Integer(port)
@extra_args = extra_args | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
#
# Base class implementing default behavior of service object,
# responsible for starting and stopping driver implementations.
#
# Subclasses must implement the following private methods:
# * #start_process
# * #stop_server
# * #cannot_connect_error_text
#
# @api private
#
class Service
START_TIMEOUT = 20
SOCKET_LOCK_TIMEOUT = 45
STOP_TIMEOUT = 20
attr_accessor :host
def initialize(executable_path, port, *extra_args)
@executable_path = executable_path
@host = Platform.localhost
@port = Integer(port)
@extra_args = extra_args
raise Error::WebDriverError, "invalid port: #{@port}" if @port < 1
end
def start
if process_running?
raise "already started: #{uri.inspect} #{@executable_path.inspect}"
end
Platform.exit_hook { stop } # make sure we don't leave the server running
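        # Hold the socket lock so parallel processes don't race for the same port while starting the server.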
socket_lock.locked do
find_free_port
start_process
connect_until_stable
end
end
def stop
stop_server
@process.poll_for_exit STOP_TIMEOUT
rescue ChildProcess::TimeoutError
ensure
stop_process
end
def uri
@uri ||= URI.parse("http://#{@host}:#{@port}")
end
private
def connect_to_server
Net::HTTP.start(@host, @port) do |http|
http.open_timeout = STOP_TIMEOUT / 2
http.read_timeout = STOP_TIMEOUT / 2
yield http
end
end
def find_free_port
@port = PortProber.above(@port)
end
def start_process
raise NotImplementedError, 'subclass responsibility'
end
def stop_process
return if process_exited?
@process.stop STOP_TIMEOUT
end
def stop_server
connect_to_server { |http| http.get('/shutdown') }
end
def process_running?
@process && @process.alive?
end
def process_exited?
@process.nil? || @process.exited?
end
def connect_until_stable
socket_poller = SocketPoller.new @host, @port, START_TIMEOUT
return if socket_poller.connected?
raise Error::WebDriverError, cannot_connect_error_text
end
def cannot_connect_error_text
raise NotImplementedError, 'subclass responsibility'
end
def socket_lock
@socket_lock ||= SocketLock.new(@port - 1, SOCKET_LOCK_TIMEOUT)
end
end # Service
end # WebDriver
end # Selenium
| 1 | 13,879 | Is this `@missing_text` ? | SeleniumHQ-selenium | java |
@@ -25,6 +25,7 @@ CONFIG_PATH = BASE_PATH / 'config.yml'
OPEN_DATA_URL = "https://open.quiltdata.com"
PACKAGE_NAME_FORMAT = r"[\w-]+/[\w-]+$"
+SUBPACKAGE_NAME_FORMAT = r"([\w-]+/[\w-]+)(?:/(.+))?$"
## CONFIG_TEMPLATE
# Must contain every permitted config key, as well as their default values (which can be 'null'/None). | 1 | import re
from collections import OrderedDict
from collections.abc import Mapping, Sequence, Set
import datetime
import json
import os
import pathlib
from urllib.parse import parse_qs, quote, unquote, urlencode, urlparse, urlunparse
from urllib.request import pathname2url, url2pathname
import warnings
# Third-Party
import ruamel.yaml
from appdirs import user_cache_dir, user_data_dir
import requests
APP_NAME = "Quilt"
APP_AUTHOR = "QuiltData"
BASE_DIR = user_data_dir(APP_NAME, APP_AUTHOR)
BASE_PATH = pathlib.Path(BASE_DIR)
CACHE_PATH = pathlib.Path(user_cache_dir(APP_NAME, APP_AUTHOR)) / "v0"
TEMPFILE_DIR_PATH = BASE_PATH / "tempfiles"
CONFIG_PATH = BASE_PATH / 'config.yml'
OPEN_DATA_URL = "https://open.quiltdata.com"
PACKAGE_NAME_FORMAT = r"[\w-]+/[\w-]+$"
## CONFIG_TEMPLATE
# Must contain every permitted config key, as well as their default values (which can be 'null'/None).
# Comments are retained and added to local config, unless overridden by autoconfig via `api.config(<url>)`
CONFIG_TEMPLATE = """
# Quilt3 configuration file
# navigator_url: <url string, default: null>
#
# Used for autoconfiguration
# navigator_url: https://example.com
navigator_url:
# default_local_registry: <url string, default: local appdirs>
# default target registry for operations like install and build
default_local_registry: "{}"
# default_remote_registry: <url string, default: null>
# default target for operations like push and browse
default_remote_registry:
# default_install_location: <url string, default: null>
# default filesystem target for the install operation
default_install_location:
# Identity service URL
registryUrl:
# Disable anonymous usage metrics
telemetry_disabled: false
# S3 Proxy
s3Proxy:
# API Gateway endpoint (e.g., for search)
apiGatewayEndpoint:
# Binary API Gateway endpoint (e.g., for preview)
binaryApiGatewayEndpoint:
""".format(BASE_PATH.as_uri() + '/packages')
class QuiltException(Exception):
def __init__(self, message, **kwargs):
# We use NewError("Prefix: " + str(error)) a lot.
# To be consistent across Python 2.7 and 3.x:
# 1) This `super` call must exist, or 2.7 will have no text for str(error)
# 2) This `super` call must have only one argument (the message) or str(error) will be a repr of args
super(QuiltException, self).__init__(message)
self.message = message
for k, v in kwargs.items():
setattr(self, k, v)
class PhysicalKey(object):
__slots__ = ['bucket', 'path', 'version_id']
def __init__(self, bucket, path, version_id):
"""
For internal use only; call from_path or from_url instead.
"""
assert bucket is None or isinstance(bucket, str)
assert isinstance(path, str)
assert version_id is None or isinstance(version_id, str)
if bucket is None:
assert path is not None, "Local keys must have a path"
assert version_id is None, "Local keys cannot have a version ID"
if os.name == 'nt':
assert '\\' not in path, "Paths must use / as a separator"
else:
assert not path.startswith('/'), "S3 paths must not start with '/'"
self.bucket = bucket
self.path = path
self.version_id = version_id
@classmethod
def from_url(cls, url):
parsed = urlparse(url)
if parsed.scheme == 's3':
if not parsed.netloc:
raise ValueError("Missing bucket")
bucket = parsed.netloc
assert not parsed.path or parsed.path.startswith('/')
path = unquote(parsed.path)[1:]
# Parse the version ID the way the Java SDK does:
# https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-s3/src/main/java/com/amazonaws/services/s3/AmazonS3URI.java#L192
query = parse_qs(parsed.query)
version_id = query.pop('versionId', [None])[0]
if query:
raise ValueError(f"Unexpected S3 query string: {parsed.query!r}")
return cls(bucket, path, version_id)
elif parsed.scheme == 'file':
if parsed.netloc not in ('', 'localhost'):
raise ValueError("Unexpected hostname")
if not parsed.path:
raise ValueError("Missing path")
if not parsed.path.startswith('/'):
raise ValueError("Relative paths are not allowed")
if parsed.query:
raise ValueError("Unexpected query")
path = url2pathname(parsed.path)
if parsed.path.endswith('/') and not path.endswith(os.path.sep):
# On Windows, url2pathname loses the trailing `/`.
path += os.path.sep
return cls.from_path(path)
else:
raise ValueError(f"Unexpected scheme: {parsed.scheme!r}")
@classmethod
def from_path(cls, path):
path = os.fspath(path)
new_path = os.path.realpath(path)
# Use '/' as the path separator.
if os.path.sep != '/':
new_path = new_path.replace(os.path.sep, '/')
# Add back a trailing '/' if the original path has it.
if (path.endswith(os.path.sep) or
(os.path.altsep is not None and path.endswith(os.path.altsep))):
new_path += '/'
return cls(None, new_path, None)
def is_local(self):
return self.bucket is None
def join(self, rel_path):
if self.version_id is not None:
raise ValueError('Cannot append paths to URLs with a version ID')
if os.name == 'nt' and '\\' in rel_path:
raise ValueError("Paths must use / as a separator")
if self.path:
new_path = self.path.rstrip('/') + '/' + rel_path.lstrip('/')
else:
new_path = rel_path.lstrip('/')
return PhysicalKey(self.bucket, new_path, None)
def basename(self):
return self.path.rsplit('/', 1)[-1]
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.bucket == other.bucket and
self.path == other.path and
self.version_id == other.version_id
)
def __repr__(self):
return f'{self.__class__.__name__}({self.bucket!r}, {self.path!r}, {self.version_id!r})'
def __str__(self):
if self.bucket is None:
return urlunparse(('file', '', pathname2url(self.path.replace('/', os.path.sep)), None, None, None))
else:
if self.version_id is None:
params = {}
else:
params = {'versionId': self.version_id}
return urlunparse(('s3', self.bucket, quote(self.path), None, urlencode(params), None))
def fix_url(url):
"""Convert non-URL paths to file:// URLs"""
# If it has a scheme, we assume it's a URL.
# On Windows, we ignore schemes that look like drive letters, e.g. C:/users/foo
if not url:
raise ValueError("Empty URL")
url = str(url)
parsed = urlparse(url)
if parsed.scheme and not os.path.splitdrive(url)[0]:
return url
# `expanduser()` expands any leading "~" or "~user" path components, as a user convenience
# `resolve()` _tries_ to make the URI absolute - but doesn't guarantee anything.
# In particular, on Windows, non-existent files won't be resolved.
# `absolute()` makes the URI absolute, though it can still contain '..'
fixed_url = pathlib.Path(url).expanduser().resolve().absolute().as_uri()
# pathlib likes to remove trailing slashes, so add it back if needed.
if url[-1:] in (os.sep, os.altsep) and not fixed_url.endswith('/'):
fixed_url += '/'
return fixed_url
def extract_file_extension(file_path_or_url):
"""
Extract the file extension if it exists.
Args:
        file_path_or_url: The path to the file. Type can be anything that pathlib.Path understands.
Returns:
File extension without the period, i.e. ("txt" not ".txt"). None if the path does not have an extension.
"""
p = pathlib.Path(file_path_or_url)
if len(p.suffix) > 0:
return p.suffix[1:]
else:
return None
def read_yaml(yaml_stream):
yaml = ruamel.yaml.YAML()
try:
return yaml.load(yaml_stream)
except ruamel.yaml.parser.ParserError as error:
raise QuiltException(str(error), original_error=error)
def write_yaml(data, yaml_path, keep_backup=False):
"""Write `data` to `yaml_path`
:param data: Any yaml-serializable data
:param yaml_path: Destination. Can be a string or pathlib path.
:param keep_backup: If set, a timestamped backup will be kept in the same dir.
"""
yaml = ruamel.yaml.YAML()
path = pathlib.Path(yaml_path)
now = str(datetime.datetime.now())
# XXX unicode colon for Windows/NTFS -- looks prettier, but could be confusing. We could use '_' instead.
if os.name == 'nt':
now = now.replace(':', '\ua789')
backup_path = path.with_name(path.name + '.backup.' + now)
try:
if path.exists():
path.rename(backup_path)
if not path.parent.exists():
path.parent.mkdir(parents=True)
with path.open('w') as config_file:
yaml.dump(data, config_file)
except Exception: #! intentionally wide catch -- reraised immediately.
if backup_path.exists():
if path.exists():
path.unlink()
backup_path.rename(path)
raise
if backup_path.exists() and not keep_backup:
backup_path.unlink()
def yaml_has_comments(parsed):
"""Determine if parsed YAML data has comments.
Any object can be given, but only objects based on `ruamel.yaml`'s
`CommentedBase` class can be True.
:returns: True if object has retained comments, False otherwise
"""
# Is this even a parse result object that stores comments?
if not isinstance(parsed, ruamel.yaml.comments.CommentedBase):
return False
# Are there comments on this object?
if parsed.ca.items or parsed.ca.comment or parsed.ca.end:
return True
# Is this a container that might have values with comments?
values = ()
if isinstance(parsed, (Sequence, Set)):
values = parsed
if isinstance(parsed, Mapping):
values = parsed.values()
# If so, do any of them have comments?
for value in values:
if yaml_has_comments(value):
return True
# no comments found.
return False
def validate_url(url):
"""A URL must have scheme and host, at minimum."""
parsed_url = urlparse(url)
    # require scheme and host at minimum, like 'http://foo'
if not all((parsed_url.scheme, parsed_url.netloc)):
raise QuiltException("Invalid URL -- Requires at least scheme and host: {}".format(url))
try:
parsed_url.port
except ValueError:
raise QuiltException("Invalid URL -- Port must be a number: {}".format(url))
# Although displaying the config may seem not to warrant a class, it's pretty important
# for good UX. A lot of points were considered in making this -- retaining order,
# user's usage in an interpreted environment like Jupyter, and keeping the displayed
# information concise. Given the limitations of the other options, making a class with
# custom repr panned out to be the best (and shortest) option.
class QuiltConfig(OrderedDict):
def __init__(self, filepath, *args, **kwargs):
self.filepath = pathlib.Path(filepath)
super(QuiltConfig, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
# Per chat in #engineering 4-5-19, strip navigator_url of trailing slash.
# Ideally, we should do that kind of thing in one cohesive spot.
# This is a good spot.
if key == 'navigator_url' and value:
if not isinstance(value, str):
raise ValueError("Expected a string for config key {!r}, but got {!r}"
.format(key, value))
value = value.strip().rstrip('/')
# Similar activity, moved from api.config() to here.
if isinstance(key, str) and key.endswith('_url'):
if value:
validate_url(value)
super().__setitem__(key, value)
# TODO: Make an _html_repr_ for nicer Notebook display
def __repr__(self):
return "<{} at {!r} {}>".format(type(self).__name__, str(self.filepath), json.dumps(self, indent=4))
def validate_package_name(name):
""" Verify that a package name is two alphanumeric strings separated by a slash."""
if not re.match(PACKAGE_NAME_FORMAT, name):
raise QuiltException(f"Invalid package name: {name}.")
def get_package_registry(path=None):
""" Returns the package registry root for a given path """
if path is None:
path = get_from_config('default_local_registry')
return path.rstrip('/') + '/.quilt'
def configure_from_url(catalog_url):
""" Read configuration settings from a Quilt catalog """
config_template = read_yaml(CONFIG_TEMPLATE)
# Clean up and validate catalog url
catalog_url = catalog_url.rstrip('/')
validate_url(catalog_url)
# Get the new config
config_url = catalog_url + '/config.json'
response = requests.get(config_url)
if not response.ok:
message = "An HTTP Error ({code}) occurred: {reason}"
raise QuiltException(
message.format(code=response.status_code, reason=response.reason),
response=response
)
# QuiltConfig may perform some validation and value scrubbing.
new_config = QuiltConfig('', response.json())
# 'navigator_url' needs to be renamed, the term is outdated.
if not new_config.get('navigator_url'):
new_config['navigator_url'] = catalog_url
# Use our template + their configured values, keeping our comments.
for key, value in new_config.items():
        if key not in config_template:
continue
config_template[key] = value
write_yaml(config_template, CONFIG_PATH, keep_backup=True)
return config_template
def config_exists():
"""
Returns True if a config file (config.yml) is installed.
"""
return CONFIG_PATH.exists()
def user_is_configured_to_custom_stack():
"""Look at the users stack to see if they have configured to their own stack. There is currently no way to
distinguish between someone who has not configured their stack and someone who has intentionally configured
their stack to use open.quiltdata.com"""
configured_nav_url = get_from_config("navigator_url")
return configured_nav_url is not None and configured_nav_url != OPEN_DATA_URL
def configure_from_default():
"""
Try to configure to the default (public) Quilt stack.
If reading from the public stack fails, warn the user
and save an empty template.
"""
try:
local_config = configure_from_url(OPEN_DATA_URL)
except requests.exceptions.ConnectionError:
msg = f"Failed to connect to {OPEN_DATA_URL}."
msg += "Some features will not work without a"
msg += "valid configuration."
warnings.warn(msg)
config_template = read_yaml(CONFIG_TEMPLATE)
write_yaml(config_template, CONFIG_PATH, keep_backup=True)
local_config = config_template
return local_config
def load_config():
"""
Read the local config using defaults from CONFIG_TEMPLATE.
"""
local_config = read_yaml(CONFIG_TEMPLATE)
if CONFIG_PATH.exists():
local_config.update(read_yaml(CONFIG_PATH))
return local_config
def get_from_config(key):
return load_config().get(key)
def get_install_location():
loc = get_from_config('default_install_location')
if loc is None:
loc = get_from_config('default_local_registry').rstrip('/')
return loc
def set_config_value(key, value):
# Use local configuration (or defaults)
local_config = load_config()
local_config[key] = value
write_yaml(local_config, CONFIG_PATH)
def quiltignore_filter(paths, ignore, url_scheme):
"""Given a list of paths, filter out the paths which are captured by the
given ignore rules.
Args:
paths (list): a list or iterable of paths
ignore (path): a path to the file defining ignore rules, in Unix shell
style wildcard format
url_scheme (str): the URL scheme, only the "file" scheme is currently
supported
"""
ignore_rules = ignore.read_text('utf-8').split("\n")
ignore_rules = ['*/' + rule for rule in ignore_rules if rule]
if url_scheme == 'file':
from fnmatch import fnmatch
files, dirs = set(), set()
for path in paths:
if path.is_file():
files.add(path)
else:
dirs.add(path)
filtered_dirs = dirs.copy()
for ignore_rule in ignore_rules:
for pkg_dir in filtered_dirs.copy():
# copy git behavior --- git matches paths and directories equivalently.
# e.g. both foo and foo/ will match the ignore rule "foo"
# but only foo/ will match the ignore rule "foo/"
if fnmatch(pkg_dir.as_posix() + "/", ignore_rule) or fnmatch(pkg_dir.as_posix(), ignore_rule):
files = set(n for n in files if pkg_dir not in n.parents)
dirs = dirs - {pkg_dir}
files = set(n for n in files if not fnmatch(n, ignore_rule))
return files.union(dirs)
else:
raise NotImplementedError
def validate_key(key):
"""
Verify that a file path or S3 path does not contain any '.' or '..' separators or files.
"""
if key is None or key == '':
raise QuiltException(
f"Invalid key {key!r}. A package entry key cannot be empty."
)
for part in key.split('/'):
if part in ('', '.', '..'):
raise QuiltException(
f"Invalid key {key!r}. "
f"A package entry key cannot contain a file or folder named '.' or '..' in its path."
)
def catalog_s3_url(catalog_url, s3_url):
"""
Generate a URL to the Quilt catalog page for an object in S3
"""
if s3_url is None:
return catalog_url
pk = PhysicalKey.from_url(s3_url)
if pk.is_local():
raise QuiltException("Not an S3 URL")
url = f"{catalog_url}/b/{quote(pk.bucket)}"
if pk.path:
url += f"/tree/{quote(pk.path)}"
# Ignore version_id if path is empty (e.g., s3://<bucket>)
if pk.version_id is not None:
params = {'version': pk.version_id}
url += f"?{urlencode(params)}"
return url
def catalog_package_url(catalog_url, bucket, package_name, package_timestamp="latest"):
"""
Generate a URL to the Quilt catalog page of a package. By default will go to the latest version of the package,
but the user can pass in the appropriate timestamp to go to a different version.
Note: There is currently no good way to generate the URL given a specific tophash
"""
assert bucket is not None, "The bucket parameter must not be None"
assert package_name is not None, "The package_name parameter must not be None"
validate_package_name(package_name)
return f"{catalog_url}/b/{bucket}/packages/{package_name}/tree/{package_timestamp}"
| 1 | 18,376 | Minor suggestion, but wouldn't it be cleaner to simply replace PACKAGE_NAME_FORMAT to allow the optional path, then check that the path is empty in validate_package_name? We might also want a helper function to pull out the package name and sub-package path. | quiltdata-quilt | py
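A minimal sketch of the helper suggested above (the name split_package_name is hypothetical and the pattern is the SUBPACKAGE_NAME_FORMAT constant from the diff; an illustration, not part of the quilt API):

import re

SUBPACKAGE_NAME_FORMAT = r"([\w-]+/[\w-]+)(?:/(.+))?$"

def split_package_name(name):
    # Split 'user/pkg' or 'user/pkg/sub/path' into (package_name, subpackage_path).
    # subpackage_path is None when only a bare package name is given.
    match = re.match(SUBPACKAGE_NAME_FORMAT, name)
    if not match:
        raise ValueError(f"Invalid package name: {name}.")
    return match.group(1), match.group(2)

validate_package_name could then call this helper and reject any name whose sub-package part is not None.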
@@ -56,9 +56,9 @@ module.exports = class FileCard extends Component {
{this.props.fileCardFor &&
<div style="width: 100%; height: 100%;">
<div class="uppy-DashboardContent-bar">
- <h2 class="uppy-DashboardContent-title">Editing <span class="uppy-DashboardContent-titleFile">{file.meta ? file.meta.name : file.name}</span></h2>
- <button class="uppy-DashboardContent-back" type="button" title="Finish editing file"
- onclick={this.handleClick}>Done</button>
+ <h2 class="uppy-DashboardContent-title">{this.props.i18n('editing')} <span class="uppy-DashboardContent-titleFile">{file.meta ? file.meta.name : file.name}</span></h2>
+ <button class="uppy-DashboardContent-back" type="button" title={this.props.i18n('finishEditingFile')}
+ onclick={this.handleClick}>{this.props.i18n('done')}</button>
</div>
<div class="uppy-DashboardFileCard-inner">
<div class="uppy-DashboardFileCard-preview" style={{ backgroundColor: getFileTypeIcon(file.type).color }}> | 1 | const getFileTypeIcon = require('./getFileTypeIcon')
const { checkIcon } = require('./icons')
const { h, Component } = require('preact')
module.exports = class FileCard extends Component {
constructor (props) {
super(props)
this.meta = {}
this.tempStoreMetaOrSubmit = this.tempStoreMetaOrSubmit.bind(this)
this.renderMetaFields = this.renderMetaFields.bind(this)
this.handleClick = this.handleClick.bind(this)
}
tempStoreMetaOrSubmit (ev) {
const file = this.props.files[this.props.fileCardFor]
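    // Enter (keyCode 13) saves the edited metadata right away; any other key just stores the field value locally.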
if (ev.keyCode === 13) {
ev.stopPropagation()
ev.preventDefault()
this.props.fileCardDone(this.meta, file.id)
return
}
const value = ev.target.value
const name = ev.target.dataset.name
this.meta[name] = value
}
renderMetaFields (file) {
const metaFields = this.props.metaFields || []
return metaFields.map((field) => {
return <fieldset class="uppy-DashboardFileCard-fieldset">
<label class="uppy-DashboardFileCard-label">{field.name}</label>
<input class="uppy-DashboardFileCard-input"
type="text"
data-name={field.id}
value={file.meta[field.id]}
placeholder={field.placeholder}
onkeyup={this.tempStoreMetaOrSubmit}
onkeydown={this.tempStoreMetaOrSubmit}
onkeypress={this.tempStoreMetaOrSubmit} /></fieldset>
})
}
handleClick (ev) {
const file = this.props.files[this.props.fileCardFor]
this.props.fileCardDone(this.meta, file.id)
}
render () {
const file = this.props.files[this.props.fileCardFor]
return <div class="uppy-DashboardFileCard" aria-hidden={!this.props.fileCardFor}>
{this.props.fileCardFor &&
<div style="width: 100%; height: 100%;">
<div class="uppy-DashboardContent-bar">
<h2 class="uppy-DashboardContent-title">Editing <span class="uppy-DashboardContent-titleFile">{file.meta ? file.meta.name : file.name}</span></h2>
<button class="uppy-DashboardContent-back" type="button" title="Finish editing file"
onclick={this.handleClick}>Done</button>
</div>
<div class="uppy-DashboardFileCard-inner">
<div class="uppy-DashboardFileCard-preview" style={{ backgroundColor: getFileTypeIcon(file.type).color }}>
{file.preview
? <img alt={file.name} src={file.preview} />
: <div class="uppy-DashboardItem-previewIconWrap">
<span class="uppy-DashboardItem-previewIcon" style={{ color: getFileTypeIcon(file.type).color }}>{getFileTypeIcon(file.type).icon}</span>
<svg class="uppy-DashboardItem-previewIconBg" width="72" height="93" viewBox="0 0 72 93"><g><path d="M24.08 5h38.922A2.997 2.997 0 0 1 66 8.003v74.994A2.997 2.997 0 0 1 63.004 86H8.996A2.998 2.998 0 0 1 6 83.01V22.234L24.08 5z" fill="#FFF" /><path d="M24 5L6 22.248h15.007A2.995 2.995 0 0 0 24 19.244V5z" fill="#E4E4E4" /></g></svg>
</div>
}
</div>
<div class="uppy-DashboardFileCard-info">
<fieldset class="uppy-DashboardFileCard-fieldset">
<label class="uppy-DashboardFileCard-label">Name</label>
<input class="uppy-DashboardFileCard-input"
type="text"
data-name="name"
value={file.meta.name || ''}
placeholder="name"
onkeyup={this.tempStoreMetaOrSubmit}
onkeydown={this.tempStoreMetaOrSubmit}
onkeypress={this.tempStoreMetaOrSubmit} />
</fieldset>
{this.renderMetaFields(file)}
</div>
</div>
<div class="uppy-Dashboard-actions">
<button class="UppyButton--circular UppyButton--blue uppy-DashboardFileCard-done"
type="button"
title="Finish editing file"
onclick={this.handleClick}>{checkIcon()}</button>
</div>
</div>
}
</div>
}
}
| 1 | 10,448 | We should have a way to insert components into an i18n string, so that we could define a translation string like `'%{filename} is being edited'` and inject the filename `<span/>` at the right place. I imagine there must be a language where the `Editing` text should come after the file name. We can think about that later, though. | transloadit-uppy | js
@@ -254,6 +254,9 @@ fpga_result enum_thermalmgmt_metrics(fpga_metric_vector *vector,
for (i = 0; i < pglob.gl_pathc; i++) {
+ if (!pglob.gl_pathv)
+ continue;
+
char *dir_name = strrchr(pglob.gl_pathv[i], '/');
if (!dir_name) | 1 | // Copyright(c) 2018-2019, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/**
* \file metrics_utils.c
* \brief fpga metrics utils functions
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif // HAVE_CONFIG_H
#include <glob.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <dirent.h>
#include <uuid/uuid.h>
#include <dlfcn.h>
#include "common_int.h"
#include "metrics_int.h"
#include "types_int.h"
#include "opae/metrics.h"
#include "metrics/vector.h"
#include "xfpga.h"
#include "metrics/bmc/bmc.h"
#include "safe_string/safe_string.h"
#include "metrics/metrics_metadata.h"
#include "mcp_metadata.h"
#include "metrics_max10.h"
fpga_result metric_sysfs_path_is_dir(const char *path)
{
struct stat astats;
if (path == NULL) {
return FPGA_INVALID_PARAM;
}
if ((stat(path, &astats)) != 0) {
return FPGA_NOT_FOUND;
}
if (S_ISDIR(astats.st_mode)) {
return FPGA_OK;
}
return FPGA_NOT_FOUND;
}
fpga_result metric_sysfs_path_is_file(const char *path)
{
struct stat astats;
if (path == NULL) {
return FPGA_INVALID_PARAM;
}
if ((stat(path, &astats)) != 0) {
return FPGA_NOT_FOUND;
}
if (S_ISREG(astats.st_mode)) {
return FPGA_OK;
}
return FPGA_NOT_FOUND;
}
// Adds Metrics info to vector
fpga_result add_metric_vector(fpga_metric_vector *vector,
uint64_t metric_num,
const char *qualifier_name,
const char *group_name,
const char *group_sysfs,
const char *metric_name,
const char *metric_sysfs,
const char *metric_units,
enum fpga_metric_datatype metric_datatype,
enum fpga_metric_type metric_type,
enum fpga_hw_type hw_type,
uint64_t mmio_offset)
{
fpga_result result = FPGA_OK;
struct _fpga_enum_metric *fpga_enum_metric = NULL;
errno_t e = 0;
if (vector == NULL ||
group_name == NULL ||
group_sysfs == NULL ||
metric_name == NULL ||
metric_sysfs == NULL ||
qualifier_name == NULL ||
metric_units == NULL) {
FPGA_ERR("Invalid Input parameters");
return FPGA_INVALID_PARAM;
}
fpga_enum_metric = (struct _fpga_enum_metric *)malloc(sizeof(struct _fpga_enum_metric));
if (fpga_enum_metric == NULL) {
FPGA_ERR("Failed to allocate memory");
return FPGA_NO_MEMORY;
}
e = strncpy_s(fpga_enum_metric->group_name, sizeof(fpga_enum_metric->group_name),
group_name, SYSFS_PATH_MAX);
if (EOK != e)
goto out_free;
e = strncpy_s(fpga_enum_metric->group_sysfs, sizeof(fpga_enum_metric->group_sysfs),
group_sysfs, SYSFS_PATH_MAX);
if (EOK != e)
goto out_free;
e = strncpy_s(fpga_enum_metric->metric_name, sizeof(fpga_enum_metric->metric_name),
metric_name, SYSFS_PATH_MAX);
if (EOK != e)
goto out_free;
e = strncpy_s(fpga_enum_metric->metric_sysfs, sizeof(fpga_enum_metric->metric_sysfs),
metric_sysfs, SYSFS_PATH_MAX);
if (EOK != e)
goto out_free;
e = strncpy_s(fpga_enum_metric->qualifier_name, sizeof(fpga_enum_metric->qualifier_name),
qualifier_name, SYSFS_PATH_MAX);
if (EOK != e)
goto out_free;
e = strncpy_s(fpga_enum_metric->metric_units, sizeof(fpga_enum_metric->metric_units),
metric_units, SYSFS_PATH_MAX);
if (EOK != e)
goto out_free;
fpga_enum_metric->metric_type = metric_type;
fpga_enum_metric->metric_datatype = metric_datatype;
fpga_enum_metric->hw_type = hw_type;
fpga_enum_metric->metric_num = metric_num;
fpga_enum_metric->mmio_offset = mmio_offset;
fpga_vector_push(vector, fpga_enum_metric);
return result;
out_free:
free(fpga_enum_metric);
return FPGA_INVALID_PARAM;
}
fpga_result get_metric_data_info(const char *group_name,
const char *metric_name,
				fpga_metric_metadata *metric_data_search,
uint64_t size,
fpga_metric_metadata *metric_data)
{
fpga_result result = FPGA_OK;
uint64_t i = 0;
int group_indicator = 0;
int metric_indicator = 0;
if (group_name == NULL ||
metric_name == NULL ||
		metric_data_search == NULL ||
metric_data == NULL) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
for (i = 0; i < size; i++) {
		strcasecmp_s(metric_data_search[i].group_name, sizeof(metric_data_search[i].group_name),
group_name, &group_indicator);
		strcasecmp_s(metric_data_search[i].metric_name, sizeof(metric_data_search[i].metric_name),
metric_name, &metric_indicator);
if (group_indicator == 0 &&
metric_indicator == 0) {
			*metric_data = (struct fpga_metric_metadata)metric_data_search[i];
return result;
}
}
return FPGA_NOT_SUPPORTED;
}
// enumerates thermal metrics info
fpga_result enum_thermalmgmt_metrics(fpga_metric_vector *vector,
uint64_t *metric_num,
const char *sysfspath,
enum fpga_hw_type hw_type)
{
fpga_result result = FPGA_OK;
fpga_metric_metadata metric_data;
size_t i = 0;
glob_t pglob;
memset_s(&metric_data, sizeof(metric_data), 0);
if (vector == NULL ||
sysfspath == NULL ||
metric_num == NULL) {
FPGA_ERR("Invalid Input parameters");
return FPGA_INVALID_PARAM;
}
int gres = glob(sysfspath, GLOB_NOSORT, NULL, &pglob);
if (gres) {
FPGA_ERR("Failed pattern match %s: %s", sysfspath, strerror(errno));
//TODO refactor to common function
switch (gres) {
case GLOB_NOSPACE:
result = FPGA_NO_MEMORY;
break;
case GLOB_NOMATCH:
result = FPGA_NOT_FOUND;
break;
default:
result = FPGA_EXCEPTION;
}
if (pglob.gl_pathv) {
globfree(&pglob);
}
return result;
}
for (i = 0; i < pglob.gl_pathc; i++) {
char *dir_name = strrchr(pglob.gl_pathv[i], '/');
if (!dir_name)
continue;
if (!strcmp((dir_name + 1), REVISION))
continue;
result = get_metric_data_info(THERLGMT, (dir_name + 1), mcp_metric_metadata, MCP_MDATA_SIZE, &metric_data);
if (result != FPGA_OK) {
FPGA_MSG("Failed to get metric metadata ");
}
result = add_metric_vector(vector, *metric_num, THERLGMT, THERLGMT, sysfspath, (dir_name + 1), pglob.gl_pathv[i], metric_data.metric_units,
FPGA_METRIC_DATATYPE_INT, FPGA_METRIC_TYPE_THERMAL, hw_type, 0);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metrics");
if (pglob.gl_pathv) {
globfree(&pglob);
}
return result;
}
*metric_num = *metric_num + 1;
}
if (pglob.gl_pathv) {
globfree(&pglob);
}
return result;
}
// enumerates power metrics info
fpga_result enum_powermgmt_metrics(fpga_metric_vector *vector,
uint64_t *metric_num,
const char *sysfspath,
enum fpga_hw_type hw_type)
{
fpga_result result = FPGA_OK;
size_t i = 0;
fpga_metric_metadata metric_data;
glob_t pglob;
memset_s(&metric_data, sizeof(metric_data), 0);
if (vector == NULL ||
sysfspath == NULL ||
metric_num == NULL) {
FPGA_ERR("Invalid Input parameters");
return FPGA_INVALID_PARAM;
}
int gres = glob(sysfspath, GLOB_NOSORT, NULL, &pglob);
if (gres) {
FPGA_ERR("Failed pattern match %s: %s", sysfspath, strerror(errno));
//TODO refactor to common function
switch (gres) {
case GLOB_NOSPACE:
result = FPGA_NO_MEMORY;
break;
case GLOB_NOMATCH:
result = FPGA_NOT_FOUND;
break;
default:
result = FPGA_EXCEPTION;
}
if (pglob.gl_pathv) {
globfree(&pglob);
}
return result;
}
for (i = 0; i < pglob.gl_pathc; i++) {
char *dir_name = strrchr(pglob.gl_pathv[i], '/');
if (!dir_name)
continue;
if (!strcmp((dir_name + 1), REVISION))
continue;
result = get_metric_data_info(PWRMGMT, (dir_name + 1), mcp_metric_metadata, MCP_MDATA_SIZE, &metric_data);
if (result != FPGA_OK) {
FPGA_MSG("Failed to get metric metadata ");
}
result = add_metric_vector(vector, *metric_num, PWRMGMT, PWRMGMT, sysfspath, (dir_name + 1), pglob.gl_pathv[i], metric_data.metric_units,
FPGA_METRIC_DATATYPE_INT, FPGA_METRIC_TYPE_POWER, hw_type, 0);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metrics");
if (pglob.gl_pathv) {
globfree(&pglob);
}
return result;
}
*metric_num = *metric_num + 1;
}
if (pglob.gl_pathv) {
globfree(&pglob);
}
return result;
}
// enumerates performance counters metrics info
fpga_result enum_perf_counter_items(fpga_metric_vector *vector,
uint64_t *metric_num,
const char *qualifier_name,
const char *sysfspath,
const char *sysfs_name,
enum fpga_metric_type metric_type,
enum fpga_hw_type hw_type)
{
fpga_result result = FPGA_OK;
DIR *dir = NULL;
struct dirent *dirent = NULL;
char sysfs_path[SYSFS_PATH_MAX] = { 0 };
char metric_sysfs[SYSFS_PATH_MAX] = { 0 };
char qname[SYSFS_PATH_MAX] = { 0 };
if (vector == NULL ||
sysfspath == NULL ||
sysfs_name == NULL ||
qualifier_name == NULL ||
metric_num == NULL) {
FPGA_ERR("Invalid Input parameters");
return FPGA_INVALID_PARAM;
}
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", sysfspath, sysfs_name);
dir = opendir(sysfs_path);
if (NULL == dir) {
FPGA_MSG("can't find dir %s ", strerror(errno));
return FPGA_NOT_FOUND;
}
while ((dirent = readdir(dir)) != NULL) {
if (!strcmp(dirent->d_name, "."))
continue;
if (!strcmp(dirent->d_name, ".."))
continue;
if (!strcmp(dirent->d_name, PERF_ENABLE))
continue;
if (!strcmp(dirent->d_name, PERF_FREEZE))
continue;
if (dirent->d_type == DT_DIR) {
snprintf_s_ss(qname, sizeof(qname), "%s:%s", qualifier_name, dirent->d_name);
result = enum_perf_counter_items(vector, metric_num, qname, sysfs_path, dirent->d_name, metric_type, hw_type);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metrics");
}
continue;
}
snprintf_s_ss(metric_sysfs, sizeof(metric_sysfs), "%s/%s", sysfs_path, dirent->d_name);
result = add_metric_vector(vector, *metric_num, qualifier_name, "performance", sysfs_path, dirent->d_name,
metric_sysfs, "", FPGA_METRIC_DATATYPE_INT, metric_type, hw_type, 0);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metrics");
closedir(dir);
return result;
}
*metric_num = *metric_num + 1;
}
closedir(dir);
return result;
}
// enumerates performance counters metrics info
fpga_result enum_perf_counter_metrics(fpga_metric_vector *vector,
uint64_t *metric_num,
const char *sysfspath,
enum fpga_hw_type hw_type)
{
fpga_result result = FPGA_OK;
DIR *dir = NULL;
struct dirent *dirent = NULL;
char sysfs_path[SYSFS_PATH_MAX] = { 0 };
char qualifier_name[SYSFS_PATH_MAX] = { 0 };
glob_t pglob;
if (vector == NULL ||
sysfspath == NULL ||
metric_num == NULL) {
FPGA_ERR("Invalid Input parameters");
return FPGA_INVALID_PARAM;
}
int gres = glob(sysfspath, GLOB_NOSORT, NULL, &pglob);
if (gres) {
FPGA_ERR("Failed pattern match %s: %s", sysfspath, strerror(errno));
if (pglob.gl_pathv) {
globfree(&pglob);
}
return FPGA_NOT_FOUND;
}
snprintf_s_s(sysfs_path, sizeof(sysfs_path), "%s", pglob.gl_pathv[0]);
globfree(&pglob);
dir = opendir(sysfs_path);
if (NULL == dir) {
FPGA_MSG("can't find dirt %s ", strerror(errno));
return FPGA_NOT_FOUND;
}
while ((dirent = readdir(dir)) != NULL) {
if (!strcmp(dirent->d_name, "."))
continue;
if (!strcmp(dirent->d_name, ".."))
continue;
if (!strcmp(dirent->d_name, REVISION))
continue;
if (strcmp(dirent->d_name, PERF_CACHE) == 0) {
snprintf_s_ss(qualifier_name, sizeof(qualifier_name), "%s:%s", PERFORMANCE, PERF_CACHE);
result = enum_perf_counter_items(vector, metric_num, qualifier_name, sysfs_path, dirent->d_name, FPGA_METRIC_TYPE_PERFORMANCE_CTR, hw_type);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metrics");
}
}
if (strcmp(dirent->d_name, PERF_FABRIC) == 0) {
snprintf_s_ss(qualifier_name, sizeof(qualifier_name), "%s:%s", PERFORMANCE, PERF_FABRIC);
result = enum_perf_counter_items(vector, metric_num, qualifier_name, sysfs_path, dirent->d_name, FPGA_METRIC_TYPE_PERFORMANCE_CTR, hw_type);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metrics");
}
}
if (strcmp(dirent->d_name, PERF_IOMMU) == 0) {
snprintf_s_ss(qualifier_name, sizeof(qualifier_name), "%s:%s", PERFORMANCE, PERF_IOMMU);
result = enum_perf_counter_items(vector, metric_num, qualifier_name, sysfs_path, dirent->d_name, FPGA_METRIC_TYPE_PERFORMANCE_CTR, hw_type);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metrics");
}
}
}
closedir(dir);
return result;
}
fpga_result xfpga_bmcLoadSDRs(struct _fpga_handle *_handle,
bmc_sdr_handle *records,
uint32_t *num_sensors)
{
fpga_result result = FPGA_NOT_FOUND;
fpga_result(*bmcLoadSDRs)(fpga_token token, bmc_sdr_handle *records,
uint32_t *num_sensors);
if (_handle->bmc_handle != NULL) {
bmcLoadSDRs = dlsym(_handle->bmc_handle, "bmcLoadSDRs");
if (bmcLoadSDRs)
result = bmcLoadSDRs(_handle->token, records, num_sensors);
else
result = FPGA_EXCEPTION;
}
return result;
}
fpga_result xfpga_bmcDestroySDRs(struct _fpga_handle *_handle,
bmc_sdr_handle *records)
{
fpga_result result = FPGA_NOT_FOUND;
fpga_result(*bmcDestroySDRs)(bmc_sdr_handle *records);
if (_handle->bmc_handle != NULL) {
bmcDestroySDRs = dlsym(_handle->bmc_handle, "bmcDestroySDRs");
if (bmcDestroySDRs)
result = bmcDestroySDRs(records);
else
result = FPGA_EXCEPTION;
}
return result;
}
fpga_result xfpga_bmcReadSensorValues(struct _fpga_handle *_handle,
bmc_sdr_handle records,
bmc_values_handle *values,
uint32_t *num_values)
{
fpga_result result = FPGA_NOT_FOUND;
fpga_result(*bmcReadSensorValues)(bmc_sdr_handle records, bmc_values_handle *values, uint32_t *num_values);
if (_handle->bmc_handle != NULL) {
bmcReadSensorValues = dlsym(_handle->bmc_handle, "bmcReadSensorValues");
if (bmcReadSensorValues)
result = bmcReadSensorValues(records, values, num_values);
else
result = FPGA_EXCEPTION;
}
return result;
}
fpga_result xfpga_bmcDestroySensorValues(struct _fpga_handle *_handle,
bmc_values_handle *values)
{
fpga_result result = FPGA_NOT_FOUND;
fpga_result(*bmcDestroySensorValues)(bmc_values_handle *values);
if (_handle->bmc_handle != NULL) {
bmcDestroySensorValues = dlsym(_handle->bmc_handle, "bmcDestroySensorValues");
if (bmcDestroySensorValues)
result = bmcDestroySensorValues(values);
else
result = FPGA_EXCEPTION;
}
return result;
}
fpga_result xfpga_bmcGetSensorReading(struct _fpga_handle *_handle,
bmc_values_handle values,
uint32_t sensor_number,
uint32_t *is_valid,
double *value)
{
fpga_result result = FPGA_NOT_FOUND;
fpga_result(*bmcGetSensorReading)(bmc_values_handle values,
uint32_t sensor_number, uint32_t *is_valid,
double *value);
if (_handle->bmc_handle != NULL) {
bmcGetSensorReading = dlsym(_handle->bmc_handle, "bmcGetSensorReading");
if (bmcGetSensorReading)
result = bmcGetSensorReading(values, sensor_number, is_valid, value);
else
result = FPGA_EXCEPTION;
}
return result;
}
fpga_result xfpga_bmcGetSDRDetails(struct _fpga_handle *_handle,
bmc_values_handle values,
uint32_t sensor_number,
sdr_details *details)
{
fpga_result result = FPGA_NOT_FOUND;
fpga_result(*bmcGetSDRDetails)(bmc_values_handle values, uint32_t sensor_number,
sdr_details *details);
if (_handle->bmc_handle != NULL) {
bmcGetSDRDetails = dlsym(_handle->bmc_handle, "bmcGetSDRDetails");
if (bmcGetSDRDetails)
result = bmcGetSDRDetails(values, sensor_number, details);
else
result = FPGA_EXCEPTION;
}
return result;
}
// enumerates bmc power & thermal metrics info
fpga_result enum_bmc_metrics_info(struct _fpga_handle *_handle,
fpga_metric_vector *vector,
uint64_t *metric_num,
enum fpga_hw_type hw_type)
{
fpga_result result = FPGA_OK;
uint32_t x = 0;
uint32_t num_sensors = 0;
uint32_t num_values = 0;
enum fpga_metric_type metric_type = FPGA_METRIC_TYPE_POWER;
char group_name[SYSFS_PATH_MAX] = { 0 };
char qualifier_name[SYSFS_PATH_MAX] = { 0 };
char units[SYSFS_PATH_MAX] = { 0 };
sdr_details details;
bmc_sdr_handle records;
bmc_values_handle values;
if (vector == NULL ||
metric_num == NULL) {
FPGA_ERR("Invalid input");
		return FPGA_INVALID_PARAM;
}
result = xfpga_bmcLoadSDRs(_handle, &records, &num_sensors);
if (result != FPGA_OK) {
FPGA_ERR("Failed to load BMC SDR.");
return result;
}
result = xfpga_bmcReadSensorValues(_handle, records, &values, &num_values);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read BMC sensor values.");
return result;
}
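	// Walk the SDRs, adding one metric entry per thermal or power sensor; other sensor types are skipped.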
for (x = 0; x < num_sensors; x++) {
result = xfpga_bmcGetSDRDetails(_handle, values, x, &details);
if (details.type == BMC_THERMAL) {
metric_type = FPGA_METRIC_TYPE_THERMAL;
snprintf_s_s(group_name, sizeof(group_name), "%s", THERLGMT);
snprintf_s_s(units, sizeof(units), "%s", TEMP);
snprintf_s_ss(qualifier_name, sizeof(qualifier_name), "%s:%s", THERLGMT, details.name);
} else if (details.type == BMC_POWER) {
metric_type = FPGA_METRIC_TYPE_POWER;
snprintf_s_s(group_name, sizeof(group_name), "%s", PWRMGMT);
snprintf_s_ss(qualifier_name, sizeof(qualifier_name), "%s:%s", PWRMGMT, details.name);
snprintf(units, sizeof(units), "%ls", details.units);
} else {
continue;
}
result = add_metric_vector(vector, *metric_num, qualifier_name, group_name, "", details.name, "", units, FPGA_METRIC_DATATYPE_DOUBLE, metric_type, hw_type, 0);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metrics");
return result;
}
*metric_num = *metric_num + 1;
}
result = xfpga_bmcDestroySensorValues(_handle, &values);
if (result != FPGA_OK) {
FPGA_MSG("Failed to Destroy Sensor value.");
}
result = xfpga_bmcDestroySDRs(_handle, &records);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Destroy SDR.");
return result;
}
return result;
}
// frees metrics info vector
fpga_result free_fpga_enum_metrics_vector(struct _fpga_handle *_handle)
{
fpga_result result = FPGA_OK;
uint64_t i = 0;
	uint64_t num_enum_metrics = 0;
if (_handle == NULL) {
FPGA_ERR("Invalid handle ");
return FPGA_INVALID_PARAM;
}
if (_handle->magic != FPGA_HANDLE_MAGIC) {
FPGA_MSG("Invalid handle");
return FPGA_INVALID_PARAM;
}
	result = fpga_vector_total(&(_handle->fpga_enum_metric_vector), &num_enum_metrics);
if (result != FPGA_OK) {
FPGA_MSG("Failed to get metric total");
return FPGA_INVALID_PARAM;
}
	for (i = 0; i < num_enum_metrics; i++) {
fpga_vector_delete(&(_handle->fpga_enum_metric_vector), i);
}
fpga_vector_free(&(_handle->fpga_enum_metric_vector));
if (_handle->bmc_handle) {
dlclose(_handle->bmc_handle);
_handle->bmc_handle = NULL;
}
clear_cached_values(_handle);
_handle->metric_enum_status = false;
return result;
}
// retrieves fpga object type
fpga_result get_fpga_object_type(fpga_handle handle,
fpga_objtype *objtype)
{
fpga_result result = FPGA_OK;
fpga_result resval = FPGA_OK;
fpga_properties prop;
result = xfpga_fpgaGetPropertiesFromHandle(handle, &prop);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get properties");
return result;
}
result = fpgaPropertiesGetObjectType(prop, objtype);
if (result != FPGA_OK) {
FPGA_ERR("Failed to object type.");
}
resval = (result != FPGA_OK) ? result : resval;
result = fpgaDestroyProperties(&prop);
if (result != FPGA_OK) {
FPGA_ERR("Failed to destroy properties");
}
resval = (result != FPGA_OK) ? result : resval;
return resval;
}
// enumerates FME & AFU metrics info
fpga_result enum_fpga_metrics(fpga_handle handle)
{
fpga_result result = FPGA_OK;
struct _fpga_token *_token = NULL;
enum fpga_hw_type hw_type = FPGA_HW_UNKNOWN;
uint64_t mmio_offset = 0;
uint64_t metric_num = 0;
char metrics_path[SYSFS_PATH_MAX] = { 0 };
fpga_objtype objtype;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
if (_handle == NULL) {
FPGA_ERR("Invalid handle ");
return FPGA_INVALID_PARAM;
}
if (_handle->metric_enum_status)
return FPGA_OK;
_token = (struct _fpga_token *)_handle->token;
if (_token == NULL) {
FPGA_ERR("Invalid token within handle");
return FPGA_INVALID_PARAM;
}
result = get_fpga_object_type(handle, &objtype);
if (result != FPGA_OK) {
FPGA_ERR("Failed to init vector");
return result;
}
// Init vector
result = fpga_vector_init(&(_handle->fpga_enum_metric_vector));
if (result != FPGA_OK) {
FPGA_ERR("Failed to init vector");
return result;
}
if (objtype == FPGA_ACCELERATOR) {
// enum AFU
result = discover_afu_metrics_feature(handle, &mmio_offset);
if (result != FPGA_OK) {
FPGA_ERR("Failed to discover AFU Metrics BBB");
return result;
}
result = enum_afu_metrics(handle,
&(_handle->fpga_enum_metric_vector),
&metric_num,
mmio_offset);
if (result != FPGA_OK) {
FPGA_ERR("Failed to enum AFU metrics BBB");
return result;
}
} else if (objtype == FPGA_DEVICE) {
// enum FME
// get fpga hw type.
result = get_fpga_hw_type(_handle, &hw_type);
if (result != FPGA_OK) {
FPGA_ERR("Failed to discover hardware type.");
return result;
}
switch (hw_type) {
// MCP
case FPGA_HW_MCP: {
memset_s(metrics_path, SYSFS_PATH_MAX, 0);
if (sysfs_get_fme_pwr_path(_token, metrics_path) == FPGA_OK) {
result = enum_powermgmt_metrics(&(_handle->fpga_enum_metric_vector), &metric_num, metrics_path, FPGA_HW_MCP);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Enum Power metrics.");
}
}
memset_s(metrics_path, SYSFS_PATH_MAX, 0);
if (sysfs_get_fme_temp_path(_token, metrics_path) == FPGA_OK) {
result = enum_thermalmgmt_metrics(&(_handle->fpga_enum_metric_vector), &metric_num, metrics_path, FPGA_HW_MCP);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Enum Thermal metrics.");
}
}
memset_s(metrics_path, SYSFS_PATH_MAX, 0);
if (sysfs_get_fme_perf_path(_token, metrics_path) == FPGA_OK) {
result = enum_perf_counter_metrics(&(_handle->fpga_enum_metric_vector), &metric_num, metrics_path, FPGA_HW_MCP);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Enum Performance metrics.");
}
}
}
break;
// DCP RC
case FPGA_HW_DCP_RC: {
memset_s(metrics_path, SYSFS_PATH_MAX, 0);
if (sysfs_get_fme_perf_path(_token, metrics_path) == FPGA_OK) {
result = enum_perf_counter_metrics(&(_handle->fpga_enum_metric_vector), &metric_num, metrics_path, FPGA_HW_DCP_RC);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Enum Performance metrics.");
}
}
memset_s(metrics_path, SYSFS_PATH_MAX, 0);
if (sysfs_get_bmc_path(_token, metrics_path) == FPGA_OK) {
if (_handle->bmc_handle == NULL)
_handle->bmc_handle = dlopen(BMC_LIB, RTLD_LAZY | RTLD_LOCAL);
if (_handle->bmc_handle) {
result = enum_bmc_metrics_info(_handle, &(_handle->fpga_enum_metric_vector), &metric_num, FPGA_HW_DCP_RC);
if (result != FPGA_OK) {
FPGA_ERR("Failed to enumerate BMC metrics.");
}
}
}
}
break;
// DCP VC DC
case FPGA_HW_DCP_DC:
case FPGA_HW_DCP_VC: {
memset_s(metrics_path, SYSFS_PATH_MAX, 0);
if (sysfs_get_max10_path(_token, metrics_path) == FPGA_OK) {
// Max10 Power & Thermal
result = enum_max10_metrics_info(_handle,
&(_handle->fpga_enum_metric_vector),
&metric_num,
hw_type);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Enum Power and Thermal metrics.");
}
}
memset_s(metrics_path, SYSFS_PATH_MAX, 0);
if (sysfs_get_fme_perf_path(_token, metrics_path) == FPGA_OK) {
// Perf Counters
result = enum_perf_counter_metrics(&(_handle->fpga_enum_metric_vector), &metric_num, _token->sysfspath, hw_type);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Enum Performance metrics.");
}
}
}
break;
default:
FPGA_MSG("Unknown hardware type.");
result = FPGA_EXCEPTION;
}
} // if Object type
if (result != FPGA_OK)
free_fpga_enum_metrics_vector(_handle);
_handle->metric_enum_status = true;
return result;
}
fpga_result add_metric_info(struct _fpga_enum_metric *_enum_metrics,
struct fpga_metric_info *fpga_metric_info)
{
fpga_result result = FPGA_OK;
errno_t e = 0;
if (_enum_metrics == NULL ||
fpga_metric_info == NULL) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
e = strncpy_s(fpga_metric_info->group_name, sizeof(fpga_metric_info->group_name),
_enum_metrics->group_name, SYSFS_PATH_MAX);
if (EOK != e)
goto out;
e = strncpy_s(fpga_metric_info->metric_name, sizeof(fpga_metric_info->metric_name),
_enum_metrics->metric_name, SYSFS_PATH_MAX);
if (EOK != e)
goto out;
e = strncpy_s(fpga_metric_info->qualifier_name, sizeof(fpga_metric_info->qualifier_name),
_enum_metrics->qualifier_name, SYSFS_PATH_MAX);
if (EOK != e)
goto out;
e = strncpy_s(fpga_metric_info->metric_units, sizeof(fpga_metric_info->metric_units),
_enum_metrics->metric_units, SYSFS_PATH_MAX);
if (EOK != e)
goto out;
fpga_metric_info->metric_num = _enum_metrics->metric_num;
fpga_metric_info->metric_type = _enum_metrics->metric_type;
fpga_metric_info->metric_datatype = _enum_metrics->metric_datatype;
return result;
out:
return FPGA_INVALID_PARAM;
}
// Reads bmc metric value
fpga_result get_bmc_metrics_values(fpga_handle handle,
struct _fpga_enum_metric *_fpga_enum_metric,
struct fpga_metric *fpga_metric)
{
fpga_result result = FPGA_OK;
uint32_t num_sensors = 0;
uint32_t num_values = 0;
uint32_t x = 0;
uint32_t is_valid = 0;
double tmp = 0;
int metric_indicator = 0;
bmc_sdr_handle records;
bmc_values_handle values;
sdr_details details;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
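	// If a cache of BMC readings has already been built, serve the value from it.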
if (_handle->_bmc_metric_cache_value) {
for (x = 0; x < _handle->num_bmc_metric; x++) {
strcasecmp_s(_handle->_bmc_metric_cache_value[x].metric_name, sizeof(_handle->_bmc_metric_cache_value[x].metric_name),
_fpga_enum_metric->metric_name, &metric_indicator);
if (metric_indicator == 0) {
fpga_metric->value.dvalue = _handle->_bmc_metric_cache_value[x].fpga_metric.value.dvalue;
return result;
}
}
return FPGA_NOT_FOUND;
}
result = xfpga_bmcLoadSDRs(_handle, &records, &num_sensors);
if (result != FPGA_OK) {
FPGA_ERR("Failed to load BMC SDR.");
return result;
}
if (_handle->_bmc_metric_cache_value == NULL) {
_handle->_bmc_metric_cache_value = calloc(sizeof(struct _fpga_bmc_metric), num_sensors);
if (_handle->_bmc_metric_cache_value == NULL) {
FPGA_ERR("Failed to allocate memory");
result = FPGA_NO_MEMORY;
goto out_destroy;
}
_handle->num_bmc_metric = num_sensors;
}
result = xfpga_bmcReadSensorValues(_handle, records, &values, &num_values);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read BMC sensor values.");
goto out_destroy;
}
for (x = 0; x < num_sensors; x++) {
result = xfpga_bmcGetSDRDetails(_handle, values, x, &details);
if (result != FPGA_OK) {
FPGA_MSG("Failed to get SDR details.");
}
result = xfpga_bmcGetSensorReading(_handle, values, x, &is_valid, &tmp);
if (result != FPGA_OK) {
FPGA_MSG("Failed to read sensor readings.");
continue;
}
if (!is_valid) {
continue;
}
snprintf_s_s(_handle->_bmc_metric_cache_value[x].metric_name, sizeof(_handle->_bmc_metric_cache_value[x].metric_name), "%s", details.name);
_handle->_bmc_metric_cache_value[x].fpga_metric.value.dvalue = tmp;
strcasecmp_s(details.name, strnlen_s(details.name, FPGA_METRIC_STR_SIZE), _fpga_enum_metric->metric_name, &metric_indicator);
if (metric_indicator == 0) {
fpga_metric->value.dvalue = tmp;
}
}
result = xfpga_bmcDestroySensorValues(_handle, &values);
if (result != FPGA_OK) {
FPGA_MSG("Failed to Destroy Sensor value.");
}
out_destroy:
result = xfpga_bmcDestroySDRs(_handle, &records);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Destroy SDR.");
return result;
}
return result;
}
// Reads max10 power & thermal metric value
fpga_result get_pwr_thermal_max10_value(const char *sysfs_path,
double *dvalue)
{
fpga_result result = FPGA_OK;
uint64_t value;
if (sysfs_path == NULL ||
dvalue == NULL) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
result = sysfs_read_u64(sysfs_path, &value);
if (result != FPGA_OK) {
FPGA_MSG("Failed to read Metrics values");
return result;
}
*dvalue = ((double)value / MILLI);
return result;
}
// Reads mcp power & thermal metric value
fpga_result get_pwr_thermal_value(const char *sysfs_path,
uint64_t *value)
{
fpga_result result = FPGA_OK;
char *ptr = NULL;
if (sysfs_path == NULL ||
value == NULL) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
result = sysfs_read_u64(sysfs_path, value);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read Metrics values");
return result;
}
ptr = strstr(sysfs_path, FPGA_LIMIT);
if (ptr)
*value = *value / 8;
ptr = NULL;
ptr = strstr(sysfs_path, XEON_LIMIT);
if (ptr)
*value = *value / 8;
return result;
}
// Reads performance counter value
fpga_result get_performance_counter_value(const char *group_sysfs,
const char *metric_sysfs,
uint64_t *value)
{
fpga_result result = FPGA_OK;
char sysfs_path[SYSFS_PATH_MAX] = { 0 };
uint64_t val = 0;
if (group_sysfs == NULL ||
metric_sysfs == NULL ||
value == NULL) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
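	// Make sure the fabric performance counters are enabled before taking a reading.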
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", group_sysfs, PERF_ENABLE);
result = metric_sysfs_path_is_file(sysfs_path);
if (result == FPGA_OK) {
result = sysfs_read_u64(sysfs_path, &val);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read perf fabric enable");
}
if (val == 0x0) {
// Writer Fabric Enable
			result = sysfs_write_u64_decimal(sysfs_path, 1);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read perf fabric enable");
}
}
}
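	// Freeze the counters so the value read below is a consistent snapshot.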
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", group_sysfs, PERF_FREEZE);
result = metric_sysfs_path_is_file(sysfs_path);
if (result == FPGA_OK) {
result = sysfs_read_u64(sysfs_path, &val);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read perf fabric freeze");
}
if (val != 0x1) {
// Write Fabric Freeze
result = sysfs_write_u64(sysfs_path, 1);
if (result != FPGA_OK) {
FPGA_ERR("Failed to write perf fabric freeze");
}
}
}
*value = 0;
result = sysfs_read_u64(metric_sysfs, value);
if (result != FPGA_OK) {
FPGA_ERR("--Failed to read Metrics values");
return result;
}
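	// Unfreeze the counters so they continue counting after the read.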
snprintf_s_ss(sysfs_path, sizeof(sysfs_path), "%s/%s", group_sysfs, PERF_FREEZE);
result = metric_sysfs_path_is_file(sysfs_path);
if (result == FPGA_OK) {
result = sysfs_read_u64(sysfs_path, &val);
if (result != FPGA_OK) {
FPGA_ERR("Failed to read perf fabric freeze");
}
if (val == 0x1) {
// Write Fabric Freeze
result = sysfs_write_u64(sysfs_path, 0);
if (result != FPGA_OK) {
FPGA_ERR("Failed to write perf fabric freeze");
}
}
}
result = FPGA_OK;
return result;
}
// Reads fme metric value
fpga_result get_fme_metric_value(fpga_handle handle,
fpga_metric_vector *enum_vector,
uint64_t metric_num,
struct fpga_metric *fpga_metric)
{
fpga_result result = FPGA_OK;
uint64_t index = 0;
struct _fpga_enum_metric *_fpga_enum_metric = NULL;
	uint64_t num_enum_metrics = 0;
metric_value value = {0};
if (enum_vector == NULL ||
fpga_metric == NULL) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
	result = fpga_vector_total(enum_vector, &num_enum_metrics);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get metric total");
return FPGA_NOT_FOUND;
}
fpga_metric->isvalid = false;
result = FPGA_NOT_FOUND;
	for (index = 0; index < num_enum_metrics; index++) {
_fpga_enum_metric = (struct _fpga_enum_metric *) fpga_vector_get(enum_vector, index);
if (metric_num == _fpga_enum_metric->metric_num) {
			// Found metric
memset_s(&value, sizeof(value), 0);
// DCP Power & Thermal
if ((_fpga_enum_metric->hw_type == FPGA_HW_DCP_RC) &&
((_fpga_enum_metric->metric_type == FPGA_METRIC_TYPE_POWER) ||
(_fpga_enum_metric->metric_type == FPGA_METRIC_TYPE_THERMAL))) {
result = get_bmc_metrics_values(handle, _fpga_enum_metric, fpga_metric);
if (result != FPGA_OK) {
FPGA_MSG("Failed to get BMC metric value");
} else {
fpga_metric->isvalid = true;
}
fpga_metric->metric_num = metric_num;
}
if ((_fpga_enum_metric->hw_type == FPGA_HW_MCP) &&
((_fpga_enum_metric->metric_type == FPGA_METRIC_TYPE_POWER) ||
(_fpga_enum_metric->metric_type == FPGA_METRIC_TYPE_THERMAL))) {
result = get_pwr_thermal_value(_fpga_enum_metric->metric_sysfs, &value.ivalue);
if (result != FPGA_OK) {
FPGA_MSG("Failed to get BMC metric value");
} else {
fpga_metric->isvalid = true;
}
fpga_metric->value = value;
fpga_metric->metric_num = metric_num;
}
// Read power and thermal values from Max10
if (((_fpga_enum_metric->hw_type == FPGA_HW_DCP_DC) ||
(_fpga_enum_metric->hw_type == FPGA_HW_DCP_VC)) &&
((_fpga_enum_metric->metric_type == FPGA_METRIC_TYPE_POWER) ||
(_fpga_enum_metric->metric_type == FPGA_METRIC_TYPE_THERMAL))) {
result = read_max10_value(_fpga_enum_metric, &value.dvalue);
if (result != FPGA_OK) {
FPGA_MSG("Failed to get Max10 metric value");
} else {
fpga_metric->isvalid = true;
}
fpga_metric->value = value;
fpga_metric->metric_num = metric_num;
}
if (_fpga_enum_metric->metric_type == FPGA_METRIC_TYPE_PERFORMANCE_CTR) {
result = get_performance_counter_value(_fpga_enum_metric->group_sysfs, _fpga_enum_metric->metric_sysfs, &value.ivalue);
if (result != FPGA_OK) {
FPGA_MSG("Failed to get perf metric value");
} else {
fpga_metric->isvalid = true;
}
fpga_metric->value = value;
fpga_metric->metric_num = metric_num;
}
break;
}
}
return result;
}
// parses metric name strings
fpga_result parse_metric_num_name(const char *search_string,
fpga_metric_vector *fpga_enum_metrics_vector,
uint64_t *metric_num)
{
fpga_result result = FPGA_OK;
size_t init_size = 0;
char *str = NULL;
char *str_last = NULL;
uint64_t i = 0;
struct _fpga_enum_metric *fpga_enum_metric = NULL;
char qualifier_name[SYSFS_PATH_MAX] = { 0 };
char metrics_name[SYSFS_PATH_MAX] = { 0 };
int qualifier_indicator = 0;
int metric_indicator = 0;
errno_t err = 0;
uint64_t num_enun_metrics = 0;
if (search_string == NULL ||
fpga_enum_metrics_vector == NULL ||
metric_num == NULL) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
err = strlastchar_s((char *)search_string,
strnlen_s(search_string, FPGA_METRIC_STR_SIZE), ':', &str);
if (err != 0 &&
str == NULL) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
// Metric Name
err = strncpy_s(metrics_name, strnlen_s(search_string, FPGA_METRIC_STR_SIZE) + 1,
str + 1, strnlen_s(str + 1, FPGA_METRIC_STR_SIZE));
if (err != 0) {
FPGA_ERR("Failed to copy metric name");
return FPGA_INVALID_PARAM;
}
// qualifier_name
err = strlastchar_s((char *)search_string, strnlen_s(search_string, FPGA_METRIC_STR_SIZE), ':', &str_last);
if (err != 0) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
init_size = strnlen_s(search_string, FPGA_METRIC_STR_SIZE) - strnlen_s(str_last, FPGA_METRIC_STR_SIZE) + 1;
err = strncpy_s(qualifier_name, init_size + 1, search_string, init_size);
if (err != 0) {
FPGA_ERR("Invalid Input Paramters");
return FPGA_INVALID_PARAM;
}
if (init_size < FPGA_METRIC_STR_SIZE)
qualifier_name[init_size - 1] = '\0';
result = fpga_vector_total(fpga_enum_metrics_vector, &num_enun_metrics);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get metric total");
return FPGA_NOT_FOUND;
}
for (i = 0; i < num_enun_metrics; i++) {
fpga_enum_metric = (struct _fpga_enum_metric *) fpga_vector_get(fpga_enum_metrics_vector, i);
strcasecmp_s(fpga_enum_metric->qualifier_name, sizeof(fpga_enum_metric->qualifier_name),
qualifier_name, &qualifier_indicator);
strcasecmp_s(fpga_enum_metric->metric_name, sizeof(fpga_enum_metric->metric_name),
metrics_name, &metric_indicator);
if (qualifier_indicator == 0 &&
metric_indicator == 0) {
*metric_num = fpga_enum_metric->metric_num;
return result;
}
} // end of for loop
return FPGA_NOT_FOUND;
}
// clears BMC values
fpga_result clear_cached_values(fpga_handle handle)
{
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
fpga_result result = FPGA_OK;
if (_handle->_bmc_metric_cache_value) {
free(_handle->_bmc_metric_cache_value);
_handle->_bmc_metric_cache_value = NULL;
}
_handle->num_bmc_metric = 0;
return result;
}
| 1 | 19,203 | Probably want break instead of continue. | OPAE-opae-sdk | c |
@@ -305,7 +305,7 @@ func testMQTTDefaultOptions() *Options {
func testMQTTRunServer(t testing.TB, o *Options) *Server {
t.Helper()
- o.NoLog = false
+ o.NoLog = true
if o.StoreDir == _EMPTY_ {
o.StoreDir = createDir(t, "mqtt_js")
} | 1 | // Copyright 2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"os"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nats.go"
"github.com/nats-io/nkeys"
"github.com/nats-io/nuid"
)
var testMQTTTimeout = 4 * time.Second
var jsClusterTemplWithLeafAndMQTT = `
listen: 127.0.0.1:-1
server_name: %s
jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: "%s"}
{{leaf}}
cluster {
name: %s
listen: 127.0.0.1:%d
routes = [%s]
}
mqtt {
listen: 127.0.0.1:-1
}
# For access to system account.
accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } }
`
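// mqttWrapAsWs wraps a plain net.Conn so that MQTT packets written by the test
// helpers are framed as websocket binary messages, and frames read from the
// server are unwrapped back into a plain byte stream.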
type mqttWrapAsWs struct {
net.Conn
t testing.TB
br *bufio.Reader
tmp []byte
}
func (c *mqttWrapAsWs) Write(p []byte) (int, error) {
proto := testWSCreateClientMsg(wsBinaryMessage, 1, true, false, p)
return c.Conn.Write(proto)
}
func (c *mqttWrapAsWs) Read(p []byte) (int, error) {
for {
if n := len(c.tmp); n > 0 {
if len(p) < n {
n = len(p)
}
copy(p, c.tmp[:n])
c.tmp = c.tmp[n:]
return n, nil
}
c.tmp = testWSReadFrame(c.t, c.br)
}
}
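// testMQTTReadPacket reads a complete MQTT packet header from the connection,
// refilling the reader as needed, and returns the packet type byte and the
// remaining packet length.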
func testMQTTReadPacket(t testing.TB, r *mqttReader) (byte, int) {
t.Helper()
var b byte
var pl int
var err error
rd := r.reader
fill := func() []byte {
t.Helper()
var buf [512]byte
n, err := rd.Read(buf[:])
if err != nil {
t.Fatalf("Error reading data: %v", err)
}
return copyBytes(buf[:n])
}
rd.SetReadDeadline(time.Now().Add(testMQTTTimeout))
for {
r.pstart = r.pos
if !r.hasMore() {
r.reset(fill())
continue
}
b, err = r.readByte("packet type")
if err != nil {
t.Fatalf("Error reading packet: %v", err)
}
var complete bool
pl, complete, err = r.readPacketLen()
if err != nil {
t.Fatalf("Error reading packet: %v", err)
}
if !complete {
r.reset(fill())
continue
}
break
}
rd.SetReadDeadline(time.Time{})
return b, pl
}
func TestMQTTReader(t *testing.T) {
r := &mqttReader{}
r.reset([]byte{0, 2, 'a', 'b'})
bs, err := r.readBytes("", false)
if err != nil {
t.Fatal(err)
}
sbs := string(bs)
if sbs != "ab" {
t.Fatalf(`expected "ab", got %q`, sbs)
}
r.reset([]byte{0, 2, 'a', 'b'})
bs, err = r.readBytes("", true)
if err != nil {
t.Fatal(err)
}
bs[0], bs[1] = 'c', 'd'
if bytes.Equal(bs, r.buf[2:]) {
t.Fatal("readBytes should have returned a copy")
}
r.reset([]byte{'a', 'b'})
if b, err := r.readByte(""); err != nil || b != 'a' {
t.Fatalf("Error reading byte: b=%v err=%v", b, err)
}
if !r.hasMore() {
t.Fatal("expected to have more, did not")
}
if b, err := r.readByte(""); err != nil || b != 'b' {
t.Fatalf("Error reading byte: b=%v err=%v", b, err)
}
if r.hasMore() {
t.Fatal("expected to not have more")
}
if _, err := r.readByte("test"); err == nil || !strings.Contains(err.Error(), "error reading test") {
t.Fatalf("unexpected error: %v", err)
}
r.reset([]byte{0, 2, 'a', 'b'})
if s, err := r.readString(""); err != nil || s != "ab" {
t.Fatalf("Error reading string: s=%q err=%v", s, err)
}
r.reset([]byte{10})
if _, err := r.readUint16("uint16"); err == nil || !strings.Contains(err.Error(), "error reading uint16") {
t.Fatalf("unexpected error: %v", err)
}
r.reset([]byte{0x82, 0xff, 0x3})
l, _, err := r.readPacketLenWithCheck(false)
if err != nil {
t.Fatal("error getting packet len")
}
if l != 0xff82 {
t.Fatalf("expected length 0xff82 got 0x%x", l)
}
r.reset([]byte{0xff, 0xff, 0xff, 0xff, 0xff})
if _, _, err := r.readPacketLenWithCheck(false); err == nil || !strings.Contains(err.Error(), "malformed") {
t.Fatalf("unexpected error: %v", err)
}
r.reset([]byte{0, 2, 'a', 'b', mqttPacketPub, 0x82, 0xff, 0x3})
r.readString("")
for i := 0; i < 2; i++ {
r.pstart = r.pos
b, err := r.readByte("")
if err != nil {
t.Fatalf("Error reading byte: %v", err)
}
if pt := b & mqttPacketMask; pt != mqttPacketPub {
t.Fatalf("Unexpected byte: %v", b)
}
pl, complete, err := r.readPacketLen()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if complete {
t.Fatal("Expected to be incomplete")
}
if pl != 0 {
t.Fatalf("Expected pl to be 0, got %v", pl)
}
if i > 0 {
break
}
if !bytes.Equal(r.pbuf, []byte{mqttPacketPub, 0x82, 0xff, 0x3}) {
t.Fatalf("Invalid recorded partial: %v", r.pbuf)
}
r.reset([]byte{'a', 'b', 'c'})
if !bytes.Equal(r.buf, []byte{mqttPacketPub, 0x82, 0xff, 0x3, 'a', 'b', 'c'}) {
t.Fatalf("Invalid buffer: %v", r.buf)
}
if r.pbuf != nil {
t.Fatalf("Partial buffer should have been reset, got %v", r.pbuf)
}
if r.pos != 0 {
t.Fatalf("Pos should have been reset, got %v", r.pos)
}
if r.pstart != 0 {
t.Fatalf("Pstart should have been reset, got %v", r.pstart)
}
}
// On second pass, the pbuf should have been extended with 'abc'
if !bytes.Equal(r.pbuf, []byte{mqttPacketPub, 0x82, 0xff, 0x3, 'a', 'b', 'c'}) {
t.Fatalf("Invalid recorded partial: %v", r.pbuf)
}
}
func TestMQTTWriter(t *testing.T) {
w := &mqttWriter{}
w.WriteUint16(1234)
r := &mqttReader{}
r.reset(w.Bytes())
if v, err := r.readUint16(""); err != nil || v != 1234 {
t.Fatalf("unexpected value: v=%v err=%v", v, err)
}
w.Reset()
w.WriteString("test")
r.reset(w.Bytes())
if len(r.buf) != 6 {
t.Fatalf("Expected 2 bytes size before string, got %v", r.buf)
}
w.Reset()
w.WriteBytes([]byte("test"))
r.reset(w.Bytes())
if len(r.buf) != 6 {
t.Fatalf("Expected 2 bytes size before bytes, got %v", r.buf)
}
ints := []int{
0, 1, 127, 128, 16383, 16384, 2097151, 2097152, 268435455,
}
lens := []int{
1, 1, 1, 2, 2, 3, 3, 4, 4,
}
tl := 0
w.Reset()
for i, v := range ints {
w.WriteVarInt(v)
tl += lens[i]
if tl != w.Len() {
t.Fatalf("expected len %d, got %d", tl, w.Len())
}
}
r.reset(w.Bytes())
for _, v := range ints {
x, _, _ := r.readPacketLenWithCheck(false)
if v != x {
t.Fatalf("expected %d, got %d", v, x)
}
}
}
func testMQTTDefaultOptions() *Options {
o := DefaultOptions()
o.ServerName = nuid.Next()
o.Cluster.Port = 0
o.Gateway.Name = ""
o.Gateway.Port = 0
o.LeafNode.Port = 0
o.Websocket.Port = 0
o.MQTT.Host = "127.0.0.1"
o.MQTT.Port = -1
o.JetStream = true
return o
}
func testMQTTRunServer(t testing.TB, o *Options) *Server {
t.Helper()
o.NoLog = false
if o.StoreDir == _EMPTY_ {
o.StoreDir = createDir(t, "mqtt_js")
}
s, err := NewServer(o)
if err != nil {
t.Fatalf("Error creating server: %v", err)
}
l := &DummyLogger{}
s.SetLogger(l, true, true)
go s.Start()
if err := s.readyForConnections(3 * time.Second); err != nil {
testMQTTShutdownServer(s)
t.Fatal(err)
}
return s
}
func testMQTTShutdownRestartedServer(s **Server) {
srv := *s
testMQTTShutdownServer(srv)
*s = nil
}
func testMQTTShutdownServer(s *Server) {
if c := s.JetStreamConfig(); c != nil {
dir := strings.TrimSuffix(c.StoreDir, JetStreamStoreDir)
defer os.RemoveAll(dir)
}
s.Shutdown()
}
func testMQTTDefaultTLSOptions(t *testing.T, verify bool) *Options {
t.Helper()
o := testMQTTDefaultOptions()
tc := &TLSConfigOpts{
CertFile: "../test/configs/certs/server-cert.pem",
KeyFile: "../test/configs/certs/server-key.pem",
CaFile: "../test/configs/certs/ca.pem",
Verify: verify,
}
var err error
o.MQTT.TLSConfig, err = GenTLSConfig(tc)
o.MQTT.TLSTimeout = 2.0
if err != nil {
t.Fatalf("Error creating tls config: %v", err)
}
return o
}
func TestMQTTServerNameRequired(t *testing.T) {
conf := createConfFile(t, []byte(`
mqtt {
port: -1
}
`))
defer removeFile(t, conf)
o, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing config file: %v", err)
}
if _, err := NewServer(o); err == nil || err.Error() != errMQTTServerNameMustBeSet.Error() {
t.Fatalf("Expected error about requiring server name to be set, got %v", err)
}
}
func TestMQTTStandaloneRequiresJetStream(t *testing.T) {
conf := createConfFile(t, []byte(`
server_name: mqtt
mqtt {
port: -1
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/key.pem"
}
}
`))
defer removeFile(t, conf)
o, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing config file: %v", err)
}
if _, err := NewServer(o); err == nil || err.Error() != errMQTTStandaloneNeedsJetStream.Error() {
t.Fatalf("Expected error about requiring JetStream in standalone mode, got %v", err)
}
}
func TestMQTTConfig(t *testing.T) {
conf := createConfFile(t, []byte(`
jetstream: enabled
server_name: mqtt
mqtt {
port: -1
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/key.pem"
}
}
`))
defer removeFile(t, conf)
s, o := RunServerWithConfig(conf)
defer testMQTTShutdownServer(s)
if o.MQTT.TLSConfig == nil {
t.Fatal("expected TLS config to be set")
}
}
func TestMQTTValidateOptions(t *testing.T) {
nmqtto := DefaultOptions()
mqtto := testMQTTDefaultOptions()
for _, test := range []struct {
name string
getOpts func() *Options
err error
}{
{"mqtt disabled", func() *Options { return nmqtto.Clone() }, nil},
{"mqtt username not allowed if users specified", func() *Options {
o := mqtto.Clone()
o.Users = []*User{{Username: "abc", Password: "pwd"}}
o.MQTT.Username = "b"
o.MQTT.Password = "pwd"
return o
}, errMQTTUserMixWithUsersNKeys},
{"mqtt token not allowed if users specified", func() *Options {
o := mqtto.Clone()
o.Nkeys = []*NkeyUser{{Nkey: "abc"}}
o.MQTT.Token = "mytoken"
return o
}, errMQTTTokenMixWIthUsersNKeys},
{"ack wait should be >=0", func() *Options {
o := mqtto.Clone()
o.MQTT.AckWait = -10 * time.Second
return o
}, errMQTTAckWaitMustBePositive},
} {
t.Run(test.name, func(t *testing.T) {
err := validateMQTTOptions(test.getOpts())
if test.err == nil && err != nil {
t.Fatalf("Unexpected error: %v", err)
} else if test.err != nil && (err == nil || err.Error() != test.err.Error()) {
t.Fatalf("Expected error to contain %q, got %v", test.err, err)
}
})
}
}
func TestMQTTParseOptions(t *testing.T) {
for _, test := range []struct {
name string
content string
checkOpt func(*MQTTOpts) error
err string
}{
// Negative tests
{"bad type", "mqtt: []", nil, "to be a map"},
{"bad listen", "mqtt: { listen: [] }", nil, "port or host:port"},
{"bad port", `mqtt: { port: "abc" }`, nil, "not int64"},
{"bad host", `mqtt: { host: 123 }`, nil, "not string"},
{"bad tls", `mqtt: { tls: 123 }`, nil, "not map[string]interface {}"},
{"unknown field", `mqtt: { this_does_not_exist: 123 }`, nil, "unknown"},
{"ack wait", `mqtt: {ack_wait: abc}`, nil, "invalid duration"},
{"max ack pending", `mqtt: {max_ack_pending: abc}`, nil, "not int64"},
{"max ack pending too high", `mqtt: {max_ack_pending: 12345678}`, nil, "invalid value"},
// Positive tests
{"tls gen fails", `
mqtt {
tls {
cert_file: "./configs/certs/server.pem"
}
}`, nil, "missing 'key_file'"},
{"listen port only", `mqtt { listen: 1234 }`, func(o *MQTTOpts) error {
if o.Port != 1234 {
return fmt.Errorf("expected 1234, got %v", o.Port)
}
return nil
}, ""},
{"listen host and port", `mqtt { listen: "localhost:1234" }`, func(o *MQTTOpts) error {
if o.Host != "localhost" || o.Port != 1234 {
return fmt.Errorf("expected localhost:1234, got %v:%v", o.Host, o.Port)
}
return nil
}, ""},
{"host", `mqtt { host: "localhost" }`, func(o *MQTTOpts) error {
if o.Host != "localhost" {
return fmt.Errorf("expected localhost, got %v", o.Host)
}
return nil
}, ""},
{"port", `mqtt { port: 1234 }`, func(o *MQTTOpts) error {
if o.Port != 1234 {
return fmt.Errorf("expected 1234, got %v", o.Port)
}
return nil
}, ""},
{"tls config",
`
mqtt {
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/key.pem"
}
}
`, func(o *MQTTOpts) error {
if o.TLSConfig == nil {
return fmt.Errorf("TLSConfig should have been set")
}
return nil
}, ""},
{"no auth user",
`
mqtt {
no_auth_user: "noauthuser"
}
`, func(o *MQTTOpts) error {
if o.NoAuthUser != "noauthuser" {
return fmt.Errorf("Invalid NoAuthUser value: %q", o.NoAuthUser)
}
return nil
}, ""},
{"auth block",
`
mqtt {
authorization {
user: "mqttuser"
password: "pwd"
token: "token"
timeout: 2.0
}
}
`, func(o *MQTTOpts) error {
if o.Username != "mqttuser" || o.Password != "pwd" || o.Token != "token" || o.AuthTimeout != 2.0 {
return fmt.Errorf("Invalid auth block: %+v", o)
}
return nil
}, ""},
{"auth timeout as int",
`
mqtt {
authorization {
timeout: 2
}
}
`, func(o *MQTTOpts) error {
if o.AuthTimeout != 2.0 {
return fmt.Errorf("Invalid auth timeout: %v", o.AuthTimeout)
}
return nil
}, ""},
{"ack wait",
`
mqtt {
ack_wait: "10s"
}
`, func(o *MQTTOpts) error {
if o.AckWait != 10*time.Second {
return fmt.Errorf("Invalid ack wait: %v", o.AckWait)
}
return nil
}, ""},
{"max ack pending",
`
mqtt {
max_ack_pending: 123
}
`, func(o *MQTTOpts) error {
if o.MaxAckPending != 123 {
return fmt.Errorf("Invalid max ack pending: %v", o.MaxAckPending)
}
return nil
}, ""},
} {
t.Run(test.name, func(t *testing.T) {
conf := createConfFile(t, []byte(test.content))
defer removeFile(t, conf)
o, err := ProcessConfigFile(conf)
if test.err != _EMPTY_ {
if err == nil || !strings.Contains(err.Error(), test.err) {
t.Fatalf("For content: %q, expected error about %q, got %v", test.content, test.err, err)
}
return
} else if err != nil {
t.Fatalf("Unexpected error for content %q: %v", test.content, err)
}
if err := test.checkOpt(&o.MQTT); err != nil {
t.Fatalf("Incorrect option for content %q: %v", test.content, err.Error())
}
})
}
}
func TestMQTTStart(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
nc, err := net.Dial("tcp", fmt.Sprintf("%s:%d", o.MQTT.Host, o.MQTT.Port))
if err != nil {
t.Fatalf("Unable to create tcp connection to mqtt port: %v", err)
}
nc.Close()
// Check failure to start due to port in use
o2 := testMQTTDefaultOptions()
o2.MQTT.Port = o.MQTT.Port
s2, err := NewServer(o2)
if err != nil {
t.Fatalf("Error creating server: %v", err)
}
defer s2.Shutdown()
l := &captureFatalLogger{fatalCh: make(chan string, 1)}
s2.SetLogger(l, false, false)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
s2.Start()
wg.Done()
}()
select {
case e := <-l.fatalCh:
if !strings.Contains(e, "Unable to listen for MQTT connections") {
t.Fatalf("Unexpected error: %q", e)
}
case <-time.After(time.Second):
t.Fatal("Should have gotten a fatal error")
}
}
func TestMQTTTLS(t *testing.T) {
o := testMQTTDefaultTLSOptions(t, false)
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
c, _, err := testMQTTConnectRetryWithError(t, &mqttConnInfo{tls: true}, o.MQTT.Host, o.MQTT.Port, 0)
if err != nil {
t.Fatal(err)
}
c.Close()
c = nil
testMQTTShutdownServer(s)
// Force client cert verification
o = testMQTTDefaultTLSOptions(t, true)
s = testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
c, _, err = testMQTTConnectRetryWithError(t, &mqttConnInfo{tls: true}, o.MQTT.Host, o.MQTT.Port, 0)
if err == nil || c != nil {
if c != nil {
c.Close()
}
t.Fatal("Handshake expected to fail since client did not provide cert")
}
// Add client cert.
tc := &TLSConfigOpts{
CertFile: "../test/configs/certs/client-cert.pem",
KeyFile: "../test/configs/certs/client-key.pem",
}
tlsc, err := GenTLSConfig(tc)
if err != nil {
t.Fatalf("Error generating tls config: %v", err)
}
tlsc.InsecureSkipVerify = true
c, _, err = testMQTTConnectRetryWithError(t, &mqttConnInfo{
tls: true,
tlsc: tlsc,
}, o.MQTT.Host, o.MQTT.Port, 0)
if err != nil {
t.Fatal(err)
}
c.Close()
c = nil
testMQTTShutdownServer(s)
// Lower TLS timeout so low that we should fail
o.MQTT.TLSTimeout = 0.001
s = testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
nc, err := net.Dial("tcp", fmt.Sprintf("%s:%d", o.MQTT.Host, o.MQTT.Port))
if err != nil {
t.Fatalf("Unable to create tcp connection to mqtt port: %v", err)
}
defer nc.Close()
time.Sleep(100 * time.Millisecond)
tlsConn := tls.Client(nc, tlsc)
tlsConn.SetDeadline(time.Now().Add(time.Second))
if err := tlsConn.Handshake(); err == nil {
t.Fatal("Expected failure, did not get one")
}
}
type mqttConnInfo struct {
clientID string
cleanSess bool
keepAlive uint16
will *mqttWill
user string
pass string
ws bool
tls bool
tlsc *tls.Config
}
func testMQTTGetClient(t testing.TB, s *Server, clientID string) *client {
t.Helper()
var mc *client
s.mu.Lock()
for _, c := range s.clients {
c.mu.Lock()
if c.isMqtt() && c.mqtt.cid == clientID {
mc = c
}
c.mu.Unlock()
if mc != nil {
break
}
}
s.mu.Unlock()
if mc == nil {
t.Fatalf("Did not find client %q", clientID)
}
return mc
}
func testMQTTRead(c net.Conn) ([]byte, error) {
var buf [512]byte
// Make sure that test does not block
c.SetReadDeadline(time.Now().Add(testMQTTTimeout))
n, err := c.Read(buf[:])
if err != nil {
return nil, err
}
c.SetReadDeadline(time.Time{})
return copyBytes(buf[:n]), nil
}
func testMQTTWrite(c net.Conn, buf []byte) (int, error) {
c.SetWriteDeadline(time.Now().Add(testMQTTTimeout))
n, err := c.Write(buf)
c.SetWriteDeadline(time.Time{})
return n, err
}
func testMQTTConnect(t testing.TB, ci *mqttConnInfo, host string, port int) (net.Conn, *mqttReader) {
t.Helper()
return testMQTTConnectRetry(t, ci, host, port, 0)
}
func testMQTTConnectRetry(t testing.TB, ci *mqttConnInfo, host string, port int, retryCount int) (net.Conn, *mqttReader) {
t.Helper()
c, r, err := testMQTTConnectRetryWithError(t, ci, host, port, retryCount)
if err != nil {
t.Fatal(err)
}
return c, r
}
func testMQTTConnectRetryWithError(t testing.TB, ci *mqttConnInfo, host string, port int, retryCount int) (net.Conn, *mqttReader, error) {
retry := func(c net.Conn) bool {
if c != nil {
c.Close()
}
if retryCount == 0 {
return false
}
time.Sleep(time.Second)
retryCount--
return true
}
addr := fmt.Sprintf("%s:%d", host, port)
var c net.Conn
var err error
RETRY:
if ci.ws {
var br *bufio.Reader
c, br, _, err = testNewWSClientWithError(t, testWSClientOptions{
host: host,
port: port,
noTLS: !ci.tls,
path: mqttWSPath,
})
if err == nil {
c = &mqttWrapAsWs{Conn: c, t: t, br: br}
}
} else {
c, err = net.Dial("tcp", addr)
if err == nil && ci.tls {
tc := ci.tlsc
if tc == nil {
tc = &tls.Config{InsecureSkipVerify: true}
}
c = tls.Client(c, tc)
c.SetDeadline(time.Now().Add(time.Second))
err = c.(*tls.Conn).Handshake()
c.SetDeadline(time.Time{})
}
}
if err != nil {
if retry(c) {
goto RETRY
}
return nil, nil, fmt.Errorf("Error creating mqtt connection: %v", err)
}
proto := mqttCreateConnectProto(ci)
if _, err := testMQTTWrite(c, proto); err != nil {
if retry(c) {
goto RETRY
}
return nil, nil, fmt.Errorf("Error writing connect: %v", err)
}
buf, err := testMQTTRead(c)
if err != nil {
if retry(c) {
goto RETRY
}
return nil, nil, fmt.Errorf("Error reading: %v", err)
}
mr := &mqttReader{reader: c}
mr.reset(buf)
return c, mr, nil
}
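// mqttCreateConnectProto builds a raw CONNECT packet from the connection info:
// fixed header, protocol name/level, connect flags and keep-alive, followed by
// the payload (client ID, optional will topic/message, user name and password).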
func mqttCreateConnectProto(ci *mqttConnInfo) []byte {
flags := byte(0)
if ci.cleanSess {
flags |= mqttConnFlagCleanSession
}
if ci.will != nil {
flags |= mqttConnFlagWillFlag | (ci.will.qos << 3)
if ci.will.retain {
flags |= mqttConnFlagWillRetain
}
}
if ci.user != _EMPTY_ {
flags |= mqttConnFlagUsernameFlag
}
if ci.pass != _EMPTY_ {
flags |= mqttConnFlagPasswordFlag
}
pkLen := 2 + len(mqttProtoName) +
1 + // proto level
1 + // flags
2 + // keepAlive
2 + len(ci.clientID)
if ci.will != nil {
pkLen += 2 + len(ci.will.topic)
pkLen += 2 + len(ci.will.message)
}
if ci.user != _EMPTY_ {
pkLen += 2 + len(ci.user)
}
if ci.pass != _EMPTY_ {
pkLen += 2 + len(ci.pass)
}
w := &mqttWriter{}
w.WriteByte(mqttPacketConnect)
w.WriteVarInt(pkLen)
w.WriteString(string(mqttProtoName))
w.WriteByte(0x4)
w.WriteByte(flags)
w.WriteUint16(ci.keepAlive)
w.WriteString(ci.clientID)
if ci.will != nil {
w.WriteBytes(ci.will.topic)
w.WriteBytes(ci.will.message)
}
if ci.user != _EMPTY_ {
w.WriteString(ci.user)
}
if ci.pass != _EMPTY_ {
w.WriteBytes([]byte(ci.pass))
}
return w.Bytes()
}
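// testMQTTCheckConnAck reads a CONNACK packet and verifies its length,
// session-present flag and return code.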
func testMQTTCheckConnAck(t testing.TB, r *mqttReader, rc byte, sessionPresent bool) {
t.Helper()
b, pl := testMQTTReadPacket(t, r)
pt := b & mqttPacketMask
if pt != mqttPacketConnectAck {
t.Fatalf("Expected ConnAck (%x), got %x", mqttPacketConnectAck, pt)
}
if pl != 2 {
t.Fatalf("ConnAck packet length should be 2, got %v", pl)
}
caf, err := r.readByte("connack flags")
if err != nil {
t.Fatalf("Error reading packet length: %v", err)
}
if caf&0xfe != 0 {
t.Fatalf("ConnAck flag bits 7-1 should all be 0, got %x", caf>>1)
}
if sp := caf == 1; sp != sessionPresent {
t.Fatalf("Expected session present flag=%v got %v", sessionPresent, sp)
}
carc, err := r.readByte("connack return code")
if err != nil {
t.Fatalf("Error reading returned code: %v", err)
}
if carc != rc {
t.Fatalf("Expected return code to be %v, got %v", rc, carc)
}
}
func TestMQTTRequiresJSEnabled(t *testing.T) {
o := testMQTTDefaultOptions()
acc := NewAccount("mqtt")
o.Accounts = []*Account{acc}
o.Users = []*User{{Username: "mqtt", Account: acc}}
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
addr := fmt.Sprintf("%s:%d", o.MQTT.Host, o.MQTT.Port)
c, err := net.Dial("tcp", addr)
if err != nil {
t.Fatalf("Error creating mqtt connection: %v", err)
}
defer c.Close()
proto := mqttCreateConnectProto(&mqttConnInfo{cleanSess: true, user: "mqtt"})
if _, err := testMQTTWrite(c, proto); err != nil {
t.Fatalf("Error writing connect: %v", err)
}
if _, err := testMQTTRead(c); err == nil {
t.Fatal("Expected failure, did not get one")
}
}
func testMQTTEnableJSForAccount(t *testing.T, s *Server, accName string) {
t.Helper()
acc, err := s.LookupAccount(accName)
if err != nil {
t.Fatalf("Error looking up account: %v", err)
}
limits := &JetStreamAccountLimits{
MaxConsumers: -1,
MaxStreams: -1,
MaxMemory: 1024 * 1024,
MaxStore: 1024 * 1024,
}
if err := acc.EnableJetStream(limits); err != nil {
t.Fatalf("Error enabling JS: %v", err)
}
}
func TestMQTTTLSVerifyAndMap(t *testing.T) {
accName := "MyAccount"
acc := NewAccount(accName)
certUserName := "CN=example.com,OU=NATS.io"
users := []*User{{Username: certUserName, Account: acc}}
for _, test := range []struct {
name string
filtering bool
provideCert bool
}{
{"no filtering, client provides cert", false, true},
{"no filtering, client does not provide cert", false, false},
{"filtering, client provides cert", true, true},
{"filtering, client does not provide cert", true, false},
} {
t.Run(test.name, func(t *testing.T) {
o := testMQTTDefaultOptions()
o.Host = "localhost"
o.Accounts = []*Account{acc}
o.Users = users
if test.filtering {
o.Users[0].AllowedConnectionTypes = testCreateAllowedConnectionTypes([]string{jwt.ConnectionTypeStandard, jwt.ConnectionTypeMqtt})
}
tc := &TLSConfigOpts{
CertFile: "../test/configs/certs/tlsauth/server.pem",
KeyFile: "../test/configs/certs/tlsauth/server-key.pem",
CaFile: "../test/configs/certs/tlsauth/ca.pem",
Verify: true,
}
tlsc, err := GenTLSConfig(tc)
if err != nil {
t.Fatalf("Error creating tls config: %v", err)
}
o.MQTT.TLSConfig = tlsc
o.MQTT.TLSTimeout = 2.0
o.MQTT.TLSMap = true
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
testMQTTEnableJSForAccount(t, s, accName)
tlscc := &tls.Config{}
if test.provideCert {
tc := &TLSConfigOpts{
CertFile: "../test/configs/certs/tlsauth/client.pem",
KeyFile: "../test/configs/certs/tlsauth/client-key.pem",
}
var err error
tlscc, err = GenTLSConfig(tc)
if err != nil {
t.Fatalf("Error generating tls config: %v", err)
}
}
tlscc.InsecureSkipVerify = true
if test.provideCert {
tlscc.MinVersion = tls.VersionTLS13
}
mc, r, err := testMQTTConnectRetryWithError(t, &mqttConnInfo{
cleanSess: true,
tls: true,
tlsc: tlscc,
}, o.MQTT.Host, o.MQTT.Port, 0)
if !test.provideCert {
if err == nil {
t.Fatal("Expected error, did not get one")
} else if !strings.Contains(err.Error(), "bad certificate") {
t.Fatalf("Unexpected error: %v", err)
}
return
}
if err != nil {
t.Fatalf("Error reading: %v", err)
}
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
var c *client
s.mu.Lock()
for _, sc := range s.clients {
sc.mu.Lock()
if sc.isMqtt() {
c = sc
}
sc.mu.Unlock()
if c != nil {
break
}
}
s.mu.Unlock()
if c == nil {
t.Fatal("Client not found")
}
var uname string
var accname string
c.mu.Lock()
uname = c.opts.Username
if c.acc != nil {
accname = c.acc.GetName()
}
c.mu.Unlock()
if uname != certUserName {
t.Fatalf("Expected username %q, got %q", certUserName, uname)
}
if accname != accName {
t.Fatalf("Expected account %q, got %v", accName, accname)
}
})
}
}
func TestMQTTBasicAuth(t *testing.T) {
for _, test := range []struct {
name string
opts func() *Options
user string
pass string
rc byte
}{
{
"top level auth, no override, wrong u/p",
func() *Options {
o := testMQTTDefaultOptions()
o.Username = "normal"
o.Password = "client"
return o
},
"mqtt", "client", mqttConnAckRCNotAuthorized,
},
{
"top level auth, no override, correct u/p",
func() *Options {
o := testMQTTDefaultOptions()
o.Username = "normal"
o.Password = "client"
return o
},
"normal", "client", mqttConnAckRCConnectionAccepted,
},
{
"no top level auth, mqtt auth, wrong u/p",
func() *Options {
o := testMQTTDefaultOptions()
o.MQTT.Username = "mqtt"
o.MQTT.Password = "client"
return o
},
"normal", "client", mqttConnAckRCNotAuthorized,
},
{
"no top level auth, mqtt auth, correct u/p",
func() *Options {
o := testMQTTDefaultOptions()
o.MQTT.Username = "mqtt"
o.MQTT.Password = "client"
return o
},
"mqtt", "client", mqttConnAckRCConnectionAccepted,
},
{
"top level auth, mqtt override, wrong u/p",
func() *Options {
o := testMQTTDefaultOptions()
o.Username = "normal"
o.Password = "client"
o.MQTT.Username = "mqtt"
o.MQTT.Password = "client"
return o
},
"normal", "client", mqttConnAckRCNotAuthorized,
},
{
"top level auth, mqtt override, correct u/p",
func() *Options {
o := testMQTTDefaultOptions()
o.Username = "normal"
o.Password = "client"
o.MQTT.Username = "mqtt"
o.MQTT.Password = "client"
return o
},
"mqtt", "client", mqttConnAckRCConnectionAccepted,
},
} {
t.Run(test.name, func(t *testing.T) {
o := test.opts()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{
cleanSess: true,
user: test.user,
pass: test.pass,
}
mc, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, test.rc, false)
})
}
}
func TestMQTTAuthTimeout(t *testing.T) {
for _, test := range []struct {
name string
at float64
mat float64
ok bool
}{
{"use top-level auth timeout", 0.5, 0.0, true},
{"use mqtt auth timeout", 0.5, 0.05, false},
} {
t.Run(test.name, func(t *testing.T) {
o := testMQTTDefaultOptions()
o.AuthTimeout = test.at
o.MQTT.Username = "mqtt"
o.MQTT.Password = "client"
o.MQTT.AuthTimeout = test.mat
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mc, err := net.Dial("tcp", fmt.Sprintf("%s:%d", o.MQTT.Host, o.MQTT.Port))
if err != nil {
t.Fatalf("Error connecting: %v", err)
}
defer mc.Close()
time.Sleep(100 * time.Millisecond)
ci := &mqttConnInfo{
cleanSess: true,
user: "mqtt",
pass: "client",
}
proto := mqttCreateConnectProto(ci)
if _, err := testMQTTWrite(mc, proto); err != nil {
if test.ok {
t.Fatalf("Error sending connect: %v", err)
}
// else it is ok since we got disconnected due to auth timeout
return
}
buf, err := testMQTTRead(mc)
if err != nil {
if test.ok {
t.Fatalf("Error reading: %v", err)
}
// else it is ok since we got disconnected due to auth timeout
return
}
r := &mqttReader{reader: mc}
r.reset(buf)
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
time.Sleep(500 * time.Millisecond)
testMQTTPublish(t, mc, r, 1, false, false, "foo", 1, []byte("msg"))
})
}
}
func TestMQTTTokenAuth(t *testing.T) {
for _, test := range []struct {
name string
opts func() *Options
token string
rc byte
}{
{
"top level auth, no override, wrong token",
func() *Options {
o := testMQTTDefaultOptions()
o.Authorization = "goodtoken"
return o
},
"badtoken", mqttConnAckRCNotAuthorized,
},
{
"top level auth, no override, correct token",
func() *Options {
o := testMQTTDefaultOptions()
o.Authorization = "goodtoken"
return o
},
"goodtoken", mqttConnAckRCConnectionAccepted,
},
{
"no top level auth, mqtt auth, wrong token",
func() *Options {
o := testMQTTDefaultOptions()
o.MQTT.Token = "goodtoken"
return o
},
"badtoken", mqttConnAckRCNotAuthorized,
},
{
"no top level auth, mqtt auth, correct token",
func() *Options {
o := testMQTTDefaultOptions()
o.MQTT.Token = "goodtoken"
return o
},
"goodtoken", mqttConnAckRCConnectionAccepted,
},
{
"top level auth, mqtt override, wrong token",
func() *Options {
o := testMQTTDefaultOptions()
o.Authorization = "clienttoken"
o.MQTT.Token = "mqtttoken"
return o
},
"clienttoken", mqttConnAckRCNotAuthorized,
},
{
"top level auth, mqtt override, correct token",
func() *Options {
o := testMQTTDefaultOptions()
o.Authorization = "clienttoken"
o.MQTT.Token = "mqtttoken"
return o
},
"mqtttoken", mqttConnAckRCConnectionAccepted,
},
} {
t.Run(test.name, func(t *testing.T) {
o := test.opts()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{
cleanSess: true,
user: "ignore_use_token",
pass: test.token,
}
mc, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, test.rc, false)
})
}
}
func TestMQTTJWTWithAllowedConnectionTypes(t *testing.T) {
o := testMQTTDefaultOptions()
// Create System Account
syskp, _ := nkeys.CreateAccount()
syspub, _ := syskp.PublicKey()
sysAc := jwt.NewAccountClaims(syspub)
sysjwt, err := sysAc.Encode(oKp)
if err != nil {
t.Fatalf("Error generating account JWT: %v", err)
}
// Create memory resolver and store system account
mr := &MemAccResolver{}
mr.Store(syspub, sysjwt)
if err != nil {
t.Fatalf("Error saving system account JWT to memory resolver: %v", err)
}
// Add system account and memory resolver to server options
o.SystemAccount = syspub
o.AccountResolver = mr
setupAddTrusted(o)
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
for _, test := range []struct {
name string
connectionTypes []string
rc byte
}{
{"not allowed", []string{jwt.ConnectionTypeStandard}, mqttConnAckRCNotAuthorized},
{"allowed", []string{jwt.ConnectionTypeStandard, strings.ToLower(jwt.ConnectionTypeMqtt)}, mqttConnAckRCConnectionAccepted},
{"allowed with unknown", []string{jwt.ConnectionTypeMqtt, "SomeNewType"}, mqttConnAckRCConnectionAccepted},
{"not allowed with unknown", []string{"SomeNewType"}, mqttConnAckRCNotAuthorized},
} {
t.Run(test.name, func(t *testing.T) {
nuc := newJWTTestUserClaims()
nuc.AllowedConnectionTypes = test.connectionTypes
nuc.BearerToken = true
okp, _ := nkeys.FromSeed(oSeed)
akp, _ := nkeys.CreateAccount()
apub, _ := akp.PublicKey()
nac := jwt.NewAccountClaims(apub)
// Enable Jetstream on account with lax limitations
nac.Limits.JetStreamLimits.Consumer = -1
nac.Limits.JetStreamLimits.Streams = -1
nac.Limits.JetStreamLimits.MemoryStorage = 1024 * 1024
nac.Limits.JetStreamLimits.DiskStorage = 1024 * 1024
ajwt, err := nac.Encode(okp)
if err != nil {
t.Fatalf("Error generating account JWT: %v", err)
}
nkp, _ := nkeys.CreateUser()
pub, _ := nkp.PublicKey()
nuc.Subject = pub
jwt, err := nuc.Encode(akp)
if err != nil {
t.Fatalf("Error generating user JWT: %v", err)
}
addAccountToMemResolver(s, apub, ajwt)
ci := &mqttConnInfo{
cleanSess: true,
user: "ignore_use_token",
pass: jwt,
}
mc, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, test.rc, false)
})
}
}
func TestMQTTUsersAuth(t *testing.T) {
users := []*User{{Username: "user", Password: "pwd"}}
for _, test := range []struct {
name string
opts func() *Options
user string
pass string
rc byte
}{
{
"no filtering, wrong user",
func() *Options {
o := testMQTTDefaultOptions()
o.Users = users
return o
},
"wronguser", "pwd", mqttConnAckRCNotAuthorized,
},
{
"no filtering, correct user",
func() *Options {
o := testMQTTDefaultOptions()
o.Users = users
return o
},
"user", "pwd", mqttConnAckRCConnectionAccepted,
},
{
"filtering, user not allowed",
func() *Options {
o := testMQTTDefaultOptions()
o.Users = users
// Only allowed for regular clients
o.Users[0].AllowedConnectionTypes = testCreateAllowedConnectionTypes([]string{jwt.ConnectionTypeStandard})
return o
},
"user", "pwd", mqttConnAckRCNotAuthorized,
},
{
"filtering, user allowed",
func() *Options {
o := testMQTTDefaultOptions()
o.Users = users
o.Users[0].AllowedConnectionTypes = testCreateAllowedConnectionTypes([]string{jwt.ConnectionTypeStandard, jwt.ConnectionTypeMqtt})
return o
},
"user", "pwd", mqttConnAckRCConnectionAccepted,
},
{
"filtering, wrong password",
func() *Options {
o := testMQTTDefaultOptions()
o.Users = users
o.Users[0].AllowedConnectionTypes = testCreateAllowedConnectionTypes([]string{jwt.ConnectionTypeStandard, jwt.ConnectionTypeMqtt})
return o
},
"user", "badpassword", mqttConnAckRCNotAuthorized,
},
} {
t.Run(test.name, func(t *testing.T) {
o := test.opts()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{
cleanSess: true,
user: test.user,
pass: test.pass,
}
mc, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, test.rc, false)
})
}
}
func TestMQTTNoAuthUserValidation(t *testing.T) {
o := testMQTTDefaultOptions()
o.Users = []*User{{Username: "user", Password: "pwd"}}
// Should fail because it is not part of o.Users.
o.MQTT.NoAuthUser = "notfound"
if _, err := NewServer(o); err == nil || !strings.Contains(err.Error(), "not present as user") {
t.Fatalf("Expected error saying not present as user, got %v", err)
}
// Set a valid no auth user for global options, but still should fail because
// of o.MQTT.NoAuthUser
o.NoAuthUser = "user"
o.MQTT.NoAuthUser = "notfound"
if _, err := NewServer(o); err == nil || !strings.Contains(err.Error(), "not present as user") {
t.Fatalf("Expected error saying not present as user, got %v", err)
}
}
func TestMQTTNoAuthUser(t *testing.T) {
for _, test := range []struct {
name string
override bool
useAuth bool
expectedUser string
expectedAcc string
}{
{"no override, no user provided", false, false, "noauth", "normal"},
{"no override, user povided", false, true, "user", "normal"},
{"override, no user provided", true, false, "mqttnoauth", "mqtt"},
{"override, user provided", true, true, "mqttuser", "mqtt"},
} {
t.Run(test.name, func(t *testing.T) {
o := testMQTTDefaultOptions()
normalAcc := NewAccount("normal")
mqttAcc := NewAccount("mqtt")
o.Accounts = []*Account{normalAcc, mqttAcc}
o.Users = []*User{
{Username: "noauth", Password: "pwd", Account: normalAcc},
{Username: "user", Password: "pwd", Account: normalAcc},
{Username: "mqttnoauth", Password: "pwd", Account: mqttAcc},
{Username: "mqttuser", Password: "pwd", Account: mqttAcc},
}
o.NoAuthUser = "noauth"
if test.override {
o.MQTT.NoAuthUser = "mqttnoauth"
}
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
testMQTTEnableJSForAccount(t, s, "normal")
testMQTTEnableJSForAccount(t, s, "mqtt")
ci := &mqttConnInfo{clientID: "mqtt", cleanSess: true}
if test.useAuth {
ci.user = test.expectedUser
ci.pass = "pwd"
}
mc, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
c := testMQTTGetClient(t, s, "mqtt")
c.mu.Lock()
uname := c.opts.Username
aname := c.acc.GetName()
c.mu.Unlock()
if uname != test.expectedUser {
t.Fatalf("Expected selected user to be %q, got %q", test.expectedUser, uname)
}
if aname != test.expectedAcc {
t.Fatalf("Expected selected account to be %q, got %q", test.expectedAcc, aname)
}
})
}
}
func TestMQTTConnectNotFirstPacket(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
l := &captureErrorLogger{errCh: make(chan string, 10)}
s.SetLogger(l, false, false)
c, err := net.Dial("tcp", fmt.Sprintf("%s:%d", o.MQTT.Host, o.MQTT.Port))
if err != nil {
t.Fatalf("Error on dial: %v", err)
}
defer c.Close()
w := &mqttWriter{}
mqttWritePublish(w, 0, false, false, "foo", 0, []byte("hello"))
if _, err := testMQTTWrite(c, w.Bytes()); err != nil {
t.Fatalf("Error publishing: %v", err)
}
testMQTTExpectDisconnect(t, c)
select {
case err := <-l.errCh:
if !strings.Contains(err, "should be a CONNECT") {
t.Fatalf("Expected error about first packet being a CONNECT, got %v", err)
}
case <-time.After(time.Second):
t.Fatal("Did not log any error")
}
}
func TestMQTTSecondConnect(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
proto := mqttCreateConnectProto(&mqttConnInfo{cleanSess: true})
if _, err := testMQTTWrite(mc, proto); err != nil {
t.Fatalf("Error writing connect: %v", err)
}
testMQTTExpectDisconnect(t, mc)
}
func TestMQTTParseConnect(t *testing.T) {
for _, test := range []struct {
name string
proto []byte
pl int
err string
}{
{"packet in buffer error", []byte{0}, 10, io.ErrUnexpectedEOF.Error()},
{"bad proto name", []byte{0, 4, 'B', 'A', 'D'}, 5, "protocol name"},
{"invalid proto name", []byte{0, 3, 'B', 'A', 'D'}, 5, "expected connect packet with protocol name"},
{"old proto not supported", []byte{0, 6, 'M', 'Q', 'I', 's', 'd', 'p'}, 8, "older protocol"},
{"error on protocol level", []byte{0, 4, 'M', 'Q', 'T', 'T'}, 6, "protocol level"},
{"unacceptable protocol version", []byte{0, 4, 'M', 'Q', 'T', 'T', 10}, 7, "unacceptable protocol version"},
{"error on flags", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel}, 7, "flags"},
{"reserved flag", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, 1}, 8, errMQTTConnFlagReserved.Error()},
{"will qos without will flag", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, 1 << 3}, 8, "if Will flag is set to 0, Will QoS must be 0 too"},
{"will retain without will flag", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, 1 << 5}, 8, errMQTTWillAndRetainFlag.Error()},
{"will qos", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, 3<<3 | 1<<2}, 8, "if Will flag is set to 1, Will QoS can be 0, 1 or 2"},
{"no user but password", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagPasswordFlag}, 8, errMQTTPasswordFlagAndNoUser.Error()},
{"missing keep alive", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, 0}, 8, "keep alive"},
{"missing client ID", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, 0, 0, 1}, 10, "client ID"},
{"empty client ID", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, 0, 0, 1, 0, 0}, 12, errMQTTCIDEmptyNeedsCleanFlag.Error()},
{"invalid utf8 client ID", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, 0, 0, 1, 0, 1, 241}, 13, "invalid utf8 for client ID"},
{"missing will topic", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagWillFlag | mqttConnFlagCleanSession, 0, 0, 0, 0}, 12, "Will topic"},
{"empty will topic", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagWillFlag | mqttConnFlagCleanSession, 0, 0, 0, 0, 0, 0}, 14, errMQTTEmptyWillTopic.Error()},
{"invalid utf8 will topic", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagWillFlag | mqttConnFlagCleanSession, 0, 0, 0, 0, 0, 1, 241}, 15, "invalid utf8 for Will topic"},
{"invalid wildcard will topic", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagWillFlag | mqttConnFlagCleanSession, 0, 0, 0, 0, 0, 1, '#'}, 15, "wildcards not allowed"},
{"error on will message", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagWillFlag | mqttConnFlagCleanSession, 0, 0, 0, 0, 0, 1, 'a', 0, 3}, 17, "Will message"},
{"error on username", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagUsernameFlag | mqttConnFlagCleanSession, 0, 0, 0, 0}, 12, "user name"},
{"empty username", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagUsernameFlag | mqttConnFlagCleanSession, 0, 0, 0, 0, 0, 0}, 14, errMQTTEmptyUsername.Error()},
{"invalid utf8 username", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagUsernameFlag | mqttConnFlagCleanSession, 0, 0, 0, 0, 0, 1, 241}, 15, "invalid utf8 for user name"},
{"error on password", []byte{0, 4, 'M', 'Q', 'T', 'T', mqttProtoLevel, mqttConnFlagUsernameFlag | mqttConnFlagPasswordFlag | mqttConnFlagCleanSession, 0, 0, 0, 0, 0, 1, 'a'}, 15, "password"},
} {
t.Run(test.name, func(t *testing.T) {
r := &mqttReader{}
r.reset(test.proto)
mqtt := &mqtt{r: r}
c := &client{mqtt: mqtt}
if _, _, err := c.mqttParseConnect(r, test.pl); err == nil || !strings.Contains(err.Error(), test.err) {
t.Fatalf("Expected error %q, got %v", test.err, err)
}
})
}
}
func TestMQTTConnectFailsOnParse(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
addr := fmt.Sprintf("%s:%d", o.MQTT.Host, o.MQTT.Port)
c, err := net.Dial("tcp", addr)
if err != nil {
t.Fatalf("Error creating mqtt connection: %v", err)
}
pkLen := 2 + len(mqttProtoName) +
1 + // proto level
1 + // flags
2 + // keepAlive
2 + len("mqtt")
w := &mqttWriter{}
w.WriteByte(mqttPacketConnect)
w.WriteVarInt(pkLen)
w.WriteString(string(mqttProtoName))
w.WriteByte(0x7)
w.WriteByte(mqttConnFlagCleanSession)
w.WriteUint16(0)
w.WriteString("mqtt")
c.Write(w.Bytes())
buf, err := testMQTTRead(c)
if err != nil {
t.Fatalf("Error reading: %v", err)
}
r := &mqttReader{reader: c}
r.reset(buf)
testMQTTCheckConnAck(t, r, mqttConnAckRCUnacceptableProtocolVersion, false)
}
func TestMQTTConnKeepAlive(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true, keepAlive: 1}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, mc, r, 0, false, false, "foo", 0, []byte("msg"))
time.Sleep(2 * time.Second)
testMQTTExpectDisconnect(t, mc)
}
func TestMQTTDontSetPinger(t *testing.T) {
o := testMQTTDefaultOptions()
o.PingInterval = 15 * time.Millisecond
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mc, r := testMQTTConnect(t, &mqttConnInfo{clientID: "mqtt", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
c := testMQTTGetClient(t, s, "mqtt")
c.mu.Lock()
timerSet := c.ping.tmr != nil
c.mu.Unlock()
if timerSet {
t.Fatalf("Ping timer should not be set for MQTT clients")
}
// Wait a bit and expect nothing (and connection should still be valid)
testMQTTExpectNothing(t, r)
testMQTTPublish(t, mc, r, 0, false, false, "foo", 0, []byte("msg"))
}
func TestMQTTUnsupportedPackets(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
for _, test := range []struct {
name string
packetType byte
}{
{"pubrec", mqttPacketPubRec},
{"pubrel", mqttPacketPubRel},
{"pubcomp", mqttPacketPubComp},
} {
t.Run(test.name, func(t *testing.T) {
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
w := &mqttWriter{}
pt := test.packetType
if test.packetType == mqttPacketPubRel {
pt |= byte(0x2)
}
w.WriteByte(pt)
w.WriteVarInt(2)
w.WriteUint16(1)
mc.Write(w.Bytes())
testMQTTExpectDisconnect(t, mc)
})
}
}
func TestMQTTTopicAndSubjectConversion(t *testing.T) {
for _, test := range []struct {
name string
mqttTopic string
natsSubject string
err string
}{
{"/", "/", "/./", ""},
{"//", "//", "/././", ""},
{"///", "///", "/./././", ""},
{"////", "////", "/././././", ""},
{"foo", "foo", "foo", ""},
{"/foo", "/foo", "/.foo", ""},
{"//foo", "//foo", "/./.foo", ""},
{"///foo", "///foo", "/././.foo", ""},
{"///foo/", "///foo/", "/././.foo./", ""},
{"///foo//", "///foo//", "/././.foo././", ""},
{"///foo///", "///foo///", "/././.foo./././", ""},
{"foo/bar", "foo/bar", "foo.bar", ""},
{"/foo/bar", "/foo/bar", "/.foo.bar", ""},
{"/foo/bar/", "/foo/bar/", "/.foo.bar./", ""},
{"foo/bar/baz", "foo/bar/baz", "foo.bar.baz", ""},
{"/foo/bar/baz", "/foo/bar/baz", "/.foo.bar.baz", ""},
{"/foo/bar/baz/", "/foo/bar/baz/", "/.foo.bar.baz./", ""},
{"bar", "bar/", "bar./", ""},
{"bar//", "bar//", "bar././", ""},
{"bar///", "bar///", "bar./././", ""},
{"foo//bar", "foo//bar", "foo./.bar", ""},
{"foo///bar", "foo///bar", "foo././.bar", ""},
{"foo////bar", "foo////bar", "foo./././.bar", ""},
// These should produce errors
{"foo/+", "foo/+", "", "wildcards not allowed in publish"},
{"foo/#", "foo/#", "", "wildcards not allowed in publish"},
{"foo bar", "foo bar", "", "not supported"},
{"foo.bar", "foo.bar", "", "not supported"},
} {
t.Run(test.name, func(t *testing.T) {
res, err := mqttTopicToNATSPubSubject([]byte(test.mqttTopic))
if test.err != _EMPTY_ {
if err == nil || !strings.Contains(err.Error(), test.err) {
t.Fatalf("Expected error %q, got %q", test.err, err.Error())
}
return
}
toNATS := string(res)
if toNATS != test.natsSubject {
t.Fatalf("Expected subject %q got %q", test.natsSubject, toNATS)
}
res = natsSubjectToMQTTTopic(string(res))
backToMQTT := string(res)
if backToMQTT != test.mqttTopic {
t.Fatalf("Expected topic %q got %q (NATS conversion was %q)", test.mqttTopic, backToMQTT, toNATS)
}
})
}
}
func TestMQTTFilterConversion(t *testing.T) {
// Similar to TopicConversion test except that wildcards are OK here.
// So testing only those.
for _, test := range []struct {
name string
mqttTopic string
natsSubject string
}{
{"single level wildcard", "+", "*"},
{"single level wildcard", "/+", "/.*"},
{"single level wildcard", "+/", "*./"},
{"single level wildcard", "/+/", "/.*./"},
{"single level wildcard", "foo/+", "foo.*"},
{"single level wildcard", "foo/+/", "foo.*./"},
{"single level wildcard", "foo/+/bar", "foo.*.bar"},
{"single level wildcard", "foo/+/+", "foo.*.*"},
{"single level wildcard", "foo/+/+/", "foo.*.*./"},
{"single level wildcard", "foo/+/+/bar", "foo.*.*.bar"},
{"single level wildcard", "foo//+", "foo./.*"},
{"single level wildcard", "foo//+/", "foo./.*./"},
{"single level wildcard", "foo//+//", "foo./.*././"},
{"single level wildcard", "foo//+//bar", "foo./.*./.bar"},
{"single level wildcard", "foo///+///bar", "foo././.*././.bar"},
{"multi level wildcard", "#", ">"},
{"multi level wildcard", "/#", "/.>"},
{"multi level wildcard", "/foo/#", "/.foo.>"},
{"multi level wildcard", "foo/#", "foo.>"},
{"multi level wildcard", "foo//#", "foo./.>"},
{"multi level wildcard", "foo///#", "foo././.>"},
{"multi level wildcard", "foo/bar/#", "foo.bar.>"},
} {
t.Run(test.name, func(t *testing.T) {
res, err := mqttFilterToNATSSubject([]byte(test.mqttTopic))
if err != nil {
t.Fatalf("Error: %v", err)
}
if string(res) != test.natsSubject {
t.Fatalf("Expected subject %q got %q", test.natsSubject, res)
}
})
}
}
func TestMQTTParseSub(t *testing.T) {
for _, test := range []struct {
name string
proto []byte
b byte
pl int
err string
}{
{"reserved flag", nil, 3, 0, "wrong subscribe reserved flags"},
{"ensure packet loaded", []byte{1, 2}, mqttSubscribeFlags, 10, io.ErrUnexpectedEOF.Error()},
{"error reading packet id", []byte{1}, mqttSubscribeFlags, 1, "reading packet identifier"},
{"missing filters", []byte{0, 1}, mqttSubscribeFlags, 2, "subscribe protocol must contain at least 1 topic filter"},
{"error reading topic", []byte{0, 1, 0, 2, 'a'}, mqttSubscribeFlags, 5, "topic filter"},
{"empty topic", []byte{0, 1, 0, 0}, mqttSubscribeFlags, 4, errMQTTTopicFilterCannotBeEmpty.Error()},
{"invalid utf8 topic", []byte{0, 1, 0, 1, 241}, mqttSubscribeFlags, 5, "invalid utf8 for topic filter"},
{"missing qos", []byte{0, 1, 0, 1, 'a'}, mqttSubscribeFlags, 5, "QoS"},
{"invalid qos", []byte{0, 1, 0, 1, 'a', 3}, mqttSubscribeFlags, 6, "subscribe QoS value must be 0, 1 or 2"},
} {
t.Run(test.name, func(t *testing.T) {
r := &mqttReader{}
r.reset(test.proto)
mqtt := &mqtt{r: r}
c := &client{mqtt: mqtt}
if _, _, err := c.mqttParseSubsOrUnsubs(r, test.b, test.pl, true); err == nil || !strings.Contains(err.Error(), test.err) {
t.Fatalf("Expected error %q, got %v", test.err, err)
}
})
}
}
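// testMQTTSub sends a SUBSCRIBE packet for the given filters and verifies the
// SUBACK packet identifier and the granted QoS (or failure code) per filter.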
func testMQTTSub(t testing.TB, pi uint16, c net.Conn, r *mqttReader, filters []*mqttFilter, expected []byte) {
t.Helper()
w := &mqttWriter{}
pkLen := 2 // for pi
for i := 0; i < len(filters); i++ {
f := filters[i]
pkLen += 2 + len(f.filter) + 1
}
w.WriteByte(mqttPacketSub | mqttSubscribeFlags)
w.WriteVarInt(pkLen)
w.WriteUint16(pi)
for i := 0; i < len(filters); i++ {
f := filters[i]
w.WriteBytes([]byte(f.filter))
w.WriteByte(f.qos)
}
if _, err := testMQTTWrite(c, w.Bytes()); err != nil {
t.Fatalf("Error writing SUBSCRIBE protocol: %v", err)
}
b, pl := testMQTTReadPacket(t, r)
if pt := b & mqttPacketMask; pt != mqttPacketSubAck {
t.Fatalf("Expected SUBACK packet %x, got %x", mqttPacketSubAck, pt)
}
rpi, err := r.readUint16("packet identifier")
if err != nil || rpi != pi {
t.Fatalf("Error with packet identifier expected=%v got: %v err=%v", pi, rpi, err)
}
for i, rem := 0, pl-2; rem > 0; rem-- {
qos, err := r.readByte("filter qos")
if err != nil {
t.Fatal(err)
}
if qos != expected[i] {
t.Fatalf("For topic filter %q expected qos of %v, got %v",
filters[i].filter, expected[i], qos)
}
i++
}
}
func TestMQTTSubAck(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
subs := []*mqttFilter{
{filter: "foo", qos: 0},
{filter: "bar", qos: 1},
{filter: "baz", qos: 2}, // Since we don't support, we should receive a result of 1
{filter: "foo/#/bar", qos: 0}, // Invalid sub, so we should receive a result of mqttSubAckFailure
}
expected := []byte{
0,
1,
1,
mqttSubAckFailure,
}
testMQTTSub(t, 1, mc, r, subs, expected)
}
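// testMQTTFlush sends a PINGREQ and waits for the PINGRESP, which ensures the
// server has processed everything written on the connection so far.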
func testMQTTFlush(t testing.TB, c net.Conn, bw *bufio.Writer, r *mqttReader) {
t.Helper()
w := &mqttWriter{}
w.WriteByte(mqttPacketPing)
w.WriteByte(0)
if bw != nil {
bw.Write(w.Bytes())
bw.Flush()
} else {
c.Write(w.Bytes())
}
ab, l := testMQTTReadPacket(t, r)
if pt := ab & mqttPacketMask; pt != mqttPacketPingResp {
t.Fatalf("Expected ping response got %x", pt)
}
if l != 0 {
t.Fatalf("Expected PINGRESP length to be 0, got %v", l)
}
}
func testMQTTExpectNothing(t testing.TB, r *mqttReader) {
t.Helper()
var buf [128]byte
r.reader.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
if n, err := r.reader.Read(buf[:]); err == nil {
t.Fatalf("Expected nothing, got %v", buf[:n])
}
r.reader.SetReadDeadline(time.Time{})
}
func testMQTTCheckPubMsg(t testing.TB, c net.Conn, r *mqttReader, topic string, flags byte, payload []byte) {
t.Helper()
pflags, pi := testMQTTGetPubMsg(t, c, r, topic, payload)
if pflags != flags {
t.Fatalf("Expected flags to be %x, got %x", flags, pflags)
}
if pi > 0 {
testMQTTSendPubAck(t, c, pi)
}
}
func testMQTTCheckPubMsgNoAck(t testing.TB, c net.Conn, r *mqttReader, topic string, flags byte, payload []byte) uint16 {
t.Helper()
pflags, pi := testMQTTGetPubMsg(t, c, r, topic, payload)
if pflags != flags {
t.Fatalf("Expected flags to be %x, got %x", flags, pflags)
}
return pi
}
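// testMQTTGetPubMsg reads a PUBLISH packet, checks the topic and payload, and
// returns the publish flags and the packet identifier (non-zero only for QoS > 0).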
func testMQTTGetPubMsg(t testing.TB, c net.Conn, r *mqttReader, topic string, payload []byte) (byte, uint16) {
t.Helper()
b, pl := testMQTTReadPacket(t, r)
if pt := b & mqttPacketMask; pt != mqttPacketPub {
t.Fatalf("Expected PUBLISH packet %x, got %x", mqttPacketPub, pt)
}
pflags := b & mqttPacketFlagMask
qos := (pflags & mqttPubFlagQoS) >> 1
start := r.pos
ptopic, err := r.readString("topic name")
if err != nil {
t.Fatal(err)
}
if ptopic != topic {
t.Fatalf("Expected topic %q, got %q", topic, ptopic)
}
var pi uint16
if qos > 0 {
pi, err = r.readUint16("packet identifier")
if err != nil {
t.Fatal(err)
}
}
msgLen := pl - (r.pos - start)
if r.pos+msgLen > len(r.buf) {
t.Fatalf("computed message length goes beyond buffer: ml=%v pos=%v lenBuf=%v",
msgLen, r.pos, len(r.buf))
}
ppayload := r.buf[r.pos : r.pos+msgLen]
if !bytes.Equal(payload, ppayload) {
t.Fatalf("Expected payload %q, got %q", payload, ppayload)
}
r.pos += msgLen
return pflags, pi
}
func testMQTTSendPubAck(t testing.TB, c net.Conn, pi uint16) {
t.Helper()
w := &mqttWriter{}
w.WriteByte(mqttPacketPubAck)
w.WriteVarInt(2)
w.WriteUint16(pi)
if _, err := testMQTTWrite(c, w.Bytes()); err != nil {
t.Fatalf("Error writing PUBACK: %v", err)
}
}
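// testMQTTPublish writes a PUBLISH packet and, for QoS 1, waits for the
// matching PUBACK. QoS 2 is not supported by the server, so a disconnect is
// expected in that case.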
func testMQTTPublish(t testing.TB, c net.Conn, r *mqttReader, qos byte, dup, retain bool, topic string, pi uint16, payload []byte) {
t.Helper()
w := &mqttWriter{}
mqttWritePublish(w, qos, dup, retain, topic, pi, payload)
if _, err := testMQTTWrite(c, w.Bytes()); err != nil {
t.Fatalf("Error writing PUBLISH proto: %v", err)
}
if qos > 0 {
// Since we don't support QoS 2, we should get disconnected
if qos == 2 {
testMQTTExpectDisconnect(t, c)
return
}
b, _ := testMQTTReadPacket(t, r)
if pt := b & mqttPacketMask; pt != mqttPacketPubAck {
t.Fatalf("Expected PUBACK packet %x, got %x", mqttPacketPubAck, pt)
}
rpi, err := r.readUint16("packet identifier")
if err != nil || rpi != pi {
t.Fatalf("Error with packet identifier expected=%v got: %v err=%v", pi, rpi, err)
}
}
}
func TestMQTTParsePub(t *testing.T) {
for _, test := range []struct {
name string
flags byte
proto []byte
pl int
err string
}{
{"qos not supported", 0x4, nil, 0, "not supported"},
{"packet in buffer error", 0, nil, 10, io.ErrUnexpectedEOF.Error()},
{"error on topic", 0, []byte{0, 3, 'f', 'o'}, 4, "topic"},
{"empty topic", 0, []byte{0, 0}, 2, errMQTTTopicIsEmpty.Error()},
{"wildcards topic", 0, []byte{0, 1, '#'}, 3, "wildcards not allowed"},
{"error on packet identifier", mqttPubQos1, []byte{0, 3, 'f', 'o', 'o'}, 5, "packet identifier"},
{"invalid packet identifier", mqttPubQos1, []byte{0, 3, 'f', 'o', 'o', 0, 0}, 7, errMQTTPacketIdentifierIsZero.Error()},
} {
t.Run(test.name, func(t *testing.T) {
r := &mqttReader{}
r.reset(test.proto)
mqtt := &mqtt{r: r}
c := &client{mqtt: mqtt}
pp := &mqttPublish{flags: test.flags}
if err := c.mqttParsePub(r, test.pl, pp); err == nil || !strings.Contains(err.Error(), test.err) {
t.Fatalf("Expected error %q, got %v", test.err, err)
}
})
}
}
func TestMQTTParsePubAck(t *testing.T) {
for _, test := range []struct {
name string
proto []byte
pl int
err string
}{
{"packet in buffer error", nil, 10, io.ErrUnexpectedEOF.Error()},
{"error reading packet identifier", []byte{0}, 1, "packet identifier"},
{"invalid packet identifier", []byte{0, 0}, 2, errMQTTPacketIdentifierIsZero.Error()},
} {
t.Run(test.name, func(t *testing.T) {
r := &mqttReader{}
r.reset(test.proto)
if _, err := mqttParsePubAck(r, test.pl); err == nil || !strings.Contains(err.Error(), test.err) {
t.Fatalf("Expected error %q, got %v", test.err, err)
}
})
}
}
func TestMQTTPublish(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
mcp, mpr := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcp.Close()
testMQTTCheckConnAck(t, mpr, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, mcp, mpr, 0, false, false, "foo", 0, []byte("msg"))
testMQTTPublish(t, mcp, mpr, 1, false, false, "foo", 1, []byte("msg"))
testMQTTPublish(t, mcp, mpr, 2, false, false, "foo", 2, []byte("msg"))
}
func TestMQTTSub(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
mcp, mpr := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcp.Close()
testMQTTCheckConnAck(t, mpr, mqttConnAckRCConnectionAccepted, false)
for _, test := range []struct {
name string
mqttSubTopic string
natsPubSubject string
mqttPubTopic string
ok bool
}{
{"1 level match", "foo", "foo", "foo", true},
{"1 level no match", "foo", "bar", "bar", false},
{"2 levels match", "foo/bar", "foo.bar", "foo/bar", true},
{"2 levels no match", "foo/bar", "foo.baz", "foo/baz", false},
{"3 levels match", "/foo/bar", "/.foo.bar", "/foo/bar", true},
{"3 levels no match", "/foo/bar", "/.foo.baz", "/foo/baz", false},
{"single level wc", "foo/+", "foo.bar.baz", "foo/bar/baz", false},
{"single level wc", "foo/+", "foo.bar./", "foo/bar/", false},
{"single level wc", "foo/+", "foo.bar", "foo/bar", true},
{"single level wc", "foo/+", "foo./", "foo/", true},
{"single level wc", "foo/+", "foo", "foo", false},
{"single level wc", "foo/+", "/.foo", "/foo", false},
{"multiple level wc", "foo/#", "foo.bar.baz./", "foo/bar/baz/", true},
{"multiple level wc", "foo/#", "foo.bar.baz", "foo/bar/baz", true},
{"multiple level wc", "foo/#", "foo.bar./", "foo/bar/", true},
{"multiple level wc", "foo/#", "foo.bar", "foo/bar", true},
{"multiple level wc", "foo/#", "foo./", "foo/", true},
{"multiple level wc", "foo/#", "foo", "foo", true},
{"multiple level wc", "foo/#", "/.foo", "/foo", false},
} {
t.Run(test.name, func(t *testing.T) {
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: test.mqttSubTopic, qos: 0}}, []byte{0})
testMQTTFlush(t, mc, nil, r)
natsPub(t, nc, test.natsPubSubject, []byte("msg"))
if test.ok {
testMQTTCheckPubMsg(t, mc, r, test.mqttPubTopic, 0, []byte("msg"))
} else {
testMQTTExpectNothing(t, r)
}
testMQTTPublish(t, mcp, mpr, 0, false, false, test.mqttPubTopic, 0, []byte("msg"))
if test.ok {
testMQTTCheckPubMsg(t, mc, r, test.mqttPubTopic, 0, []byte("msg"))
} else {
testMQTTExpectNothing(t, r)
}
})
}
}
func TestMQTTSubQoS(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
mcp, mpr := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcp.Close()
testMQTTCheckConnAck(t, mpr, mqttConnAckRCConnectionAccepted, false)
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
mqttTopic := "foo/bar"
// Subscribe with QoS 1
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo/#", qos: 1}}, []byte{1})
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: mqttTopic, qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, r)
// Publish from NATS, which means QoS 0
natsPub(t, nc, "foo.bar", []byte("NATS"))
// Will receive as QoS 0
testMQTTCheckPubMsg(t, mc, r, mqttTopic, 0, []byte("NATS"))
testMQTTCheckPubMsg(t, mc, r, mqttTopic, 0, []byte("NATS"))
// Publish from MQTT with QoS 0
testMQTTPublish(t, mcp, mpr, 0, false, false, mqttTopic, 0, []byte("msg"))
// Will receive as QoS 0
testMQTTCheckPubMsg(t, mc, r, mqttTopic, 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, mqttTopic, 0, []byte("msg"))
// Publish from MQTT with QoS 1
testMQTTPublish(t, mcp, mpr, 1, false, false, mqttTopic, 1, []byte("msg"))
pflags1, pi1 := testMQTTGetPubMsg(t, mc, r, mqttTopic, []byte("msg"))
if pflags1 != 0x2 {
t.Fatalf("Expected flags to be 0x2, got %v", pflags1)
}
pflags2, pi2 := testMQTTGetPubMsg(t, mc, r, mqttTopic, []byte("msg"))
if pflags2 != 0x2 {
t.Fatalf("Expected flags to be 0x2, got %v", pflags2)
}
if pi1 == pi2 {
t.Fatalf("packet identifier for message 1: %v should be different from message 2", pi1)
}
testMQTTSendPubAck(t, mc, pi1)
testMQTTSendPubAck(t, mc, pi2)
}
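// getSubQoS returns the MQTT QoS of a subscription, or -1 if the
// subscription is not an MQTT subscription.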
func getSubQoS(sub *subscription) int {
if sub.mqtt != nil {
return int(sub.mqtt.qos)
}
return -1
}
func TestMQTTSubDups(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mcp, mpr := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcp.Close()
testMQTTCheckConnAck(t, mpr, mqttConnAckRCConnectionAccepted, false)
mc, r := testMQTTConnect(t, &mqttConnInfo{clientID: "sub", user: "sub", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
// Test with single SUBSCRIBE protocol but multiple filters
filters := []*mqttFilter{
{filter: "foo", qos: 1},
{filter: "foo", qos: 0},
}
testMQTTSub(t, 1, mc, r, filters, []byte{1, 0})
testMQTTFlush(t, mc, nil, r)
// And also with separate SUBSCRIBE protocols
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "bar", qos: 0}}, []byte{0})
// Ask for QoS 2 but server will downgrade to 1
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "bar", qos: 2}}, []byte{1})
testMQTTFlush(t, mc, nil, r)
// Publish and test msg received only once
testMQTTPublish(t, mcp, r, 0, false, false, "foo", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo", 0, []byte("msg"))
testMQTTExpectNothing(t, r)
testMQTTPublish(t, mcp, r, 0, false, false, "bar", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "bar", 0, []byte("msg"))
testMQTTExpectNothing(t, r)
// Check that the QoS for subscriptions has been updated to the latest received filter
var err error
subc := testMQTTGetClient(t, s, "sub")
subc.mu.Lock()
if subc.opts.Username != "sub" {
err = fmt.Errorf("wrong user name")
}
if err == nil {
if sub := subc.subs["foo"]; sub == nil || getSubQoS(sub) != 0 {
err = fmt.Errorf("subscription foo QoS should be 0, got %v", getSubQoS(sub))
}
}
if err == nil {
if sub := subc.subs["bar"]; sub == nil || getSubQoS(sub) != 1 {
err = fmt.Errorf("subscription bar QoS should be 1, got %v", getSubQoS(sub))
}
}
subc.mu.Unlock()
if err != nil {
t.Fatal(err)
}
// Now subscribe on "foo/#" which means that a PUBLISH on "foo" will be received
// by this subscription and also the one on "foo".
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo/#", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, r)
// Publish and test msg received twice
testMQTTPublish(t, mcp, r, 0, false, false, "foo", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo", 0, []byte("msg"))
checkWCSub := func(expectedQoS int) {
t.Helper()
subc.mu.Lock()
defer subc.mu.Unlock()
// When invoked with expectedQoS==1, we have the following subs:
// foo (QoS-0), bar (QoS-1), foo.> (QoS-1)
// which means (since QoS-1 have a JS consumer + sub for delivery
// and foo.> causes a "foo fwc") that we should have the following
// number of NATS subs: foo (1), bar (2), foo.> (2) and "foo fwc" (2),
// so total=7.
// When invoked with expectedQoS==0, it means that we have replaced the
// foo/# QoS-1 subscription with a QoS-0 one, so we should have 2 fewer NATS subs,
// so total=5
expected := 7
if expectedQoS == 0 {
expected = 5
}
if lenmap := len(subc.subs); lenmap != expected {
t.Fatalf("Subs map should have %v entries, got %v", expected, lenmap)
}
if sub, ok := subc.subs["foo.>"]; !ok {
t.Fatal("Expected sub foo.> to be present but was not")
} else if getSubQoS(sub) != expectedQoS {
t.Fatalf("Expected sub foo.> QoS to be %v, got %v", expectedQoS, getSubQoS(sub))
}
if sub, ok := subc.subs["foo fwc"]; !ok {
t.Fatal("Expected sub foo fwc to be present but was not")
} else if getSubQoS(sub) != expectedQoS {
t.Fatalf("Expected sub foo fwc QoS to be %v, got %v", expectedQoS, getSubQoS(sub))
}
// Make sure existing sub on "foo" qos was not changed.
if sub, ok := subc.subs["foo"]; !ok {
t.Fatal("Expected sub foo to be present but was not")
} else if getSubQoS(sub) != 0 {
t.Fatalf("Expected sub foo QoS to be 0, got %v", getSubQoS(sub))
}
}
checkWCSub(1)
// Sub again on same subject with lower QoS
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo/#", qos: 0}}, []byte{0})
testMQTTFlush(t, mc, nil, r)
// Publish and test msg received twice
testMQTTPublish(t, mcp, r, 0, false, false, "foo", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo", 0, []byte("msg"))
checkWCSub(0)
}
func TestMQTTSubWithSpaces(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mcp, mpr := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcp.Close()
testMQTTCheckConnAck(t, mpr, mqttConnAckRCConnectionAccepted, false)
mc, r := testMQTTConnect(t, &mqttConnInfo{user: "sub", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo bar", qos: 0}}, []byte{mqttSubAckFailure})
}
func TestMQTTSubCaseSensitive(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mcp, mpr := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcp.Close()
testMQTTCheckConnAck(t, mpr, mqttConnAckRCConnectionAccepted, false)
mc, r := testMQTTConnect(t, &mqttConnInfo{user: "sub", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "Foo/Bar", qos: 0}}, []byte{0})
testMQTTFlush(t, mc, nil, r)
testMQTTPublish(t, mcp, r, 0, false, false, "Foo/Bar", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "Foo/Bar", 0, []byte("msg"))
testMQTTPublish(t, mcp, r, 0, false, false, "foo/bar", 0, []byte("msg"))
testMQTTExpectNothing(t, r)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
natsPub(t, nc, "Foo.Bar", []byte("nats"))
testMQTTCheckPubMsg(t, mc, r, "Foo/Bar", 0, []byte("nats"))
natsPub(t, nc, "foo.bar", []byte("nats"))
testMQTTExpectNothing(t, r)
}
func TestMQTTPubSubMatrix(t *testing.T) {
for _, test := range []struct {
name string
natsPub bool
mqttPub bool
mqttPubQoS byte
natsSub bool
mqttSubQoS0 bool
mqttSubQoS1 bool
}{
{"NATS to MQTT sub QoS-0", true, false, 0, false, true, false},
{"NATS to MQTT sub QoS-1", true, false, 0, false, false, true},
{"NATS to MQTT sub QoS-0 and QoS-1", true, false, 0, false, true, true},
{"MQTT QoS-0 to NATS sub", false, true, 0, true, false, false},
{"MQTT QoS-0 to MQTT sub QoS-0", false, true, 0, false, true, false},
{"MQTT QoS-0 to MQTT sub QoS-1", false, true, 0, false, false, true},
{"MQTT QoS-0 to NATS sub and MQTT sub QoS-0", false, true, 0, true, true, false},
{"MQTT QoS-0 to NATS sub and MQTT sub QoS-1", false, true, 0, true, false, true},
{"MQTT QoS-0 to all subs", false, true, 0, true, true, true},
{"MQTT QoS-1 to NATS sub", false, true, 1, true, false, false},
{"MQTT QoS-1 to MQTT sub QoS-0", false, true, 1, false, true, false},
{"MQTT QoS-1 to MQTT sub QoS-1", false, true, 1, false, false, true},
{"MQTT QoS-1 to NATS sub and MQTT sub QoS-0", false, true, 1, true, true, false},
{"MQTT QoS-1 to NATS sub and MQTT sub QoS-1", false, true, 1, true, false, true},
{"MQTT QoS-1 to all subs", false, true, 1, true, true, true},
} {
t.Run(test.name, func(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
mc1, r1 := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc1.Close()
testMQTTCheckConnAck(t, r1, mqttConnAckRCConnectionAccepted, false)
mc2, r2 := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, false)
// First setup subscriptions based on test options.
var ns *nats.Subscription
if test.natsSub {
ns = natsSubSync(t, nc, "foo")
}
if test.mqttSubQoS0 {
testMQTTSub(t, 1, mc1, r1, []*mqttFilter{{filter: "foo", qos: 0}}, []byte{0})
testMQTTFlush(t, mc1, nil, r1)
}
if test.mqttSubQoS1 {
testMQTTSub(t, 1, mc2, r2, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, mc2, nil, r2)
}
// Just as a barrier
natsFlush(t, nc)
// Now publish
if test.natsPub {
natsPubReq(t, nc, "foo", "", []byte("msg"))
} else {
testMQTTPublish(t, mc, r, test.mqttPubQoS, false, false, "foo", 1, []byte("msg"))
}
// Check message received
if test.natsSub {
natsNexMsg(t, ns, time.Second)
// Make sure no other is received
if msg, err := ns.NextMsg(50 * time.Millisecond); err == nil {
t.Fatalf("Should not have gotten a second message, got %v", msg)
}
}
if test.mqttSubQoS0 {
testMQTTCheckPubMsg(t, mc1, r1, "foo", 0, []byte("msg"))
testMQTTExpectNothing(t, r1)
}
if test.mqttSubQoS1 {
var expectedFlag byte
if test.mqttPubQoS > 0 {
expectedFlag = test.mqttPubQoS << 1
}
testMQTTCheckPubMsg(t, mc2, r2, "foo", expectedFlag, []byte("msg"))
testMQTTExpectNothing(t, r2)
}
})
}
}
func TestMQTTPreventSubWithMQTTSubPrefix(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, r,
[]*mqttFilter{{filter: strings.ReplaceAll(mqttSubPrefix, ".", "/") + "foo/bar", qos: 1}},
[]byte{mqttSubAckFailure})
}
func TestMQTTSubWithNATSStream(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo/bar", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, r)
mcp, rp := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTFlush(t, mcp, nil, rp)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
sc := &StreamConfig{
Name: "test",
Storage: FileStorage,
Retention: InterestPolicy,
Subjects: []string{"foo.>"},
}
mset, err := s.GlobalAccount().addStream(sc)
if err != nil {
t.Fatalf("Unable to create stream: %v", err)
}
sub := natsSubSync(t, nc, "bar")
cc := &ConsumerConfig{
Durable: "dur",
AckPolicy: AckExplicit,
DeliverSubject: "bar",
}
if _, err := mset.addConsumer(cc); err != nil {
t.Fatalf("Unable to add consumer: %v", err)
}
// Now send message from NATS
resp, err := nc.Request("foo.bar", []byte("nats"), time.Second)
if err != nil {
t.Fatalf("Error publishing: %v", err)
}
ar := &ApiResponse{}
if err := json.Unmarshal(resp.Data, ar); err != nil || ar.Error != nil {
t.Fatalf("Unexpected response: err=%v resp=%+v", err, ar.Error)
}
// Check that message is received by both
checkRecv := func(content string, flags byte) {
t.Helper()
if msg := natsNexMsg(t, sub, time.Second); string(msg.Data) != content {
t.Fatalf("Expected %q, got %q", content, msg.Data)
}
testMQTTCheckPubMsg(t, mc, r, "foo/bar", flags, []byte(content))
}
checkRecv("nats", 0)
// Send from MQTT as a QoS0
testMQTTPublish(t, mcp, rp, 0, false, false, "foo/bar", 0, []byte("qos0"))
checkRecv("qos0", 0)
// Send from MQTT as a QoS1
testMQTTPublish(t, mcp, rp, 1, false, false, "foo/bar", 1, []byte("qos1"))
checkRecv("qos1", mqttPubQos1)
}
func TestMQTTTrackPendingOverrun(t *testing.T) {
sess := &mqttSession{pending: make(map[uint16]*mqttPending)}
sub := &subscription{mqtt: &mqttSub{qos: 1}}
sess.ppi = 0xFFFF
pi, _ := sess.trackPending(1, _EMPTY_, sub)
if pi != 1 {
t.Fatalf("Expected 1, got %v", pi)
}
p := &mqttPending{}
for i := 1; i <= 0xFFFF; i++ {
sess.pending[uint16(i)] = p
}
pi, _ = sess.trackPending(1, _EMPTY_, sub)
if pi != 0 {
t.Fatalf("Expected 0, got %v", pi)
}
delete(sess.pending, 1234)
pi, _ = sess.trackPending(1, _EMPTY_, sub)
if pi != 1234 {
t.Fatalf("Expected 1234, got %v", pi)
}
}
func TestMQTTSubRestart(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
mc, r := testMQTTConnect(t, &mqttConnInfo{clientID: "sub", cleanSess: false}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
// Start an MQTT subscription QoS=1 on "foo"
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, r)
// Now start a NATS subscription on ">" (anything that would match the JS consumer delivery subject)
natsSubSync(t, nc, ">")
natsFlush(t, nc)
// Restart the MQTT client
testMQTTDisconnect(t, mc, nil)
mc, r = testMQTTConnect(t, &mqttConnInfo{clientID: "sub", cleanSess: false}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
// Restart an MQTT subscription QoS=1 on "foo"
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, r)
pc, pr := testMQTTConnect(t, &mqttConnInfo{clientID: "pub", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer pc.Close()
testMQTTCheckConnAck(t, pr, mqttConnAckRCConnectionAccepted, false)
// Publish a message QoS1
testMQTTPublish(t, pc, pr, 1, false, false, "foo", 1, []byte("msg1"))
// Make sure we receive it
testMQTTCheckPubMsg(t, mc, r, "foo", mqttPubQos1, []byte("msg1"))
// Now "restart" the subscription but as a Qos0
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo", qos: 0}}, []byte{0})
testMQTTFlush(t, mc, nil, r)
// Publish a message QoS1
testMQTTPublish(t, pc, pr, 1, false, false, "foo", 1, []byte("msg2"))
// Make sure we receive it, but as a QoS0
testMQTTCheckPubMsg(t, mc, r, "foo", 0, []byte("msg2"))
}
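// testMQTTGetClusterTemplaceNoLeaf returns the cluster template with the
// leafnode placeholder removed.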
func testMQTTGetClusterTemplaceNoLeaf() string {
return strings.Replace(jsClusterTemplWithLeafAndMQTT, "{{leaf}}", "", 1)
}
func TestMQTTSubPropagation(t *testing.T) {
cl := createJetStreamClusterWithTemplate(t, testMQTTGetClusterTemplaceNoLeaf(), "MQTT", 2)
defer cl.shutdown()
o := cl.opts[0]
s2 := cl.servers[1]
nc := natsConnect(t, s2.ClientURL())
defer nc.Close()
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo/#", qos: 0}}, []byte{0})
testMQTTFlush(t, mc, nil, r)
// Because in MQTT foo/# means foo.> but also foo, check that this is propagated
checkSubInterest(t, s2, globalAccountName, "foo", time.Second)
// Publish on foo.bar, foo./ and foo and we should receive them
natsPub(t, nc, "foo.bar", []byte("hello"))
testMQTTCheckPubMsg(t, mc, r, "foo/bar", 0, []byte("hello"))
natsPub(t, nc, "foo./", []byte("from"))
testMQTTCheckPubMsg(t, mc, r, "foo/", 0, []byte("from"))
natsPub(t, nc, "foo", []byte("NATS"))
testMQTTCheckPubMsg(t, mc, r, "foo", 0, []byte("NATS"))
}
func TestMQTTCluster(t *testing.T) {
cl := createJetStreamClusterWithTemplate(t, testMQTTGetClusterTemplaceNoLeaf(), "MQTT", 2)
defer cl.shutdown()
for _, topTest := range []struct {
name string
restart bool
}{
{"first_start", true},
{"restart", false},
} {
t.Run(topTest.name, func(t *testing.T) {
for _, test := range []struct {
name string
subQos byte
}{
{"qos_0", 0},
{"qos_1", 1},
} {
t.Run(test.name, func(t *testing.T) {
clientID := nuid.Next()
o := cl.opts[0]
mc, r := testMQTTConnectRetry(t, &mqttConnInfo{clientID: clientID, cleanSess: false}, o.MQTT.Host, o.MQTT.Port, 5)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo/#", qos: test.subQos}}, []byte{test.subQos})
testMQTTFlush(t, mc, nil, r)
check := func(mc net.Conn, r *mqttReader, o *Options, s *Server) {
t.Helper()
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
natsPub(t, nc, "foo.bar", []byte("fromNats"))
testMQTTCheckPubMsg(t, mc, r, "foo/bar", 0, []byte("fromNats"))
mpc, pr := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mpc.Close()
testMQTTCheckConnAck(t, pr, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, mpc, pr, 0, false, false, "foo/baz", 0, []byte("mqtt_qos0"))
testMQTTCheckPubMsg(t, mc, r, "foo/baz", 0, []byte("mqtt_qos0"))
testMQTTPublish(t, mpc, pr, 1, false, false, "foo/bat", 1, []byte("mqtt_qos1"))
expectedQoS := byte(0)
if test.subQos == 1 {
expectedQoS = mqttPubQos1
}
testMQTTCheckPubMsg(t, mc, r, "foo/bat", expectedQoS, []byte("mqtt_qos1"))
testMQTTDisconnect(t, mpc, nil)
}
check(mc, r, cl.opts[0], cl.servers[0])
check(mc, r, cl.opts[1], cl.servers[1])
// Start the same subscription from the other server. It should disconnect
// the one connected to the first server.
o = cl.opts[1]
mc2, r2 := testMQTTConnect(t, &mqttConnInfo{clientID: clientID, cleanSess: false}, o.MQTT.Host, o.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, true)
// Expect first connection to be closed.
testMQTTExpectDisconnect(t, mc)
// Now re-run the checks
check(mc2, r2, cl.opts[0], cl.servers[0])
check(mc2, r2, cl.opts[1], cl.servers[1])
// Disconnect our sub and restart with clean session then disconnect again to clear the state.
testMQTTDisconnect(t, mc2, nil)
mc2, r2 = testMQTTConnect(t, &mqttConnInfo{clientID: clientID, cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, false)
testMQTTFlush(t, mc2, nil, r2)
testMQTTDisconnect(t, mc2, nil)
// Remove the session from the flappers so we can restart the test
// without failure and without having to wait for 1sec before being able to reconnect.
s := cl.servers[0]
sm := &s.mqtt.sessmgr
sm.mu.Lock()
asm := sm.sessions[globalAccountName]
sm.mu.Unlock()
if asm != nil {
asm.mu.Lock()
delete(asm.flappers, clientID)
asm.mu.Unlock()
}
})
}
if topTest.restart {
cl.stopAll()
cl.restartAll()
streams := []string{mqttStreamName, mqttRetainedMsgsStreamName, mqttSessStreamName}
for _, sn := range streams {
cl.waitOnStreamLeader(globalAccountName, sn)
}
cl.waitOnConsumerLeader(globalAccountName, mqttRetainedMsgsStreamName, "$MQTT_rmsgs_esFhDys3")
cl.waitOnConsumerLeader(globalAccountName, mqttRetainedMsgsStreamName, "$MQTT_rmsgs_z3WIzPtj")
}
})
}
}
func TestMQTTClusterRetainedMsg(t *testing.T) {
cl := createJetStreamClusterWithTemplate(t, testMQTTGetClusterTemplaceNoLeaf(), "MQTT", 2)
defer cl.shutdown()
srv1Opts := cl.opts[0]
srv2Opts := cl.opts[1]
// Connect subscription on server 1.
mc, rc := testMQTTConnectRetry(t, &mqttConnInfo{clientID: "sub", cleanSess: false}, srv1Opts.MQTT.Host, srv1Opts.MQTT.Port, 5)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "foo/#", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, rc)
// Create a publisher from server 2.
mp, rp := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, srv2Opts.MQTT.Host, srv2Opts.MQTT.Port)
defer mp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
// Send retained message.
testMQTTPublish(t, mp, rp, 1, false, true, "foo/bar", 1, []byte("retained"))
// Check it is received.
testMQTTCheckPubMsg(t, mc, rc, "foo/bar", mqttPubQos1, []byte("retained"))
// Start a new subscription on server 1 and make sure we receive the retained message
mc2, rc2 := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, srv1Opts.MQTT.Host, srv1Opts.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, rc2, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc2, rc2, []*mqttFilter{{filter: "foo/#", qos: 1}}, []byte{1})
testMQTTCheckPubMsg(t, mc2, rc2, "foo/bar", mqttPubQos1|mqttPubFlagRetain, []byte("retained"))
testMQTTDisconnect(t, mc2, nil)
// Send an empty retained message which should remove it from storage, but still be delivered.
testMQTTPublish(t, mp, rp, 1, false, true, "foo/bar", 1, []byte(""))
testMQTTCheckPubMsg(t, mc, rc, "foo/bar", mqttPubQos1, []byte(""))
// Now shutdown the consumer connection
testMQTTDisconnect(t, mc, nil)
mc.Close()
// Reconnect to server where the retained message was published (server 2)
mc, rc = testMQTTConnect(t, &mqttConnInfo{clientID: "sub", cleanSess: false}, srv2Opts.MQTT.Host, srv2Opts.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, true)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "foo/#", qos: 1}}, []byte{1})
// The retained message should not be delivered.
testMQTTExpectNothing(t, rc)
// Now disconnect and reconnect back to first server
testMQTTDisconnect(t, mc, nil)
mc.Close()
// Now reconnect to server 1, which is not where the messages were published, and check
// that we don't receive the message.
mc, rc = testMQTTConnect(t, &mqttConnInfo{clientID: "sub", cleanSess: false}, srv1Opts.MQTT.Host, srv1Opts.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, true)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "foo/#", qos: 1}}, []byte{1})
testMQTTExpectNothing(t, rc)
testMQTTDisconnect(t, mc, nil)
mc.Close()
// Will now test network deletes
// Create a subscription on server 1 and server 2
mc, rc = testMQTTConnect(t, &mqttConnInfo{clientID: "sub_one", cleanSess: false}, srv1Opts.MQTT.Host, srv1Opts.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, rc)
mc2, rc2 = testMQTTConnect(t, &mqttConnInfo{clientID: "sub_two", cleanSess: false}, srv2Opts.MQTT.Host, srv2Opts.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, rc2, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc2, rc2, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
testMQTTFlush(t, mc2, nil, rc2)
// Publish 1 retained message (producer is connected to server 2)
testMQTTPublish(t, mp, rp, 1, false, true, "bar", 1, []byte("msg1"))
// Make sure messages are received by both
testMQTTCheckPubMsg(t, mc, rc, "bar", mqttPubQos1, []byte("msg1"))
testMQTTCheckPubMsg(t, mc2, rc2, "bar", mqttPubQos1, []byte("msg1"))
// Now send an empty retained message that should delete it. For the one on server 1,
// this will be a network delete.
testMQTTPublish(t, mp, rp, 1, false, true, "bar", 1, []byte(""))
testMQTTCheckPubMsg(t, mc, rc, "bar", mqttPubQos1, []byte(""))
testMQTTCheckPubMsg(t, mc2, rc2, "bar", mqttPubQos1, []byte(""))
// Now send a new retained message
testMQTTPublish(t, mp, rp, 1, false, true, "bar", 1, []byte("msg2"))
// Again, verify that they all receive it.
testMQTTCheckPubMsg(t, mc, rc, "bar", mqttPubQos1, []byte("msg2"))
testMQTTCheckPubMsg(t, mc2, rc2, "bar", mqttPubQos1, []byte("msg2"))
// But now, restart the consumer that was connected to the server that processed the
// original network delete.
testMQTTDisconnect(t, mc, nil)
mc.Close()
mc, rc = testMQTTConnect(t, &mqttConnInfo{clientID: "sub_one", cleanSess: false}, srv1Opts.MQTT.Host, srv1Opts.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, true)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
// Expect the message to be delivered as retained
testMQTTCheckPubMsg(t, mc, rc, "bar", mqttPubQos1|mqttPubFlagRetain, []byte("msg2"))
}
func TestMQTTRetainedMsgNetworkUpdates(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mc, rc := testMQTTConnect(t, &mqttConnInfo{clientID: "sub", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
c := testMQTTGetClient(t, s, "sub")
asm := c.mqtt.asm
// For this test, we are going to simulate updates arriving in a
// mixed order and verify that we have the expected outcome.
check := func(t *testing.T, subject string, present bool, current, floor uint64) {
t.Helper()
asm.mu.RLock()
defer asm.mu.RUnlock()
erm, ok := asm.retmsgs[subject]
if present && !ok {
t.Fatalf("Subject %q not present", subject)
} else if !present && ok {
t.Fatalf("Subject %q should not be present", subject)
} else if !present {
return
}
if floor != erm.floor {
t.Fatalf("Expected floor to be %v, got %v", floor, erm.floor)
}
if erm.sseq != current {
t.Fatalf("Expected current sequence to be %v, got %v", current, erm.sseq)
}
}
type action struct {
add bool
seq uint64
}
for _, test := range []struct {
subject string
order []action
seq uint64
floor uint64
}{
{"foo.1", []action{{true, 1}, {true, 2}, {true, 3}}, 3, 0},
{"foo.2", []action{{true, 3}, {true, 1}, {true, 2}}, 3, 0},
{"foo.3", []action{{true, 1}, {false, 1}, {true, 2}}, 2, 0},
{"foo.4", []action{{false, 2}, {true, 1}, {true, 3}, {true, 2}}, 3, 0},
{"foo.5", []action{{false, 2}, {true, 1}, {true, 2}}, 0, 2},
{"foo.6", []action{{true, 1}, {true, 2}, {false, 2}}, 0, 2},
} {
t.Run(test.subject, func(t *testing.T) {
for _, a := range test.order {
if a.add {
rm := &mqttRetainedMsg{sseq: a.seq}
asm.handleRetainedMsg(test.subject, rm)
} else {
asm.handleRetainedMsgDel(test.subject, a.seq)
}
}
check(t, test.subject, true, test.seq, test.floor)
})
}
for _, subject := range []string{"foo.5", "foo.6"} {
t.Run("clear_"+subject, func(t *testing.T) {
// Now add a new message, which should clear the floor.
rm := &mqttRetainedMsg{sseq: 3}
asm.handleRetainedMsg(subject, rm)
check(t, subject, true, 3, 0)
// Now do a non-network delete and make sure it is gone.
asm.handleRetainedMsgDel(subject, 0)
check(t, subject, false, 0, 0)
})
}
}
func TestMQTTClusterReplicasCount(t *testing.T) {
for _, test := range []struct {
size int
replicas int
}{
{1, 1},
{2, 2},
{3, 3},
{5, 3},
} {
t.Run(fmt.Sprintf("size %v", test.size), func(t *testing.T) {
var s *Server
var o *Options
if test.size == 1 {
o = testMQTTDefaultOptions()
s = testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
} else {
cl := createJetStreamClusterWithTemplate(t, testMQTTGetClusterTemplaceNoLeaf(), "MQTT", test.size)
defer cl.shutdown()
o = cl.opts[0]
s = cl.randomServer()
}
mc, rc := testMQTTConnect(t, &mqttConnInfo{clientID: "sub", cleanSess: false}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "foo/#", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, rc)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
// Check the replicas of all MQTT streams
js, err := nc.JetStream()
if err != nil {
t.Fatalf("Error getting js: %v", err)
}
for _, sname := range []string{
mqttStreamName,
mqttRetainedMsgsStreamName,
mqttSessStreamName,
} {
t.Run(sname, func(t *testing.T) {
si, err := js.StreamInfo(sname)
if err != nil {
t.Fatalf("Error geting stream info: %v", err)
}
if si.Config.Replicas != test.replicas {
t.Fatalf("Expected %v replicas, got %v", test.replicas, si.Config.Replicas)
}
})
}
})
}
}
func TestMQTTClusterCanCreateSessionWithOnServerDown(t *testing.T) {
cl := createJetStreamClusterWithTemplate(t, testMQTTGetClusterTemplaceNoLeaf(), "MQTT", 3)
defer cl.shutdown()
o := cl.opts[0]
mc, rc := testMQTTConnectRetry(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port, 5)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
mc.Close()
// Shut down one of the servers.
sd := cl.servers[1].StoreDir()
defer os.RemoveAll(strings.TrimSuffix(sd, JetStreamStoreDir))
cl.servers[1].Shutdown()
// Make sure there is a meta leader
cl.waitOnPeerCount(2)
cl.waitOnLeader()
// Now try to create a new session. Since we use a single stream now for all sessions,
// this should succeed.
o = cl.opts[2]
// We may still get failures because some JS APIs may time out while things
// settle, so try again a certain number of times.
mc, rc = testMQTTConnectRetry(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port, 5)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
}
func TestMQTTClusterPlacement(t *testing.T) {
sc := createJetStreamSuperCluster(t, 3, 2)
defer sc.shutdown()
c := sc.randomCluster()
lnc := c.createLeafNodesWithTemplateAndStartPort(jsClusterTemplWithLeafAndMQTT, "SPOKE", 3, 22111)
defer lnc.shutdown()
sc.waitOnPeerCount(9)
sc.waitOnLeader()
for i := 0; i < 10; i++ {
mc, rc := testMQTTConnectRetry(t, &mqttConnInfo{cleanSess: true}, lnc.opts[i%3].MQTT.Host, lnc.opts[i%3].MQTT.Port, 5)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
}
// Now check that MQTT assets have been created on the LEAF node side, not the Hub.
nc := natsConnect(t, lnc.servers[0].ClientURL())
defer nc.Close()
js, err := nc.JetStream()
if err != nil {
t.Fatalf("Unable to get JetStream: %v", err)
}
count := 0
for si := range js.StreamsInfo() {
if si.Cluster == nil || si.Cluster.Name != "SPOKE" {
t.Fatalf("Expected asset %q to be placed on spoke cluster, was placed on %+v", si.Config.Name, si.Cluster)
}
for _, repl := range si.Cluster.Replicas {
if !strings.HasPrefix(repl.Name, "SPOKE-") {
t.Fatalf("Replica on the wrong cluster: %+v", repl)
}
}
if si.State.Consumers > 0 {
for ci := range js.ConsumersInfo(si.Config.Name) {
if ci.Cluster == nil || ci.Cluster.Name != "SPOKE" {
t.Fatalf("Expected asset %q to be placed on spoke cluster, was placed on %+v", ci.Name, si.Cluster)
}
for _, repl := range ci.Cluster.Replicas {
if !strings.HasPrefix(repl.Name, "SPOKE-") {
t.Fatalf("Replica on the wrong cluster: %+v", repl)
}
}
}
}
count++
}
if count == 0 {
t.Fatal("No stream found!")
}
}
func TestMQTTLeafnodeWithoutJSToClusterWithJS(t *testing.T) {
getClusterOpts := func(name string, i int) *Options {
o := testMQTTDefaultOptions()
o.ServerName = name
o.Cluster.Name = "hub"
o.Cluster.Host = "127.0.0.1"
o.Cluster.Port = 2790 + i
o.Routes = RoutesFromStr("nats://127.0.0.1:2791,nats://127.0.0.1:2792,nats://127.0.0.1:2793")
o.LeafNode.Host = "127.0.0.1"
o.LeafNode.Port = -1
return o
}
o1 := getClusterOpts("S1", 1)
s1 := testMQTTRunServer(t, o1)
defer testMQTTShutdownServer(s1)
o2 := getClusterOpts("S2", 2)
s2 := testMQTTRunServer(t, o2)
defer testMQTTShutdownServer(s2)
o3 := getClusterOpts("S3", 3)
s3 := testMQTTRunServer(t, o3)
defer testMQTTShutdownServer(s3)
cluster := []*Server{s1, s2, s3}
checkClusterFormed(t, cluster...)
checkFor(t, 10*time.Second, 50*time.Millisecond, func() error {
for _, s := range cluster {
if s.JetStreamIsLeader() {
return nil
}
}
return fmt.Errorf("no leader yet")
})
// Now define a leafnode that has mqtt enabled, but no JS. This should still work.
lno := testMQTTDefaultOptions()
// Make sure jetstream is not explicitly defined here.
lno.JetStream = false
// Use RoutesFromStr() to make an array of urls
urls := RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d,nats://127.0.0.1:%d,nats://127.0.0.1:%d",
o1.LeafNode.Port, o2.LeafNode.Port, o3.LeafNode.Port))
lno.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: urls}}
ln := RunServer(lno)
defer ln.Shutdown()
// Now connect to leafnode and subscribe
mc, rc := testMQTTConnect(t, &mqttConnInfo{clientID: "sub", cleanSess: true}, lno.MQTT.Host, lno.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, rc)
connectAndPublish := func(o *Options) {
mp, rp := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, mp, rp, 1, false, false, "foo", 1, []byte("msg"))
}
// Connect a publisher from leafnode and publish, verify message is received.
connectAndPublish(lno)
testMQTTCheckPubMsg(t, mc, rc, "foo", mqttPubQos1, []byte("msg"))
// Connect from one server in the cluster check it works from there too.
connectAndPublish(o3)
testMQTTCheckPubMsg(t, mc, rc, "foo", mqttPubQos1, []byte("msg"))
// Connect from a server in the hub and subscribe
mc2, rc2 := testMQTTConnect(t, &mqttConnInfo{clientID: "sub2", cleanSess: true}, o2.MQTT.Host, o2.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, rc2, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc2, rc2, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, mc2, nil, rc2)
// Connect a publisher from leafnode and publish, verify message is received.
connectAndPublish(lno)
testMQTTCheckPubMsg(t, mc2, rc2, "foo", mqttPubQos1, []byte("msg"))
// Connect from one server in the cluster check it works from there too.
connectAndPublish(o1)
testMQTTCheckPubMsg(t, mc2, rc2, "foo", mqttPubQos1, []byte("msg"))
}
func TestMQTTImportExport(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
server_name: "mqtt"
jetstream {
store_dir=org_dir
}
accounts {
org {
jetstream: enabled
users: [{user: org, password: pwd}]
imports = [{stream: {account: "device", subject: "foo"}, prefix: "org"}]
}
device {
users: [{user: device, password: pwd}]
exports = [{stream: "foo"}]
}
}
mqtt {
listen: "127.0.0.1:-1"
}
no_auth_user: device
`))
defer os.Remove(conf)
defer os.RemoveAll("org_dir")
s, o := RunServerWithConfig(conf)
defer s.Shutdown()
mc1, rc1 := testMQTTConnect(t, &mqttConnInfo{clientID: "sub1", user: "org", pass: "pwd", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc1.Close()
testMQTTCheckConnAck(t, rc1, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc1, rc1, []*mqttFilter{{filter: "org/foo", qos: 0}}, []byte{0})
testMQTTFlush(t, mc1, nil, rc1)
mc2, rc2 := testMQTTConnect(t, &mqttConnInfo{clientID: "sub2", user: "org", pass: "pwd", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, rc2, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc2, rc2, []*mqttFilter{{filter: "org/foo", qos: 1}}, []byte{1})
testMQTTFlush(t, mc2, nil, rc2)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
natsPub(t, nc, "foo", []byte("msg"))
// Verify message is received on receiver side.
testMQTTCheckPubMsg(t, mc1, rc1, "org/foo", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc2, rc2, "org/foo", 0, []byte("msg"))
}
func TestMQTTSessionMovingDomains(t *testing.T) {
tmpl := strings.Replace(jsClusterTemplWithLeafAndMQTT, "{{leaf}}", `leafnodes { listen: 127.0.0.1:-1 }`, 1)
tmpl = strings.Replace(tmpl, "store_dir:", "domain: HUB, store_dir:", 1)
c := createJetStreamCluster(t, tmpl, "HUB", _EMPTY_, 3, 22020, true)
defer c.shutdown()
c.waitOnLeader()
tmpl = strings.Replace(jsClusterTemplWithLeafAndMQTT, "store_dir:", "domain: SPOKE, store_dir:", 1)
lnc := c.createLeafNodesWithTemplateAndStartPort(tmpl, "SPOKE", 3, 22111)
defer lnc.shutdown()
lnc.waitOnPeerCount(3)
connectSubAndDisconnect := func(host string, port int, present bool) {
t.Helper()
mc, rc := testMQTTConnectRetry(t, &mqttConnInfo{clientID: "sub", cleanSess: false}, host, port, 5)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, present)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, rc)
testMQTTDisconnect(t, mc, nil)
}
// Create a session on the HUB. Make sure we don't use "clean" session so that
// it is not removed when the client connection closes.
for i := 0; i < 7; i++ {
var present bool
if i > 0 {
present = true
}
connectSubAndDisconnect(c.opts[0].MQTT.Host, c.opts[0].MQTT.Port, present)
}
// Now move to the SPOKE cluster, this is a brand new session there, so should not be present.
connectSubAndDisconnect(lnc.opts[1].MQTT.Host, lnc.opts[1].MQTT.Port, false)
// Move back to HUB cluster. Make it interesting by connecting to a different
// server in that cluster. This should work, and present flag should be true.
connectSubAndDisconnect(c.opts[2].MQTT.Host, c.opts[2].MQTT.Port, true)
}
func TestMQTTParseUnsub(t *testing.T) {
for _, test := range []struct {
name string
proto []byte
b byte
pl int
err string
}{
{"reserved flag", nil, 3, 0, "wrong unsubscribe reserved flags"},
{"ensure packet loaded", []byte{1, 2}, mqttUnsubscribeFlags, 10, io.ErrUnexpectedEOF.Error()},
{"error reading packet id", []byte{1}, mqttUnsubscribeFlags, 1, "reading packet identifier"},
{"missing filters", []byte{0, 1}, mqttUnsubscribeFlags, 2, "subscribe protocol must contain at least 1 topic filter"},
{"error reading topic", []byte{0, 1, 0, 2, 'a'}, mqttUnsubscribeFlags, 5, "topic filter"},
{"empty topic", []byte{0, 1, 0, 0}, mqttUnsubscribeFlags, 4, errMQTTTopicFilterCannotBeEmpty.Error()},
{"invalid utf8 topic", []byte{0, 1, 0, 1, 241}, mqttUnsubscribeFlags, 5, "invalid utf8 for topic filter"},
} {
t.Run(test.name, func(t *testing.T) {
r := &mqttReader{}
r.reset(test.proto)
mqtt := &mqtt{r: r}
c := &client{mqtt: mqtt}
if _, _, err := c.mqttParseSubsOrUnsubs(r, test.b, test.pl, false); err == nil || !strings.Contains(err.Error(), test.err) {
t.Fatalf("Expected error %q, got %v", test.err, err)
}
})
}
}
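// testMQTTUnsub sends an UNSUBSCRIBE for the given filters and expects an
// UNSUBACK with the same packet identifier.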
func testMQTTUnsub(t *testing.T, pi uint16, c net.Conn, r *mqttReader, filters []*mqttFilter) {
t.Helper()
w := &mqttWriter{}
pkLen := 2 // for pi
for i := 0; i < len(filters); i++ {
f := filters[i]
pkLen += 2 + len(f.filter)
}
w.WriteByte(mqttPacketUnsub | mqttUnsubscribeFlags)
w.WriteVarInt(pkLen)
w.WriteUint16(pi)
for i := 0; i < len(filters); i++ {
f := filters[i]
w.WriteBytes([]byte(f.filter))
}
if _, err := testMQTTWrite(c, w.Bytes()); err != nil {
t.Fatalf("Error writing UNSUBSCRIBE protocol: %v", err)
}
b, _ := testMQTTReadPacket(t, r)
if pt := b & mqttPacketMask; pt != mqttPacketUnsubAck {
t.Fatalf("Expected UNSUBACK packet %x, got %x", mqttPacketUnsubAck, pt)
}
rpi, err := r.readUint16("packet identifier")
if err != nil || rpi != pi {
t.Fatalf("Error with packet identifier expected=%v got: %v err=%v", pi, rpi, err)
}
}
func TestMQTTUnsub(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
mcp, mpr := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcp.Close()
testMQTTCheckConnAck(t, mpr, mqttConnAckRCConnectionAccepted, false)
mc, r := testMQTTConnect(t, &mqttConnInfo{user: "sub", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "foo", qos: 0}}, []byte{0})
testMQTTFlush(t, mc, nil, r)
// Publish and test msg received
testMQTTPublish(t, mcp, r, 0, false, false, "foo", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo", 0, []byte("msg"))
// Unsubscribe
testMQTTUnsub(t, 1, mc, r, []*mqttFilter{{filter: "foo"}})
// Publish and test msg not received
testMQTTPublish(t, mcp, r, 0, false, false, "foo", 0, []byte("msg"))
testMQTTExpectNothing(t, r)
// Use of wildcards subs
filters := []*mqttFilter{
{filter: "foo/bar", qos: 0},
{filter: "foo/#", qos: 0},
}
testMQTTSub(t, 1, mc, r, filters, []byte{0, 0})
testMQTTFlush(t, mc, nil, r)
// Publish and check that message received twice
testMQTTPublish(t, mcp, r, 0, false, false, "foo/bar", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo/bar", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo/bar", 0, []byte("msg"))
// Unsub the wildcard one
testMQTTUnsub(t, 1, mc, r, []*mqttFilter{{filter: "foo/#"}})
// Publish and check that message received once
testMQTTPublish(t, mcp, r, 0, false, false, "foo/bar", 0, []byte("msg"))
testMQTTCheckPubMsg(t, mc, r, "foo/bar", 0, []byte("msg"))
testMQTTExpectNothing(t, r)
// Unsub last
testMQTTUnsub(t, 1, mc, r, []*mqttFilter{{filter: "foo/bar"}})
// Publish and test msg not received
testMQTTPublish(t, mcp, r, 0, false, false, "foo/bar", 0, []byte("msg"))
testMQTTExpectNothing(t, r)
}
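// testMQTTExpectDisconnect fails the test if the connection is not closed
// by the server.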
func testMQTTExpectDisconnect(t testing.TB, c net.Conn) {
t.Helper()
if buf, err := testMQTTRead(c); err == nil {
t.Fatalf("Expected connection to be disconnected, got %s", buf)
}
}
func TestMQTTPublishTopicErrors(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
for _, test := range []struct {
name string
topic string
}{
{"empty", ""},
{"with single level wildcard", "foo/+"},
{"with multiple level wildcard", "foo/#"},
} {
t.Run(test.name, func(t *testing.T) {
mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, mc, r, 0, false, false, test.topic, 0, []byte("msg"))
testMQTTExpectDisconnect(t, mc)
})
}
}
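// testMQTTDisconnect sends a DISCONNECT protocol and expects the server to
// close the connection.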
func testMQTTDisconnect(t testing.TB, c net.Conn, bw *bufio.Writer) {
t.Helper()
w := &mqttWriter{}
w.WriteByte(mqttPacketDisconnect)
w.WriteByte(0)
if bw != nil {
bw.Write(w.Bytes())
bw.Flush()
} else {
c.Write(w.Bytes())
}
testMQTTExpectDisconnect(t, c)
}
func TestMQTTWill(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
sub := natsSubSync(t, nc, "will.topic")
natsFlush(t, nc)
willMsg := []byte("bye")
for _, test := range []struct {
name string
willExpected bool
willQoS byte
}{
{"will qos 0", true, 0},
{"will qos 1", true, 1},
{"proper disconnect no will", false, 0},
} {
t.Run(test.name, func(t *testing.T) {
mcs, rs := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcs.Close()
testMQTTCheckConnAck(t, rs, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mcs, rs, []*mqttFilter{{filter: "will/#", qos: 1}}, []byte{1})
testMQTTFlush(t, mcs, nil, rs)
mc, r := testMQTTConnect(t,
&mqttConnInfo{
cleanSess: true,
will: &mqttWill{
topic: []byte("will/topic"),
message: willMsg,
qos: test.willQoS,
},
}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
if test.willExpected {
mc.Close()
testMQTTCheckPubMsg(t, mcs, rs, "will/topic", test.willQoS<<1, willMsg)
wm := natsNexMsg(t, sub, time.Second)
if !bytes.Equal(wm.Data, willMsg) {
t.Fatalf("Expected will message to be %q, got %q", willMsg, wm.Data)
}
} else {
testMQTTDisconnect(t, mc, nil)
testMQTTExpectNothing(t, rs)
if wm, err := sub.NextMsg(100 * time.Millisecond); err == nil {
t.Fatalf("Should not have receive a message, got subj=%q data=%q",
wm.Subject, wm.Data)
}
}
})
}
}
func TestMQTTWillRetain(t *testing.T) {
for _, test := range []struct {
name string
pubQoS byte
subQoS byte
}{
{"pub QoS0 sub QoS0", 0, 0},
{"pub QoS0 sub QoS1", 0, 1},
{"pub QoS1 sub QoS0", 1, 0},
{"pub QoS1 sub QoS1", 1, 1},
} {
t.Run(test.name, func(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
willTopic := []byte("will/topic")
willMsg := []byte("bye")
mc, r := testMQTTConnect(t,
&mqttConnInfo{
cleanSess: true,
will: &mqttWill{
topic: willTopic,
message: willMsg,
qos: test.pubQoS,
retain: true,
},
}, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
// Disconnect the client
mc.Close()
// Wait for the server to process the connection close, which will
// cause the "will" message to be published (and retained).
checkClientsCount(t, s, 0)
// Create subscription on will topic and expect will message.
mcs, rs := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mcs.Close()
testMQTTCheckConnAck(t, rs, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mcs, rs, []*mqttFilter{{filter: "will/#", qos: test.subQoS}}, []byte{test.subQoS})
pflags, _ := testMQTTGetPubMsg(t, mcs, rs, "will/topic", willMsg)
if pflags&mqttPubFlagRetain == 0 {
t.Fatalf("expected retain flag to be set, it was not: %v", pflags)
}
// Expected QoS will be the lesser of the pub/sub QoS.
expectedQoS := test.pubQoS
if test.subQoS == 0 {
expectedQoS = 0
}
if qos := mqttGetQoS(pflags); qos != expectedQoS {
t.Fatalf("expected qos to be %v, got %v", expectedQoS, qos)
}
})
}
}
func TestMQTTWillRetainPermViolation(t *testing.T) {
template := `
port: -1
jetstream: enabled
server_name: mqtt
authorization {
mqtt_perms = {
publish = ["%s"]
subscribe = ["foo", "bar", "$MQTT.sub.>"]
}
users = [
{user: mqtt, password: pass, permissions: $mqtt_perms}
]
}
mqtt {
port: -1
}
`
conf := createConfFile(t, []byte(fmt.Sprintf(template, "foo")))
defer removeFile(t, conf)
s, o := RunServerWithConfig(conf)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{
cleanSess: true,
user: "mqtt",
pass: "pass",
}
// We create first a connection with the Will topic that the publisher
// is allowed to publish to.
ci.will = &mqttWill{
topic: []byte("foo"),
message: []byte("bye"),
qos: 1,
retain: true,
}
mc, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
// Disconnect, which will cause the Will to be sent with retain flag.
mc.Close()
// Wait for the server to process the connection close, which will
// cause the "will" message to be published (and retained).
checkClientsCount(t, s, 0)
// Create a subscription on the Will subject and we should receive it.
ci.will = nil
mcs, rs := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mcs.Close()
testMQTTCheckConnAck(t, rs, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mcs, rs, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
pflags, _ := testMQTTGetPubMsg(t, mcs, rs, "foo", []byte("bye"))
if pflags&mqttPubFlagRetain == 0 {
t.Fatalf("expected retain flag to be set, it was not: %v", pflags)
}
if qos := mqttGetQoS(pflags); qos != 1 {
t.Fatalf("expected qos to be 1, got %v", qos)
}
testMQTTDisconnect(t, mcs, nil)
// Now create another connection with a Will that client is not allowed to publish to.
ci.will = &mqttWill{
topic: []byte("bar"),
message: []byte("bye"),
qos: 1,
retain: true,
}
mc, r = testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
// Disconnect, to cause Will to be produced, but in that case should not be stored
// since user not allowed to publish on "bar".
mc.Close()
// Wait for the server to process the connection close, which will
// cause the "will" message to be published (and retained).
checkClientsCount(t, s, 0)
// Create sub on "bar" which user is allowed to subscribe to.
ci.will = nil
mcs, rs = testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mcs.Close()
testMQTTCheckConnAck(t, rs, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mcs, rs, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
// No Will should be published since it should not have been stored in the first place.
testMQTTExpectNothing(t, rs)
testMQTTDisconnect(t, mcs, nil)
// Now remove permission to publish on "foo" and check that a new subscription
// on "foo" is now not getting the will message because the original user no
// longer has permission to do so.
reloadUpdateConfig(t, s, conf, fmt.Sprintf(template, "baz"))
mcs, rs = testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mcs.Close()
testMQTTCheckConnAck(t, rs, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mcs, rs, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTExpectNothing(t, rs)
testMQTTDisconnect(t, mcs, nil)
}
func TestMQTTPublishRetain(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
for _, test := range []struct {
name string
retained bool
sentValue string
expectedValue string
subGetsIt bool
}{
{"publish retained", true, "retained", "retained", true},
{"publish not retained", false, "not retained", "retained", true},
{"remove retained", true, "", "", false},
} {
t.Run(test.name, func(t *testing.T) {
mc1, rs1 := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc1.Close()
testMQTTCheckConnAck(t, rs1, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, mc1, rs1, 0, false, test.retained, "foo", 0, []byte(test.sentValue))
testMQTTFlush(t, mc1, nil, rs1)
mc2, rs2 := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, rs2, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc2, rs2, []*mqttFilter{{filter: "foo/#", qos: 1}}, []byte{1})
if test.subGetsIt {
pflags, _ := testMQTTGetPubMsg(t, mc2, rs2, "foo", []byte(test.expectedValue))
if pflags&mqttPubFlagRetain == 0 {
t.Fatalf("retain flag should have been set, it was not: flags=%v", pflags)
}
} else {
testMQTTExpectNothing(t, rs2)
}
testMQTTDisconnect(t, mc1, nil)
testMQTTDisconnect(t, mc2, nil)
})
}
}
func TestMQTTPublishRetainPermViolation(t *testing.T) {
o := testMQTTDefaultOptions()
o.Users = []*User{
{
Username: "mqtt",
Password: "pass",
Permissions: &Permissions{
Publish: &SubjectPermission{Allow: []string{"foo"}},
Subscribe: &SubjectPermission{Allow: []string{"bar", "$MQTT.sub.>"}},
},
},
}
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{
cleanSess: true,
user: "mqtt",
pass: "pass",
}
mc1, rs1 := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc1.Close()
testMQTTCheckConnAck(t, rs1, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, mc1, rs1, 0, false, true, "bar", 0, []byte("retained"))
testMQTTFlush(t, mc1, nil, rs1)
mc2, rs2 := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc2.Close()
testMQTTCheckConnAck(t, rs2, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc2, rs2, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
testMQTTExpectNothing(t, rs2)
testMQTTDisconnect(t, mc1, nil)
testMQTTDisconnect(t, mc2, nil)
}
func TestMQTTPublishViolation(t *testing.T) {
o := testMQTTDefaultOptions()
o.Users = []*User{
{
Username: "mqtt",
Password: "pass",
Permissions: &Permissions{
Publish: &SubjectPermission{Allow: []string{"foo.bar"}},
Subscribe: &SubjectPermission{Allow: []string{"foo.*", "$MQTT.sub.>"}},
},
},
}
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{
user: "mqtt",
pass: "pass",
}
ci.clientID = "sub"
mc, rc := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "foo/+", qos: 1}}, []byte{1})
testMQTTFlush(t, mc, nil, rc)
ci.clientID = "pub"
ci.cleanSess = true
mp, rp := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
// These should be received since publisher has the right to publish on foo.bar
testMQTTPublish(t, mp, rp, 0, false, false, "foo/bar", 0, []byte("msg1"))
testMQTTCheckPubMsg(t, mc, rc, "foo/bar", 0, []byte("msg1"))
testMQTTPublish(t, mp, rp, 1, false, false, "foo/bar", 1, []byte("msg2"))
testMQTTCheckPubMsg(t, mc, rc, "foo/bar", mqttPubQos1, []byte("msg2"))
// But these should not be received since pub has no permission to publish on foo.baz
testMQTTPublish(t, mp, rp, 0, false, false, "foo/baz", 0, []byte("msg3"))
testMQTTExpectNothing(t, rc)
testMQTTPublish(t, mp, rp, 1, false, false, "foo/baz", 1, []byte("msg4"))
testMQTTExpectNothing(t, rc)
// Disconnect publisher
testMQTTDisconnect(t, mp, nil)
mp.Close()
// Disconnect subscriber and restart it to make sure that it does not receive msg3/msg4
testMQTTDisconnect(t, mc, nil)
mc.Close()
ci.cleanSess = false
ci.clientID = "sub"
mc, rc = testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer mc.Close()
testMQTTCheckConnAck(t, rc, mqttConnAckRCConnectionAccepted, true)
testMQTTSub(t, 1, mc, rc, []*mqttFilter{{filter: "foo/+", qos: 1}}, []byte{1})
testMQTTExpectNothing(t, rc)
}
func TestMQTTCleanSession(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{
clientID: "me",
cleanSess: false,
}
c, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTDisconnect(t, c, nil)
c, r = testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
testMQTTDisconnect(t, c, nil)
ci.cleanSess = true
c, r = testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTDisconnect(t, c, nil)
}
func TestMQTTDuplicateClientID(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{
clientID: "me",
cleanSess: false,
}
c1, r1 := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c1.Close()
testMQTTCheckConnAck(t, r1, mqttConnAckRCConnectionAccepted, false)
c2, r2 := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, true)
// The old client should be disconnected.
testMQTTExpectDisconnect(t, c1)
}
func TestMQTTPersistedSession(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownRestartedServer(&s)
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
cipub := &mqttConnInfo{clientID: "pub", cleanSess: true}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r,
[]*mqttFilter{
{filter: "foo/#", qos: 1},
{filter: "bar", qos: 1},
{filter: "baz", qos: 0},
},
[]byte{1, 1, 0})
testMQTTFlush(t, c, nil, r)
// Shutdown server, close connection and restart server. It should
// have restored the session and consumers.
dir := strings.TrimSuffix(s.JetStreamConfig().StoreDir, JetStreamStoreDir)
s.Shutdown()
c.Close()
o.Port = -1
o.MQTT.Port = -1
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// There is already the defer for shutdown at top of function
// Create a publisher that will send qos1 so we verify that messages
// are stored for the persisted sessions.
c, r = testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, c, r, 1, false, false, "foo/bar", 1, []byte("msg0"))
testMQTTFlush(t, c, nil, r)
testMQTTDisconnect(t, c, nil)
c.Close()
// Recreate session
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
// Since consumers have been recovered, messages should be received
// (MQTT does not need client to recreate consumers for a recovered
// session)
// Check that qos1 publish message is received.
testMQTTCheckPubMsg(t, c, r, "foo/bar", mqttPubQos1, []byte("msg0"))
// Flush to prevent publishes from being done too soon since we are
// receiving the CONNACK before the subscriptions are restored.
testMQTTFlush(t, c, nil, r)
// Now publish some messages to all subscriptions.
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
natsPub(t, nc, "foo.bar", []byte("msg1"))
testMQTTCheckPubMsg(t, c, r, "foo/bar", 0, []byte("msg1"))
natsPub(t, nc, "foo", []byte("msg2"))
testMQTTCheckPubMsg(t, c, r, "foo", 0, []byte("msg2"))
natsPub(t, nc, "bar", []byte("msg3"))
testMQTTCheckPubMsg(t, c, r, "bar", 0, []byte("msg3"))
natsPub(t, nc, "baz", []byte("msg4"))
testMQTTCheckPubMsg(t, c, r, "baz", 0, []byte("msg4"))
// Now unsub "bar" and verify that message published on this topic
// is not received.
testMQTTUnsub(t, 1, c, r, []*mqttFilter{{filter: "bar"}})
natsPub(t, nc, "bar", []byte("msg5"))
testMQTTExpectNothing(t, r)
nc.Close()
s.Shutdown()
c.Close()
o.Port = -1
o.MQTT.Port = -1
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// There is already the defer for shutdown at top of function
// Recreate a client
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
nc = natsConnect(t, s.ClientURL())
defer nc.Close()
natsPub(t, nc, "foo.bar", []byte("msg6"))
testMQTTCheckPubMsg(t, c, r, "foo/bar", 0, []byte("msg6"))
natsPub(t, nc, "foo", []byte("msg7"))
testMQTTCheckPubMsg(t, c, r, "foo", 0, []byte("msg7"))
// Make sure that we did not recover bar.
natsPub(t, nc, "bar", []byte("msg8"))
testMQTTExpectNothing(t, r)
natsPub(t, nc, "baz", []byte("msg9"))
testMQTTCheckPubMsg(t, c, r, "baz", 0, []byte("msg9"))
// Have the sub client send a subscription downgrading the qos1 subscription.
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo/#", qos: 0}}, []byte{0})
testMQTTFlush(t, c, nil, r)
nc.Close()
s.Shutdown()
c.Close()
o.Port = -1
o.MQTT.Port = -1
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// There is already the defer for shutdown at top of function
// Recreate the sub client
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
// Publish as a qos1
c2, r2 := testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer c2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, c2, r2, 1, false, false, "foo/bar", 1, []byte("msg10"))
// Verify that it is received as qos0 which is the qos of the subscription.
testMQTTCheckPubMsg(t, c, r, "foo/bar", 0, []byte("msg10"))
testMQTTDisconnect(t, c, nil)
c.Close()
testMQTTDisconnect(t, c2, nil)
c2.Close()
// Finally, recreate the sub with clean session and ensure that all is gone
cisub.cleanSess = true
for i := 0; i < 2; i++ {
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
nc = natsConnect(t, s.ClientURL())
defer nc.Close()
natsPub(t, nc, "foo.bar", []byte("msg11"))
testMQTTExpectNothing(t, r)
natsPub(t, nc, "foo", []byte("msg12"))
testMQTTExpectNothing(t, r)
// Make sure that we did not recover bar.
natsPub(t, nc, "bar", []byte("msg13"))
testMQTTExpectNothing(t, r)
natsPub(t, nc, "baz", []byte("msg14"))
testMQTTExpectNothing(t, r)
testMQTTDisconnect(t, c, nil)
c.Close()
nc.Close()
s.Shutdown()
o.Port = -1
o.MQTT.Port = -1
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// There is already the defer for shutdown at top of function
}
}
func TestMQTTRecoverSessionAndAddNewSub(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownRestartedServer(&s)
cisub := &mqttConnInfo{clientID: "sub1", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTDisconnect(t, c, nil)
c.Close()
// Shutdown server, close connection and restart server. It should
// have restored the session and consumers.
dir := strings.TrimSuffix(s.JetStreamConfig().StoreDir, JetStreamStoreDir)
s.Shutdown()
c.Close()
o.Port = -1
o.MQTT.Port = -1
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// No need for defer since it is done top of function
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
// Now add sub and make sure it does not crash
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, c, nil, r)
// Now repeat with a new client but without server restart.
cisub2 := &mqttConnInfo{clientID: "sub2", cleanSess: false}
c2, r2 := testMQTTConnect(t, cisub2, o.MQTT.Host, o.MQTT.Port)
defer c2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, false)
testMQTTDisconnect(t, c2, nil)
c2.Close()
c2, r2 = testMQTTConnect(t, cisub2, o.MQTT.Host, o.MQTT.Port)
defer c2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, true)
testMQTTSub(t, 1, c2, r2, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
testMQTTFlush(t, c2, nil, r2)
}
func TestMQTTRecoverSessionWithSubAndClientResendSub(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownRestartedServer(&s)
cisub1 := &mqttConnInfo{clientID: "sub1", cleanSess: false}
c, r := testMQTTConnect(t, cisub1, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
// Have a client send a SUBSCRIBE protocol for foo, QoS1
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTDisconnect(t, c, nil)
c.Close()
// Restart the server now.
dir := strings.TrimSuffix(s.JetStreamConfig().StoreDir, JetStreamStoreDir)
s.Shutdown()
o.Port = -1
o.MQTT.Port = -1
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// No need for defer since it is done top of function
// Now restart the client. Since the client was created with cleanSess==false,
// the server will have recorded the subscriptions for this client.
c, r = testMQTTConnect(t, cisub1, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
// At this point, the server has recreated the subscription on foo, QoS1.
// For applications that restart, it is possible (likely) that they
// will resend their SUBSCRIBE protocols, so do so now:
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, c, nil, r)
checkNumSub := func(clientID string) {
t.Helper()
// Find the MQTT client...
mc := testMQTTGetClient(t, s, clientID)
// Check how many NATS subscriptions are registered.
var fooSub int
var otherSub int
mc.mu.Lock()
for _, sub := range mc.subs {
switch string(sub.subject) {
case "foo":
fooSub++
default:
otherSub++
}
}
mc.mu.Unlock()
// We should have 2 subscriptions, one on "foo", and one for the JS durable
// consumer's delivery subject.
if fooSub != 1 {
t.Fatalf("Expected 1 sub on 'foo', got %v", fooSub)
}
if otherSub != 1 {
t.Fatalf("Expected 1 subscription for JS durable, got %v", otherSub)
}
}
checkNumSub("sub1")
c.Close()
// Now same but without the server restart in-between.
cisub2 := &mqttConnInfo{clientID: "sub2", cleanSess: false}
c, r = testMQTTConnect(t, cisub2, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTDisconnect(t, c, nil)
c.Close()
// Restart client
c, r = testMQTTConnect(t, cisub2, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, c, nil, r)
// Check client subs
checkNumSub("sub2")
}
func TestMQTTFlappingSession(t *testing.T) {
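// Lower the session flapping jail duration and cleanup interval so this test
// runs quickly; the defer below restores the defaults.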
mqttSessJailDur = 250 * time.Millisecond
mqttFlapCleanItvl = 350 * time.Millisecond
defer func() {
mqttSessJailDur = mqttSessFlappingJailDur
mqttFlapCleanItvl = mqttSessFlappingCleanupInterval
}()
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{clientID: "flapper", cleanSess: false}
c, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
// Let's get a handle on the asm to check things later.
cli := testMQTTGetClient(t, s, "flapper")
asm := cli.mqtt.asm
// Start a new connection with the same clientID, which should replace
// the old one and put it in the flappers map.
c2, r2 := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, true)
// Should be disconnected...
testMQTTExpectDisconnect(t, c)
// Now try to reconnect "c" and we should fail. We have to do this manually,
// since we expect it to fail.
addr := fmt.Sprintf("%s:%d", o.MQTT.Host, o.MQTT.Port)
c, err := net.Dial("tcp", addr)
if err != nil {
t.Fatalf("Error creating mqtt connection: %v", err)
}
defer c.Close()
proto := mqttCreateConnectProto(ci)
if _, err := testMQTTWrite(c, proto); err != nil {
t.Fatalf("Error writing connect: %v", err)
}
if _, err := testMQTTRead(c); err == nil {
t.Fatal("Expected connection to fail")
}
// This should be in the flappers map, but after 250ms should be cleared.
for i := 0; i < 2; i++ {
asm.mu.RLock()
_, present := asm.flappers["flapper"]
asm.mu.RUnlock()
if i == 0 {
if !present {
t.Fatal("Did not find the client ID in the flappers map")
}
// Wait for more than the cleanup interval
time.Sleep(mqttFlapCleanItvl + 100*time.Millisecond)
} else if present {
t.Fatal("The client ID should have been cleared from the map")
}
}
}
func TestMQTTLockedSession(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
sm := &s.mqtt.sessmgr
sm.mu.Lock()
asm := sm.sessions[globalAccountName]
sm.mu.Unlock()
if asm == nil {
t.Fatalf("account session manager not found")
}
// Get the session for "sub"
cli := testMQTTGetClient(t, s, "sub")
sess := cli.mqtt.sess
// Pretend that the session above is locked.
if err := asm.lockSession(sess, cli); err != nil {
t.Fatalf("Unable to lock session: %v", err)
}
defer asm.unlockSession(sess)
// Now try to connect another client that wants to use "sub".
// We can't use testMQTTConnect() because it is going to fail.
addr := fmt.Sprintf("%s:%d", o.MQTT.Host, o.MQTT.Port)
c2, err := net.Dial("tcp", addr)
if err != nil {
t.Fatalf("Error creating mqtt connection: %v", err)
}
defer c2.Close()
proto := mqttCreateConnectProto(ci)
if _, err := testMQTTWrite(c2, proto); err != nil {
t.Fatalf("Error writing connect: %v", err)
}
if _, err := testMQTTRead(c2); err == nil {
t.Fatal("Expected connection to fail")
}
// Now try again, but this time release the session while waiting
// to connect and it should succeed.
time.AfterFunc(250*time.Millisecond, func() { asm.unlockSession(sess) })
c3, r3 := testMQTTConnect(t, ci, o.MQTT.Host, o.MQTT.Port)
defer c3.Close()
testMQTTCheckConnAck(t, r3, mqttConnAckRCConnectionAccepted, true)
}
func TestMQTTPersistRetainedMsg(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownRestartedServer(&s)
dir := strings.TrimSuffix(s.JetStreamConfig().StoreDir, JetStreamStoreDir)
cipub := &mqttConnInfo{clientID: "pub", cleanSess: true}
c, r := testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, c, r, 1, false, true, "foo", 1, []byte("foo1"))
testMQTTPublish(t, c, r, 1, false, true, "foo", 1, []byte("foo2"))
testMQTTPublish(t, c, r, 1, false, true, "bar", 1, []byte("bar1"))
testMQTTPublish(t, c, r, 0, false, true, "baz", 1, []byte("baz1"))
// Remove bar
testMQTTPublish(t, c, r, 1, false, true, "bar", 1, nil)
testMQTTFlush(t, c, nil, r)
testMQTTDisconnect(t, c, nil)
c.Close()
s.Shutdown()
o.Port = -1
o.MQTT.Port = -1
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// There is already the defer for shutdown at top of function
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTCheckPubMsg(t, c, r, "foo", mqttPubFlagRetain|mqttPubQos1, []byte("foo2"))
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "baz", qos: 1}}, []byte{1})
testMQTTCheckPubMsg(t, c, r, "baz", mqttPubFlagRetain, []byte("baz1"))
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
testMQTTExpectNothing(t, r)
testMQTTDisconnect(t, c, nil)
c.Close()
}
func TestMQTTConnAckFirstPacket(t *testing.T) {
o := testMQTTDefaultOptions()
o.NoLog, o.Debug, o.Trace = true, false, false
s := RunServer(o)
defer testMQTTShutdownServer(s)
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 0}}, []byte{0})
testMQTTDisconnect(t, c, nil)
c.Close()
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
wg := sync.WaitGroup{}
wg.Add(1)
ch := make(chan struct{}, 1)
ready := make(chan struct{})
go func() {
defer wg.Done()
close(ready)
for {
nc.Publish("foo", []byte("msg"))
select {
case <-ch:
return
default:
}
}
}()
<-ready
for i := 0; i < 100; i++ {
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
w := &mqttWriter{}
w.WriteByte(mqttPacketDisconnect)
w.WriteByte(0)
c.Write(w.Bytes())
// Wait to be disconnected, we can't use testMQTTDisconnect() because
// it would fail since we may still receive some NATS messages.
var b [10]byte
for {
if _, err := c.Read(b[:]); err != nil {
break
}
}
c.Close()
}
close(ch)
wg.Wait()
}
func TestMQTTRedeliveryAckWait(t *testing.T) {
o := testMQTTDefaultOptions()
o.MQTT.AckWait = 250 * time.Millisecond
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
cipub := &mqttConnInfo{clientID: "pub", cleanSess: true}
cp, rp := testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("foo1"))
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 2, []byte("foo2"))
testMQTTDisconnect(t, cp, nil)
cp.Close()
for i := 0; i < 2; i++ {
flags := mqttPubQos1
if i > 0 {
flags |= mqttPubFlagDup
}
pi1 := testMQTTCheckPubMsgNoAck(t, c, r, "foo", flags, []byte("foo1"))
pi2 := testMQTTCheckPubMsgNoAck(t, c, r, "foo", flags, []byte("foo2"))
if pi1 != 1 || pi2 != 2 {
t.Fatalf("Unexpected pi values: %v, %v", pi1, pi2)
}
}
// Ack first message
testMQTTSendPubAck(t, c, 1)
// Redelivery should only be for second message now
for i := 0; i < 2; i++ {
flags := mqttPubQos1 | mqttPubFlagDup
pi := testMQTTCheckPubMsgNoAck(t, c, r, "foo", flags, []byte("foo2"))
if pi != 2 {
t.Fatalf("Unexpected pi to be 2, got %v", pi)
}
}
// Restart client, should receive second message with pi==2
c.Close()
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
// Check that message is received with proper pi
pi := testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1|mqttPubFlagDup, []byte("foo2"))
if pi != 2 {
t.Fatalf("Unexpected pi to be 2, got %v", pi)
}
// Now ack second message
testMQTTSendPubAck(t, c, 2)
// Flush to make sure it is processed before checking client's maps
testMQTTFlush(t, c, nil, r)
// Look for the sub client
mc := testMQTTGetClient(t, s, "sub")
mc.mu.Lock()
sess := mc.mqtt.sess
sess.mu.Lock()
lpi := len(sess.pending)
var lsseq int
for _, sseqToPi := range sess.cpending {
lsseq += len(sseqToPi)
}
sess.mu.Unlock()
mc.mu.Unlock()
if lpi != 0 || lsseq != 0 {
t.Fatalf("Maps should be empty, got %v, %v", lpi, lsseq)
}
}
func TestMQTTAckWaitConfigChange(t *testing.T) {
o := testMQTTDefaultOptions()
o.MQTT.AckWait = 250 * time.Millisecond
s := testMQTTRunServer(t, o)
defer testMQTTShutdownRestartedServer(&s)
dir := strings.TrimSuffix(s.JetStreamConfig().StoreDir, JetStreamStoreDir)
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
sendMsg := func(topic, payload string) {
t.Helper()
cipub := &mqttConnInfo{clientID: "pub", cleanSess: true}
cp, rp := testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, topic, 1, []byte(payload))
testMQTTDisconnect(t, cp, nil)
cp.Close()
}
sendMsg("foo", "msg1")
for i := 0; i < 2; i++ {
flags := mqttPubQos1
if i > 0 {
flags |= mqttPubFlagDup
}
testMQTTCheckPubMsgNoAck(t, c, r, "foo", flags, []byte("msg1"))
}
// Restart the server with a different AckWait option value.
// Verify that MQTT sub restart succeeds. It will keep the
// original value.
c.Close()
s.Shutdown()
o.Port = -1
o.MQTT.Port = -1
o.MQTT.AckWait = 10 * time.Millisecond
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// There is already the defer for shutdown at top of function
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1|mqttPubFlagDup, []byte("msg1"))
start := time.Now()
testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1|mqttPubFlagDup, []byte("msg1"))
if dur := time.Since(start); dur < 200*time.Millisecond {
t.Fatalf("AckWait seem to have changed for existing subscription: %v", dur)
}
// Create new subscription
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
sendMsg("bar", "msg2")
testMQTTCheckPubMsgNoAck(t, c, r, "bar", mqttPubQos1, []byte("msg2"))
start = time.Now()
testMQTTCheckPubMsgNoAck(t, c, r, "bar", mqttPubQos1|mqttPubFlagDup, []byte("msg2"))
if dur := time.Since(start); dur > 50*time.Millisecond {
t.Fatalf("AckWait new value not used by new sub: %v", dur)
}
c.Close()
}
func TestMQTTUnsubscribeWithPendingAcks(t *testing.T) {
o := testMQTTDefaultOptions()
o.MQTT.AckWait = 250 * time.Millisecond
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
cipub := &mqttConnInfo{clientID: "pub", cleanSess: true}
cp, rp := testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg"))
testMQTTDisconnect(t, cp, nil)
cp.Close()
for i := 0; i < 2; i++ {
flags := mqttPubQos1
if i > 0 {
flags |= mqttPubFlagDup
}
testMQTTCheckPubMsgNoAck(t, c, r, "foo", flags, []byte("msg"))
}
testMQTTUnsub(t, 1, c, r, []*mqttFilter{{filter: "foo"}})
testMQTTFlush(t, c, nil, r)
mc := testMQTTGetClient(t, s, "sub")
mc.mu.Lock()
sess := mc.mqtt.sess
sess.mu.Lock()
pal := len(sess.pending)
sess.mu.Unlock()
mc.mu.Unlock()
if pal != 0 {
t.Fatalf("Expected pending ack map to be empty, got %v", pal)
}
}
func TestMQTTMaxAckPending(t *testing.T) {
o := testMQTTDefaultOptions()
o.MQTT.MaxAckPending = 1
s := testMQTTRunServer(t, o)
defer testMQTTShutdownRestartedServer(&s)
dir := strings.TrimSuffix(s.JetStreamConfig().StoreDir, JetStreamStoreDir)
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
cipub := &mqttConnInfo{clientID: "pub", cleanSess: true}
cp, rp := testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg1"))
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg2"))
pi := testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1, []byte("msg1"))
// Check that we don't receive the second one due to max ack pending
testMQTTExpectNothing(t, r)
// Now ack first message
testMQTTSendPubAck(t, c, pi)
// Now we should receive message 2
testMQTTCheckPubMsg(t, c, r, "foo", mqttPubQos1, []byte("msg2"))
testMQTTDisconnect(t, c, nil)
// Give the server a chance to register that this client is gone.
checkClientsCount(t, s, 1)
// Send 2 messages while sub is offline
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg3"))
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg4"))
// Restart consumer
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
// Should receive only message 3
pi = testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1, []byte("msg3"))
testMQTTExpectNothing(t, r)
// Ack and get the next
testMQTTSendPubAck(t, c, pi)
testMQTTCheckPubMsg(t, c, r, "foo", mqttPubQos1, []byte("msg4"))
// Make sure this message gets ack'ed
mcli := testMQTTGetClient(t, s, cisub.clientID)
checkFor(t, time.Second, 15*time.Millisecond, func() error {
mcli.mu.Lock()
sess := mcli.mqtt.sess
sess.mu.Lock()
np := len(sess.pending)
sess.mu.Unlock()
mcli.mu.Unlock()
if np != 0 {
return fmt.Errorf("Still %v pending messages", np)
}
return nil
})
// Check that change to config does not prevent restart of sub.
cp.Close()
c.Close()
s.Shutdown()
o.Port = -1
o.MQTT.Port = -1
o.MQTT.MaxAckPending = 2
o.StoreDir = dir
s = testMQTTRunServer(t, o)
// There is already the defer for shutdown at top of function
cp, rp = testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg5"))
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg6"))
// Restart consumer
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, true)
// Should receive only message 5
pi = testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1, []byte("msg5"))
testMQTTExpectNothing(t, r)
// Ack and get the next
testMQTTSendPubAck(t, c, pi)
testMQTTCheckPubMsg(t, c, r, "foo", mqttPubQos1, []byte("msg6"))
}
func TestMQTTMaxAckPendingForMultipleSubs(t *testing.T) {
o := testMQTTDefaultOptions()
o.MQTT.AckWait = time.Second
o.MQTT.MaxAckPending = 1
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
cisub := &mqttConnInfo{clientID: "sub", cleanSess: true}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
cipub := &mqttConnInfo{clientID: "pub", cleanSess: true}
cp, rp := testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg1"))
pi := testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1, []byte("msg1"))
// Now send a second message but on topic bar
testMQTTPublish(t, cp, rp, 1, false, false, "bar", 1, []byte("msg2"))
// JS allows us to limit per consumer, but we apply the limit to the
// session, so although JS will attempt to deliver this message,
// the MQTT code will suppress it.
testMQTTExpectNothing(t, r)
// Ack the first message.
testMQTTSendPubAck(t, c, pi)
// Now we should get the second message
testMQTTCheckPubMsg(t, c, r, "bar", mqttPubQos1|mqttPubFlagDup, []byte("msg2"))
}
func TestMQTTMaxAckPendingOverLimit(t *testing.T) {
o := testMQTTDefaultOptions()
o.MQTT.MaxAckPending = 20000
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
checkTMax := func(sess *mqttSession, expected int) {
t.Helper()
sess.mu.Lock()
tmax := sess.tmaxack
sess.mu.Unlock()
if tmax != expected {
t.Fatalf("Expected current tmax to be %v, got %v", expected, tmax)
}
}
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
mc := testMQTTGetClient(t, s, "sub")
mc.mu.Lock()
sess := mc.mqtt.sess
mc.mu.Unlock()
// After this one, total would be 20000
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
checkTMax(sess, 20000)
// This one will count for 2, so total will be 60000
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "bar/#", qos: 1}}, []byte{1})
checkTMax(sess, 60000)
// This should fail
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{mqttSubAckFailure})
checkTMax(sess, 60000)
// Remove the one with wildcard
testMQTTUnsub(t, 1, c, r, []*mqttFilter{{filter: "bar/#"}})
checkTMax(sess, 20000)
// Now we could add 2 more without wildcards
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
checkTMax(sess, 40000)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "baz", qos: 1}}, []byte{1})
checkTMax(sess, 60000)
// Again, this one should fail
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "bat", qos: 1}}, []byte{mqttSubAckFailure})
checkTMax(sess, 60000)
// Now remove all and check that we are at 0
testMQTTUnsub(t, 1, c, r, []*mqttFilter{{filter: "foo"}})
checkTMax(sess, 40000)
testMQTTUnsub(t, 1, c, r, []*mqttFilter{{filter: "bar"}})
checkTMax(sess, 20000)
testMQTTUnsub(t, 1, c, r, []*mqttFilter{{filter: "baz"}})
checkTMax(sess, 0)
}
func TestMQTTConfigReload(t *testing.T) {
template := `
jetstream: true
server_name: mqtt
mqtt {
port: -1
ack_wait: %s
max_ack_pending: %s
}
`
conf := createConfFile(t, []byte(fmt.Sprintf(template, `"5s"`, `10000`)))
defer removeFile(t, conf)
s, o := RunServerWithConfig(conf)
defer testMQTTShutdownServer(s)
if val := o.MQTT.AckWait; val != 5*time.Second {
t.Fatalf("Invalid ackwait: %v", val)
}
if val := o.MQTT.MaxAckPending; val != 10000 {
t.Fatalf("Invalid ackwait: %v", val)
}
changeCurrentConfigContentWithNewContent(t, conf, []byte(fmt.Sprintf(template, `"250ms"`, `1`)))
if err := s.Reload(); err != nil {
t.Fatalf("Error on reload: %v", err)
}
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
cipub := &mqttConnInfo{clientID: "pub", cleanSess: true}
cp, rp := testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg1"))
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg2"))
testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1, []byte("msg1"))
start := time.Now()
testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1|mqttPubFlagDup, []byte("msg1"))
if dur := time.Since(start); dur > 500*time.Millisecond {
t.Fatalf("AckWait not applied? dur=%v", dur)
}
c.Close()
cp.Close()
testMQTTShutdownServer(s)
changeCurrentConfigContentWithNewContent(t, conf, []byte(fmt.Sprintf(template, `"30s"`, `1`)))
s, o = RunServerWithConfig(conf)
defer testMQTTShutdownServer(s)
cisub.cleanSess = true
c, r = testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
cipub = &mqttConnInfo{clientID: "pub", cleanSess: true}
cp, rp = testMQTTConnect(t, cipub, o.MQTT.Host, o.MQTT.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg1"))
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg2"))
testMQTTCheckPubMsgNoAck(t, c, r, "foo", mqttPubQos1, []byte("msg1"))
testMQTTExpectNothing(t, r)
// Increase the max ack pending
changeCurrentConfigContentWithNewContent(t, conf, []byte(fmt.Sprintf(template, `"30s"`, `10`)))
// Reload now
if err := s.Reload(); err != nil {
t.Fatalf("Error on reload: %v", err)
}
// Reload will have effect only on new subscriptions.
// Create a new subscription, and we should now be able to get the 2 messages.
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "bar", qos: 1}}, []byte{1})
testMQTTPublish(t, cp, rp, 1, false, false, "bar", 1, []byte("msg3"))
testMQTTPublish(t, cp, rp, 1, false, false, "bar", 1, []byte("msg4"))
testMQTTCheckPubMsg(t, c, r, "bar", mqttPubQos1, []byte("msg3"))
testMQTTCheckPubMsg(t, c, r, "bar", mqttPubQos1, []byte("msg4"))
}
func TestMQTTStreamInfoReturnsNonEmptySubject(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
cisub := &mqttConnInfo{clientID: "sub", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
nc := natsConnect(t, s.ClientURL())
defer nc.Close()
// Check that we can query all MQTT streams. MQTT streams are
// created without subject filter, however, if we return them like this,
// the 'nats' utility will fail to display them due to some xml validation.
for _, sname := range []string{
mqttStreamName,
mqttRetainedMsgsStreamName,
} {
t.Run(sname, func(t *testing.T) {
resp, err := nc.Request(fmt.Sprintf(JSApiStreamInfoT, sname), nil, time.Second)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var bResp JSApiStreamInfoResponse
if err = json.Unmarshal(resp.Data, &bResp); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(bResp.Config.Subjects) == 0 {
t.Fatalf("No subject returned, which will cause nats tooling to fail: %+v", bResp.Config)
}
})
}
}
func TestMQTTWebsocketToMQTTPort(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
http: "127.0.0.1:-1"
server_name: "mqtt"
jetstream: enabled
mqtt {
listen: "127.0.0.1:-1"
}
websocket {
listen: "127.0.0.1:-1"
no_tls: true
}
`))
defer removeFile(t, conf)
s, o := RunServerWithConfig(conf)
defer testMQTTShutdownServer(s)
l := &captureErrorLogger{errCh: make(chan string, 10)}
s.SetLogger(l, false, false)
ci := &mqttConnInfo{cleanSess: true, ws: true}
if _, _, err := testMQTTConnectRetryWithError(t, ci, o.MQTT.Host, o.MQTT.Port, 0); err == nil {
t.Fatal("Expected error during connect")
}
select {
case e := <-l.errCh:
if !strings.Contains(e, errMQTTNotWebsocketPort.Error()) {
t.Fatalf("Unexpected error: %v", e)
}
case <-time.After(time.Second):
t.Fatal("No error regarding wrong port")
}
}
func TestMQTTWebsocket(t *testing.T) {
template := `
listen: "127.0.0.1:-1"
http: "127.0.0.1:-1"
server_name: "mqtt"
jetstream: enabled
accounts {
MQTT {
jetstream: enabled
users [
{user: "mqtt", pass: "pwd", connection_types: ["%s"%s]}
]
}
}
mqtt {
listen: "127.0.0.1:-1"
}
websocket {
listen: "127.0.0.1:-1"
no_tls: true
}
`
s, o, conf := runReloadServerWithContent(t, []byte(fmt.Sprintf(template, jwt.ConnectionTypeMqtt, "")))
defer removeFile(t, conf)
defer testMQTTShutdownServer(s)
cisub := &mqttConnInfo{clientID: "sub", user: "mqtt", pass: "pwd", ws: true}
c, r := testMQTTConnect(t, cisub, o.Websocket.Host, o.Websocket.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCNotAuthorized, false)
c.Close()
ws := fmt.Sprintf(`, "%s"`, jwt.ConnectionTypeMqttWS)
reloadUpdateConfig(t, s, conf, fmt.Sprintf(template, jwt.ConnectionTypeMqtt, ws))
cisub = &mqttConnInfo{clientID: "sub", user: "mqtt", pass: "pwd", ws: true}
c, r = testMQTTConnect(t, cisub, o.Websocket.Host, o.Websocket.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, c, nil, r)
cipub := &mqttConnInfo{clientID: "pub", user: "mqtt", pass: "pwd", ws: true}
cp, rp := testMQTTConnect(t, cipub, o.Websocket.Host, o.Websocket.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg1"))
testMQTTCheckPubMsg(t, c, r, "foo", mqttPubQos1, []byte("msg1"))
}
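// chunkWriteConn wraps a net.Conn and randomly splits each write into two
// chunks separated by a short pause, to exercise the server's handling of
// partial protocol reads.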
type chunkWriteConn struct {
net.Conn
}
func (cwc *chunkWriteConn) Write(p []byte) (int, error) {
max := len(p)
cs := rand.Intn(max) + 1
if cs < max {
if pn, perr := cwc.Conn.Write(p[:cs]); perr != nil {
return pn, perr
}
time.Sleep(10 * time.Millisecond)
if pn, perr := cwc.Conn.Write(p[cs:]); perr != nil {
return pn, perr
}
return len(p), nil
}
return cwc.Conn.Write(p)
}
func TestMQTTPartial(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
http: "127.0.0.1:-1"
server_name: "mqtt"
jetstream: enabled
mqtt {
listen: "127.0.0.1:-1"
}
websocket {
listen: "127.0.0.1:-1"
no_tls: true
}
`))
defer removeFile(t, conf)
s, o := RunServerWithConfig(conf)
defer testMQTTShutdownServer(s)
for _, test := range []struct {
name string
ws bool
}{
{"standard", false},
{"websocket", true},
} {
t.Run(test.name, func(t *testing.T) {
ci := &mqttConnInfo{cleanSess: true, ws: test.ws}
host, port := o.MQTT.Host, o.MQTT.Port
if test.ws {
host, port = o.Websocket.Host, o.Websocket.Port
}
c, r := testMQTTConnect(t, ci, host, port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
c = &chunkWriteConn{Conn: c}
cp, rp := testMQTTConnect(t, ci, host, port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
cp = &chunkWriteConn{Conn: cp}
subj := nuid.Next()
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: subj, qos: 1}}, []byte{1})
testMQTTFlush(t, c, nil, r)
for i := 0; i < 10; i++ {
testMQTTPublish(t, cp, rp, 1, false, false, subj, 1, []byte("msg"))
testMQTTCheckPubMsg(t, c, r, subj, mqttPubQos1, []byte("msg"))
}
})
}
}
func TestMQTTWebsocketTLS(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
http: "127.0.0.1:-1"
server_name: "mqtt"
jetstream: enabled
mqtt {
listen: "127.0.0.1:-1"
}
websocket {
listen: "127.0.0.1:-1"
tls {
cert_file: '../test/configs/certs/server-cert.pem'
key_file: '../test/configs/certs/server-key.pem'
ca_file: '../test/configs/certs/ca.pem'
}
}
`))
defer removeFile(t, conf)
s, o := RunServerWithConfig(conf)
defer testMQTTShutdownServer(s)
c, r := testMQTTConnect(t, &mqttConnInfo{clientID: "sub", ws: true, tls: true}, o.Websocket.Host, o.Websocket.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(t, 1, c, r, []*mqttFilter{{filter: "foo", qos: 1}}, []byte{1})
testMQTTFlush(t, c, nil, r)
cp, rp := testMQTTConnect(t, &mqttConnInfo{clientID: "pub", ws: true, tls: true}, o.Websocket.Host, o.Websocket.Port)
defer cp.Close()
testMQTTCheckConnAck(t, rp, mqttConnAckRCConnectionAccepted, false)
testMQTTPublish(t, cp, rp, 1, false, false, "foo", 1, []byte("msg1"))
testMQTTCheckPubMsg(t, c, r, "foo", mqttPubQos1, []byte("msg1"))
}
func TestMQTTTransferSessionStreamsToMuxed(t *testing.T) {
cl := createJetStreamClusterWithTemplate(t, testMQTTGetClusterTemplaceNoLeaf(), "MQTT", 3)
defer cl.shutdown()
nc, js := jsClientConnect(t, cl.randomServer())
defer nc.Close()
// Create 2 streams that start with "$MQTT_sess_" to check for transfer to new
// mux'ed unique "$MQTT_sess" stream. One of these streams will not contain a
// proper session record, and we will check that the stream does not get deleted.
sessStreamName1 := mqttSessionsStreamNamePrefix + string(getHash("sub"))
if _, err := js.AddStream(&nats.StreamConfig{
Name: sessStreamName1,
Subjects: []string{sessStreamName1},
Replicas: 3,
MaxMsgs: 1,
}); err != nil {
t.Fatalf("Unable to add stream: %v", err)
}
// Then add the session record
ps := mqttPersistedSession{
ID: "sub",
Subs: map[string]byte{"foo": 1},
Cons: map[string]*ConsumerConfig{"foo": {
Durable: "d6INCtp3_cK39H5WHEtOSU7sLy2oQv3",
DeliverSubject: "$MQTT.sub.cK39H5WHEtOSU7sLy2oQrR",
DeliverPolicy: DeliverNew,
AckPolicy: AckExplicit,
FilterSubject: "$MQTT.msgs.foo",
MaxAckPending: 1024,
}},
}
b, _ := json.Marshal(&ps)
if _, err := js.Publish(sessStreamName1, b); err != nil {
t.Fatalf("Error on publish: %v", err)
}
// Create the stream that has "$MQTT_sess_" prefix, but that is not really a MQTT session stream
sessStreamName2 := mqttSessionsStreamNamePrefix + "ivan"
if _, err := js.AddStream(&nats.StreamConfig{
Name: sessStreamName2,
Subjects: []string{sessStreamName2},
Replicas: 3,
MaxMsgs: 1,
}); err != nil {
t.Fatalf("Unable to add stream: %v", err)
}
if _, err := js.Publish(sessStreamName2, []byte("some content")); err != nil {
t.Fatalf("Error on publish: %v", err)
}
cl.waitOnStreamLeader(globalAccountName, sessStreamName1)
cl.waitOnStreamLeader(globalAccountName, sessStreamName2)
// Now create a real MQTT connection
o := cl.opts[0]
sc, sr := testMQTTConnectRetry(t, &mqttConnInfo{clientID: "sub"}, o.MQTT.Host, o.MQTT.Port, 10)
defer sc.Close()
testMQTTCheckConnAck(t, sr, mqttConnAckRCConnectionAccepted, true)
// Check that old session stream is gone, but the non session stream is still present.
var gotIt = false
for info := range js.StreamsInfo() {
if strings.HasPrefix(info.Config.Name, mqttSessionsStreamNamePrefix) {
if strings.HasSuffix(info.Config.Name, "_ivan") {
gotIt = true
} else {
t.Fatalf("The stream %q should have been deleted", info.Config.Name)
}
}
}
if !gotIt {
t.Fatalf("The stream %q should not have been deleted", mqttSessionsStreamNamePrefix+"ivan")
}
// We want to check that the record was properly transferred.
rmsg, err := js.GetMsg(mqttSessStreamName, 2)
if err != nil {
t.Fatalf("Unable to get session message: %v", err)
}
ps2 := &mqttPersistedSession{}
if err := json.Unmarshal(rmsg.Data, ps2); err != nil {
t.Fatalf("Error unpacking session record: %v", err)
}
if ps2.ID != "sub" {
t.Fatalf("Unexpected session record, %+v vs %+v", ps2, ps)
}
if qos, ok := ps2.Subs["foo"]; !ok || qos != 1 {
t.Fatalf("Unexpected session record, %+v vs %+v", ps2, ps)
}
if cons, ok := ps2.Cons["foo"]; !ok || !reflect.DeepEqual(cons, ps.Cons["foo"]) {
t.Fatalf("Unexpected session record, %+v vs %+v", ps2, ps)
}
// Make sure we don't attempt to transfer again by creating a subscription
// on the "stream names" API, which is used to get the list of streams to transfer
sub := natsSubSync(t, nc, JSApiStreams)
// Make sure to connect an MQTT client from a different node so that this node
// gets a connection for the account for the first time and tries to create
// all MQTT streams, etc.
o = cl.opts[1]
sc, sr = testMQTTConnectRetry(t, &mqttConnInfo{clientID: "sub2"}, o.MQTT.Host, o.MQTT.Port, 10)
defer sc.Close()
testMQTTCheckConnAck(t, sr, mqttConnAckRCConnectionAccepted, false)
if _, err := sub.NextMsg(200 * time.Millisecond); err == nil {
t.Fatal("Looks like attempt to transfer was done again")
}
}
func TestMQTTConnectAndDisconnectEvent(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
http: "127.0.0.1:-1"
server_name: "mqtt"
jetstream: enabled
accounts {
MQTT {
jetstream: enabled
users: [{user: "mqtt", password: "pwd"}]
}
SYS {
users: [{user: "sys", password: "pwd"}]
}
}
mqtt {
listen: "127.0.0.1:-1"
}
system_account: "SYS"
`))
defer os.Remove(conf)
s, o := RunServerWithConfig(conf)
defer testMQTTShutdownServer(s)
nc := natsConnect(t, s.ClientURL(), nats.UserInfo("sys", "pwd"))
defer nc.Close()
accConn := natsSubSync(t, nc, fmt.Sprintf(connectEventSubj, "MQTT"))
accDisc := natsSubSync(t, nc, fmt.Sprintf(disconnectEventSubj, "MQTT"))
accAuth := natsSubSync(t, nc, fmt.Sprintf(authErrorEventSubj, s.ID()))
natsFlush(t, nc)
checkConnEvent := func(data []byte, expected string) {
t.Helper()
var ce ConnectEventMsg
json.Unmarshal(data, &ce)
if ce.Client.MQTTClient != expected {
t.Fatalf("Expected client ID %q, got this connect event: %+v", expected, ce)
}
}
checkDiscEvent := func(data []byte, expected string) {
t.Helper()
var de DisconnectEventMsg
json.Unmarshal(data, &de)
if de.Client.MQTTClient != expected {
t.Fatalf("Expected client ID %q, got this disconnect event: %+v", expected, de)
}
}
c1, r1 := testMQTTConnect(t, &mqttConnInfo{user: "mqtt", pass: "pwd", clientID: "conn1", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer c1.Close()
testMQTTCheckConnAck(t, r1, mqttConnAckRCConnectionAccepted, false)
cm := natsNexMsg(t, accConn, time.Second)
checkConnEvent(cm.Data, "conn1")
c2, r2 := testMQTTConnect(t, &mqttConnInfo{user: "mqtt", pass: "pwd", clientID: "conn2", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer c2.Close()
testMQTTCheckConnAck(t, r2, mqttConnAckRCConnectionAccepted, false)
cm = natsNexMsg(t, accConn, time.Second)
checkConnEvent(cm.Data, "conn2")
c3, r3 := testMQTTConnect(t, &mqttConnInfo{user: "mqtt", pass: "pwd", clientID: "conn3", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer c3.Close()
testMQTTCheckConnAck(t, r3, mqttConnAckRCConnectionAccepted, false)
cm = natsNexMsg(t, accConn, time.Second)
checkConnEvent(cm.Data, "conn3")
testMQTTDisconnect(t, c3, nil)
cm = natsNexMsg(t, accDisc, time.Second)
checkDiscEvent(cm.Data, "conn3")
// Now try a bad auth
c4, r4 := testMQTTConnect(t, &mqttConnInfo{clientID: "conn4", cleanSess: true}, o.MQTT.Host, o.MQTT.Port)
defer c4.Close()
testMQTTCheckConnAck(t, r4, mqttConnAckRCNotAuthorized, false)
// This will generate an auth error, which is a disconnect event
cm = natsNexMsg(t, accAuth, time.Second)
checkDiscEvent(cm.Data, "conn4")
url := fmt.Sprintf("http://127.0.0.1:%d/", s.MonitorAddr().Port)
for mode := 0; mode < 2; mode++ {
c := pollConz(t, s, mode, url+"connz", nil)
if c.Conns == nil || len(c.Conns) != 3 {
t.Fatalf("Expected 3 connections in array, got %v", len(c.Conns))
}
// Check that client ID is present
for _, conn := range c.Conns {
if conn.Type == clientTypeStringMap[MQTT] && conn.MQTTClient == _EMPTY_ {
t.Fatalf("Expected a client ID to be set, got %+v", conn)
}
}
// Check that we can select based on client ID:
c = pollConz(t, s, mode, url+"connz?mqtt_client=conn2", &ConnzOptions{MQTTClient: "conn2"})
if c.Conns == nil || len(c.Conns) != 1 {
t.Fatalf("Expected 1 connection in array, got %v", len(c.Conns))
}
if c.Conns[0].MQTTClient != "conn2" {
t.Fatalf("Unexpected client ID: %+v", c.Conns[0])
}
// Check that we have the closed ones
c = pollConz(t, s, mode, url+"connz?state=closed", &ConnzOptions{State: ConnClosed})
if c.Conns == nil || len(c.Conns) != 2 {
t.Fatalf("Expected 2 connections in array, got %v", len(c.Conns))
}
for _, conn := range c.Conns {
if conn.MQTTClient == _EMPTY_ {
t.Fatalf("Expected a client ID, got %+v", conn)
}
}
// Check that we can select with client ID for closed state
c = pollConz(t, s, mode, url+"connz?state=closed&mqtt_client=conn3", &ConnzOptions{State: ConnClosed, MQTTClient: "conn3"})
if c.Conns == nil || len(c.Conns) != 1 {
t.Fatalf("Expected 1 connection in array, got %v", len(c.Conns))
}
if c.Conns[0].MQTTClient != "conn3" {
t.Fatalf("Unexpected client ID: %+v", c.Conns[0])
}
// Check that we can select with client ID for closed state (but in this case not found)
c = pollConz(t, s, mode, url+"connz?state=closed&mqtt_client=conn5", &ConnzOptions{State: ConnClosed, MQTTClient: "conn5"})
if len(c.Conns) != 0 {
t.Fatalf("Expected 0 connection in array, got %v", len(c.Conns))
}
}
reply := nc.NewRespInbox()
replySub := natsSubSync(t, nc, reply)
// Test system events now
for _, test := range []struct {
opt interface{}
cid string
}{
{&ConnzOptions{MQTTClient: "conn1"}, "conn1"},
{&ConnzOptions{MQTTClient: "conn3", State: ConnClosed}, "conn3"},
{&ConnzOptions{MQTTClient: "conn4", State: ConnClosed}, "conn4"},
{&ConnzOptions{MQTTClient: "conn5"}, _EMPTY_},
{json.RawMessage(`{"mqtt_client":"conn1"}`), "conn1"},
{json.RawMessage(fmt.Sprintf(`{"mqtt_client":"conn3", "state":%v}`, ConnClosed)), "conn3"},
{json.RawMessage(fmt.Sprintf(`{"mqtt_client":"conn4", "state":%v}`, ConnClosed)), "conn4"},
{json.RawMessage(`{"mqtt_client":"conn5"}`), _EMPTY_},
} {
t.Run("sys connz", func(t *testing.T) {
b, _ := json.Marshal(test.opt)
// set a header to make sure request parsing knows to ignore them
nc.PublishMsg(&nats.Msg{
Subject: fmt.Sprintf("%s.CONNZ", serverStatsPingReqSubj),
Reply: reply,
Data: b,
})
msg := natsNexMsg(t, replySub, time.Second)
var response ServerAPIResponse
if err := json.Unmarshal(msg.Data, &response); err != nil {
t.Fatalf("Error unmarshalling response json: %v", err)
}
tmp, _ := json.Marshal(response.Data)
cz := &Connz{}
if err := json.Unmarshal(tmp, cz); err != nil {
t.Fatalf("Error unmarshalling connz: %v", err)
}
if test.cid == _EMPTY_ {
if len(cz.Conns) != 0 {
t.Fatalf("Expected no connections, got %v", len(cz.Conns))
}
return
}
if len(cz.Conns) != 1 {
t.Fatalf("Expected single connection, got %v", len(cz.Conns))
}
conn := cz.Conns[0]
if conn.MQTTClient != test.cid {
t.Fatalf("Expected client ID %q, got %q", test.cid, conn.MQTTClient)
}
})
}
}
func TestMQTTClientIDInLogStatements(t *testing.T) {
o := testMQTTDefaultOptions()
s := testMQTTRunServer(t, o)
defer testMQTTShutdownServer(s)
l := &captureDebugLogger{dbgCh: make(chan string, 10)}
s.SetLogger(l, true, false)
cisub := &mqttConnInfo{clientID: "my_client_id", cleanSess: false}
c, r := testMQTTConnect(t, cisub, o.MQTT.Host, o.MQTT.Port)
defer c.Close()
testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false)
testMQTTDisconnect(t, c, nil)
c.Close()
tm := time.NewTimer(2 * time.Second)
var connected bool
var disconnected bool
for {
select {
case dl := <-l.dbgCh:
if strings.Contains(dl, "my_client_id") {
if strings.Contains(dl, "Client connected") {
connected = true
} else if strings.Contains(dl, "Client connection closed") {
disconnected = true
}
if connected && disconnected {
// OK!
return
}
}
case <-tm.C:
t.Fatal("Did not get the debug statements or client_id in them")
}
}
}
//////////////////////////////////////////////////////////////////////////
//
// Benchmarks
//
//////////////////////////////////////////////////////////////////////////
const (
mqttPubSubj = "a"
mqttBenchBufLen = 32768
)
func mqttBenchPubQoS0(b *testing.B, subject, payload string, numSubs int) {
b.StopTimer()
o := testMQTTDefaultOptions()
s := RunServer(o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{clientID: "pub", cleanSess: true}
c, br := testMQTTConnect(b, ci, o.MQTT.Host, o.MQTT.Port)
testMQTTCheckConnAck(b, br, mqttConnAckRCConnectionAccepted, false)
w := &mqttWriter{}
mqttWritePublish(w, 0, false, false, subject, 0, []byte(payload))
sendOp := w.Bytes()
dch := make(chan error, 1)
totalSize := int64(len(sendOp))
cdch := 0
createSub := func(i int) {
ci := &mqttConnInfo{clientID: fmt.Sprintf("sub%d", i), cleanSess: true}
cs, brs := testMQTTConnect(b, ci, o.MQTT.Host, o.MQTT.Port)
testMQTTCheckConnAck(b, brs, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(b, 1, cs, brs, []*mqttFilter{{filter: subject, qos: 0}}, []byte{0})
testMQTTFlush(b, cs, nil, brs)
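// Compute the wire size of each PUBLISH the subscriber will receive:
// 1 fixed-header byte + the encoded remaining-length + variable header
// (topic length and topic) and payload.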
w := &mqttWriter{}
varHeaderAndPayload := 2 + len(subject) + len(payload)
w.WriteVarInt(varHeaderAndPayload)
size := 1 + w.Len() + varHeaderAndPayload
totalSize += int64(size)
go func() {
mqttBenchConsumeMsgQoS0(cs, int64(b.N)*int64(size), dch)
cs.Close()
}()
}
for i := 0; i < numSubs; i++ {
createSub(i + 1)
cdch++
}
bw := bufio.NewWriterSize(c, mqttBenchBufLen)
b.SetBytes(totalSize)
b.StartTimer()
for i := 0; i < b.N; i++ {
bw.Write(sendOp)
}
testMQTTFlush(b, c, bw, br)
for i := 0; i < cdch; i++ {
if e := <-dch; e != nil {
b.Fatal(e.Error())
}
}
b.StopTimer()
c.Close()
s.Shutdown()
}
func mqttBenchConsumeMsgQoS0(c net.Conn, total int64, dch chan<- error) {
var buf [mqttBenchBufLen]byte
var err error
var n int
for size := int64(0); size < total; {
n, err = c.Read(buf[:])
if err != nil {
break
}
size += int64(n)
}
dch <- err
}
func mqttBenchPubQoS1(b *testing.B, subject, payload string, numSubs int) {
b.StopTimer()
o := testMQTTDefaultOptions()
o.MQTT.MaxAckPending = 0xFFFF
s := RunServer(o)
defer testMQTTShutdownServer(s)
ci := &mqttConnInfo{cleanSess: true}
c, br := testMQTTConnect(b, ci, o.MQTT.Host, o.MQTT.Port)
testMQTTCheckConnAck(b, br, mqttConnAckRCConnectionAccepted, false)
w := &mqttWriter{}
mqttWritePublish(w, 1, false, false, subject, 1, []byte(payload))
// For reported bytes we will count the PUBLISH + PUBACK (4 bytes)
totalSize := int64(len(w.Bytes()) + 4)
w.Reset()
pi := uint16(1)
maxpi := uint16(60000)
ppich := make(chan error, 10)
dch := make(chan error, 1+numSubs)
cdch := 1
// Start go routine to consume PUBACK for published QoS 1 messages.
go mqttBenchConsumePubAck(c, b.N, dch, ppich)
createSub := func(i int) {
ci := &mqttConnInfo{clientID: fmt.Sprintf("sub%d", i), cleanSess: true}
cs, brs := testMQTTConnect(b, ci, o.MQTT.Host, o.MQTT.Port)
testMQTTCheckConnAck(b, brs, mqttConnAckRCConnectionAccepted, false)
testMQTTSub(b, 1, cs, brs, []*mqttFilter{{filter: subject, qos: 1}}, []byte{1})
testMQTTFlush(b, cs, nil, brs)
w := &mqttWriter{}
varHeaderAndPayload := 2 + len(subject) + 2 + len(payload)
w.WriteVarInt(varHeaderAndPayload)
size := 1 + w.Len() + varHeaderAndPayload
// Add to the bytes reported the size of the message sent to the subscriber + PUBACK (4 bytes)
totalSize += int64(size + 4)
go func() {
mqttBenchConsumeMsgQos1(cs, b.N, size, dch)
cs.Close()
}()
}
for i := 0; i < numSubs; i++ {
createSub(i + 1)
cdch++
}
flush := func() {
b.Helper()
if _, err := c.Write(w.Bytes()); err != nil {
b.Fatalf("Error on write: %v", err)
}
w.Reset()
}
b.SetBytes(totalSize)
b.StartTimer()
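// Publish with packet identifiers 1..60000. Once the current window (maxpi)
// is exhausted, flush, wait for the PUBACK reader to signal progress (one
// signal per 10000 acks), extend the window by 10000 and retry the iteration.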
for i := 0; i < b.N; i++ {
if pi <= maxpi {
mqttWritePublish(w, 1, false, false, subject, pi, []byte(payload))
pi++
if w.Len() >= mqttBenchBufLen {
flush()
}
} else {
if w.Len() > 0 {
flush()
}
if pi > 60000 {
pi = 1
maxpi = 0
}
if e := <-ppich; e != nil {
b.Fatal(e.Error())
}
maxpi += 10000
i--
}
}
if w.Len() > 0 {
flush()
}
for i := 0; i < cdch; i++ {
if e := <-dch; e != nil {
b.Fatal(e.Error())
}
}
b.StopTimer()
c.Close()
s.Shutdown()
}
func mqttBenchConsumeMsgQos1(c net.Conn, total, size int, dch chan<- error) {
var buf [mqttBenchBufLen]byte
pubAck := [4]byte{mqttPacketPubAck, 0x2, 0, 0}
var err error
var n int
var pi uint16
var prev int
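// Read the fixed-size PUBLISH packets and send a PUBACK for each complete
// one; prev carries the bytes of a partial packet into the next read, and
// pi wraps after 60000 to stay within the 16-bit packet identifier range.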
for i := 0; i < total; {
n, err = c.Read(buf[:])
if err != nil {
break
}
n += prev
for ; n >= size; n -= size {
i++
pi++
pubAck[2] = byte(pi >> 8)
pubAck[3] = byte(pi)
if _, err = c.Write(pubAck[:4]); err != nil {
dch <- err
return
}
if pi == 60000 {
pi = 0
}
}
prev = n
}
dch <- err
}
func mqttBenchConsumePubAck(c net.Conn, total int, dch, ppich chan<- error) {
var buf [mqttBenchBufLen]byte
var err error
var n int
var pi uint16
var prev int
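// Count 4-byte PUBACK packets, signaling ppich every 10000 acks so the
// publisher can extend its in-flight window; prev carries partial packets
// across reads.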
for i := 0; i < total; {
n, err = c.Read(buf[:])
if err != nil {
break
}
n += prev
for ; n >= 4; n -= 4 {
i++
pi++
if pi%10000 == 0 {
ppich <- nil
}
if pi == 60001 {
pi = 0
}
}
prev = n
}
ppich <- err
dch <- err
}
func BenchmarkMQTT_QoS0_Pub_______0b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, "", 0)
}
func BenchmarkMQTT_QoS0_Pub_______8b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(8), 0)
}
func BenchmarkMQTT_QoS0_Pub______32b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(32), 0)
}
func BenchmarkMQTT_QoS0_Pub_____128b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(128), 0)
}
func BenchmarkMQTT_QoS0_Pub_____256b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(256), 0)
}
func BenchmarkMQTT_QoS0_Pub_______1K_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(1024), 0)
}
func BenchmarkMQTT_QoS0_PubSub1___0b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, "", 1)
}
func BenchmarkMQTT_QoS0_PubSub1___8b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(8), 1)
}
func BenchmarkMQTT_QoS0_PubSub1__32b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(32), 1)
}
func BenchmarkMQTT_QoS0_PubSub1_128b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(128), 1)
}
func BenchmarkMQTT_QoS0_PubSub1_256b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(256), 1)
}
func BenchmarkMQTT_QoS0_PubSub1___1K_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(1024), 1)
}
func BenchmarkMQTT_QoS0_PubSub2___0b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, "", 2)
}
func BenchmarkMQTT_QoS0_PubSub2___8b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(8), 2)
}
func BenchmarkMQTT_QoS0_PubSub2__32b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(32), 2)
}
func BenchmarkMQTT_QoS0_PubSub2_128b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(128), 2)
}
func BenchmarkMQTT_QoS0_PubSub2_256b_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(256), 2)
}
func BenchmarkMQTT_QoS0_PubSub2___1K_Payload(b *testing.B) {
mqttBenchPubQoS0(b, mqttPubSubj, sizedString(1024), 2)
}
func BenchmarkMQTT_QoS1_Pub_______0b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, "", 0)
}
func BenchmarkMQTT_QoS1_Pub_______8b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(8), 0)
}
func BenchmarkMQTT_QoS1_Pub______32b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(32), 0)
}
func BenchmarkMQTT_QoS1_Pub_____128b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(128), 0)
}
func BenchmarkMQTT_QoS1_Pub_____256b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(256), 0)
}
func BenchmarkMQTT_QoS1_Pub_______1K_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(1024), 0)
}
func BenchmarkMQTT_QoS1_PubSub1___0b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, "", 1)
}
func BenchmarkMQTT_QoS1_PubSub1___8b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(8), 1)
}
func BenchmarkMQTT_QoS1_PubSub1__32b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(32), 1)
}
func BenchmarkMQTT_QoS1_PubSub1_128b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(128), 1)
}
func BenchmarkMQTT_QoS1_PubSub1_256b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(256), 1)
}
func BenchmarkMQTT_QoS1_PubSub1___1K_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(1024), 1)
}
func BenchmarkMQTT_QoS1_PubSub2___0b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, "", 2)
}
func BenchmarkMQTT_QoS1_PubSub2___8b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(8), 2)
}
func BenchmarkMQTT_QoS1_PubSub2__32b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(32), 2)
}
func BenchmarkMQTT_QoS1_PubSub2_128b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(128), 2)
}
func BenchmarkMQTT_QoS1_PubSub2_256b_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(256), 2)
}
func BenchmarkMQTT_QoS1_PubSub2___1K_Payload(b *testing.B) {
mqttBenchPubQoS1(b, mqttPubSubj, sizedString(1024), 2)
}
| 1 | 14,203 | This was on purpose: I set logging (NoLog=false) and a dummy logger below. The idea is to get better code coverage and to exercise the debug/trace statements. Any reason you have removed this? | nats-io-nats-server | go |
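For context on the reviewer's point above: keeping NoLog=false and installing a no-op "dummy" logger lets a benchmark or test still execute the server's Debugf/Tracef call sites (better coverage) without flooding the output. Below is a minimal sketch of such a logger; the Logger method set and the SetLogger(logger, debug, trace) wiring are assumptions about the nats-server API and are not taken from this diff.

// dummyLogger satisfies the server's Logger interface but discards all output,
// so debug/trace statements are executed without polluting benchmark results.
type dummyLogger struct{}

func (dummyLogger) Noticef(format string, v ...interface{}) {}
func (dummyLogger) Warnf(format string, v ...interface{})   {}
func (dummyLogger) Fatalf(format string, v ...interface{})  {}
func (dummyLogger) Errorf(format string, v ...interface{})  {}
func (dummyLogger) Debugf(format string, v ...interface{})  {}
func (dummyLogger) Tracef(format string, v ...interface{})  {}

// Assumed wiring in the benchmark setup (not part of the file above):
//   o.NoLog = false                         // keep logging enabled
//   s := /* start the test server with o */
//   s.SetLogger(dummyLogger{}, true, true)  // debug and trace on, output discarded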
@@ -83,11 +83,11 @@ public final class StringBuilderConstantParameters
.build();
}
List<ExpressionTree> arguments = result.get();
- Stream<String> prefixStream = arguments.stream().findFirst()
+ Stream<String> prefixStream = arguments.stream()
+ .findFirst()
.map(ASTHelpers::getType)
- .filter(type ->
- ASTHelpers.isSameType(type, state.getTypeFromString("java.lang.String"), state))
- .map(ignored -> Stream.<String>of())
+ .filter(type -> ASTHelpers.isSameType(type, state.getTypeFromString("java.lang.String"), state))
+ .map(ignored -> Stream.<String>empty())
.orElseGet(() -> Stream.of("\"\""));
return buildDescription(tree) | 1 | /*
* (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.errorprone;
import com.google.auto.service.AutoService;
import com.google.common.base.Preconditions;
import com.google.common.collect.Streams;
import com.google.errorprone.BugPattern;
import com.google.errorprone.BugPattern.SeverityLevel;
import com.google.errorprone.VisitorState;
import com.google.errorprone.bugpatterns.BugChecker;
import com.google.errorprone.fixes.SuggestedFix;
import com.google.errorprone.matchers.Description;
import com.google.errorprone.matchers.Matcher;
import com.google.errorprone.matchers.Matchers;
import com.google.errorprone.matchers.method.MethodMatchers;
import com.google.errorprone.util.ASTHelpers;
import com.sun.source.tree.BinaryTree;
import com.sun.source.tree.ConditionalExpressionTree;
import com.sun.source.tree.ExpressionTree;
import com.sun.source.tree.MemberSelectTree;
import com.sun.source.tree.MethodInvocationTree;
import com.sun.source.tree.NewClassTree;
import com.sun.source.util.SimpleTreeVisitor;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@AutoService(BugChecker.class)
@BugPattern(
name = "StringBuilderConstantParameters",
link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = BugPattern.LinkType.CUSTOM,
providesFix = BugPattern.ProvidesFix.REQUIRES_HUMAN_ATTENTION,
severity = SeverityLevel.WARNING,
summary = "StringBuilder with a constant number of parameters should be replaced by simple concatenation")
public final class StringBuilderConstantParameters
extends BugChecker implements BugChecker.MethodInvocationTreeMatcher {
private static final String MESSAGE =
"StringBuilder with a constant number of parameters should be replaced by simple concatenation.\nThe Java "
+ "compiler (jdk8) replaces concatenation of a constant number of arguments with a StringBuilder, "
+ "while jdk 9+ take advantage of JEP 280 (https://openjdk.java.net/jeps/280) to efficiently "
+ "pre-size the result for better performance than a StringBuilder.";
private static final long serialVersionUID = 1L;
private static final Matcher<ExpressionTree> STRING_BUILDER_TYPE_MATCHER = Matchers.isSameType(StringBuilder.class);
private static final Matcher<ExpressionTree> STRING_BUILDER_TO_STRING =
MethodMatchers.instanceMethod()
.onExactClass(StringBuilder.class.getName())
.named("toString")
.withParameters();
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
if (!STRING_BUILDER_TO_STRING.matches(tree, state)) {
return Description.NO_MATCH;
}
Optional<List<ExpressionTree>> result = tree.getMethodSelect().accept(StringBuilderVisitor.INSTANCE, state);
if (!result.isPresent()) {
return Description.NO_MATCH;
}
// Avoid rewriting code that removes comments.
if (ASTHelpers.containsComments(tree, state)) {
return buildDescription(tree)
.setMessage(MESSAGE)
.build();
}
List<ExpressionTree> arguments = result.get();
Stream<String> prefixStream = arguments.stream().findFirst()
.map(ASTHelpers::getType)
.filter(type ->
ASTHelpers.isSameType(type, state.getTypeFromString("java.lang.String"), state))
.map(ignored -> Stream.<String>of())
.orElseGet(() -> Stream.of("\"\""));
return buildDescription(tree)
.setMessage(MESSAGE)
.addFix(SuggestedFix.builder()
.replace(tree, Streams.concat(prefixStream, arguments.stream()
.map(node -> getArgumentSourceString(state, node)))
.collect(Collectors.joining(" + ")))
.build())
.build();
}
private static String getArgumentSourceString(VisitorState state, ExpressionTree tree) {
String originalSource = state.getSourceForNode(tree);
// Ternary expressions must be parenthesized to avoid leaking into preceding or following expressions.
if (tree instanceof ConditionalExpressionTree || tree instanceof BinaryTree) {
return '(' + originalSource + ')';
}
return originalSource;
}
/**
* {@link StringBuilderVisitor} checks if a {@link StringBuilder#toString()} invocation can be followed up
     * a fluent invocation chain, and therefore must have a constant number of arguments.
* If so, the visitor results in a present {@link Optional} of {@link ExpressionTree arguments} in the order
* they are {@link StringBuilder#append(Object) appended}, otherwise an {@link Optional#empty() empty optional}
* is returned.
* This allows us to maintain a single implementation for validation and building a {@link SuggestedFix} without
     * sacrificing build time by allocating objects for {@link StringBuilder builders} which don't fit our pattern.
*/
private static final class StringBuilderVisitor
extends SimpleTreeVisitor<Optional<List<ExpressionTree>>, VisitorState> {
private static final StringBuilderVisitor INSTANCE = new StringBuilderVisitor();
private StringBuilderVisitor() {
super(Optional.empty());
}
@Override
public Optional<List<ExpressionTree>> visitNewClass(NewClassTree node, VisitorState state) {
if (!STRING_BUILDER_TYPE_MATCHER.matches(node.getIdentifier(), state)) {
return defaultAction(node, state);
}
if (node.getArguments().isEmpty()) {
return Optional.of(new ArrayList<>());
}
if (node.getArguments().size() == 1
// We shouldn't replace pre-sized builders until we target java 11 across most libraries.
&& (ASTHelpers.isSameType(
ASTHelpers.getType(node.getArguments().get(0)),
state.getTypeFromString("java.lang.String"), state)
|| ASTHelpers.isSameType(
ASTHelpers.getType(node.getArguments().get(0)),
state.getTypeFromString("java.lang.CharSequence"), state))) {
List<ExpressionTree> resultList = new ArrayList<>();
resultList.add(node.getArguments().get(0));
return Optional.of(resultList);
}
return Optional.empty();
}
@Override
public Optional<List<ExpressionTree>> visitMemberSelect(MemberSelectTree node, VisitorState state) {
if (node.getIdentifier().contentEquals("append")
|| node.getIdentifier().contentEquals("toString")) {
return node.getExpression().accept(this, state);
}
return defaultAction(node, state);
}
@Override
public Optional<List<ExpressionTree>> visitMethodInvocation(
MethodInvocationTree node,
VisitorState state) {
Optional<List<ExpressionTree>> result = node.getMethodSelect().accept(this, state);
if (result.isPresent()) {
Preconditions.checkState(node.getArguments().size() == 1, "Expected a single argument to 'append'");
result.get().add(node.getArguments().get(0));
}
return result;
}
}
}
| 1 | 7,915 | does adding a `Stream.<String>empty()` help here? | palantir-gradle-baseline | java |
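On the reviewer's question above: Stream.<String>of() with no arguments and Stream.<String>empty() are behaviorally equivalent — both yield an empty Stream<String> — so the change is about clarity, and it avoids the zero-length varargs array that of() needs; the generated concatenation is unchanged. A standalone illustration (not part of the check itself):

import java.util.stream.Collectors;
import java.util.stream.Stream;

class EmptyStreamDemo {
    public static void main(String[] args) {
        // Both prefixes are empty streams, so the joined result is identical.
        String viaOf = Stream.concat(Stream.<String>of(), Stream.of("\"\"", "value"))
                .collect(Collectors.joining(" + "));
        String viaEmpty = Stream.concat(Stream.<String>empty(), Stream.of("\"\"", "value"))
                .collect(Collectors.joining(" + "));
        System.out.println(viaOf);                  // "" + value
        System.out.println(viaOf.equals(viaEmpty)); // true
    }
}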