@@ -1112,7 +1112,7 @@ func (c *client) flushOutbound() bool {
 	}
 
 	// Check to see if we can reuse buffers.
-	if len(cnb) > 0 {
+	if len(cnb) > 0 && n >= int64(len(cnb[0])) {
 		oldp := cnb[0][:0]
 		if cap(oldp) >= int(c.out.sz) {
 			// Replace primary or secondary if they are nil, reusing same buffer.
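One reading of this hunk (an interpretation, not stated in the record itself): `n` is the byte count returned by `nb.WriteTo`, so the added condition skips buffer reuse when the first chunk in `cnb` was not fully written. In that case the chunk is still queued for a retry by `handlePartialWrite`, and re-slicing it to zero length for reuse would let later appends clobber bytes that never reached the socket. A minimal, self-contained Go sketch of that aliasing hazard (variable names here are illustrative, not from the NATS code):

package main

import "fmt"

func main() {
	// After a partial write, imagine "unsent data" is the first chunk in cnb:
	// it is still referenced by the pending net.Buffers queued for a retry.
	buf := []byte("unsent data")
	pending := buf // stand-in for the chunk still held in c.out.nb

	// Reusing the chunk the way flushOutbound does (cnb[0][:0]) keeps the
	// same backing array, so the next append overwrites the unflushed bytes.
	reused := buf[:0]
	reused = append(reused, []byte("NEW MESSAGE")...)

	fmt.Printf("%s\n", pending) // prints "NEW MESSAGE", not "unsent data"
}

The pre-patch source file that the hunk applies to follows.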
// Copyright 2012-2020 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "bytes" "crypto/tls" "encoding/json" "fmt" "io" "math/rand" "net" "regexp" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/nats-io/jwt" ) // Type of client connection. const ( // CLIENT is an end user. CLIENT = iota // ROUTER represents another server in the cluster. ROUTER // GATEWAY is a link between 2 clusters. GATEWAY // SYSTEM is an internal system client. SYSTEM // LEAF is for leaf node connections. LEAF ) const ( // ClientProtoZero is the original Client protocol from 2009. // http://nats.io/documentation/internals/nats-protocol/ ClientProtoZero = iota // ClientProtoInfo signals a client can receive more then the original INFO block. // This can be used to update clients on other cluster members, etc. ClientProtoInfo ) const ( pingProto = "PING" + _CRLF_ pongProto = "PONG" + _CRLF_ errProto = "-ERR '%s'" + _CRLF_ okProto = "+OK" + _CRLF_ ) func init() { rand.Seed(time.Now().UnixNano()) } const ( // Scratch buffer size for the processMsg() calls. msgScratchSize = 1024 msgHeadProto = "RMSG " msgHeadProtoLen = len(msgHeadProto) // For controlling dynamic buffer sizes. startBufSize = 512 // For INFO/CONNECT block minBufSize = 64 // Smallest to shrink to for PING/PONG maxBufSize = 65536 // 64k shortsToShrink = 2 // Trigger to shrink dynamic buffers maxFlushPending = 10 // Max fsps to have in order to wait for writeLoop readLoopReport = 2 * time.Second // Server should not send a PING (for RTT) before the first PONG has // been sent to the client. However, in case some client libs don't // send CONNECT+PING, cap the maximum time before server can send // the RTT PING. maxNoRTTPingBeforeFirstPong = 2 * time.Second // For stalling fast producers stallClientMinDuration = 100 * time.Millisecond stallClientMaxDuration = time.Second ) var readLoopReportThreshold = readLoopReport // Represent client booleans with a bitmask type clientFlag uint16 // Some client state represented as flags const ( connectReceived clientFlag = 1 << iota // The CONNECT proto has been received infoReceived // The INFO protocol has been received firstPongSent // The first PONG has been sent handshakeComplete // For TLS clients, indicate that the handshake is complete flushOutbound // Marks client as having a flushOutbound call in progress. noReconnect // Indicate that on close, this connection should not attempt a reconnect closeConnection // Marks that closeConnection has already been called. writeLoopStarted // Marks that the writeLoop has been started. skipFlushOnClose // Marks that flushOutbound() should not be called on connection close. 
expectConnect // Marks if this connection is expected to send a CONNECT ) // set the flag (would be equivalent to set the boolean to true) func (cf *clientFlag) set(c clientFlag) { *cf |= c } // clear the flag (would be equivalent to set the boolean to false) func (cf *clientFlag) clear(c clientFlag) { *cf &= ^c } // isSet returns true if the flag is set, false otherwise func (cf clientFlag) isSet(c clientFlag) bool { return cf&c != 0 } // setIfNotSet will set the flag `c` only if that flag was not already // set and return true to indicate that the flag has been set. Returns // false otherwise. func (cf *clientFlag) setIfNotSet(c clientFlag) bool { if *cf&c == 0 { *cf |= c return true } return false } // ClosedState is the reason client was closed. This will // be passed into calls to clearConnection, but will only // be stored in ConnInfo for monitoring. type ClosedState int const ( ClientClosed = ClosedState(iota + 1) AuthenticationTimeout AuthenticationViolation TLSHandshakeError SlowConsumerPendingBytes SlowConsumerWriteDeadline WriteError ReadError ParseError StaleConnection ProtocolViolation BadClientProtocolVersion WrongPort MaxAccountConnectionsExceeded MaxConnectionsExceeded MaxPayloadExceeded MaxControlLineExceeded MaxSubscriptionsExceeded DuplicateRoute RouteRemoved ServerShutdown AuthenticationExpired WrongGateway MissingAccount Revocation ) // Some flags passed to processMsgResultsEx const pmrNoFlag int = 0 const ( pmrCollectQueueNames int = 1 << iota pmrIgnoreEmptyQueueFilter pmrAllowSendFromRouteToRoute ) type client struct { // Here first because of use of atomics, and memory alignment. stats // Indicate if we should check gwrm or not. Since checking gwrm is done // when processing inbound messages and requires the lock we want to // check only when needed. This is set/get using atomic, so needs to // be memory aligned. cgwrt int32 mpay int32 msubs int32 mcl int32 mu sync.Mutex kind int cid uint64 opts clientOpts start time.Time nonce []byte nc net.Conn ncs string out outbound srv *Server acc *Account user *NkeyUser host string port uint16 subs map[string]*subscription perms *permissions replies map[string]*resp mperms *msgDeny darray []string in readCache pcd map[*client]struct{} atmr *time.Timer ping pinfo msgb [msgScratchSize]byte last time.Time parseState rtt time.Duration rttStart time.Time rrTracking map[string]*remoteLatency rrMax int route *route gw *gateway leaf *leaf // To keep track of gateway replies mapping gwrm map[string]*gwReplyMap flags clientFlag // Compact booleans into a single field. Size will be increased when needed. debug bool trace bool echo bool } // Struct for PING initiation from the server. type pinfo struct { tmr *time.Timer last time.Time out int } // outbound holds pending data for a socket. type outbound struct { p []byte // Primary write buffer s []byte // Secondary for use post flush nb net.Buffers // net.Buffers for writev IO sz int32 // limit size per []byte, uses variable BufSize constants, start, min, max. sws int32 // Number of short writes, used for dynamic resizing. pb int64 // Total pending/queued bytes. pm int32 // Total pending/queued messages. fsp int32 // Flush signals that are pending per producer from readLoop's pcd. sch chan struct{} // To signal writeLoop that there is data to flush. wdl time.Duration // Snapshot of write deadline. mp int64 // Snapshot of max pending for client. lft time.Duration // Last flush time for Write. stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in. 
lwb int32 // Last byte size of Write. } type perm struct { allow *Sublist deny *Sublist } type permissions struct { sub perm pub perm resp *ResponsePermission pcache map[string]bool } // This is used to dynamically track responses and reply subjects // for dynamic permissioning. type resp struct { t time.Time n int } // msgDeny is used when a user permission for subscriptions has a deny // clause but a subscription could be made that is of broader scope. // e.g. deny = "foo", but user subscribes to "*". That subscription should // succeed but no message sent on foo should be delivered. type msgDeny struct { deny *Sublist dcache map[string]bool } // routeTarget collects information regarding routes and queue groups for // sending information to a remote. type routeTarget struct { sub *subscription qs []byte _qs [32]byte } const ( maxResultCacheSize = 512 maxDenyPermCacheSize = 256 maxPermCacheSize = 128 pruneSize = 32 routeTargetInit = 8 replyPermLimit = 4096 ) // Used in readloop to cache hot subject lookups and group statistics. type readCache struct { // These are for clients who are bound to a single account. genid uint64 results map[string]*SublistResult // This is for routes and gateways to have their own L1 as well that is account aware. pacache map[string]*perAccountCache // This is for when we deliver messages across a route. We use this structure // to make sure to only send one message and properly scope to queues as needed. rts []routeTarget prand *rand.Rand // These are all temporary totals for an invocation of a read in readloop. msgs int32 bytes int32 subs int32 rsz int32 // Read buffer size srs int32 // Short reads, used for dynamic buffer resizing. } const ( defaultMaxPerAccountCacheSize = 4096 defaultPrunePerAccountCacheSize = 256 defaultClosedSubsCheckInterval = 5 * time.Minute ) var ( maxPerAccountCacheSize = defaultMaxPerAccountCacheSize prunePerAccountCacheSize = defaultPrunePerAccountCacheSize closedSubsCheckInterval = defaultClosedSubsCheckInterval ) // perAccountCache is for L1 semantics for inbound messages from a route or gateway to mimic the performance of clients. type perAccountCache struct { acc *Account results *SublistResult genid uint64 } func (c *client) String() (id string) { return c.ncs } // GetName returns the application supplied name for the connection. func (c *client) GetName() string { c.mu.Lock() name := c.opts.Name c.mu.Unlock() return name } // GetOpts returns the client options provided by the application. func (c *client) GetOpts() *clientOpts { return &c.opts } // GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil // otherwise. Implements the ClientAuth interface. func (c *client) GetTLSConnectionState() *tls.ConnectionState { tc, ok := c.nc.(*tls.Conn) if !ok { return nil } state := tc.ConnectionState() return &state } // This is the main subscription struct that indicates // interest in published messages. // FIXME(dlc) - This is getting bloated for normal subs, need // to optionally have an opts section for non-normal stuff. type subscription struct { client *client im *streamImport // This is for import stream support. shadow []*subscription // This is to track shadowed accounts. subject []byte queue []byte sid []byte nm int64 max int64 qw int32 closed int32 } // Indicate that this subscription is closed. // This is used in pruning of route and gateway cache items. 
func (s *subscription) close() { atomic.StoreInt32(&s.closed, 1) } // Return true if this subscription was unsubscribed // or its connection has been closed. func (s *subscription) isClosed() bool { return atomic.LoadInt32(&s.closed) == 1 } type clientOpts struct { Echo bool `json:"echo"` Verbose bool `json:"verbose"` Pedantic bool `json:"pedantic"` TLSRequired bool `json:"tls_required"` Nkey string `json:"nkey,omitempty"` JWT string `json:"jwt,omitempty"` Sig string `json:"sig,omitempty"` Authorization string `json:"auth_token,omitempty"` Username string `json:"user,omitempty"` Password string `json:"pass,omitempty"` Name string `json:"name"` Lang string `json:"lang"` Version string `json:"version"` Protocol int `json:"protocol"` Account string `json:"account,omitempty"` AccountNew bool `json:"new_account,omitempty"` // Routes only Import *SubjectPermission `json:"import,omitempty"` Export *SubjectPermission `json:"export,omitempty"` } var defaultOpts = clientOpts{Verbose: true, Pedantic: true, Echo: true} var internalOpts = clientOpts{Verbose: false, Pedantic: false, Echo: false} func init() { rand.Seed(time.Now().UnixNano()) } // Lock should be held func (c *client) initClient() { s := c.srv c.cid = atomic.AddUint64(&s.gcid, 1) // Outbound data structure setup c.out.sz = startBufSize c.out.sch = make(chan struct{}, 1) opts := s.getOpts() // Snapshots to avoid mutex access in fast paths. c.out.wdl = opts.WriteDeadline c.out.mp = opts.MaxPending c.subs = make(map[string]*subscription) c.echo = true c.debug = (atomic.LoadInt32(&c.srv.logging.debug) != 0) c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0) if c.kind == SYSTEM && !c.srv.logging.traceSysAcc { c.trace = false } // This is a scratch buffer used for processMsg() // The msg header starts with "RMSG ", which can be used // for both local and routes. // in bytes that is [82 77 83 71 32]. c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32} // This is to track pending clients that have data to be flushed // after we process inbound msgs from our own connection. c.pcd = make(map[*client]struct{}) // snapshot the string version of the connection var conn string if ip, ok := c.nc.(*net.TCPConn); ok { conn = ip.RemoteAddr().String() host, port, _ := net.SplitHostPort(conn) iPort, _ := strconv.Atoi(port) c.host, c.port = host, uint16(iPort) } switch c.kind { case CLIENT: c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid) case ROUTER: c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid) case GATEWAY: c.ncs = fmt.Sprintf("%s - gid:%d", conn, c.cid) case LEAF: c.ncs = fmt.Sprintf("%s - lid:%d", conn, c.cid) case SYSTEM: c.ncs = "SYSTEM" } } // RemoteAddress expose the Address of the client connection, // nil when not connected or unknown func (c *client) RemoteAddress() net.Addr { c.mu.Lock() defer c.mu.Unlock() if c.nc == nil { return nil } return c.nc.RemoteAddr() } // Helper function to report errors. func (c *client) reportErrRegisterAccount(acc *Account, err error) { if err == ErrTooManyAccountConnections { c.maxAccountConnExceeded() return } c.Errorf("Problem registering with account [%s]", acc.Name) c.sendErr("Failed Account Registration") } // registerWithAccount will register the given user with a specific // account. This will change the subject namespace. func (c *client) registerWithAccount(acc *Account) error { if acc == nil || acc.sl == nil { return ErrBadAccount } // If we were previously registered, usually to $G, do accounting here to remove. 
if c.acc != nil { if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil { c.srv.decActiveAccounts() } } c.mu.Lock() kind := c.kind srv := c.srv c.acc = acc c.applyAccountLimits() c.mu.Unlock() // Check if we have a max connections violation if kind == CLIENT && acc.MaxTotalConnectionsReached() { return ErrTooManyAccountConnections } else if kind == LEAF && acc.MaxTotalLeafNodesReached() { return ErrTooManyAccountConnections } // Add in new one. if prev := acc.addClient(c); prev == 0 && srv != nil { srv.incActiveAccounts() } return nil } // Helper to determine if we have met or exceeded max subs. func (c *client) subsAtLimit() bool { return c.msubs != jwt.NoLimit && len(c.subs) >= int(c.msubs) } // Apply account limits // Lock is held on entry. // FIXME(dlc) - Should server be able to override here? func (c *client) applyAccountLimits() { if c.acc == nil || (c.kind != CLIENT && c.kind != LEAF) { return } // Set here, will need to fo checks for NoLimit. if c.acc.msubs != jwt.NoLimit { c.msubs = c.acc.msubs } if c.acc.mpay != jwt.NoLimit { c.mpay = c.acc.mpay } s := c.srv opts := s.getOpts() // We check here if the server has an option set that is lower than the account limit. if c.mpay != jwt.NoLimit && opts.MaxPayload != 0 && int32(opts.MaxPayload) < c.acc.mpay { c.Errorf("Max Payload set to %d from server config which overrides %d from account claims", opts.MaxPayload, c.acc.mpay) c.mpay = int32(opts.MaxPayload) } // We check here if the server has an option set that is lower than the account limit. if c.msubs != jwt.NoLimit && opts.MaxSubs != 0 && opts.MaxSubs < int(c.acc.msubs) { c.Errorf("Max Subscriptions set to %d from server config which overrides %d from account claims", opts.MaxSubs, c.acc.msubs) c.msubs = int32(opts.MaxSubs) } if c.subsAtLimit() { go func() { c.maxSubsExceeded() time.Sleep(20 * time.Millisecond) c.closeConnection(MaxSubscriptionsExceeded) }() } } // RegisterUser allows auth to call back into a new client // with the authenticated user. This is used to map // any permissions into the client and setup accounts. func (c *client) RegisterUser(user *User) { // Register with proper account and sublist. if user.Account != nil { if err := c.registerWithAccount(user.Account); err != nil { c.reportErrRegisterAccount(user.Account, err) return } } c.mu.Lock() // Assign permissions. if user.Permissions == nil { // Reset perms to nil in case client previously had them. c.perms = nil c.mperms = nil } else { c.setPermissions(user.Permissions) } c.mu.Unlock() } // RegisterNkey allows auth to call back into a new nkey // client with the authenticated user. This is used to map // any permissions into the client and setup accounts. func (c *client) RegisterNkeyUser(user *NkeyUser) error { // Register with proper account and sublist. if user.Account != nil { if err := c.registerWithAccount(user.Account); err != nil { c.reportErrRegisterAccount(user.Account, err) return err } } c.mu.Lock() c.user = user // Assign permissions. if user.Permissions == nil { // Reset perms to nil in case client previously had them. c.perms = nil c.mperms = nil } else { c.setPermissions(user.Permissions) } c.mu.Unlock() return nil } func splitSubjectQueue(sq string) ([]byte, []byte, error) { vals := strings.Fields(strings.TrimSpace(sq)) s := []byte(vals[0]) var q []byte if len(vals) == 2 { q = []byte(vals[1]) } else if len(vals) > 2 { return nil, nil, fmt.Errorf("invalid subject-queue %q", sq) } return s, q, nil } // Initializes client.perms structure. // Lock is held on entry. 
func (c *client) setPermissions(perms *Permissions) { if perms == nil { return } c.perms = &permissions{} c.perms.pcache = make(map[string]bool) // Loop over publish permissions if perms.Publish != nil { if perms.Publish.Allow != nil { c.perms.pub.allow = NewSublistWithCache() } for _, pubSubject := range perms.Publish.Allow { sub := &subscription{subject: []byte(pubSubject)} c.perms.pub.allow.Insert(sub) } if len(perms.Publish.Deny) > 0 { c.perms.pub.deny = NewSublistWithCache() } for _, pubSubject := range perms.Publish.Deny { sub := &subscription{subject: []byte(pubSubject)} c.perms.pub.deny.Insert(sub) } } // Check if we are allowed to send responses. if perms.Response != nil { rp := *perms.Response c.perms.resp = &rp c.replies = make(map[string]*resp) } // Loop over subscribe permissions if perms.Subscribe != nil { var err error if len(perms.Subscribe.Allow) > 0 { c.perms.sub.allow = NewSublistWithCache() } for _, subSubject := range perms.Subscribe.Allow { sub := &subscription{} sub.subject, sub.queue, err = splitSubjectQueue(subSubject) if err != nil { c.Errorf("%s", err.Error()) continue } c.perms.sub.allow.Insert(sub) } if len(perms.Subscribe.Deny) > 0 { c.perms.sub.deny = NewSublistWithCache() // Also hold onto this array for later. c.darray = perms.Subscribe.Deny } for _, subSubject := range perms.Subscribe.Deny { sub := &subscription{} sub.subject, sub.queue, err = splitSubjectQueue(subSubject) if err != nil { c.Errorf("%s", err.Error()) continue } c.perms.sub.deny.Insert(sub) } } } // Check to see if we have an expiration for the user JWT via base claims. // FIXME(dlc) - Clear on connect with new JWT. func (c *client) checkExpiration(claims *jwt.ClaimsData) { if claims.Expires == 0 { return } tn := time.Now().Unix() if claims.Expires < tn { return } expiresAt := time.Duration(claims.Expires - tn) c.setExpirationTimer(expiresAt * time.Second) } // This will load up the deny structure used for filtering delivered // messages based on a deny clause for subscriptions. // Lock should be held. func (c *client) loadMsgDenyFilter() { c.mperms = &msgDeny{NewSublistWithCache(), make(map[string]bool)} for _, sub := range c.darray { c.mperms.deny.Insert(&subscription{subject: []byte(sub)}) } } // writeLoop is the main socket write functionality. // Runs in its own Go routine. func (c *client) writeLoop() { defer c.srv.grWG.Done() c.mu.Lock() if c.isClosed() { c.mu.Unlock() return } c.flags.set(writeLoopStarted) ch := c.out.sch c.mu.Unlock() // This will clear connection state and remove it from the server. defer c.teardownConn() // Used to check that we did flush from last wake up. waitOk := true // Used to limit the wait for a signal const maxWait = time.Second t := time.NewTimer(maxWait) var close bool // Main loop. Will wait to be signaled and then will use // buffered outbound structure for efficient writev to the underlying socket. for { c.mu.Lock() if close = c.flags.isSet(closeConnection); !close { owtf := c.out.fsp > 0 && c.out.pb < maxBufSize && c.out.fsp < maxFlushPending if waitOk && (c.out.pb == 0 || owtf) { c.mu.Unlock() // Reset our timer t.Reset(maxWait) // Wait on pending data. select { case <-ch: case <-t.C: } c.mu.Lock() close = c.flags.isSet(closeConnection) } } if close { c.flushAndClose(false) c.mu.Unlock() return } // Flush data waitOk = c.flushOutbound() c.mu.Unlock() } } // flushClients will make sure to flush any clients we may have // sent to during processing. We pass in a budget as a time.Duration // for how much time to spend in place flushing for this client. 
This // will normally be called in the readLoop of the client who sent the // message that now is being delivered. func (c *client) flushClients(budget time.Duration) time.Time { last := time.Now() // Check pending clients for flush. for cp := range c.pcd { // TODO(dlc) - Wonder if it makes more sense to create a new map? delete(c.pcd, cp) // Queue up a flush for those in the set cp.mu.Lock() // Update last activity for message delivery cp.last = last // Remove ourselves from the pending list. cp.out.fsp-- // Just ignore if this was closed. if cp.flags.isSet(closeConnection) { cp.mu.Unlock() continue } if budget > 0 && cp.flushOutbound() { budget -= cp.out.lft } else { cp.flushSignal() } cp.mu.Unlock() } return last } // readLoop is the main socket read functionality. // Runs in its own Go routine. func (c *client) readLoop() { // Grab the connection off the client, it will be cleared on a close. // We check for that after the loop, but want to avoid a nil dereference c.mu.Lock() s := c.srv defer s.grWG.Done() if c.isClosed() { c.mu.Unlock() return } nc := c.nc c.in.rsz = startBufSize // Snapshot max control line since currently can not be changed on reload and we // were checking it on each call to parse. If this changes and we allow MaxControlLine // to be reloaded without restart, this code will need to change. c.mcl = MAX_CONTROL_LINE_SIZE if s != nil { if opts := s.getOpts(); opts != nil { c.mcl = int32(opts.MaxControlLine) } } // Check the per-account-cache for closed subscriptions cpacc := c.kind == ROUTER || c.kind == GATEWAY // Last per-account-cache check for closed subscriptions lpacc := time.Now() c.mu.Unlock() defer func() { // These are used only in the readloop, so we can set them to nil // on exit of the readLoop. c.in.results, c.in.pacache = nil, nil }() // Start read buffer. b := make([]byte, c.in.rsz) for { n, err := nc.Read(b) // If we have any data we will try to parse and exit at the end. if n == 0 && err != nil { c.closeConnection(closedStateForErr(err)) return } start := time.Now() // Clear inbound stats cache c.in.msgs = 0 c.in.bytes = 0 c.in.subs = 0 // Main call into parser for inbound data. This will generate callouts // to process messages, etc. if err := c.parse(b[:n]); err != nil { if dur := time.Since(start); dur >= readLoopReportThreshold { c.Warnf("Readloop processing time: %v", dur) } // Need to call flushClients because some of the clients have been // assigned messages and their "fsp" incremented, and need now to be // decremented and their writeLoop signaled. c.flushClients(0) // handled inline if err != ErrMaxPayload && err != ErrAuthentication { c.Error(err) c.closeConnection(ProtocolViolation) } return } // Updates stats for client and server that were collected // from parsing through the buffer. if c.in.msgs > 0 { atomic.AddInt64(&c.inMsgs, int64(c.in.msgs)) atomic.AddInt64(&c.inBytes, int64(c.in.bytes)) atomic.AddInt64(&s.inMsgs, int64(c.in.msgs)) atomic.AddInt64(&s.inBytes, int64(c.in.bytes)) } // Budget to spend in place flushing outbound data. // Client will be checked on several fronts to see // if applicable. Routes and Gateways will never // spend time flushing outbound in place. var budget time.Duration if c.kind == CLIENT { budget = time.Millisecond } // Flush, or signal to writeLoop to flush to socket. last := c.flushClients(budget) // Update activity, check read buffer size. c.mu.Lock() closed := c.isClosed() // Activity based on interest changes or data/msgs. 
if c.in.msgs > 0 || c.in.subs > 0 { c.last = last } if n >= cap(b) { c.in.srs = 0 } else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to. c.in.srs++ } // Update read buffer size as/if needed. if n >= cap(b) && cap(b) < maxBufSize { // Grow c.in.rsz = int32(cap(b) * 2) b = make([]byte, c.in.rsz) } else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink { // Shrink, for now don't accelerate, ping/pong will eventually sort it out. c.in.rsz = int32(cap(b) / 2) b = make([]byte, c.in.rsz) } c.mu.Unlock() if dur := time.Since(start); dur >= readLoopReportThreshold { c.Warnf("Readloop processing time: %v", dur) } // Check to see if we got closed, e.g. slow consumer if closed { return } // We could have had a read error from above but still read some data. // If so do the close here unconditionally. if err != nil { c.closeConnection(closedStateForErr(err)) return } if cpacc && start.Sub(lpacc) >= closedSubsCheckInterval { c.pruneClosedSubFromPerAccountCache() lpacc = time.Now() } } } // Returns the appropriate closed state for a given read error. func closedStateForErr(err error) ClosedState { if err == io.EOF { return ClientClosed } return ReadError } // collapsePtoNB will place primary onto nb buffer as needed in prep for WriteTo. // This will return a copy on purpose. func (c *client) collapsePtoNB() net.Buffers { if c.out.p != nil { p := c.out.p c.out.p = nil return append(c.out.nb, p) } return c.out.nb } // This will handle the fixup needed on a partial write. // Assume pending has been already calculated correctly. func (c *client) handlePartialWrite(pnb net.Buffers) { nb := c.collapsePtoNB() // The partial needs to be first, so append nb to pnb c.out.nb = append(pnb, nb...) } // flushOutbound will flush outbound buffer to a client. // Will return true if data was attempted to be written. // Lock must be held func (c *client) flushOutbound() bool { if c.flags.isSet(flushOutbound) { // For CLIENT connections, it is possible that the readLoop calls // flushOutbound(). If writeLoop and readLoop compete and we are // here we should release the lock to reduce the risk of spinning. c.mu.Unlock() runtime.Gosched() c.mu.Lock() return false } c.flags.set(flushOutbound) defer c.flags.clear(flushOutbound) // Check for nothing to do. if c.nc == nil || c.srv == nil || c.out.pb == 0 { return true // true because no need to queue a signal. } // Place primary on nb, assign primary to secondary, nil out nb and secondary. nb := c.collapsePtoNB() c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil // For selecting primary replacement. cnb := nb // In case it goes away after releasing the lock. nc := c.nc attempted := c.out.pb apm := c.out.pm // Capture this (we change the value in some tests) wdl := c.out.wdl // Do NOT hold lock during actual IO. c.mu.Unlock() // flush here now := time.Now() // FIXME(dlc) - writev will do multiple IOs past 1024 on // most platforms, need to account for that with deadline? nc.SetWriteDeadline(now.Add(wdl)) // Actual write to the socket. n, err := nb.WriteTo(nc) nc.SetWriteDeadline(time.Time{}) lft := time.Since(now) // Re-acquire client lock. c.mu.Lock() if err != nil { // Handle timeout error (slow consumer) differently if ne, ok := err.(net.Error); ok && ne.Timeout() { if closed := c.handleWriteTimeout(n, attempted, len(cnb)); closed { return true } } else { // Other errors will cause connection to be closed. // For clients, report as debug but for others report as error. 
report := c.Debugf if c.kind != CLIENT { report = c.Errorf } report("Error flushing: %v", err) c.markConnAsClosed(WriteError, true) return true } } // Update flush time statistics. c.out.lft = lft c.out.lwb = int32(n) // Subtract from pending bytes and messages. c.out.pb -= int64(c.out.lwb) c.out.pm -= apm // FIXME(dlc) - this will not be totally accurate on partials. // Check for partial writes // TODO(dlc) - zero write with no error will cause lost message and the writeloop to spin. if int64(c.out.lwb) != attempted && n > 0 { c.handlePartialWrite(nb) } else if c.out.lwb >= c.out.sz { c.out.sws = 0 } // Adjust based on what we wrote plus any pending. pt := int64(c.out.lwb) + c.out.pb // Adjust sz as needed downward, keeping power of 2. // We do this at a slower rate. if pt < int64(c.out.sz) && c.out.sz > minBufSize { c.out.sws++ if c.out.sws > shortsToShrink { c.out.sz >>= 1 } } // Adjust sz as needed upward, keeping power of 2. if pt > int64(c.out.sz) && c.out.sz < maxBufSize { c.out.sz <<= 1 } // Check to see if we can reuse buffers. if len(cnb) > 0 { oldp := cnb[0][:0] if cap(oldp) >= int(c.out.sz) { // Replace primary or secondary if they are nil, reusing same buffer. if c.out.p == nil { c.out.p = oldp } else if c.out.s == nil || cap(c.out.s) < int(c.out.sz) { c.out.s = oldp } } } // Check that if there is still data to send and writeLoop is in wait, // then we need to signal. if c.out.pb > 0 { c.flushSignal() } // Check if we have a stalled gate and if so and we are recovering release // any stalled producers. Only kind==CLIENT will stall. if c.out.stc != nil && (int64(c.out.lwb) == attempted || c.out.pb < c.out.mp/2) { close(c.out.stc) c.out.stc = nil } return true } // This is invoked from flushOutbound() for io/timeout error (slow consumer). // Returns a boolean to indicate if the connection has been closed or not. // Lock is held on entry. func (c *client) handleWriteTimeout(written, attempted int64, numChunks int) bool { if tlsConn, ok := c.nc.(*tls.Conn); ok { if !tlsConn.ConnectionState().HandshakeComplete { // Likely a TLSTimeout error instead... c.markConnAsClosed(TLSHandshakeError, true) // Would need to coordinate with tlstimeout() // to avoid double logging, so skip logging // here, and don't report a slow consumer error. return true } } else if c.flags.isSet(expectConnect) && !c.flags.isSet(connectReceived) { // Under some conditions, a connection may hit a slow consumer write deadline // before the authorization timeout. If that is the case, then we handle // as slow consumer though we do not increase the counter as that can be // misleading. c.markConnAsClosed(SlowConsumerWriteDeadline, true) return true } // Slow consumer here.. atomic.AddInt64(&c.srv.slowConsumers, 1) c.Noticef("Slow Consumer Detected: WriteDeadline of %v exceeded with %d chunks of %d total bytes.", c.out.wdl, numChunks, attempted) // We always close CLIENT connections, or when nothing was written at all... if c.kind == CLIENT || written == 0 { c.markConnAsClosed(SlowConsumerWriteDeadline, true) return true } return false } // Marks this connection has closed with the given reason. // Sets the closeConnection flag and skipFlushOnClose flag if asked. // Depending on the kind of connection, the connection will be saved. // If a writeLoop has been started, the final flush/close/teardown will // be done there, otherwise flush and close of TCP connection is done here in place. // Returns true if closed in place, flase otherwise. // Lock is held on entry. 
func (c *client) markConnAsClosed(reason ClosedState, skipFlush bool) bool { if c.flags.isSet(closeConnection) { return false } c.flags.set(closeConnection) if skipFlush { c.flags.set(skipFlushOnClose) } // Save off the connection if its a client or leafnode. if c.kind == CLIENT || c.kind == LEAF { if nc := c.nc; nc != nil && c.srv != nil { // TODO: May want to send events to single go routine instead // of creating a new go routine for each save. go c.srv.saveClosedClient(c, nc, reason) } } // If writeLoop exists, let it do the final flush, close and teardown. if c.flags.isSet(writeLoopStarted) { c.flushSignal() return false } // Flush (if skipFlushOnClose is not set) and close in place. If flushing, // use a small WriteDeadline. c.flushAndClose(true) return true } // flushSignal will use server to queue the flush IO operation to a pool of flushers. // Lock must be held. func (c *client) flushSignal() bool { select { case c.out.sch <- struct{}{}: return true default: } return false } func (c *client) traceMsg(msg []byte) { if !c.trace { return } maxTrace := c.srv.getOpts().MaxTracedMsgLen if maxTrace > 0 && (len(msg)-LEN_CR_LF) > maxTrace { c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", msg[:maxTrace]) } else { c.Tracef("<<- MSG_PAYLOAD: [%q]", msg[:len(msg)-LEN_CR_LF]) } } func (c *client) traceInOp(op string, arg []byte) { c.traceOp("<<- %s", op, arg) } func (c *client) traceOutOp(op string, arg []byte) { c.traceOp("->> %s", op, arg) } func (c *client) traceOp(format, op string, arg []byte) { if !c.trace { return } opa := []interface{}{} if op != "" { opa = append(opa, op) } if arg != nil { opa = append(opa, string(arg)) } c.Tracef(format, opa) } // Process the information messages from Clients and other Routes. func (c *client) processInfo(arg []byte) error { info := Info{} if err := json.Unmarshal(arg, &info); err != nil { return err } switch c.kind { case ROUTER: c.processRouteInfo(&info) case GATEWAY: c.processGatewayInfo(&info) case LEAF: return c.processLeafnodeInfo(&info) } return nil } func (c *client) processErr(errStr string) { switch c.kind { case CLIENT: c.Errorf("Client Error %s", errStr) case ROUTER: c.Errorf("Route Error %s", errStr) case GATEWAY: c.Errorf("Gateway Error %s", errStr) case LEAF: c.Errorf("Leafnode Error %s", errStr) } c.closeConnection(ParseError) } // Password pattern matcher. var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`) // removePassFromTrace removes any notion of passwords from trace // messages for logging. func removePassFromTrace(arg []byte) []byte { if !bytes.Contains(arg, []byte(`pass`)) { return arg } // Take a copy of the connect proto just for the trace message. var _arg [4096]byte buf := append(_arg[:0], arg...) m := passPat.FindAllSubmatchIndex(buf, -1) if len(m) == 0 { return arg } redactedPass := []byte("[REDACTED]") for _, i := range m { if len(i) < 4 { continue } start := i[2] end := i[3] // Replace password substring. buf = append(buf[:start], append(redactedPass, buf[end:]...)...) break } return buf } // Returns the RTT by computing the elapsed time since now and `start`. // On Windows VM where I (IK) run tests, time.Since() will return 0 // (I suspect some time granularity issues). So return at minimum 1ns. 
func computeRTT(start time.Time) time.Duration { rtt := time.Since(start) if rtt <= 0 { rtt = time.Nanosecond } return rtt } func (c *client) processConnect(arg []byte) error { if c.trace { c.traceInOp("CONNECT", removePassFromTrace(arg)) } c.mu.Lock() // If we can't stop the timer because the callback is in progress... if !c.clearAuthTimer() { // wait for it to finish and handle sending the failure back to // the client. for !c.isClosed() { c.mu.Unlock() time.Sleep(25 * time.Millisecond) c.mu.Lock() } c.mu.Unlock() return nil } c.last = time.Now() // Estimate RTT to start. if c.kind == CLIENT { c.rtt = computeRTT(c.start) if c.srv != nil { c.clearPingTimer() c.srv.setFirstPingTimer(c) } } kind := c.kind srv := c.srv // Moved unmarshalling of clients' Options under the lock. // The client has already been added to the server map, so it is possible // that other routines lookup the client, and access its options under // the client's lock, so unmarshalling the options outside of the lock // would cause data RACEs. if err := json.Unmarshal(arg, &c.opts); err != nil { c.mu.Unlock() return err } // Indicate that the CONNECT protocol has been received, and that the // server now knows which protocol this client supports. c.flags.set(connectReceived) // Capture these under lock c.echo = c.opts.Echo proto := c.opts.Protocol verbose := c.opts.Verbose lang := c.opts.Lang account := c.opts.Account accountNew := c.opts.AccountNew ujwt := c.opts.JWT c.mu.Unlock() if srv != nil { // Applicable to clients only: // As soon as c.opts is unmarshalled and if the proto is at // least ClientProtoInfo, we need to increment the following counter. // This is decremented when client is removed from the server's // clients map. if kind == CLIENT && proto >= ClientProtoInfo { srv.mu.Lock() srv.cproto++ srv.mu.Unlock() } // Check for Auth if ok := srv.checkAuthentication(c); !ok { // We may fail here because we reached max limits on an account. if ujwt != "" { c.mu.Lock() acc := c.acc c.mu.Unlock() if acc != nil && acc != srv.gacc { return ErrTooManyAccountConnections } } c.authViolation() return ErrAuthentication } // Check for Account designation, this section should be only used when there is not a jwt. if account != "" { var acc *Account var wasNew bool var err error if !srv.NewAccountsAllowed() { acc, err = srv.LookupAccount(account) if err != nil { c.Errorf(err.Error()) c.sendErr(ErrMissingAccount.Error()) return err } else if accountNew && acc != nil { c.sendErrAndErr(ErrAccountExists.Error()) return ErrAccountExists } } else { // We can create this one on the fly. acc, wasNew = srv.LookupOrRegisterAccount(account) if accountNew && !wasNew { c.sendErrAndErr(ErrAccountExists.Error()) return ErrAccountExists } } // If we are here we can register ourselves with the new account. if err := c.registerWithAccount(acc); err != nil { c.reportErrRegisterAccount(acc, err) return ErrBadAccount } } else if c.acc == nil { // By default register with the global account. c.registerWithAccount(srv.gacc) } } switch kind { case CLIENT: // Check client protocol request if it exists. 
if proto < ClientProtoZero || proto > ClientProtoInfo { c.sendErr(ErrBadClientProtocol.Error()) c.closeConnection(BadClientProtocolVersion) return ErrBadClientProtocol } if verbose { c.sendOK() } case ROUTER: // Delegate the rest of processing to the route return c.processRouteConnect(srv, arg, lang) case GATEWAY: // Delegate the rest of processing to the gateway return c.processGatewayConnect(arg) case LEAF: // Delegate the rest of processing to the leaf node return c.processLeafNodeConnect(srv, arg, lang) } return nil } func (c *client) sendErrAndErr(err string) { c.sendErr(err) c.Errorf(err) } func (c *client) sendErrAndDebug(err string) { c.sendErr(err) c.Debugf(err) } func (c *client) authTimeout() { c.sendErrAndDebug("Authentication Timeout") c.closeConnection(AuthenticationTimeout) } func (c *client) authExpired() { c.sendErrAndDebug("User Authentication Expired") c.closeConnection(AuthenticationExpired) } func (c *client) accountAuthExpired() { c.sendErrAndDebug("Account Authentication Expired") c.closeConnection(AuthenticationExpired) } func (c *client) authViolation() { var s *Server var hasTrustedNkeys, hasNkeys, hasUsers bool if s = c.srv; s != nil { s.mu.Lock() hasTrustedNkeys = len(s.trustedKeys) > 0 hasNkeys = s.nkeys != nil hasUsers = s.users != nil s.mu.Unlock() defer s.sendAuthErrorEvent(c) } if hasTrustedNkeys { c.Errorf("%v", ErrAuthentication) } else if hasNkeys { c.Errorf("%s - Nkey %q", ErrAuthentication.Error(), c.opts.Nkey) } else if hasUsers { c.Errorf("%s - User %q", ErrAuthentication.Error(), c.opts.Username) } else { c.Errorf(ErrAuthentication.Error()) } c.sendErr("Authorization Violation") c.closeConnection(AuthenticationViolation) } func (c *client) maxAccountConnExceeded() { c.sendErrAndErr(ErrTooManyAccountConnections.Error()) c.closeConnection(MaxAccountConnectionsExceeded) } func (c *client) maxConnExceeded() { c.sendErrAndErr(ErrTooManyConnections.Error()) c.closeConnection(MaxConnectionsExceeded) } func (c *client) maxSubsExceeded() { c.sendErrAndErr(ErrTooManySubs.Error()) } func (c *client) maxPayloadViolation(sz int, max int32) { c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max) c.sendErr("Maximum Payload Violation") c.closeConnection(MaxPayloadExceeded) } // queueOutbound queues data for a clientconnection. // Return if the data is referenced or not. If referenced, the caller // should not reuse the `data` array. // Lock should be held. func (c *client) queueOutbound(data []byte) bool { // Do not keep going if closed if c.flags.isSet(closeConnection) { return false } // Assume data will not be referenced referenced := false // Add to pending bytes total. c.out.pb += int64(len(data)) // Check for slow consumer via pending bytes limit. // ok to return here, client is going away. if c.kind == CLIENT && c.out.pb > c.out.mp { atomic.AddInt64(&c.srv.slowConsumers, 1) c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp) c.markConnAsClosed(SlowConsumerPendingBytes, true) return referenced } if c.out.p == nil && len(data) < maxBufSize { if c.out.sz == 0 { c.out.sz = startBufSize } if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { c.out.p = c.out.s c.out.s = nil } else { // FIXME(dlc) - make power of 2 if less than maxBufSize? c.out.p = make([]byte, 0, c.out.sz) } } // Determine if we copy or reference available := cap(c.out.p) - len(c.out.p) if len(data) > available { // We can't fit everything into existing primary, but message will // fit in next one we allocate or utilize from the secondary. // So copy what we can. 
if available > 0 && len(data) < int(c.out.sz) { c.out.p = append(c.out.p, data[:available]...) data = data[available:] } // Put the primary on the nb if it has a payload if len(c.out.p) > 0 { c.out.nb = append(c.out.nb, c.out.p) c.out.p = nil } // Check for a big message, and if found place directly on nb // FIXME(dlc) - do we need signaling of ownership here if we want len(data) < maxBufSize if len(data) > maxBufSize { c.out.nb = append(c.out.nb, data) referenced = true } else { // We will copy to primary. if c.out.p == nil { // Grow here if (c.out.sz << 1) <= maxBufSize { c.out.sz <<= 1 } if len(data) > int(c.out.sz) { c.out.p = make([]byte, 0, len(data)) } else { if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { // TODO(dlc) - Size mismatch? c.out.p = c.out.s c.out.s = nil } else { c.out.p = make([]byte, 0, c.out.sz) } } } c.out.p = append(c.out.p, data...) } } else { c.out.p = append(c.out.p, data...) } // Check here if we should create a stall channel if we are falling behind. // We do this here since if we wait for consumer's writeLoop it could be // too late with large number of fan in producers. if c.out.pb > c.out.mp/2 && c.out.stc == nil { c.out.stc = make(chan struct{}) } return referenced } // Assume the lock is held upon entry. func (c *client) enqueueProtoAndFlush(proto []byte, doFlush bool) { if c.isClosed() { return } c.queueOutbound(proto) if !(doFlush && c.flushOutbound()) { c.flushSignal() } } // Queues and then flushes the connection. This should only be called when // the writeLoop cannot be started yet. Use enqueueProto() otherwise. // Lock is held on entry. func (c *client) sendProtoNow(proto []byte) { c.enqueueProtoAndFlush(proto, true) } // Enqueues the given protocol and signal the writeLoop if necessary. // Lock is held on entry. func (c *client) enqueueProto(proto []byte) { c.enqueueProtoAndFlush(proto, false) } // Assume the lock is held upon entry. func (c *client) sendPong() { c.traceOutOp("PONG", nil) c.enqueueProto([]byte(pongProto)) } // Used to kick off a RTT measurement for latency tracking. func (c *client) sendRTTPing() bool { c.mu.Lock() sent := c.sendRTTPingLocked() c.mu.Unlock() return sent } // Used to kick off a RTT measurement for latency tracking. // This is normally called only when the caller has checked that // the c.rtt is 0 and wants to force an update by sending a PING. // Client lock held on entry. func (c *client) sendRTTPingLocked() bool { // Most client libs send a CONNECT+PING and wait for a PONG from the // server. So if firstPongSent flag is set, it is ok for server to // send the PING. But in case we have client libs that don't do that, // allow the send of the PING if more than 2 secs have elapsed since // the client TCP connection was accepted. if !c.flags.isSet(closeConnection) && (c.flags.isSet(firstPongSent) || time.Since(c.start) > maxNoRTTPingBeforeFirstPong) { c.sendPing() return true } return false } // Assume the lock is held upon entry. func (c *client) sendPing() { c.rttStart = time.Now() c.ping.out++ c.traceOutOp("PING", nil) c.enqueueProto([]byte(pingProto)) } // Generates the INFO to be sent to the client with the client ID included. // info arg will be copied since passed by value. // Assume lock is held. 
func (c *client) generateClientInfoJSON(info Info) []byte { info.CID = c.cid info.ClientIP = c.host info.MaxPayload = c.mpay // Generate the info json b, _ := json.Marshal(info) pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)} return bytes.Join(pcs, []byte(" ")) } func (c *client) sendErr(err string) { c.mu.Lock() c.traceOutOp("-ERR", []byte(err)) c.enqueueProto([]byte(fmt.Sprintf(errProto, err))) c.mu.Unlock() } func (c *client) sendOK() { c.mu.Lock() c.traceOutOp("OK", nil) c.enqueueProto([]byte(okProto)) c.pcd[c] = needFlush c.mu.Unlock() } func (c *client) processPing() { c.mu.Lock() c.traceInOp("PING", nil) if c.isClosed() { c.mu.Unlock() return } c.sendPong() // Record this to suppress us sending one if this // is within a given time interval for activity. c.ping.last = time.Now() // If not a CLIENT, we are done. Also the CONNECT should // have been received, but make sure it is so before proceeding if c.kind != CLIENT || !c.flags.isSet(connectReceived) { c.mu.Unlock() return } // If we are here, the CONNECT has been received so we know // if this client supports async INFO or not. var ( checkInfoChange bool srv = c.srv ) // For older clients, just flip the firstPongSent flag if not already // set and we are done. if c.opts.Protocol < ClientProtoInfo || srv == nil { c.flags.setIfNotSet(firstPongSent) } else { // This is a client that supports async INFO protocols. // If this is the first PING (so firstPongSent is not set yet), // we will need to check if there was a change in cluster topology // or we have a different max payload. We will send this first before // pong since most clients do flush after connect call. checkInfoChange = !c.flags.isSet(firstPongSent) } c.mu.Unlock() if checkInfoChange { opts := srv.getOpts() srv.mu.Lock() c.mu.Lock() // Now that we are under both locks, we can flip the flag. // This prevents sendAsyncInfoToClients() and code here to // send a double INFO protocol. c.flags.set(firstPongSent) // If there was a cluster update since this client was created, // send an updated INFO protocol now. if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) { c.enqueueProto(c.generateClientInfoJSON(srv.copyInfo())) } c.mu.Unlock() srv.mu.Unlock() } } func (c *client) processPong() { c.traceInOp("PONG", nil) c.mu.Lock() c.ping.out = 0 c.rtt = computeRTT(c.rttStart) srv := c.srv reorderGWs := c.kind == GATEWAY && c.gw.outbound c.mu.Unlock() if reorderGWs { srv.gateway.orderOutboundConnections() } } func (c *client) processPub(trace bool, arg []byte) error { if trace { c.traceInOp("PUB", arg) } // Unroll splitArgs to avoid runtime/heap issues a := [MAX_PUB_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } c.pa.arg = arg switch len(args) { case 2: c.pa.subject = args[0] c.pa.reply = nil c.pa.size = parseSize(args[1]) c.pa.szb = args[1] case 3: c.pa.subject = args[0] c.pa.reply = args[1] c.pa.size = parseSize(args[2]) c.pa.szb = args[2] default: return fmt.Errorf("processPub Parse Error: '%s'", arg) } // If number overruns an int64, parseSize() will have returned a negative value if c.pa.size < 0 { return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg) } maxPayload := atomic.LoadInt32(&c.mpay) // Use int64() to avoid int32 overrun... 
if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) { c.maxPayloadViolation(c.pa.size, maxPayload) return ErrMaxPayload } if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) { c.sendErr("Invalid Publish Subject") } return nil } func splitArg(arg []byte) [][]byte { a := [MAX_MSG_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } return args } func (c *client) processSub(argo []byte, noForward bool) (*subscription, error) { c.traceInOp("SUB", argo) // Indicate activity. c.in.subs++ // Copy so we do not reference a potentially large buffer // FIXME(dlc) - make more efficient. arg := make([]byte, len(argo)) copy(arg, argo) args := splitArg(arg) sub := &subscription{client: c} switch len(args) { case 2: sub.subject = args[0] sub.queue = nil sub.sid = args[1] case 3: sub.subject = args[0] sub.queue = args[1] sub.sid = args[2] default: return nil, fmt.Errorf("processSub Parse Error: '%s'", arg) } c.mu.Lock() // Grab connection type, account and server info. kind := c.kind acc := c.acc srv := c.srv sid := string(sub.sid) // This check does not apply to SYSTEM clients (because they don't have a `nc`...) if kind != SYSTEM && c.isClosed() { c.mu.Unlock() return sub, nil } // Check permissions if applicable. if kind == CLIENT { // First do a pass whether queue subscription is valid. This does not necessarily // mean that it will not be able to plain subscribe. // // allow = ["foo"] -> can subscribe or queue subscribe to foo using any queue // allow = ["foo v1"] -> can only queue subscribe to 'foo v1', no plain subs allowed. // allow = ["foo", "foo v1"] -> can subscribe to 'foo' but can only queue subscribe to 'foo v1' // if sub.queue != nil { if !c.canQueueSubscribe(string(sub.subject), string(sub.queue)) { c.mu.Unlock() c.subPermissionViolation(sub) return nil, nil } } else if !c.canSubscribe(string(sub.subject)) { c.mu.Unlock() c.subPermissionViolation(sub) return nil, nil } } // Check if we have a maximum on the number of subscriptions. if c.subsAtLimit() { c.mu.Unlock() c.maxSubsExceeded() return nil, nil } var updateGWs bool var err error // Subscribe here. if c.subs[sid] == nil { c.subs[sid] = sub if acc != nil && acc.sl != nil { err = acc.sl.Insert(sub) if err != nil { delete(c.subs, sid) } else { updateGWs = c.srv.gateway.enabled } } } // Unlocked from here onward c.mu.Unlock() if err != nil { c.sendErr("Invalid Subject") return nil, nil } else if c.opts.Verbose && kind != SYSTEM { c.sendOK() } // No account just return. if acc == nil { return sub, nil } if err := c.addShadowSubscriptions(acc, sub); err != nil { c.Errorf(err.Error()) } if noForward { return sub, nil } // If we are routing and this is a local sub, add to the route map for the associated account. if kind == CLIENT || kind == SYSTEM { srv.updateRouteSubscriptionMap(acc, sub, 1) if updateGWs { srv.gatewayUpdateSubInterest(acc.Name, sub, 1) } } // Now check on leafnode updates. srv.updateLeafNodes(acc, sub, 1) return sub, nil } // If the client's account has stream imports and there are matches for // this subscription's subject, then add shadow subscriptions in the // other accounts that export this subject. 
func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error { if acc == nil { return ErrMissingAccount } var ( rims [32]*streamImport ims = rims[:0] rfroms [32]*streamImport froms = rfroms[:0] tokens []string tsa [32]string hasWC bool ) acc.mu.RLock() // Loop over the import subjects. We have 3 scenarios. If we exact // match or we know the proposed subject is a strict subset of the // import we can subscribe to the subscription's subject directly. // The third scenario is where the proposed subject has a wildcard // and may not be an exact subset, but is a match. Therefore we have to // subscribe to the import subject, not the subscription's subject. for _, im := range acc.imports.streams { if im.invalid { continue } subj := string(sub.subject) if subj == im.prefix+im.from { ims = append(ims, im) continue } if tokens == nil { tokens = tsa[:0] start := 0 for i := 0; i < len(subj); i++ { // This is not perfect, but the test below will // be more exact, this is just to trigger the // additional test. if subj[i] == pwc || subj[i] == fwc { hasWC = true } else if subj[i] == btsep { tokens = append(tokens, subj[start:i]) start = i + 1 } } tokens = append(tokens, subj[start:]) } if isSubsetMatch(tokens, im.prefix+im.from) { ims = append(ims, im) } else if hasWC { if subjectIsSubsetMatch(im.prefix+im.from, subj) { froms = append(froms, im) } } } acc.mu.RUnlock() var shadow []*subscription if len(ims) > 0 || len(froms) > 0 { shadow = make([]*subscription, 0, len(ims)+len(froms)) } // Now walk through collected importMaps for _, im := range ims { // We will create a shadow subscription. nsub, err := c.addShadowSub(sub, im, false) if err != nil { return err } shadow = append(shadow, nsub) } // Now walk through importMaps that we need to subscribe // exactly to the "from" property. for _, im := range froms { // We will create a shadow subscription. nsub, err := c.addShadowSub(sub, im, true) if err != nil { return err } shadow = append(shadow, nsub) } if shadow != nil { c.mu.Lock() sub.shadow = shadow c.mu.Unlock() } return nil } // Add in the shadow subscription. func (c *client) addShadowSub(sub *subscription, im *streamImport, useFrom bool) (*subscription, error) { nsub := *sub // copy nsub.im = im if useFrom { nsub.subject = []byte(im.from) } else if im.prefix != "" { // redo subject here to match subject in the publisher account space. // Just remove prefix from what they gave us. That maps into other space. nsub.subject = sub.subject[len(im.prefix):] } c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name) if err := im.acc.sl.Insert(&nsub); err != nil { errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name) c.Debugf(errs) return nil, fmt.Errorf(errs) } // Update our route map here. c.srv.updateRouteSubscriptionMap(im.acc, &nsub, 1) return &nsub, nil } // canSubscribe determines if the client is authorized to subscribe to the // given subject. Assumes caller is holding lock. func (c *client) canSubscribe(subject string) bool { if c.perms == nil { return true } allowed := true // Check allow list. If no allow list that means all are allowed. Deny can overrule. if c.perms.sub.allow != nil { r := c.perms.sub.allow.Match(subject) allowed = len(r.psubs) != 0 } // If we have a deny list and we think we are allowed, check that as well. 
if allowed && c.perms.sub.deny != nil { r := c.perms.sub.deny.Match(subject) allowed = len(r.psubs) == 0 // We use the actual subscription to signal us to spin up the deny mperms // and cache. We check if the subject is a wildcard that contains any of // the deny clauses. // FIXME(dlc) - We could be smarter and track when these go away and remove. if allowed && c.mperms == nil && subjectHasWildcard(subject) { // Whip through the deny array and check if this wildcard subject is within scope. for _, sub := range c.darray { tokens := strings.Split(sub, tsep) if isSubsetMatch(tokens, sub) { c.loadMsgDenyFilter() break } } } } return allowed } func queueMatches(queue string, qsubs [][]*subscription) bool { if len(qsubs) == 0 { return true } for _, qsub := range qsubs { qs := qsub[0] qname := string(qs.queue) // NOTE: '*' and '>' tokens can also be valid // queue names so we first check against the // literal name. e.g. v1.* == v1.* if queue == qname || (subjectHasWildcard(qname) && subjectIsSubsetMatch(queue, qname)) { return true } } return false } func (c *client) canQueueSubscribe(subject, queue string) bool { if c.perms == nil { return true } allowed := true if c.perms.sub.allow != nil { r := c.perms.sub.allow.Match(subject) // If perms DO NOT have queue name, then psubs will be greater than // zero. If perms DO have queue name, then qsubs will be greater than // zero. allowed = len(r.psubs) > 0 if len(r.qsubs) > 0 { // If the queue appears in the allow list, then DO allow. allowed = queueMatches(queue, r.qsubs) } } if allowed && c.perms.sub.deny != nil { r := c.perms.sub.deny.Match(subject) // If perms DO NOT have queue name, then psubs will be greater than // zero. If perms DO have queue name, then qsubs will be greater than // zero. allowed = len(r.psubs) == 0 if len(r.qsubs) > 0 { // If the queue appears in the deny list, then DO NOT allow. allowed = !queueMatches(queue, r.qsubs) } } return allowed } // Low level unsubscribe for a given client. func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool) { c.mu.Lock() if !force && sub.max > 0 && sub.nm < sub.max { c.Debugf( "Deferring actual UNSUB(%s): %d max, %d received", string(sub.subject), sub.max, sub.nm) c.mu.Unlock() return } c.traceOp("<-> %s", "DELSUB", sub.sid) if c.kind != CLIENT && c.kind != SYSTEM { c.removeReplySubTimeout(sub) } // Remove accounting if requested. This will be false when we close a connection // with open subscriptions. if remove { delete(c.subs, string(sub.sid)) if acc != nil { acc.sl.Remove(sub) } } // Check to see if we have shadow subscriptions. var updateRoute bool shadowSubs := sub.shadow sub.shadow = nil if len(shadowSubs) > 0 { updateRoute = (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil } sub.close() c.mu.Unlock() // Process shadow subs if we have them. for _, nsub := range shadowSubs { if err := nsub.im.acc.sl.Remove(nsub); err != nil { c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name) } else if updateRoute { c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1) } // Now check on leafnode updates. c.srv.updateLeafNodes(nsub.im.acc, nsub, -1) } // Now check to see if this was part of a respMap entry for service imports. 
if acc != nil { acc.checkForRespEntry(string(sub.subject)) } } func (c *client) processUnsub(arg []byte) error { c.traceInOp("UNSUB", arg) args := splitArg(arg) var sid []byte max := -1 switch len(args) { case 1: sid = args[0] case 2: sid = args[0] max = parseSize(args[1]) default: return fmt.Errorf("processUnsub Parse Error: '%s'", arg) } // Indicate activity. c.in.subs++ var sub *subscription var ok, unsub bool c.mu.Lock() // Grab connection type. kind := c.kind srv := c.srv var acc *Account updateGWs := false if sub, ok = c.subs[string(sid)]; ok { acc = c.acc if max > 0 { sub.max = int64(max) } else { // Clear it here to override sub.max = 0 unsub = true } updateGWs = srv.gateway.enabled } c.mu.Unlock() if c.opts.Verbose { c.sendOK() } if unsub { c.unsubscribe(acc, sub, false, true) if acc != nil && kind == CLIENT || kind == SYSTEM { srv.updateRouteSubscriptionMap(acc, sub, -1) if updateGWs { srv.gatewayUpdateSubInterest(acc.Name, sub, -1) } } // Now check on leafnode updates. srv.updateLeafNodes(acc, sub, -1) } return nil } // checkDenySub will check if we are allowed to deliver this message in the // presence of deny clauses for subscriptions. Deny clauses will not prevent // larger scoped wildcard subscriptions, so we need to check at delivery time. // Lock should be held. func (c *client) checkDenySub(subject string) bool { if denied, ok := c.mperms.dcache[subject]; ok { return denied } else if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 { c.mperms.dcache[subject] = true return true } else { c.mperms.dcache[subject] = false } if len(c.mperms.dcache) > maxDenyPermCacheSize { c.pruneDenyCache() } return false } func (c *client) msgHeader(mh []byte, sub *subscription, reply []byte) []byte { if len(sub.sid) > 0 { mh = append(mh, sub.sid...) mh = append(mh, ' ') } if reply != nil { mh = append(mh, reply...) mh = append(mh, ' ') } mh = append(mh, c.pa.szb...) mh = append(mh, _CRLF_...) return mh } func (c *client) stalledWait(producer *client) { stall := c.out.stc ttl := stallDuration(c.out.pb, c.out.mp) c.mu.Unlock() defer c.mu.Lock() select { case <-stall: case <-time.After(ttl): producer.Debugf("Timed out of fast producer stall (%v)", ttl) } } func stallDuration(pb, mp int64) time.Duration { ttl := stallClientMinDuration if pb >= mp { ttl = stallClientMaxDuration } else if hmp := mp / 2; pb > hmp { bsz := hmp / 10 additional := int64(ttl) * ((pb - hmp) / bsz) ttl += time.Duration(additional) } return ttl } // Used to treat maps as efficient set var needFlush = struct{}{} // deliverMsg will deliver a message to a matching subscription and its underlying client. // We process all connection/client types. mh is the part that will be protocol/client specific. func (c *client) deliverMsg(sub *subscription, subject, mh, msg []byte, gwrply bool) bool { if sub.client == nil { return false } client := sub.client client.mu.Lock() // Check echo if c == client && !client.echo { client.mu.Unlock() return false } // Check if we have a subscribe deny clause. This will trigger us to check the subject // for a match against the denied subjects. if client.mperms != nil && client.checkDenySub(string(subject)) { client.mu.Unlock() return false } // This is set under the client lock using atomic because it can be // checked with atomic without the client lock. Here, we don't need // the atomic operation since we are under the lock. if sub.closed == 1 { client.mu.Unlock() return false } srv := client.srv sub.nm++ // Check if we should auto-unsubscribe. 
if sub.max > 0 { if client.kind == ROUTER && sub.nm >= sub.max { // The only router based messages that we will see here are remoteReplies. // We handle these slightly differently. defer client.removeReplySub(sub) } else { // For routing.. shouldForward := client.kind == CLIENT || client.kind == SYSTEM && client.srv != nil // If we are at the exact number, unsubscribe but // still process the message in hand, otherwise // unsubscribe and drop message on the floor. if sub.nm == sub.max { client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'", sub.max, string(sub.sid)) // Due to defer, reverse the code order so that execution // is consistent with other cases where we unsubscribe. if shouldForward { if srv.gateway.enabled { defer srv.gatewayUpdateSubInterest(client.acc.Name, sub, -1) } defer srv.updateRouteSubscriptionMap(client.acc, sub, -1) } defer client.unsubscribe(client.acc, sub, true, true) } else if sub.nm > sub.max { client.Debugf("Auto-unsubscribe limit [%d] exceeded", sub.max) client.mu.Unlock() client.unsubscribe(client.acc, sub, true, true) if shouldForward { srv.updateRouteSubscriptionMap(client.acc, sub, -1) if srv.gateway.enabled { srv.gatewayUpdateSubInterest(client.acc.Name, sub, -1) } } return false } } } // Update statistics // The msg includes the CR_LF, so pull back out for accounting. msgSize := int64(len(msg) - LEN_CR_LF) // No atomic needed since accessed under client lock. // Monitor is reading those also under client's lock. client.outMsgs++ client.outBytes += msgSize atomic.AddInt64(&srv.outMsgs, 1) atomic.AddInt64(&srv.outBytes, msgSize) // Check for internal subscription. if client.kind == SYSTEM { s := client.srv client.mu.Unlock() s.deliverInternalMsg(sub, c, subject, c.pa.reply, msg[:msgSize]) return true } // If we are a client and we detect that the consumer we are // sending to is in a stalled state, go ahead and wait here // with a limit. if c.kind == CLIENT && client.out.stc != nil { client.stalledWait(c) } // Check for closed connection if client.isClosed() { client.mu.Unlock() return false } // Do a fast check here to see if we should be tracking this from a latency // perspective. This will be for a request being received for an exported service. // This needs to be from a non-client (otherwise tracking happens at requestor). // // Also this check captures if the original reply (c.pa.reply) is a GW routed // reply (since it is known to be > minReplyLen). If that is the case, we need to // track the binding between the routed reply and the reply set in the message // header (which is c.pa.reply without the GNR routing prefix). if client.kind == CLIENT && len(c.pa.reply) > minReplyLen { if gwrply { // Note we keep track "in" the destination client (`client`) but the // routed reply subject is in `c.pa.reply`. Should that change, we // would have to pass the "reply" in deliverMsg(). srv.trackGWReply(client, c.pa.reply) } // If we do not have a registered RTT queue that up now. if client.rtt == 0 { client.sendRTTPingLocked() } // FIXME(dlc) - We may need to optimize this. // We will have tagged this with a suffix ('.T') if we are tracking. This is // needed from sampling. Not all will be tracked. if c.kind != CLIENT && client.acc.IsExportServiceTracking(string(subject)) && isTrackedReply(c.pa.reply) { client.trackRemoteReply(string(c.pa.reply)) } } // Queue to outbound buffer client.queueOutbound(mh) client.queueOutbound(msg) client.out.pm++ // If we are tracking dynamic publish permissions that track reply subjects, // do that accounting here. 
We only look at client.replies which will be non-nil. if client.replies != nil && len(c.pa.reply) > 0 { client.replies[string(c.pa.reply)] = &resp{time.Now(), 0} if len(client.replies) > replyPermLimit { client.pruneReplyPerms() } } // Check outbound threshold and queue IO flush if needed. // This is specifically looking at situations where we are getting behind and may want // to intervene before this producer goes back to top of readloop. We are in the producer's // readloop go routine at this point. // FIXME(dlc) - We may call this alot, maybe suppress after first call? if client.out.pm > 1 && client.out.pb > maxBufSize*2 { client.flushSignal() } // Add the data size we are responsible for here. This will be processed when we // return to the top of the readLoop. if _, ok := c.pcd[client]; !ok { client.out.fsp++ c.pcd[client] = needFlush } if c.trace { client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil) } client.mu.Unlock() return true } // This will track a remote reply for an exported service that has requested // latency tracking. // Lock assumed to be held. func (c *client) trackRemoteReply(reply string) { if c.rrTracking == nil { c.rrTracking = make(map[string]*remoteLatency) c.rrMax = c.acc.MaxAutoExpireResponseMaps() } rl := remoteLatency{ Account: c.acc.Name, ReqId: reply, } rl.M2.RequestStart = time.Now() c.rrTracking[reply] = &rl if len(c.rrTracking) >= c.rrMax { c.pruneRemoteTracking() } } // pruneReplyPerms will remove any stale or expired entries // in our reply cache. We make sure to not check too often. func (c *client) pruneReplyPerms() { // Make sure we do not check too often. if c.perms.resp == nil { return } mm := c.perms.resp.MaxMsgs ttl := c.perms.resp.Expires now := time.Now() for k, resp := range c.replies { if mm > 0 && resp.n >= mm { delete(c.replies, k) } else if ttl > 0 && now.Sub(resp.t) > ttl { delete(c.replies, k) } } } // pruneDenyCache will prune the deny cache via randomly // deleting items. Doing so pruneSize items at a time. // Lock must be held for this one since it is shared under // deliverMsg. func (c *client) pruneDenyCache() { r := 0 for subject := range c.mperms.dcache { delete(c.mperms.dcache, subject) if r++; r > pruneSize { break } } } // prunePubPermsCache will prune the cache via randomly // deleting items. Doing so pruneSize items at a time. func (c *client) prunePubPermsCache() { r := 0 for subject := range c.perms.pcache { delete(c.perms.pcache, subject) if r++; r > pruneSize { break } } } // pruneRemoteTracking will prune any remote tracking objects // that are too old. These are orphaned when a service is not // sending reponses etc. // Lock should be held upon entry. func (c *client) pruneRemoteTracking() { ttl := c.acc.AutoExpireTTL() now := time.Now() for reply, rl := range c.rrTracking { if now.Sub(rl.M2.RequestStart) > ttl { delete(c.rrTracking, reply) } } } // pubAllowed checks on publish permissioning. // Lock should not be held. func (c *client) pubAllowed(subject string) bool { return c.pubAllowedFullCheck(subject, true) } // pubAllowedFullCheck checks on all publish permissioning depending // on the flag for dynamic reply permissions. func (c *client) pubAllowedFullCheck(subject string, fullCheck bool) bool { if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) { return true } // Check if published subject is allowed if we have permissions in place. allowed, ok := c.perms.pcache[subject] if ok { return allowed } // Cache miss, check allow then deny as needed. 
if c.perms.pub.allow != nil { r := c.perms.pub.allow.Match(subject) allowed = len(r.psubs) != 0 } else { // No entries means all are allowed. Deny will overrule as needed. allowed = true } // If we have a deny list and are currently allowed, check that as well. if allowed && c.perms.pub.deny != nil { r := c.perms.pub.deny.Match(subject) allowed = len(r.psubs) == 0 } // If we are currently not allowed but we are tracking reply subjects // dynamically, check to see if we are allowed here but avoid pcache. // We need to acquire the lock though. if !allowed && fullCheck && c.perms.resp != nil { c.mu.Lock() if resp := c.replies[subject]; resp != nil { resp.n++ // Check if we have sent too many responses. if c.perms.resp.MaxMsgs > 0 && resp.n > c.perms.resp.MaxMsgs { delete(c.replies, subject) } else if c.perms.resp.Expires > 0 && time.Since(resp.t) > c.perms.resp.Expires { delete(c.replies, subject) } else { allowed = true } } c.mu.Unlock() } else { // Update our cache here. c.perms.pcache[string(subject)] = allowed // Prune if needed. if len(c.perms.pcache) > maxPermCacheSize { c.prunePubPermsCache() } } return allowed } // Test whether a reply subject is a service import reply. func isServiceReply(reply []byte) bool { // This function is inlined and checking this way is actually faster // than byte-by-byte comparison. return len(reply) > 3 && string(reply[:4]) == replyPrefix } // Test whether a reply subject is a service import or a gateway routed reply. func isReservedReply(reply []byte) bool { if isServiceReply(reply) { return true } // Faster to check with string([:]) than byte-by-byte if len(reply) > gwReplyPrefixLen && string(reply[:gwReplyPrefixLen]) == gwReplyPrefix { return true } return false } // This will decide to call the client code or router code. func (c *client) processInboundMsg(msg []byte) { switch c.kind { case CLIENT: c.processInboundClientMsg(msg) case ROUTER: c.processInboundRoutedMsg(msg) case GATEWAY: c.processInboundGatewayMsg(msg) case LEAF: c.processInboundLeafMsg(msg) } } // processInboundClientMsg is called to process an inbound msg from a client. func (c *client) processInboundClientMsg(msg []byte) { // Update statistics // The msg includes the CR_LF, so pull back out for accounting. c.in.msgs++ c.in.bytes += int32(len(msg) - LEN_CR_LF) if c.trace { c.traceMsg(msg) } // Check that client (could be here with SYSTEM) is not publishing on reserved "$GNR" prefix. if c.kind == CLIENT && hasGWRoutedReplyPrefix(c.pa.subject) { c.pubPermissionViolation(c.pa.subject) return } // Check pub permissions if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) { c.pubPermissionViolation(c.pa.subject) return } // Now check for reserved replies. These are used for service imports. if len(c.pa.reply) > 0 && isReservedReply(c.pa.reply) { c.replySubjectViolation(c.pa.reply) return } if c.opts.Verbose { c.sendOK() } // Mostly under testing scenarios. if c.srv == nil || c.acc == nil { return } // Check if this client's gateway replies map is not empty if atomic.LoadInt32(&c.cgwrt) > 0 && c.handleGWReplyMap(msg) { return } // Check to see if we need to map/route to another account. if c.acc.imports.services != nil { c.checkForImportServices(c.acc, msg) } // If we have an exported service and we are doing remote tracking, check this subject // to see if we need to report the latency. 
if c.rrTracking != nil { c.mu.Lock() rl := c.rrTracking[string(c.pa.subject)] if rl != nil { delete(c.rrTracking, string(c.pa.subject)) } rtt := c.rtt c.mu.Unlock() if rl != nil { sl := &rl.M2 // Fill this in and send it off to the other side. sl.AppName = c.opts.Name sl.ServiceLatency = time.Since(sl.RequestStart) - rtt sl.NATSLatency.Responder = rtt sl.TotalLatency = sl.ServiceLatency + rtt sanitizeLatencyMetric(sl) lsub := remoteLatencySubjectForResponse(c.pa.subject) c.srv.sendInternalAccountMsg(nil, lsub, &rl) // Send to SYS account } } // Match the subscriptions. We will use our own L1 map if // it's still valid, avoiding contention on the shared sublist. var r *SublistResult var ok bool genid := atomic.LoadUint64(&c.acc.sl.genid) if genid == c.in.genid && c.in.results != nil { r, ok = c.in.results[string(c.pa.subject)] } else { // Reset our L1 completely. c.in.results = make(map[string]*SublistResult) c.in.genid = genid } // Go back to the sublist data structure. if !ok { r = c.acc.sl.Match(string(c.pa.subject)) c.in.results[string(c.pa.subject)] = r // Prune the results cache. Keeps us from unbounded growth. Random delete. if len(c.in.results) > maxResultCacheSize { n := 0 for subject := range c.in.results { delete(c.in.results, subject) if n++; n > pruneSize { break } } } } var qnames [][]byte // Check for no interest, short circuit if so. // This is the fanout scale. if len(r.psubs)+len(r.qsubs) > 0 { flag := pmrNoFlag // If there are matching queue subs and we are in gateway mode, // we need to keep track of the queue names the messages are // delivered to. When sending to the GWs, the RMSG will include // those names so that the remote clusters do not deliver messages // to their queue subs of the same names. if len(r.qsubs) > 0 && c.srv.gateway.enabled && atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 { flag |= pmrCollectQueueNames } qnames = c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply, flag) } // Now deal with gateways if c.srv.gateway.enabled { c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, qnames) } } // This is invoked knowing that this client has some GW replies // in its map. It will check if one is find for the c.pa.subject // and if so will process it directly (send to GWs and LEAF) and // return true to notify the caller that the message was handled. // If there is no mapping for the subject, false is returned. func (c *client) handleGWReplyMap(msg []byte) bool { c.mu.Lock() rm, ok := c.gwrm[string(c.pa.subject)] if !ok { c.mu.Unlock() return false } // Set subject to the mapped reply subject c.pa.subject = []byte(rm.ms) var rl *remoteLatency var rtt time.Duration if c.rrTracking != nil { rl = c.rrTracking[string(c.pa.subject)] if rl != nil { delete(c.rrTracking, string(c.pa.subject)) } rtt = c.rtt } c.mu.Unlock() if rl != nil { sl := &rl.M2 // Fill this in and send it off to the other side. 
sl.AppName = c.opts.Name sl.ServiceLatency = time.Since(sl.RequestStart) - rtt sl.NATSLatency.Responder = rtt sl.TotalLatency = sl.ServiceLatency + rtt sanitizeLatencyMetric(sl) lsub := remoteLatencySubjectForResponse(c.pa.subject) c.srv.sendInternalAccountMsg(nil, lsub, &rl) // Send to SYS account } // Check for leaf nodes if c.srv.gwLeafSubs.Count() > 0 { if r := c.srv.gwLeafSubs.Match(string(c.pa.subject)); len(r.psubs) > 0 { c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply, pmrNoFlag) } } if c.srv.gateway.enabled { c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, nil) } return true } // This checks and process import services by doing the mapping and sending the // message onward if applicable. func (c *client) checkForImportServices(acc *Account, msg []byte) { if acc == nil || acc.imports.services == nil { return } acc.mu.RLock() si := acc.imports.services[string(c.pa.subject)] invalid := si != nil && si.invalid acc.mu.RUnlock() // Get the results from the other account for the mapped "to" subject. // If we have been marked invalid simply return here. if si != nil && !invalid && si.acc != nil && si.acc.sl != nil { var nrr []byte if c.pa.reply != nil { var latency *serviceLatency var tracking bool if tracking = shouldSample(si.latency); tracking { latency = si.latency } // We want to remap this to provide anonymity. nrr = si.acc.newServiceReply(tracking) si.acc.addRespServiceImport(acc, string(nrr), string(c.pa.reply), si.rt, latency) // Track our responses for cleanup if not auto-expire. if si.rt != Singleton { acc.addRespMapEntry(si.acc, string(c.pa.reply), string(nrr)) } else if si.latency != nil && c.rtt == 0 { // We have a service import that we are tracking but have not established RTT. c.sendRTTPing() } } // FIXME(dlc) - Do L1 cache trick from above. rr := si.acc.sl.Match(si.to) // Check to see if we have no results and this is an internal serviceImport. If so we // need to clean that up. if len(rr.psubs)+len(rr.qsubs) == 0 && si.internal { // We may also have a response entry, so go through that way. si.acc.checkForRespEntry(si.to) } flags := pmrNoFlag // If we are a route or gateway or leafnode and this message is flipped to a queue subscriber we // need to handle that since the processMsgResults will want a queue filter. if c.kind == GATEWAY || c.kind == ROUTER || c.kind == LEAF { flags |= pmrIgnoreEmptyQueueFilter } if c.srv.gateway.enabled { flags |= pmrCollectQueueNames queues := c.processMsgResults(si.acc, rr, msg, []byte(si.to), nrr, flags) c.sendMsgToGateways(si.acc, msg, []byte(si.to), nrr, queues) } else { c.processMsgResults(si.acc, rr, msg, []byte(si.to), nrr, flags) } shouldRemove := si.ae // Calculate tracking info here if we are tracking this request/response. if si.tracking { if requesting := firstSubFromResult(rr); requesting != nil { shouldRemove = acc.sendTrackingLatency(si, requesting.client, c) } } if shouldRemove { acc.removeServiceImport(si.from) } } } func (c *client) addSubToRouteTargets(sub *subscription) { if c.in.rts == nil { c.in.rts = make([]routeTarget, 0, routeTargetInit) } for i := range c.in.rts { rt := &c.in.rts[i] if rt.sub.client == sub.client { if sub.queue != nil { rt.qs = append(rt.qs, sub.queue...) rt.qs = append(rt.qs, ' ') } return } } var rt *routeTarget lrts := len(c.in.rts) // If we are here we do not have the sub yet in our list // If we have to grow do so here. 
if lrts == cap(c.in.rts) { c.in.rts = append(c.in.rts, routeTarget{}) } c.in.rts = c.in.rts[:lrts+1] rt = &c.in.rts[lrts] rt.sub = sub rt.qs = rt._qs[:0] if sub.queue != nil { rt.qs = append(rt.qs, sub.queue...) rt.qs = append(rt.qs, ' ') } } // This processes the sublist results for a given message. func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, subject, reply []byte, flags int) [][]byte { var queues [][]byte // msg header for clients. msgh := c.msgb[1:msgHeadProtoLen] msgh = append(msgh, subject...) msgh = append(msgh, ' ') si := len(msgh) // For sending messages across routes and leafnodes. // Reset if we have one since we reuse this data structure. if c.in.rts != nil { c.in.rts = c.in.rts[:0] } var rplyHasGWPrefix bool var creply = reply // If the reply subject is a GW routed reply, we will perform some // tracking in deliverMsg(). We also want to send to the user the // reply without the prefix. `creply` will be set to that and be // used to create the message header for client connections. if rplyHasGWPrefix = isGWRoutedReply(reply); rplyHasGWPrefix { creply = reply[gwSubjectOffset:] } // Loop over all normal subscriptions that match. for _, sub := range r.psubs { // Check if this is a send to a ROUTER. We now process // these after everything else. switch sub.client.kind { case ROUTER: if (c.kind != ROUTER && !c.isSolicitedLeafNode()) || (flags&pmrAllowSendFromRouteToRoute != 0) { c.addSubToRouteTargets(sub) } continue case GATEWAY: // Never send to gateway from here. continue case LEAF: // We handle similarly to routes and use the same data structures. // Leaf node delivery audience is different however. // Also leaf nodes are always no echo, so we make sure we are not // going to send back to ourselves here. if c != sub.client && (c.kind != ROUTER || !c.isSolicitedLeafNode()) { c.addSubToRouteTargets(sub) } continue } // Check for stream import mapped subs. These apply to local subs only. if sub.im != nil && sub.im.prefix != "" { // Redo the subject here on the fly. msgh = c.msgb[1:msgHeadProtoLen] msgh = append(msgh, sub.im.prefix...) msgh = append(msgh, subject...) msgh = append(msgh, ' ') si = len(msgh) } // Normal delivery mh := c.msgHeader(msgh[:si], sub, creply) c.deliverMsg(sub, subject, mh, msg, rplyHasGWPrefix) } // Set these up to optionally filter based on the queue lists. // This is for messages received from routes which will have directed // guidance on which queue groups we should deliver to. qf := c.pa.queues // For all non-client connections, we may still want to send messages to // leaf nodes or routes even if there are no queue filters since we collect // them above and do not process inline like normal clients. // However, do select queue subs if asked to ignore empty queue filter. if c.kind != CLIENT && qf == nil && flags&pmrIgnoreEmptyQueueFilter == 0 { goto sendToRoutesOrLeafs } // Check to see if we have our own rand yet. Global rand // has contention with lots of clients, etc. if c.in.prand == nil { c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } // Process queue subs for i := 0; i < len(r.qsubs); i++ { qsubs := r.qsubs[i] // If we have a filter check that here. We could make this a map or someting more // complex but linear search since we expect queues to be small. Should be faster // and more cache friendly. 
if qf != nil && len(qsubs) > 0 { tqn := qsubs[0].queue for _, qn := range qf { if bytes.Equal(qn, tqn) { goto selectQSub } } continue } selectQSub: // We will hold onto remote or lead qsubs when we are coming from // a route or a leaf node just in case we can no longer do local delivery. var rsub, sub *subscription var _ql [32]*subscription src := c.kind // If we just came from a route we want to prefer local subs. // So only select from local subs but remember the first rsub // in case all else fails. if src == ROUTER { ql := _ql[:0] for i := 0; i < len(qsubs); i++ { sub = qsubs[i] if sub.client.kind == CLIENT { ql = append(ql, sub) } else if rsub == nil { rsub = sub } } qsubs = ql } sindex := 0 lqs := len(qsubs) if lqs > 1 { sindex = c.in.prand.Int() % lqs } // Find a subscription that is able to deliver this message starting at a random index. for i := 0; i < lqs; i++ { if sindex+i < lqs { sub = qsubs[sindex+i] } else { sub = qsubs[(sindex+i)%lqs] } if sub == nil { continue } // We have taken care of preferring local subs for a message from a route above. // Here we just care about a client or leaf and skipping a leaf and preferring locals. if dst := sub.client.kind; dst == ROUTER || dst == LEAF { if (src == LEAF || src == CLIENT) && dst == LEAF { if rsub == nil { rsub = sub } continue } else { c.addSubToRouteTargets(sub) if flags&pmrCollectQueueNames != 0 { queues = append(queues, sub.queue) } } break } // Check for mapped subs if sub.im != nil && sub.im.prefix != "" { // Redo the subject here on the fly. msgh = c.msgb[1:msgHeadProtoLen] msgh = append(msgh, sub.im.prefix...) msgh = append(msgh, subject...) msgh = append(msgh, ' ') si = len(msgh) } var rreply = reply if rplyHasGWPrefix && sub.client.kind == CLIENT { rreply = creply } // "rreply" will be stripped of the $GNR prefix (if present) // for client connections only. mh := c.msgHeader(msgh[:si], sub, rreply) if c.deliverMsg(sub, subject, mh, msg, rplyHasGWPrefix) { // Clear rsub rsub = nil if flags&pmrCollectQueueNames != 0 { queues = append(queues, sub.queue) } break } } if rsub != nil { // If we are here we tried to deliver to a local qsub // but failed. So we will send it to a remote or leaf node. c.addSubToRouteTargets(rsub) if flags&pmrCollectQueueNames != 0 { queues = append(queues, rsub.queue) } } } sendToRoutesOrLeafs: // If no messages for routes or leafnodes return here. if len(c.in.rts) == 0 { return queues } // We address by index to avoid struct copy. // We have inline structs for memory layout and cache coherency. for i := range c.in.rts { rt := &c.in.rts[i] kind := rt.sub.client.kind mh := c.msgb[:msgHeadProtoLen] if kind == ROUTER { // Router (and Gateway) nodes are RMSG. Set here since leafnodes may rewrite. mh[0] = 'R' mh = append(mh, acc.Name...) mh = append(mh, ' ') } else { // Leaf nodes are LMSG mh[0] = 'L' // Remap subject if its a shadow subscription, treat like a normal client. if rt.sub.im != nil && rt.sub.im.prefix != "" { mh = append(mh, rt.sub.im.prefix...) } } mh = append(mh, subject...) mh = append(mh, ' ') if len(rt.qs) > 0 { if reply != nil { mh = append(mh, "+ "...) // Signal that there is a reply. mh = append(mh, reply...) mh = append(mh, ' ') } else { mh = append(mh, "| "...) // Only queues } mh = append(mh, rt.qs...) } else if reply != nil { mh = append(mh, reply...) mh = append(mh, ' ') } mh = append(mh, c.pa.szb...) mh = append(mh, _CRLF_...) 
c.deliverMsg(rt.sub, subject, mh, msg, false) } return queues } func (c *client) pubPermissionViolation(subject []byte) { c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject)) c.Errorf("Publish Violation - %s, Subject %q", c.getAuthUser(), subject) } func (c *client) subPermissionViolation(sub *subscription) { errTxt := fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject) logTxt := fmt.Sprintf("Subscription Violation - %s, Subject %q, SID %s", c.getAuthUser(), sub.subject, sub.sid) if sub.queue != nil { errTxt = fmt.Sprintf("Permissions Violation for Subscription to %q using queue %q", sub.subject, sub.queue) logTxt = fmt.Sprintf("Subscription Violation - %s, Subject %q, Queue: %q, SID %s", c.getAuthUser(), sub.subject, sub.queue, sub.sid) } c.sendErr(errTxt) c.Errorf(logTxt) } func (c *client) replySubjectViolation(reply []byte) { c.sendErr(fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply)) c.Errorf("Publish Violation - %s, Reply %q", c.getAuthUser(), reply) } func (c *client) processPingTimer() { c.mu.Lock() c.ping.tmr = nil // Check if connection is still opened if c.isClosed() { c.mu.Unlock() return } c.Debugf("%s Ping Timer", c.typeString()) // If we have had activity within the PingInterval then // there is no need to send a ping. This can be client data // or if we received a ping from the other side. pingInterval := c.srv.getOpts().PingInterval now := time.Now() needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL if delta := now.Sub(c.last); delta < pingInterval && !needRTT { c.Debugf("Delaying PING due to client activity %v ago", delta.Round(time.Second)) } else if delta := now.Sub(c.ping.last); delta < pingInterval && !needRTT { c.Debugf("Delaying PING due to remote ping %v ago", delta.Round(time.Second)) } else { // Check for violation if c.ping.out+1 > c.srv.getOpts().MaxPingsOut { c.Debugf("Stale Client Connection - Closing") c.enqueueProto([]byte(fmt.Sprintf(errProto, "Stale Connection"))) c.mu.Unlock() c.closeConnection(StaleConnection) return } // Send PING c.sendPing() } // Reset to fire again. c.setPingTimer() c.mu.Unlock() } // Lock should be held func (c *client) setPingTimer() { if c.srv == nil { return } d := c.srv.getOpts().PingInterval c.ping.tmr = time.AfterFunc(d, c.processPingTimer) } // Lock should be held func (c *client) clearPingTimer() { if c.ping.tmr == nil { return } c.ping.tmr.Stop() c.ping.tmr = nil } // Lock should be held func (c *client) setAuthTimer(d time.Duration) { c.atmr = time.AfterFunc(d, c.authTimeout) } // Lock should be held func (c *client) clearAuthTimer() bool { if c.atmr == nil { return true } stopped := c.atmr.Stop() c.atmr = nil return stopped } // We may reuse atmr for expiring user jwts, // so check connectReceived. // Lock assume held on entry. func (c *client) awaitingAuth() bool { return !c.flags.isSet(connectReceived) && c.atmr != nil } // This will set the atmr for the JWT expiration time. // We will lock on entry. func (c *client) setExpirationTimer(d time.Duration) { c.mu.Lock() c.atmr = time.AfterFunc(d, c.authExpired) c.mu.Unlock() } // Possibly flush the connection and then close the low level connection. // The boolean `minimalFlush` indicates if the flush operation should have a // minimal write deadline. // Lock is held on entry. 
func (c *client) flushAndClose(minimalFlush bool) { if !c.flags.isSet(skipFlushOnClose) && c.out.pb > 0 { if minimalFlush { const lowWriteDeadline = 100 * time.Millisecond // Reduce the write deadline if needed. if c.out.wdl > lowWriteDeadline { c.out.wdl = lowWriteDeadline } } c.flushOutbound() } c.out.p, c.out.s = nil, nil // Close the low level connection. WriteDeadline need to be set // in case this is a TLS connection. if c.nc != nil { c.nc.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)) c.nc.Close() } } func (c *client) typeString() string { switch c.kind { case CLIENT: return "Client" case ROUTER: return "Router" case GATEWAY: return "Gateway" case LEAF: return "LeafNode" } return "Unknown Type" } // processSubsOnConfigReload removes any subscriptions the client has that are no // longer authorized, and check for imports (accounts) due to a config reload. func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) { c.mu.Lock() var ( checkPerms = c.perms != nil checkAcc = c.acc != nil acc = c.acc ) if !checkPerms && !checkAcc { c.mu.Unlock() return } var ( _subs [32]*subscription subs = _subs[:0] _removed [32]*subscription removed = _removed[:0] srv = c.srv ) if checkAcc { // We actually only want to check if stream imports have changed. if _, ok := awcsti[acc.Name]; !ok { checkAcc = false } } // We will clear any mperms we have here. It will rebuild on the fly with canSubscribe, // so we do that here as we collect them. We will check result down below. c.mperms = nil // Collect client's subs under the lock for _, sub := range c.subs { // Just checking to rebuild mperms under the lock, will collect removed though here. // Only collect under subs array of canSubscribe and checkAcc true. canSub := c.canSubscribe(string(sub.subject)) canQSub := sub.queue != nil && c.canQueueSubscribe(string(sub.subject), string(sub.queue)) if !canSub && !canQSub { removed = append(removed, sub) } else if checkAcc { subs = append(subs, sub) } } c.mu.Unlock() // This list is all subs who are allowed and we need to check accounts. for _, sub := range subs { c.mu.Lock() oldShadows := sub.shadow sub.shadow = nil c.mu.Unlock() c.addShadowSubscriptions(acc, sub) for _, nsub := range oldShadows { nsub.im.acc.sl.Remove(nsub) } } // Unsubscribe all that need to be removed and report back to client and logs. for _, sub := range removed { c.unsubscribe(acc, sub, true, true) c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)", sub.subject, sub.sid)) srv.Noticef("Removed sub %q (sid %q) for %s - not authorized", sub.subject, sub.sid, c.getAuthUser()) } } // Allows us to count up all the queue subscribers during close. type qsub struct { sub *subscription n int32 } func (c *client) closeConnection(reason ClosedState) { c.mu.Lock() if c.nc == nil || c.flags.isSet(closeConnection) { c.mu.Unlock() return } // This will set the closeConnection flag and save the connection, etc.. // Will return true if no writeLoop was started and TCP connection was // closed in place, in which case we need to do the teardown. teardownNow := c.markConnAsClosed(reason, false) c.mu.Unlock() if teardownNow { c.teardownConn() } } // Clear the state of this connection and remove it from the server. // If the connection was initiated (such as ROUTE, GATEWAY, etc..) this may trigger // a reconnect. This function MUST be called only once per connection. It normally // happens when the writeLoop returns, or in closeConnection() if no writeLoop has // been started. 
func (c *client) teardownConn() { c.mu.Lock() // Be consistent with the creation: for routes and gateways, // we use Noticef on create, so use that too for delete. if c.kind == ROUTER || c.kind == GATEWAY { c.Noticef("%s connection closed", c.typeString()) } else { // Client and Leaf Node connections. c.Debugf("%s connection closed", c.typeString()) } c.clearAuthTimer() c.clearPingTimer() // Unblock anyone who is potentially stalled waiting on us. if c.out.stc != nil { close(c.out.stc) c.out.stc = nil } c.nc = nil var ( retryImplicit bool connectURLs []string gwName string gwIsOutbound bool gwCfg *gatewayCfg kind = c.kind srv = c.srv noReconnect = c.flags.isSet(noReconnect) acc = c.acc ) // Snapshot for use if we are a client connection. // FIXME(dlc) - we can just stub in a new one for client // and reference existing one. var subs []*subscription if kind == CLIENT || kind == LEAF { var _subs [32]*subscription subs = _subs[:0] for _, sub := range c.subs { // Auto-unsubscribe subscriptions must be unsubscribed forcibly. sub.max = 0 sub.close() subs = append(subs, sub) } } if c.route != nil { if !noReconnect { retryImplicit = c.route.retry } connectURLs = c.route.connectURLs } if kind == GATEWAY { gwName = c.gw.name gwIsOutbound = c.gw.outbound gwCfg = c.gw.cfg } c.mu.Unlock() // Remove client's or leaf node subscriptions. if (kind == CLIENT || kind == LEAF) && acc != nil { acc.sl.RemoveBatch(subs) } else if kind == ROUTER { go c.removeRemoteSubs() } if srv != nil { // This is a route that disconnected, but we are not in lame duck mode... if len(connectURLs) > 0 && !srv.isLameDuckMode() { // Unless disabled, possibly update the server's INFO protocol // and send to clients that know how to handle async INFOs. if !srv.getOpts().Cluster.NoAdvertise { srv.removeClientConnectURLsAndSendINFOToClients(connectURLs) } } // Unregister srv.removeClient(c) // Update remote subscriptions. if acc != nil && (kind == CLIENT || kind == LEAF) { qsubs := map[string]*qsub{} for _, sub := range subs { // Call unsubscribe here to cleanup shadow subscriptions and such. c.unsubscribe(acc, sub, true, false) // Update route as normal for a normal subscriber. if sub.queue == nil { srv.updateRouteSubscriptionMap(acc, sub, -1) } else { // We handle queue subscribers special in case we // have a bunch we can just send one update to the // connected routes. key := string(sub.subject) + " " + string(sub.queue) if esub, ok := qsubs[key]; ok { esub.n++ } else { qsubs[key] = &qsub{sub, 1} } } if srv.gateway.enabled { srv.gatewayUpdateSubInterest(acc.Name, sub, -1) } // Now check on leafnode updates. srv.updateLeafNodes(acc, sub, -1) } // Process any qsubs here. for _, esub := range qsubs { srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n)) srv.updateLeafNodes(acc, esub.sub, -(esub.n)) } if prev := acc.removeClient(c); prev == 1 && srv != nil { srv.decActiveAccounts() } } } // Don't reconnect connections that have been marked with // the no reconnect flag. if noReconnect { return } // Check for a solicited route. If it was, start up a reconnect unless // we are already connected to the other end. if c.isSolicitedRoute() || retryImplicit { // Capture these under lock c.mu.Lock() rid := c.route.remoteID rtype := c.route.routeType rurl := c.route.url c.mu.Unlock() srv.mu.Lock() defer srv.mu.Unlock() // It is possible that the server is being shutdown. 
// If so, don't try to reconnect if !srv.running { return } if rid != "" && srv.remotes[rid] != nil { srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid) return } else if rid == srv.info.ID { srv.Debugf("Detected route to self, ignoring \"%s\"", rurl) return } else if rtype != Implicit || retryImplicit { srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl) // Keep track of this go-routine so we can wait for it on // server shutdown. srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) }) } } else if srv != nil && kind == GATEWAY && gwIsOutbound { if gwCfg != nil { srv.Debugf("Attempting reconnect for gateway %q", gwName) // Run this as a go routine since we may be called within // the solicitGateway itself if there was an error during // the creation of the gateway connection. srv.startGoRoutine(func() { srv.reconnectGateway(gwCfg) }) } else { srv.Debugf("Gateway %q not in configuration, not attempting reconnect", gwName) } } else if c.isSolicitedLeafNode() { // Check if this is a solicited leaf node. Start up a reconnect. srv.startGoRoutine(func() { srv.reConnectToRemoteLeafNode(c.leaf.remote) }) } } // Set the noReconnect flag. This is used before a call to closeConnection() // to prevent the connection to reconnect (routes, gateways). func (c *client) setNoReconnect() { c.mu.Lock() c.flags.set(noReconnect) c.mu.Unlock() } // Returns the client's RTT value with the protection of the client's lock. func (c *client) getRTTValue() time.Duration { c.mu.Lock() rtt := c.rtt c.mu.Unlock() return rtt } // This function is used by ROUTER and GATEWAY connections to // look for a subject on a given account (since these type of // connections are not bound to a specific account). // If the c.pa.subject is found in the cache, the cached result // is returned, otherwse, we match the account's sublist and update // the cache. The cache is pruned if reaching a certain size. func (c *client) getAccAndResultFromCache() (*Account, *SublistResult) { var ( acc *Account pac *perAccountCache r *SublistResult ok bool ) // Check our cache. if pac, ok = c.in.pacache[string(c.pa.pacache)]; ok { // Check the genid to see if it's still valid. if genid := atomic.LoadUint64(&pac.acc.sl.genid); genid != pac.genid { ok = false delete(c.in.pacache, string(c.pa.pacache)) } else { acc = pac.acc r = pac.results } } if !ok { // Match correct account and sublist. if acc, _ = c.srv.LookupAccount(string(c.pa.account)); acc == nil { return nil, nil } // Match against the account sublist. r = acc.sl.Match(string(c.pa.subject)) // Store in our cache c.in.pacache[string(c.pa.pacache)] = &perAccountCache{acc, r, atomic.LoadUint64(&acc.sl.genid)} // Check if we need to prune. if len(c.in.pacache) > maxPerAccountCacheSize { c.prunePerAccountCache() } } return acc, r } // Account will return the associated account for this client. func (c *client) Account() *Account { if c == nil { return nil } c.mu.Lock() defer c.mu.Unlock() return c.acc } // prunePerAccountCache will prune off a random number of cache entries. func (c *client) prunePerAccountCache() { n := 0 for cacheKey := range c.in.pacache { delete(c.in.pacache, cacheKey) if n++; n > prunePerAccountCacheSize { break } } } // pruneClosedSubFromPerAccountCache remove entries that contain subscriptions // that have been closed. 
func (c *client) pruneClosedSubFromPerAccountCache() { for cacheKey, pac := range c.in.pacache { for _, sub := range pac.results.psubs { if sub.isClosed() { goto REMOVE } } for _, qsub := range pac.results.qsubs { for _, sub := range qsub { if sub.isClosed() { goto REMOVE } } } continue REMOVE: delete(c.in.pacache, cacheKey) } } // getAuthUser returns the auth user for the client. func (c *client) getAuthUser() string { switch { case c.opts.Nkey != "": return fmt.Sprintf("Nkey %q", c.opts.Nkey) case c.opts.Username != "": return fmt.Sprintf("User %q", c.opts.Username) default: return `User "N/A"` } } // isClosed returns true if either closeConnection or clearConnection // flag have been set, or if `nc` is nil, which may happen in tests. func (c *client) isClosed() bool { return c.flags.isSet(closeConnection) || c.nc == nil } // Logging functionality scoped to a client or route. func (c *client) Error(err error) { c.srv.Errors(c, err) } func (c *client) Errorf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Errorf(format, v...) } func (c *client) Debugf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Debugf(format, v...) } func (c *client) Noticef(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Noticef(format, v...) } func (c *client) Tracef(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Tracef(format, v...) } func (c *client) Warnf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Warnf(format, v...) }
1
10,042
Seems len(cnb[0]) will be the length after the write, because of the *v consumption behavior we saw, no?
nats-io-nats-server
go
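The review message above turns on how (*net.Buffers).WriteTo consumes its receiver: the standard library nils out fully written entries and advances the slice, and because those entries live in a shared backing array, a second slice header such as cnb observes the post-write lengths. Below is a minimal standalone sketch of that stdlib behavior, not the nats-server code itself.

package main

import (
	"fmt"
	"io"
	"net"
)

func main() {
	bufs := net.Buffers{[]byte("hello"), []byte("world")}
	view := bufs // copy of the slice header; shares the backing array of buffer entries

	// WriteTo drains bufs: fully written entries are set to nil and the slice
	// header is advanced. The mutation is visible through view as well.
	n, err := bufs.WriteTo(io.Discard)
	if err != nil {
		panic(err)
	}

	fmt.Println(n)            // 10 (bytes written)
	fmt.Println(len(bufs))    // 0 (bufs was fully consumed)
	fmt.Println(len(view[0])) // 0 (the shared entry now reflects the post-write state)
}

So reading len(cnb[0]) after the write reports the consumed length, which appears to be exactly the concern raised in the message.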
@@ -37,6 +37,7 @@ var (
 type QueueLoad struct {
 	Key       string
 	Operation string
+	Object    interface{}
 }
 
 func Start() error {
1
/* Copyright 2017 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spc import ( "fmt" "github.com/golang/glog" clientset "github.com/openebs/maya/pkg/client/clientset/versioned" informers "github.com/openebs/maya/pkg/client/informers/externalversions" "github.com/openebs/maya/pkg/signals" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "time" ) var ( masterURL string kubeconfig string ) type QueueLoad struct { Key string Operation string } func Start() error { // set up signals so we handle the first shutdown signal gracefully stopCh := signals.SetupSignalHandler() // Get in cluster config cfg, err := getClusterConfig(kubeconfig) if err != nil { return fmt.Errorf("Error building kubeconfig: %s", err.Error()) } // Building Kubernetes Clientset kubeClient, err := kubernetes.NewForConfig(cfg) if err != nil { return fmt.Errorf("Error building kubernetes clientset: %s", err.Error()) } // Building OpenEBS Clientset openebsClient, err := clientset.NewForConfig(cfg) if err != nil { return fmt.Errorf("Error building openebs clientset: %s", err.Error()) } kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) spcInformerFactory := informers.NewSharedInformerFactory(openebsClient, time.Second*30) controller := NewController(kubeClient, openebsClient, kubeInformerFactory, spcInformerFactory) go kubeInformerFactory.Start(stopCh) go spcInformerFactory.Start(stopCh) // Threadiness defines the nubmer of workers to be launched in Run function return controller.Run(2, stopCh) } // Cannot be unit tested // GetClusterConfig return the config for k8s. func getClusterConfig(kubeconfig string) (*rest.Config, error) { var masterURL string cfg, err := rest.InClusterConfig() if err != nil { glog.Errorf("Failed to get k8s Incluster config. %+v", err) if kubeconfig == "" { return nil, fmt.Errorf("Kubeconfig is empty: %v", err.Error()) } cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) if err != nil { return nil, fmt.Errorf("Error building kubeconfig: %s", err.Error()) } } return cfg, err }
1
8,899
Can we make use of the StoragePoolClaim type rather than the generic interface{} type?
openebs-maya
go
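The question above asks for a typed payload instead of interface{} on the Object field added in the patch. A rough sketch of that variant is below; it assumes the SPC API type is StoragePoolClaim from the project's v1alpha1 apis package (import path assumed, not taken from this file).

package spc

// Assumed import path for the StoragePoolClaim API type.
import apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"

// QueueLoad with a concrete payload type: workers can use the Object field directly
// instead of type-asserting an interface{} value to *StoragePoolClaim.
type QueueLoad struct {
	Key       string
	Operation string
	Object    *apis.StoragePoolClaim
}

The trade-off is that the queue can then only carry SPC objects, which seems to match this controller's scope but is a design choice for the authors to confirm.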
@@ -652,6 +652,8 @@ func (k *Kad) Start(_ context.Context) error {
 	k.wg.Add(1)
 	go k.manage()
 
+	k.AddPeers(k.previouslyConnected()...)
+
 	go func() {
 		select {
 		case <-k.halt:
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package kademlia import ( "context" random "crypto/rand" "encoding/json" "errors" "fmt" "math/big" "net" "sync" "syscall" "time" "github.com/ethersphere/bee/pkg/addressbook" "github.com/ethersphere/bee/pkg/blocker" "github.com/ethersphere/bee/pkg/discovery" "github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/pingpong" "github.com/ethersphere/bee/pkg/shed" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/topology" im "github.com/ethersphere/bee/pkg/topology/kademlia/internal/metrics" "github.com/ethersphere/bee/pkg/topology/kademlia/internal/waitnext" "github.com/ethersphere/bee/pkg/topology/pslice" ma "github.com/multiformats/go-multiaddr" "golang.org/x/sync/errgroup" ) const ( maxConnAttempts = 1 // when there is maxConnAttempts failed connect calls for a given peer it is considered non-connectable maxBootNodeAttempts = 3 // how many attempts to dial to boot-nodes before giving up defaultBitSuffixLength = 3 // the number of bits used to create pseudo addresses for balancing addPeerBatchSize = 500 peerConnectionAttemptTimeout = 5 * time.Second // timeout for establishing a new connection with peer. flagTimeout = 5 * time.Minute // how long before blocking a flagged peer blockDuration = time.Hour // how long to blocklist an unresponsive peer for blockWorkerWakup = time.Second * 10 // wake up interval for the blocker worker ) var ( nnLowWatermark = 2 // the number of peers in consecutive deepest bins that constitute as nearest neighbours quickSaturationPeers = 4 saturationPeers = 8 overSaturationPeers = 20 bootNodeOverSaturationPeers = 20 shortRetry = 30 * time.Second timeToRetry = 2 * shortRetry broadcastBinSize = 4 peerPingPollTime = 10 * time.Second // how often to ping a peer ) var ( errOverlayMismatch = errors.New("overlay mismatch") errPruneEntry = errors.New("prune entry") errEmptyBin = errors.New("empty bin") errAnnounceLightNode = errors.New("announcing light node") ) type ( binSaturationFunc func(bin uint8, peers, connected *pslice.PSlice) (saturated bool, oversaturated bool) sanctionedPeerFunc func(peer swarm.Address) bool pruneFunc func(depth uint8) staticPeerFunc func(peer swarm.Address) bool ) var noopSanctionedPeerFn = func(_ swarm.Address) bool { return false } // Options for injecting services to Kademlia. type Options struct { SaturationFunc binSaturationFunc Bootnodes []ma.Multiaddr BootnodeMode bool BitSuffixLength int PruneFunc pruneFunc StaticNodes []swarm.Address } // Kad is the Swarm forwarding kademlia implementation. 
type Kad struct { base swarm.Address // this node's overlay address discovery discovery.Driver // the discovery driver addressBook addressbook.Interface // address book to get underlays p2p p2p.Service // p2p service to connect to nodes with saturationFunc binSaturationFunc // pluggable saturation function bitSuffixLength int // additional depth of common prefix for bin commonBinPrefixes [][]swarm.Address // list of address prefixes for each bin connectedPeers *pslice.PSlice // a slice of peers sorted and indexed by po, indexes kept in `bins` knownPeers *pslice.PSlice // both are po aware slice of addresses bootnodes []ma.Multiaddr depth uint8 // current neighborhood depth radius uint8 // storage area of responsibility depthMu sync.RWMutex // protect depth changes manageC chan struct{} // trigger the manage forever loop to connect to new peers peerSig []chan struct{} peerSigMtx sync.Mutex logger logging.Logger // logger bootnode bool // indicates whether the node is working in bootnode mode collector *im.Collector quit chan struct{} // quit channel halt chan struct{} // halt channel done chan struct{} // signal that `manage` has quit wg sync.WaitGroup waitNext *waitnext.WaitNext metrics metrics pruneFunc pruneFunc // pluggable prune function pinger pingpong.Interface staticPeer staticPeerFunc bgBroadcastCtx context.Context bgBroadcastCancel context.CancelFunc blocker *blocker.Blocker } // New returns a new Kademlia. func New( base swarm.Address, addressbook addressbook.Interface, discovery discovery.Driver, p2p p2p.Service, pinger pingpong.Interface, metricsDB *shed.DB, logger logging.Logger, o Options, ) (*Kad, error) { if o.SaturationFunc == nil { os := overSaturationPeers if o.BootnodeMode { os = bootNodeOverSaturationPeers } o.SaturationFunc = binSaturated(os, isStaticPeer(o.StaticNodes)) } if o.BitSuffixLength == 0 { o.BitSuffixLength = defaultBitSuffixLength } start := time.Now() imc, err := im.NewCollector(metricsDB) if err != nil { return nil, err } logger.Debugf("kademlia: NewCollector(...) took %v", time.Since(start)) k := &Kad{ base: base, discovery: discovery, addressBook: addressbook, p2p: p2p, saturationFunc: o.SaturationFunc, bitSuffixLength: o.BitSuffixLength, commonBinPrefixes: make([][]swarm.Address, int(swarm.MaxBins)), connectedPeers: pslice.New(int(swarm.MaxBins), base), knownPeers: pslice.New(int(swarm.MaxBins), base), bootnodes: o.Bootnodes, manageC: make(chan struct{}, 1), waitNext: waitnext.New(), logger: logger, bootnode: o.BootnodeMode, collector: imc, quit: make(chan struct{}), halt: make(chan struct{}), done: make(chan struct{}), metrics: newMetrics(), pruneFunc: o.PruneFunc, pinger: pinger, staticPeer: isStaticPeer(o.StaticNodes), } blocklistCallback := func(a swarm.Address) { k.logger.Debugf("kademlia: disconnecting peer %s for ping failure", a.String()) k.metrics.Blocklist.Inc() } k.blocker = blocker.New(p2p, flagTimeout, blockDuration, blockWorkerWakup, blocklistCallback, logger) if k.pruneFunc == nil { k.pruneFunc = k.pruneOversaturatedBins } if k.bitSuffixLength > 0 { k.commonBinPrefixes = generateCommonBinPrefixes(k.base, k.bitSuffixLength) } k.bgBroadcastCtx, k.bgBroadcastCancel = context.WithCancel(context.Background()) return k, nil } type peerConnInfo struct { po uint8 addr swarm.Address } // connectBalanced attempts to connect to the balanced peers first. 
func (k *Kad) connectBalanced(wg *sync.WaitGroup, peerConnChan chan<- *peerConnInfo) { skipPeers := func(peer swarm.Address) bool { if k.waitNext.Waiting(peer) { k.metrics.TotalBeforeExpireWaits.Inc() return true } return false } depth := k.NeighborhoodDepth() for i := range k.commonBinPrefixes { binPeersLength := k.knownPeers.BinSize(uint8(i)) // balancer should skip on bins where neighborhood connector would connect to peers anyway // and there are not enough peers in known addresses to properly balance the bin if i >= int(depth) && binPeersLength < len(k.commonBinPrefixes[i]) { continue } binPeers := k.knownPeers.BinPeers(uint8(i)) for j := range k.commonBinPrefixes[i] { pseudoAddr := k.commonBinPrefixes[i][j] closestConnectedPeer, err := closestPeer(k.connectedPeers, pseudoAddr, noopSanctionedPeerFn) if err != nil { if errors.Is(err, topology.ErrNotFound) { break } k.logger.Errorf("closest connected peer: %v", err) continue } closestConnectedPO := swarm.ExtendedProximity(closestConnectedPeer.Bytes(), pseudoAddr.Bytes()) if int(closestConnectedPO) >= i+k.bitSuffixLength+1 { continue } // Connect to closest known peer which we haven't tried connecting to recently. closestKnownPeer, err := closestPeerInSlice(binPeers, pseudoAddr, skipPeers) if err != nil { if errors.Is(err, topology.ErrNotFound) { break } k.logger.Errorf("closest known peer: %v", err) continue } if k.connectedPeers.Exists(closestKnownPeer) { continue } closestKnownPeerPO := swarm.ExtendedProximity(closestKnownPeer.Bytes(), pseudoAddr.Bytes()) if int(closestKnownPeerPO) < i+k.bitSuffixLength+1 { continue } select { case <-k.quit: return default: wg.Add(1) peerConnChan <- &peerConnInfo{ po: swarm.Proximity(k.base.Bytes(), closestKnownPeer.Bytes()), addr: closestKnownPeer, } } break } } } // connectNeighbours attempts to connect to the neighbours // which were not considered by the connectBalanced method. func (k *Kad) connectNeighbours(wg *sync.WaitGroup, peerConnChan chan<- *peerConnInfo) { sent := 0 var currentPo uint8 = 0 _ = k.knownPeers.EachBinRev(func(addr swarm.Address, po uint8) (bool, bool, error) { depth := k.NeighborhoodDepth() // out of depth, skip bin if po < depth { return false, true, nil } if po != currentPo { currentPo = po sent = 0 } if k.connectedPeers.Exists(addr) { return false, false, nil } if k.waitNext.Waiting(addr) { k.metrics.TotalBeforeExpireWaits.Inc() return false, false, nil } select { case <-k.quit: return true, false, nil default: wg.Add(1) peerConnChan <- &peerConnInfo{ po: po, addr: addr, } sent++ } // We want 'sent' equal to 'saturationPeers' // in order to skip to the next bin and speed up the topology build. return false, sent == saturationPeers, nil }) } // connectionAttemptsHandler handles the connection attempts // to peers sent by the producers to the peerConnChan. 
func (k *Kad) connectionAttemptsHandler(ctx context.Context, wg *sync.WaitGroup, neighbourhoodChan, balanceChan <-chan *peerConnInfo) { connect := func(peer *peerConnInfo) { bzzAddr, err := k.addressBook.Get(peer.addr) switch { case errors.Is(err, addressbook.ErrNotFound): k.logger.Debugf("kademlia: empty address book entry for peer %q", peer.addr) k.knownPeers.Remove(peer.addr) return case err != nil: k.logger.Debugf("kademlia: failed to get address book entry for peer %q: %v", peer.addr, err) return } remove := func(peer *peerConnInfo) { k.waitNext.Remove(peer.addr) k.knownPeers.Remove(peer.addr) if err := k.addressBook.Remove(peer.addr); err != nil { k.logger.Debugf("kademlia: could not remove peer %q from addressbook", peer.addr) } } switch err = k.connect(ctx, peer.addr, bzzAddr.Underlay); { case errors.Is(err, errPruneEntry): k.logger.Debugf("kademlia: dial to light node with overlay %q and underlay %q", peer.addr, bzzAddr.Underlay) remove(peer) return case errors.Is(err, errOverlayMismatch): k.logger.Debugf("kademlia: overlay mismatch has occurred to an overlay %q with underlay %q", peer.addr, bzzAddr.Underlay) remove(peer) return case err != nil: k.logger.Debugf("kademlia: peer not reachable from kademlia %q: %v", bzzAddr, err) k.logger.Warningf("peer not reachable when attempting to connect") return } k.waitNext.Set(peer.addr, time.Now().Add(shortRetry), 0) k.connectedPeers.Add(peer.addr) k.metrics.TotalOutboundConnections.Inc() k.collector.Record(peer.addr, im.PeerLogIn(time.Now(), im.PeerConnectionDirectionOutbound)) k.depthMu.Lock() k.depth = recalcDepth(k.connectedPeers, k.radius) k.depthMu.Unlock() k.logger.Debugf("kademlia: connected to peer: %q in bin: %d", peer.addr, peer.po) k.notifyManageLoop() k.notifyPeerSig() } var ( // The inProgress helps to avoid making a connection // to a peer who has the connection already in progress. inProgress = make(map[string]bool) inProgressMu sync.Mutex ) connAttempt := func(peerConnChan <-chan *peerConnInfo) { for { select { case <-k.quit: return case peer := <-peerConnChan: addr := peer.addr.String() if k.waitNext.Waiting(peer.addr) { k.metrics.TotalBeforeExpireWaits.Inc() wg.Done() continue } inProgressMu.Lock() if !inProgress[addr] { inProgress[addr] = true inProgressMu.Unlock() connect(peer) inProgressMu.Lock() delete(inProgress, addr) } inProgressMu.Unlock() wg.Done() } } } for i := 0; i < 16; i++ { go connAttempt(balanceChan) } for i := 0; i < 32; i++ { go connAttempt(neighbourhoodChan) } } // notifyManageLoop notifies kademlia manage loop. func (k *Kad) notifyManageLoop() { select { case k.manageC <- struct{}{}: default: } } // manage is a forever loop that manages the connection to new peers // once they get added or once others leave. func (k *Kad) manage() { defer k.wg.Done() defer close(k.done) defer k.logger.Debugf("kademlia manage loop exited") timer := time.NewTimer(0) ctx, cancel := context.WithCancel(context.Background()) go func() { <-k.quit if !timer.Stop() { <-timer.C } cancel() }() // The wg makes sure that we wait for all the connection attempts, // spun up by goroutines, to finish before we try the boot-nodes. 
var wg sync.WaitGroup neighbourhoodChan := make(chan *peerConnInfo) balanceChan := make(chan *peerConnInfo) go k.connectionAttemptsHandler(ctx, &wg, neighbourhoodChan, balanceChan) k.wg.Add(1) go func() { defer k.wg.Done() for { select { case <-k.halt: return case <-k.quit: return case <-time.After(5 * time.Minute): start := time.Now() k.logger.Tracef("kademlia: starting to flush metrics at %s", start) if err := k.collector.Flush(); err != nil { k.metrics.InternalMetricsFlushTotalErrors.Inc() k.logger.Debugf("kademlia: unable to flush metrics counters to the persistent store: %v", err) } else { k.metrics.InternalMetricsFlushTime.Observe(time.Since(start).Seconds()) k.logger.Tracef("kademlia: took %s to flush", time.Since(start)) } } } }() k.wg.Add(1) go func() { defer k.wg.Done() for { select { case <-k.halt: return case <-k.quit: return case <-timer.C: k.wg.Add(1) go func() { defer k.wg.Done() k.recordPeerLatencies(ctx) }() _ = timer.Reset(peerPingPollTime) } } }() for { select { case <-k.quit: return case <-time.After(15 * time.Second): k.notifyManageLoop() case <-k.manageC: start := time.Now() select { case <-k.halt: // halt stops dial-outs while shutting down return case <-k.quit: return default: } if k.bootnode { k.depthMu.Lock() depth := k.depth radius := k.radius k.depthMu.Unlock() k.metrics.CurrentDepth.Set(float64(depth)) k.metrics.CurrentRadius.Set(float64(radius)) k.metrics.CurrentlyKnownPeers.Set(float64(k.knownPeers.Length())) k.metrics.CurrentlyConnectedPeers.Set(float64(k.connectedPeers.Length())) continue } oldDepth := k.NeighborhoodDepth() k.connectBalanced(&wg, balanceChan) k.connectNeighbours(&wg, neighbourhoodChan) wg.Wait() k.depthMu.Lock() depth := k.depth radius := k.radius k.depthMu.Unlock() k.pruneFunc(depth) k.logger.Tracef( "kademlia: connector took %s to finish: old depth %d; new depth %d", time.Since(start), oldDepth, depth, ) k.metrics.CurrentDepth.Set(float64(depth)) k.metrics.CurrentRadius.Set(float64(radius)) k.metrics.CurrentlyKnownPeers.Set(float64(k.knownPeers.Length())) k.metrics.CurrentlyConnectedPeers.Set(float64(k.connectedPeers.Length())) if k.connectedPeers.Length() == 0 { select { case <-k.halt: continue default: } k.logger.Debug("kademlia: no connected peers, trying bootnodes") k.connectBootNodes(ctx) } } } } // recordPeerLatencies tries to record the average // peer latencies from the p2p layer. 
func (k *Kad) recordPeerLatencies(ctx context.Context) { ctx, cancel := context.WithTimeout(ctx, peerPingPollTime) defer cancel() var wg sync.WaitGroup _ = k.connectedPeers.EachBin(func(addr swarm.Address, _ uint8) (bool, bool, error) { wg.Add(1) go func() { defer wg.Done() l, err := k.pinger.Ping(ctx, addr, "ping") if err != nil { k.logger.Tracef("kademlia: cannot get latency for peer %s: %v", addr.String(), err) k.blocker.Flag(addr) k.metrics.Flag.Inc() return } k.blocker.Unflag(addr) k.metrics.Unflag.Inc() k.collector.Record(addr, im.PeerLatency(l)) v := k.collector.Inspect(addr).LatencyEWMA k.metrics.PeerLatencyEWMA.Observe(v.Seconds()) }() return false, false, nil }) wg.Wait() } // pruneOversaturatedBins disconnects out of depth peers from oversaturated bins // while maintaining the balance of the bin and favoring peers with longers connections func (k *Kad) pruneOversaturatedBins(depth uint8) { for i := range k.commonBinPrefixes { if i >= int(depth) { return } binPeersCount := k.connectedPeers.BinSize(uint8(i)) if binPeersCount < overSaturationPeers { continue } binPeers := k.connectedPeers.BinPeers(uint8(i)) peersToRemove := binPeersCount - overSaturationPeers for j := 0; peersToRemove > 0 && j < len(k.commonBinPrefixes[i]); j++ { pseudoAddr := k.commonBinPrefixes[i][j] peers := k.balancedSlotPeers(pseudoAddr, binPeers, i) if len(peers) <= 1 { continue } var smallestDuration time.Duration var newestPeer swarm.Address for _, peer := range peers { duration := k.collector.Inspect(peer).SessionConnectionDuration if smallestDuration == 0 || duration < smallestDuration { smallestDuration = duration newestPeer = peer } } err := k.p2p.Disconnect(newestPeer, "pruned from oversaturated bin") if err != nil { k.logger.Debugf("prune disconnect fail %v", err) } peersToRemove-- } } } func (k *Kad) balancedSlotPeers(pseudoAddr swarm.Address, peers []swarm.Address, po int) []swarm.Address { var ret []swarm.Address for _, peer := range peers { peerPo := swarm.ExtendedProximity(peer.Bytes(), pseudoAddr.Bytes()) if int(peerPo) >= po+k.bitSuffixLength+1 { ret = append(ret, peer) } } return ret } func (k *Kad) Start(_ context.Context) error { k.wg.Add(1) go k.manage() go func() { select { case <-k.halt: return case <-k.quit: return default: } var ( start = time.Now() addresses []swarm.Address ) err := k.addressBook.IterateOverlays(func(addr swarm.Address) (stop bool, err error) { addresses = append(addresses, addr) if len(addresses) == addPeerBatchSize { k.AddPeers(addresses...) addresses = nil } return false, nil }) if err != nil { k.logger.Errorf("addressbook overlays: %v", err) return } k.AddPeers(addresses...) 
k.metrics.StartAddAddressBookOverlaysTime.Observe(time.Since(start).Seconds()) }() // trigger the first manage loop immediately so that // we can start connecting to the bootnode quickly k.notifyManageLoop() return nil } func (k *Kad) connectBootNodes(ctx context.Context) { var attempts, connected int totalAttempts := maxBootNodeAttempts * len(k.bootnodes) ctx, cancel := context.WithTimeout(ctx, 15*time.Second) defer cancel() for _, addr := range k.bootnodes { if attempts >= totalAttempts || connected >= 3 { return } if _, err := p2p.Discover(ctx, addr, func(addr ma.Multiaddr) (stop bool, err error) { k.logger.Tracef("connecting to bootnode %s", addr) if attempts >= maxBootNodeAttempts { return true, nil } bzzAddress, err := k.p2p.Connect(ctx, addr) attempts++ k.metrics.TotalBootNodesConnectionAttempts.Inc() if err != nil { if !errors.Is(err, p2p.ErrAlreadyConnected) { k.logger.Debugf("connect fail %s: %v", addr, err) k.logger.Warningf("connect to bootnode %s", addr) return false, err } k.logger.Debugf("connect to bootnode fail: %v", err) return false, nil } if err := k.onConnected(ctx, bzzAddress.Overlay); err != nil { return false, err } k.metrics.TotalOutboundConnections.Inc() k.collector.Record(bzzAddress.Overlay, im.PeerLogIn(time.Now(), im.PeerConnectionDirectionOutbound)) k.logger.Tracef("connected to bootnode %s", addr) connected++ // connect to max 3 bootnodes return connected >= 3, nil }); err != nil && !errors.Is(err, context.Canceled) { k.logger.Debugf("discover fail %s: %v", addr, err) k.logger.Warningf("discover to bootnode %s", addr) return } } } // binSaturated indicates whether a certain bin is saturated or not. // when a bin is not saturated it means we would like to proactively // initiate connections to other peers in the bin. func binSaturated(oversaturationAmount int, staticNode staticPeerFunc) binSaturationFunc { return func(bin uint8, peers, connected *pslice.PSlice) (bool, bool) { potentialDepth := recalcDepth(peers, swarm.MaxPO) // short circuit for bins which are >= depth if bin >= potentialDepth { return false, false } // lets assume for now that the minimum number of peers in a bin // would be 2, under which we would always want to connect to new peers // obviously this should be replaced with a better optimization // the iterator is used here since when we check if a bin is saturated, // the plain number of size of bin might not suffice (for example for squared // gaps measurement) size := 0 _ = connected.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) { if po == bin && !staticNode(addr) { size++ } return false, false, nil }) return size >= saturationPeers, size >= oversaturationAmount } } // recalcDepth calculates and returns the kademlia depth. func recalcDepth(peers *pslice.PSlice, radius uint8) uint8 { // handle edge case separately if peers.Length() <= nnLowWatermark { return 0 } var ( peersCtr = uint(0) candidate = uint8(0) shallowestEmpty, noEmptyBins = peers.ShallowestEmpty() ) shallowestUnsaturated := uint8(0) binCount := 0 _ = peers.EachBinRev(func(_ swarm.Address, bin uint8) (bool, bool, error) { if bin == shallowestUnsaturated { binCount++ return false, false, nil } if bin > shallowestUnsaturated && binCount < quickSaturationPeers { // this means we have less than quickSaturationPeers in the previous bin // therefore we can return assuming that bin is the unsaturated one. 
return true, false, nil } shallowestUnsaturated = bin binCount = 1 return false, false, nil }) // if there are some empty bins and the shallowestEmpty is // smaller than the shallowestUnsaturated then set shallowest // unsaturated to the empty bin. if !noEmptyBins && shallowestEmpty < shallowestUnsaturated { shallowestUnsaturated = shallowestEmpty } _ = peers.EachBin(func(_ swarm.Address, po uint8) (bool, bool, error) { peersCtr++ if peersCtr >= uint(nnLowWatermark) { candidate = po return true, false, nil } return false, false, nil }) if shallowestUnsaturated > candidate { if radius < candidate { return radius } return candidate } if radius < shallowestUnsaturated { return radius } return shallowestUnsaturated } // connect connects to a peer and gossips its address to our connected peers, // as well as sends the peers we are connected to to the newly connected peer func (k *Kad) connect(ctx context.Context, peer swarm.Address, ma ma.Multiaddr) error { k.logger.Infof("attempting to connect to peer %q", peer) ctx, cancel := context.WithTimeout(ctx, peerConnectionAttemptTimeout) defer cancel() k.metrics.TotalOutboundConnectionAttempts.Inc() switch i, err := k.p2p.Connect(ctx, ma); { case errors.Is(err, p2p.ErrDialLightNode): return errPruneEntry case errors.Is(err, p2p.ErrAlreadyConnected): if !i.Overlay.Equal(peer) { return errOverlayMismatch } return nil case errors.Is(err, context.Canceled): return err case err != nil: k.logger.Debugf("could not connect to peer %q: %v", peer, err) retryTime := time.Now().Add(timeToRetry) var e *p2p.ConnectionBackoffError failedAttempts := 0 if errors.As(err, &e) { retryTime = e.TryAfter() } else { failedAttempts = k.waitNext.Attempts(peer) failedAttempts++ } k.metrics.TotalOutboundConnectionFailedAttempts.Inc() k.collector.Record(peer, im.IncSessionConnectionRetry()) ss := k.collector.Inspect(peer) quickPrune := (ss == nil || ss.HasAtMaxOneConnectionAttempt()) && isNetworkError(err) if (k.connectedPeers.Length() > 0 && quickPrune) || failedAttempts >= maxConnAttempts { k.waitNext.Remove(peer) k.knownPeers.Remove(peer) if err := k.addressBook.Remove(peer); err != nil { k.logger.Debugf("could not remove peer from addressbook: %q", peer) } k.logger.Debugf("kademlia pruned peer from address book %q", peer) } else { k.waitNext.Set(peer, retryTime, failedAttempts) } return err case !i.Overlay.Equal(peer): _ = k.p2p.Disconnect(peer, errOverlayMismatch.Error()) _ = k.p2p.Disconnect(i.Overlay, errOverlayMismatch.Error()) return errOverlayMismatch } return k.Announce(ctx, peer, true) } // Announce a newly connected peer to our connected peers, but also // notify the peer about our already connected peers func (k *Kad) Announce(ctx context.Context, peer swarm.Address, fullnode bool) error { var addrs []swarm.Address for bin := uint8(0); bin < swarm.MaxBins; bin++ { connectedPeers, err := randomSubset(k.connectedPeers.BinPeers(bin), broadcastBinSize) if err != nil { return err } for _, connectedPeer := range connectedPeers { if connectedPeer.Equal(peer) { continue } addrs = append(addrs, connectedPeer) if !fullnode { // we continue here so we dont gossip // about lightnodes to others. 
continue } // if kademlia is closing, dont enqueue anymore broadcast requests select { case <-k.bgBroadcastCtx.Done(): // we will not interfere with the announce operation by returning here continue default: } go func(connectedPeer swarm.Address) { // Create a new deadline ctx to prevent goroutine pile up cCtx, cCancel := context.WithTimeout(k.bgBroadcastCtx, time.Minute) defer cCancel() if err := k.discovery.BroadcastPeers(cCtx, connectedPeer, peer); err != nil { k.logger.Debugf("could not gossip peer %s to peer %s: %v", peer, connectedPeer, err) } }(connectedPeer) } } if len(addrs) == 0 { return nil } err := k.discovery.BroadcastPeers(ctx, peer, addrs...) if err != nil { k.logger.Errorf("kademlia: could not broadcast to peer %s", peer) _ = k.p2p.Disconnect(peer, "failed broadcasting to peer") } return err } // AnnounceTo announces a selected peer to another. func (k *Kad) AnnounceTo(ctx context.Context, addressee, peer swarm.Address, fullnode bool) error { if !fullnode { return errAnnounceLightNode } return k.discovery.BroadcastPeers(ctx, addressee, peer) } // AddPeers adds peers to the knownPeers list. // This does not guarantee that a connection will immediately // be made to the peer. func (k *Kad) AddPeers(addrs ...swarm.Address) { k.knownPeers.Add(addrs...) k.notifyManageLoop() } func (k *Kad) Pick(peer p2p.Peer) bool { k.metrics.PickCalls.Inc() if k.bootnode { // shortcircuit for bootnode mode - always accept connections, // at least until we find a better solution. return true } po := swarm.Proximity(k.base.Bytes(), peer.Address.Bytes()) _, oversaturated := k.saturationFunc(po, k.knownPeers, k.connectedPeers) // pick the peer if we are not oversaturated if !oversaturated { return true } k.metrics.PickCallsFalse.Inc() return false } func isStaticPeer(staticNodes []swarm.Address) func(overlay swarm.Address) bool { return func(overlay swarm.Address) bool { for _, addr := range staticNodes { if addr.Equal(overlay) { return true } } return false } } // Connected is called when a peer has dialed in. // If forceConnection is true `overSaturated` is ignored for non-bootnodes. func (k *Kad) Connected(ctx context.Context, peer p2p.Peer, forceConnection bool) (err error) { defer func() { if err == nil { k.metrics.TotalInboundConnections.Inc() k.collector.Record(peer.Address, im.PeerLogIn(time.Now(), im.PeerConnectionDirectionInbound)) } }() address := peer.Address po := swarm.Proximity(k.base.Bytes(), address.Bytes()) if _, overSaturated := k.saturationFunc(po, k.knownPeers, k.connectedPeers); overSaturated { if k.bootnode { randPeer, err := k.randomPeer(po) if err != nil { return fmt.Errorf("failed to get random peer to kick-out: %w", err) } _ = k.p2p.Disconnect(randPeer, "kicking out random peer to accommodate node") return k.onConnected(ctx, address) } if !forceConnection { return topology.ErrOversaturated } } return k.onConnected(ctx, address) } func (k *Kad) onConnected(ctx context.Context, addr swarm.Address) error { if err := k.Announce(ctx, addr, true); err != nil { return err } k.knownPeers.Add(addr) k.connectedPeers.Add(addr) k.waitNext.Remove(addr) k.depthMu.Lock() k.depth = recalcDepth(k.connectedPeers, k.radius) k.depthMu.Unlock() k.notifyManageLoop() k.notifyPeerSig() return nil } // Disconnected is called when peer disconnects. 
func (k *Kad) Disconnected(peer p2p.Peer) { k.logger.Debugf("kademlia: disconnected peer %s", peer.Address) k.connectedPeers.Remove(peer.Address) k.waitNext.SetTryAfter(peer.Address, time.Now().Add(timeToRetry)) k.metrics.TotalInboundDisconnections.Inc() k.collector.Record(peer.Address, im.PeerLogOut(time.Now())) k.depthMu.Lock() k.depth = recalcDepth(k.connectedPeers, k.radius) k.depthMu.Unlock() k.notifyManageLoop() k.notifyPeerSig() } func (k *Kad) notifyPeerSig() { k.peerSigMtx.Lock() defer k.peerSigMtx.Unlock() for _, c := range k.peerSig { // Every peerSig channel has a buffer capacity of 1, // so every receiver will get the signal even if the // select statement has the default case to avoid blocking. select { case c <- struct{}{}: default: } } } func closestPeer(peers *pslice.PSlice, addr swarm.Address, spf sanctionedPeerFunc) (swarm.Address, error) { closest := swarm.ZeroAddress err := peers.EachBinRev(closestPeerFunc(&closest, addr, spf)) if err != nil { return closest, err } // check if found if closest.IsZero() { return closest, topology.ErrNotFound } return closest, nil } func closestPeerInSlice(peers []swarm.Address, addr swarm.Address, spf sanctionedPeerFunc) (swarm.Address, error) { closest := swarm.ZeroAddress closestFunc := closestPeerFunc(&closest, addr, spf) for _, peer := range peers { _, _, err := closestFunc(peer, 0) if err != nil { return closest, err } } // check if found if closest.IsZero() { return closest, topology.ErrNotFound } return closest, nil } func closestPeerFunc(closest *swarm.Address, addr swarm.Address, spf sanctionedPeerFunc) func(peer swarm.Address, po uint8) (bool, bool, error) { return func(peer swarm.Address, po uint8) (bool, bool, error) { // check whether peer is sanctioned if spf(peer) { return false, false, nil } if closest.IsZero() { *closest = peer return false, false, nil } closer, err := peer.Closer(addr, *closest) if err != nil { return false, false, err } if closer { *closest = peer } return false, false, nil } } // ClosestPeer returns the closest peer to a given address. func (k *Kad) ClosestPeer(addr swarm.Address, includeSelf bool, skipPeers ...swarm.Address) (swarm.Address, error) { if k.connectedPeers.Length() == 0 { return swarm.Address{}, topology.ErrNotFound } closest := swarm.ZeroAddress if includeSelf { closest = k.base } err := k.connectedPeers.EachBinRev(func(peer swarm.Address, po uint8) (bool, bool, error) { for _, a := range skipPeers { if a.Equal(peer) { return false, false, nil } } if closest.IsZero() { closest = peer return false, false, nil } if closer, _ := peer.Closer(addr, closest); closer { closest = peer } return false, false, nil }) if err != nil { return swarm.Address{}, err } if closest.IsZero() { // no peers return swarm.Address{}, topology.ErrNotFound // only for light nodes } // check if self if closest.Equal(k.base) { return swarm.Address{}, topology.ErrWantSelf } return closest, nil } // IsWithinDepth returns if an address is within the neighborhood depth of a node. func (k *Kad) IsWithinDepth(addr swarm.Address) bool { return swarm.Proximity(k.base.Bytes(), addr.Bytes()) >= k.NeighborhoodDepth() } // EachNeighbor iterates from closest bin to farthest of the neighborhood peers. func (k *Kad) EachNeighbor(f topology.EachPeerFunc) error { depth := k.NeighborhoodDepth() fn := func(a swarm.Address, po uint8) (bool, bool, error) { if po < depth { return true, false, nil } return f(a, po) } return k.connectedPeers.EachBin(fn) } // EachNeighborRev iterates from farthest bin to closest of the neighborhood peers. 
func (k *Kad) EachNeighborRev(f topology.EachPeerFunc) error { depth := k.NeighborhoodDepth() fn := func(a swarm.Address, po uint8) (bool, bool, error) { if po < depth { return false, true, nil } return f(a, po) } return k.connectedPeers.EachBinRev(fn) } // EachPeer iterates from closest bin to farthest. func (k *Kad) EachPeer(f topology.EachPeerFunc) error { return k.connectedPeers.EachBin(f) } // EachPeerRev iterates from farthest bin to closest. func (k *Kad) EachPeerRev(f topology.EachPeerFunc) error { return k.connectedPeers.EachBinRev(f) } // SubscribePeersChange returns the channel that signals when the connected peers // set changes. Returned function is safe to be called multiple times. func (k *Kad) SubscribePeersChange() (c <-chan struct{}, unsubscribe func()) { channel := make(chan struct{}, 1) var closeOnce sync.Once k.peerSigMtx.Lock() defer k.peerSigMtx.Unlock() k.peerSig = append(k.peerSig, channel) unsubscribe = func() { k.peerSigMtx.Lock() defer k.peerSigMtx.Unlock() for i, c := range k.peerSig { if c == channel { k.peerSig = append(k.peerSig[:i], k.peerSig[i+1:]...) break } } closeOnce.Do(func() { close(channel) }) } return channel, unsubscribe } // NeighborhoodDepth returns the current Kademlia depth. func (k *Kad) NeighborhoodDepth() uint8 { k.depthMu.RLock() defer k.depthMu.RUnlock() return k.depth } // IsBalanced returns if Kademlia is balanced to bin. func (k *Kad) IsBalanced(bin uint8) bool { k.depthMu.RLock() defer k.depthMu.RUnlock() if int(bin) > len(k.commonBinPrefixes) { return false } // for each pseudo address for i := range k.commonBinPrefixes[bin] { pseudoAddr := k.commonBinPrefixes[bin][i] closestConnectedPeer, err := closestPeer(k.connectedPeers, pseudoAddr, noopSanctionedPeerFn) if err != nil { return false } closestConnectedPO := swarm.ExtendedProximity(closestConnectedPeer.Bytes(), pseudoAddr.Bytes()) if int(closestConnectedPO) < int(bin)+k.bitSuffixLength+1 { return false } } return true } func (k *Kad) SetRadius(r uint8) { k.depthMu.Lock() defer k.depthMu.Unlock() if k.radius == r { return } k.radius = r oldD := k.depth k.depth = recalcDepth(k.connectedPeers, k.radius) if k.depth != oldD { k.notifyManageLoop() } } func (k *Kad) Snapshot() *topology.KadParams { var infos []topology.BinInfo for i := int(swarm.MaxPO); i >= 0; i-- { infos = append(infos, topology.BinInfo{}) } ss := k.collector.Snapshot(time.Now()) _ = k.connectedPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) { infos[po].BinConnected++ infos[po].ConnectedPeers = append( infos[po].ConnectedPeers, &topology.PeerInfo{ Address: addr, Metrics: createMetricsSnapshotView(ss[addr.ByteString()]), }, ) return false, false, nil }) // output (k.knownPeers ¬ k.connectedPeers) here to not repeat the peers we already have in the connected peers list _ = k.knownPeers.EachBin(func(addr swarm.Address, po uint8) (bool, bool, error) { infos[po].BinPopulation++ for _, v := range infos[po].ConnectedPeers { // peer already connected, don't show in the known peers list if v.Address.Equal(addr) { return false, false, nil } } infos[po].DisconnectedPeers = append( infos[po].DisconnectedPeers, &topology.PeerInfo{ Address: addr, Metrics: createMetricsSnapshotView(ss[addr.ByteString()]), }, ) return false, false, nil }) return &topology.KadParams{ Base: k.base.String(), Population: k.knownPeers.Length(), Connected: k.connectedPeers.Length(), Timestamp: time.Now(), NNLowWatermark: nnLowWatermark, Depth: k.NeighborhoodDepth(), Bins: topology.KadBins{ Bin0: infos[0], Bin1: infos[1], Bin2: infos[2], 
Bin3: infos[3], Bin4: infos[4], Bin5: infos[5], Bin6: infos[6], Bin7: infos[7], Bin8: infos[8], Bin9: infos[9], Bin10: infos[10], Bin11: infos[11], Bin12: infos[12], Bin13: infos[13], Bin14: infos[14], Bin15: infos[15], Bin16: infos[16], Bin17: infos[17], Bin18: infos[18], Bin19: infos[19], Bin20: infos[20], Bin21: infos[21], Bin22: infos[22], Bin23: infos[23], Bin24: infos[24], Bin25: infos[25], Bin26: infos[26], Bin27: infos[27], Bin28: infos[28], Bin29: infos[29], Bin30: infos[30], Bin31: infos[31], }, } } // String returns a string represenstation of Kademlia. func (k *Kad) String() string { j := k.Snapshot() b, err := json.MarshalIndent(j, "", " ") if err != nil { k.logger.Errorf("could not marshal kademlia into json: %v", err) return "" } return string(b) } // Halt stops outgoing connections from happening. // This is needed while we shut down, so that further topology // changes do not happen while we shut down. func (k *Kad) Halt() { close(k.halt) } // Close shuts down kademlia. func (k *Kad) Close() error { k.logger.Info("kademlia shutting down") close(k.quit) _ = k.blocker.Close() cc := make(chan struct{}) k.bgBroadcastCancel() go func() { k.wg.Wait() close(cc) }() eg := errgroup.Group{} errTimeout := errors.New("timeout") eg.Go(func() error { select { case <-cc: case <-time.After(peerConnectionAttemptTimeout): k.logger.Warning("kademlia shutting down with announce goroutines") return errTimeout } return nil }) eg.Go(func() error { select { case <-k.done: case <-time.After(time.Second * 5): k.logger.Warning("kademlia manage loop did not shut down properly") return errTimeout } return nil }) err := eg.Wait() k.logger.Info("kademlia persisting peer metrics") start := time.Now() if err := k.collector.Finalize(start, false); err != nil { k.logger.Debugf("kademlia: unable to finalize open sessions: %v", err) } k.logger.Debugf("kademlia: Finalize(...) took %v", time.Since(start)) return err } func randomSubset(addrs []swarm.Address, count int) ([]swarm.Address, error) { if count >= len(addrs) { return addrs, nil } for i := 0; i < len(addrs); i++ { b, err := random.Int(random.Reader, big.NewInt(int64(len(addrs)))) if err != nil { return nil, err } j := int(b.Int64()) addrs[i], addrs[j] = addrs[j], addrs[i] } return addrs[:count], nil } func (k *Kad) randomPeer(bin uint8) (swarm.Address, error) { peers := k.connectedPeers.BinPeers(bin) for idx := 0; idx < len(peers); { // do not consider protected peers if k.staticPeer(peers[idx]) { peers = append(peers[:idx], peers[idx+1:]...) continue } idx++ } if len(peers) == 0 { return swarm.ZeroAddress, errEmptyBin } rndIndx, err := random.Int(random.Reader, big.NewInt(int64(len(peers)))) if err != nil { return swarm.ZeroAddress, err } return peers[rndIndx.Int64()], nil } // createMetricsSnapshotView creates new topology.MetricSnapshotView from the // given metrics.Snapshot and rounds all the timestamps and durations to its // nearest second, except for the peer latency, which is given in milliseconds. 
func createMetricsSnapshotView(ss *im.Snapshot) *topology.MetricSnapshotView { if ss == nil { return nil } return &topology.MetricSnapshotView{ LastSeenTimestamp: time.Unix(0, ss.LastSeenTimestamp).Unix(), SessionConnectionRetry: ss.SessionConnectionRetry, ConnectionTotalDuration: ss.ConnectionTotalDuration.Truncate(time.Second).Seconds(), SessionConnectionDuration: ss.SessionConnectionDuration.Truncate(time.Second).Seconds(), SessionConnectionDirection: string(ss.SessionConnectionDirection), LatencyEWMA: ss.LatencyEWMA.Milliseconds(), } } // isNetworkError is checking various conditions that relate to network problems. func isNetworkError(err error) bool { var netOpErr *net.OpError if errors.As(err, &netOpErr) { if netOpErr.Op == "dial" { return true } if netOpErr.Op == "read" { return true } } if errors.Is(err, syscall.ECONNREFUSED) { return true } if errors.Is(err, syscall.EPIPE) { return true } if errors.Is(err, syscall.ETIMEDOUT) { return true } return false }
1
15,801
This should happen before the `go k.manage()` call; otherwise the node will always try the bootnodes first (see the sketch after this record).
ethersphere-bee
go
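The reviewer's comment above refers to the ordering inside `Kad.Start` in the file shown in this record, where `k.notifyManageLoop()` is called only after the manage goroutine and the address-book warm-up goroutine have been started. The fragment below is a minimal sketch of the suggested ordering, not the project's final code: the method and field names are taken from the file above, the address-book iteration body is elided, and it assumes `manageC` is a buffered signal channel of capacity 1 so that a notification queued before the loop starts is not dropped by the non-blocking send in `notifyManageLoop`.

func (k *Kad) Start(_ context.Context) error {
	// Queue the first manage-loop notification before the loop goroutine
	// starts; per the reviewer, sending it later means the node always
	// tries the bootnodes first.
	// Assumption: manageC is buffered (capacity 1), otherwise the
	// non-blocking send in notifyManageLoop would drop this early signal.
	k.notifyManageLoop()

	k.wg.Add(1)
	go k.manage()

	go func() {
		// ... iterate the address book and call k.AddPeers in batches,
		// exactly as in the original file ...
	}()

	return nil
}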
@@ -185,7 +185,7 @@ Blockly.ScratchBlocks.ProcedureUtils.removeAllInputs_ = function() { */ Blockly.ScratchBlocks.ProcedureUtils.createAllInputs_ = function(connectionMap) { // Split the proc into components, by %n, %b, and %s (ignoring escaped). - var procComponents = this.procCode_.split(/(?=[^\\]\%[nbs])/); + var procComponents = this.procCode_.split(/(?=[^\\]%[nbs])/); procComponents = procComponents.map(function(c) { return c.trim(); // Strip whitespace. });
1
/** * @license * Visual Blocks Editor * * Copyright 2012 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Procedure blocks for Scratch. */ 'use strict'; goog.provide('Blockly.Blocks.procedures'); goog.require('Blockly.Blocks'); goog.require('Blockly.constants'); // TODO: Create a namespace properly. Blockly.ScratchBlocks.ProcedureUtils = {}; // Serialization and deserialization. /** * Create XML to represent the (non-editable) name and arguments of a procedure * call block. * @return {!Element} XML storage element. * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.callerMutationToDom = function() { var container = document.createElement('mutation'); container.setAttribute('proccode', this.procCode_); container.setAttribute('argumentids', JSON.stringify(this.argumentIds_)); container.setAttribute('warp', JSON.stringify(this.warp_)); return container; }; /** * Parse XML to restore the (non-editable) name and arguments of a procedure * call block. * @param {!Element} xmlElement XML storage element. * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.callerDomToMutation = function(xmlElement) { this.procCode_ = xmlElement.getAttribute('proccode'); this.argumentIds_ = JSON.parse(xmlElement.getAttribute('argumentids')); this.warp_ = JSON.parse(xmlElement.getAttribute('warp')); this.updateDisplay_(); }; /** * Create XML to represent the (non-editable) name and arguments of a * procedures_prototype block or a procedures_declaration block. * @return {!Element} XML storage element. * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.definitionMutationToDom = function() { var container = document.createElement('mutation'); container.setAttribute('proccode', this.procCode_); container.setAttribute('argumentids', JSON.stringify(this.argumentIds_)); container.setAttribute('argumentnames', JSON.stringify(this.displayNames_)); container.setAttribute('argumentdefaults', JSON.stringify(this.argumentDefaults_)); container.setAttribute('warp', JSON.stringify(this.warp_)); return container; }; /** * Parse XML to restore the (non-editable) name and arguments of a * procedures_prototype block or a procedures_declaration block. * @param {!Element} xmlElement XML storage element. * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.definitionDomToMutation = function(xmlElement) { this.procCode_ = xmlElement.getAttribute('proccode'); this.warp_ = JSON.parse(xmlElement.getAttribute('warp')); this.argumentIds_ = JSON.parse(xmlElement.getAttribute('argumentids')); this.displayNames_ = JSON.parse(xmlElement.getAttribute('argumentnames')); this.argumentDefaults_ = JSON.parse( xmlElement.getAttribute('argumentdefaults')); this.updateDisplay_(); }; // End of serialization and deserialization. // Shared by all three procedure blocks (procedures_declaration, // procedures_call, and procedures_prototype). /** * Returns the name of the procedure this block calls, or the empty string if * it has not yet been set. 
* @return {string} Procedure name. * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.getProcCode = function() { return this.procCode_; }; /** * Update the block's structure and appearance to match the internally stored * mutation. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.updateDisplay_ = function() { var wasRendered = this.rendered; this.rendered = false; var connectionMap = this.disconnectOldBlocks_(); this.removeAllInputs_(); this.createAllInputs_(connectionMap); this.deleteShadows_(connectionMap); this.rendered = wasRendered; if (wasRendered && !this.isInsertionMarker()) { this.initSvg(); this.render(); } }; /** * Disconnect old blocks from all value inputs on this block, but hold onto them * in case they can be reattached later. * @return {!Object.<string, Blockly.Block>} An object mapping argument IDs to * the blocks that were connected to those IDs at the beginning of the * mutation. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.disconnectOldBlocks_ = function() { // Remove old stuff var connectionMap = {}; for (var i = 0, input; input = this.inputList[i]; i++) { if (input.connection) { // Remove the shadow DOM. Otherwise a shadow block will respawn // instantly, and we'd have to remove it when we remove the input. input.connection.setShadowDom(null); var target = input.connection.targetBlock(); connectionMap[input.name] = target; if (target) { input.connection.disconnect(); } } } return connectionMap; }; /** * Remove all inputs on the block, including dummy inputs. * Assumes no input has shadow DOM set. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.removeAllInputs_ = function() { // Delete inputs directly instead of with block.removeInput to avoid splicing // out of the input list at every index. for (var i = 0, input; input = this.inputList[i]; i++) { input.dispose(); } this.inputList = []; }; /** * Create all inputs specified by the new procCode, and populate them with * shadow blocks or reconnected old blocks as appropriate. * @param {!Object.<string, Blockly.Block>} connectionMap An object mapping * argument IDs to the blocks that were connected to those IDs at the * beginning of the mutation. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.createAllInputs_ = function(connectionMap) { // Split the proc into components, by %n, %b, and %s (ignoring escaped). var procComponents = this.procCode_.split(/(?=[^\\]\%[nbs])/); procComponents = procComponents.map(function(c) { return c.trim(); // Strip whitespace. }); // Create arguments and labels as appropriate. var argumentCount = 0; for (var i = 0, component; component = procComponents[i]; i++) { var labelText; if (component.substring(0, 1) == '%') { var argumentType = component.substring(1, 2); if (!(argumentType == 'n' || argumentType == 'b' || argumentType == 's')) { throw new Error( 'Found an custom procedure with an invalid type: ' + argumentType); } labelText = component.substring(2).trim(); var id = this.argumentIds_[argumentCount]; var oldBlock = null; if (connectionMap && (id in connectionMap)) { oldBlock = connectionMap[id]; } var input = this.appendValueInput(id); if (argumentType == 'b') { input.setCheck('Boolean'); } this.populateArgument_(argumentType, argumentCount, connectionMap, oldBlock, input); argumentCount++; } else { labelText = component.trim(); } this.addProcedureLabel_(labelText.replace(/\\%/, '%')); } }; /** * Delete all shadow blocks in the given map. 
* @param {!Object.<string, Blockly.Block>} connectionMap An object mapping * argument IDs to the blocks that were connected to those IDs at the * beginning of the mutation. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.deleteShadows_ = function(connectionMap) { // Get rid of all of the old shadow blocks if they aren't connected. if (connectionMap) { for (var id in connectionMap) { var block = connectionMap[id]; if (block && block.isShadow()) { block.dispose(); connectionMap[id] = null; } } } }; // End of shared code. /** * Add a label field with the given text to a procedures_call or * procedures_prototype block. * @param {string} text The label text. * @private */ Blockly.ScratchBlocks.ProcedureUtils.addLabelField_ = function(text) { this.appendDummyInput().appendField(text); }; /** * Add a label editor with the given text to a procedures_declaration * block. Editing the text in the label editor updates the text of the * corresponding label fields on function calls. * @param {string} text The label text. * @private */ Blockly.ScratchBlocks.ProcedureUtils.addLabelEditor_ = function(text) { if (text) { this.appendDummyInput(Blockly.utils.genUid()). appendField(new Blockly.FieldTextInputRemovable(text)); } }; /** * Build a DOM node representing a shadow block of the given type. * @param {string} type One of 's' (string) or 'n' (number). * @return {!Element} The DOM node representing the new shadow block. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.buildShadowDom_ = function(type) { var shadowDom = goog.dom.createDom('shadow'); if (type == 'n') { var shadowType = 'math_number'; var fieldName = 'NUM'; var fieldValue = '1'; } else { var shadowType = 'text'; var fieldName = 'TEXT'; var fieldValue = ''; } shadowDom.setAttribute('type', shadowType); var fieldDom = goog.dom.createDom('field', null, fieldValue); fieldDom.setAttribute('name', fieldName); shadowDom.appendChild(fieldDom); return shadowDom; }; /** * Create a new shadow block and attach it to the given input. * @param {!Blockly.Input} input The value input to attach a block to. * @param {string} argumentType One of 'b' (boolean), 's' (string) or * 'n' (number). * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.attachShadow_ = function(input, argumentType) { if (argumentType == 'n' || argumentType == 's') { var blockType = argumentType == 'n' ? 'math_number' : 'text'; Blockly.Events.disable(); try { var newBlock = this.workspace.newBlock(blockType); if (argumentType == 'n') { newBlock.setFieldValue('1', 'NUM'); } else { newBlock.setFieldValue('', 'TEXT'); } newBlock.setShadow(true); if (!this.isInsertionMarker()) { newBlock.initSvg(); newBlock.render(false); } } finally { Blockly.Events.enable(); } if (Blockly.Events.isEnabled()) { Blockly.Events.fire(new Blockly.Events.BlockCreate(newBlock)); } newBlock.outputConnection.connect(input.connection); } }; /** * Create a new argument reporter block. * @param {string} argumentType One of 'b' (boolean), 's' (string) or * 'n' (number). * @param {string} displayName The name of the argument as provided by the * user, which becomes the text of the label on the argument reporter block. * @return {!Blockly.BlockSvg} The newly created argument reporter block. 
* @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.createArgumentReporter_ = function( argumentType, displayName) { if (argumentType == 'n' || argumentType == 's') { var blockType = 'argument_reporter_string_number'; } else { var blockType = 'argument_reporter_boolean'; } Blockly.Events.disable(); try { var newBlock = this.workspace.newBlock(blockType); newBlock.setShadow(true); newBlock.setFieldValue(displayName, 'VALUE'); if (!this.isInsertionMarker()) { newBlock.initSvg(); newBlock.render(false); } } finally { Blockly.Events.enable(); } if (Blockly.Events.isEnabled()) { Blockly.Events.fire(new Blockly.Events.BlockCreate(newBlock)); } return newBlock; }; /** * Populate the argument by attaching the correct child block or shadow to the * given input. * @param {string} type One of 'b' (boolean), 's' (string) or 'n' (number). * @param {number} index The index of this argument into the argument id array. * @param {!Object.<string, Blockly.Block>} connectionMap An object mapping * argument IDs to the blocks that were connected to those IDs at the * beginning of the mutation. * @param {Blockly.BlockSvg} oldBlock The block that was previously connected to * this input, or null. * @param {!Blockly.Input} input The newly created input to populate. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.populateArgumentOnCaller_ = function(type, index, connectionMap, oldBlock, input) { if (connectionMap && oldBlock) { // Reattach the old block. connectionMap[input.name] = null; oldBlock.outputConnection.connect(input.connection); if (type != 'b') { // TODO: Preserve old shadow DOM. input.connection.setShadowDom(this.buildShadowDom_(type)); } } else { this.attachShadow_(input, type); } }; /** * Populate the argument by attaching the correct argument reporter to the given * input. * @param {string} type One of 'b' (boolean), 's' (string) or 'n' (number). * @param {number} index The index of this argument into the argument ID and * argument display name arrays. * @param {!Object.<string, Blockly.Block>} connectionMap An object mapping * argument IDs to the blocks that were connected to those IDs at the * beginning of the mutation. * @param {Blockly.BlockSvg} oldBlock The argument reporter that was previously * connected to this input, or null. * @param {!Blockly.Input} input The newly created input to populate. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.populateArgumentOnPrototype_ = function( type, index, connectionMap, oldBlock, input) { var oldTypeMatches = Blockly.ScratchBlocks.ProcedureUtils.checkOldTypeMatches_(oldBlock, type); var displayName = this.displayNames_[index]; // Decide which block to attach. if (connectionMap && oldBlock && oldTypeMatches) { // Update the text if needed. The old argument reporter is the same type, // and on the same input, but the argument's display name may have changed. var argumentReporter = oldBlock; argumentReporter.setFieldValue(displayName, 'VALUE'); connectionMap[input.name] = null; } else { var argumentReporter = this.createArgumentReporter_(type, displayName); } // Attach the block. input.connection.connect(argumentReporter.outputConnection); }; /** * Populate the argument by attaching the correct argument editor to the given * input. * @param {string} type One of 'b' (boolean), 's' (string) or 'n' (number). * @param {number} index The index of this argument into the argument id and * argument display name arrays. 
* @param {!Object.<string, Blockly.Block>} connectionMap An object mapping * argument IDs to the blocks that were connected to those IDs at the * beginning of the mutation. * @param {Blockly.BlockSvg} oldBlock The block that was previously connected to * this input, or null. * @param {!Blockly.Input} input The newly created input to populate. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.populateArgumentOnDeclaration_ = function( type, index, connectionMap, oldBlock, input) { // TODO: This always returns false, because it checks for argument reporter // blocks instead of argument editor blocks. Create a new version for argument // editors. var oldTypeMatches = Blockly.ScratchBlocks.ProcedureUtils.checkOldTypeMatches_(oldBlock, type); var displayName = this.displayNames_[index]; // Decide which block to attach. if (connectionMap && oldBlock && oldTypeMatches) { var argumentEditor = oldBlock; oldBlock.setFieldValue(displayName, 'TEXT'); connectionMap[input.name] = null; } else { var argumentEditor = this.createArgumentEditor_(type, displayName); } // Attach the block. input.connection.connect(argumentEditor.outputConnection); }; /** * Check whether the type of the old block corresponds to the given argument * type. * @param {Blockly.BlockSvg} oldBlock The old block to check. * @param {string} type The argument type. One of 'n', 'n', or 's'. * @return {boolean} True if the type matches, false otherwise. */ Blockly.ScratchBlocks.ProcedureUtils.checkOldTypeMatches_ = function(oldBlock, type) { if (!oldBlock) { return false; } if ((type == 'n' || type == 's') && oldBlock.type == 'argument_reporter_string_number') { return true; } if (type == 'b' && oldBlock.type == 'argument_reporter_boolean') { return true; } return false; }; /** * Create an argument editor. * An argument editor is a shadow block with a single text field, which is used * to set the display name of the argument. * @param {string} argumentType One of 'b' (boolean), 's' (string) or * 'n' (number). * @param {string} displayName The display name of this argument, which is the * text of the field on the shadow block. * @return {!Blockly.BlockSvg} The newly created argument editor block. * @private * @this Blockly.Block */ Blockly.ScratchBlocks.ProcedureUtils.createArgumentEditor_ = function( argumentType, displayName) { Blockly.Events.disable(); try { if (argumentType == 'n' || argumentType == 's') { var newBlock = this.workspace.newBlock('argument_editor_string_number'); } else { var newBlock = this.workspace.newBlock('argument_editor_boolean'); } newBlock.setFieldValue(displayName, 'TEXT'); newBlock.setShadow(true); if (!this.isInsertionMarker()) { newBlock.initSvg(); newBlock.render(false); } } finally { Blockly.Events.enable(); } if (Blockly.Events.isEnabled()) { Blockly.Events.fire(new Blockly.Events.BlockCreate(newBlock)); } return newBlock; }; /** * Update the serializable information on the block based on the existing inputs * and their text. */ Blockly.ScratchBlocks.ProcedureUtils.updateDeclarationProcCode_ = function() { this.procCode_ = ''; this.displayNames_ = []; this.argumentIds_ = []; for (var i = 0; i < this.inputList.length; i++) { if (i != 0) { this.procCode_ += ' '; } var input = this.inputList[i]; if (input.type == Blockly.DUMMY_INPUT) { this.procCode_ += input.fieldRow[0].getValue(); } else if (input.type == Blockly.INPUT_VALUE) { // Inspect the argument editor. 
var target = input.connection.targetBlock(); this.displayNames_.push(target.getFieldValue('TEXT')); this.argumentIds_.push(input.name); if (target.type == 'argument_editor_boolean') { this.procCode_ += '%b'; } else { this.procCode_ += '%s'; } } else { throw new Error( 'Unexpected input type on a procedure mutator root: ' + input.type); } } }; /** * Focus on the last argument editor or label editor on the block. * @private */ Blockly.ScratchBlocks.ProcedureUtils.focusLastEditor_ = function() { if (this.inputList.length > 0) { var newInput = this.inputList[this.inputList.length - 1]; if (newInput.type == Blockly.DUMMY_INPUT) { newInput.fieldRow[0].showEditor_(); } else if (newInput.type == Blockly.INPUT_VALUE) { // Inspect the argument editor. var target = newInput.connection.targetBlock(); target.getField('TEXT').showEditor_(); } } }; /** * Externally-visible function to add a label to the procedure declaration. * @public */ Blockly.ScratchBlocks.ProcedureUtils.addLabelExternal = function() { Blockly.WidgetDiv.hide(true); this.procCode_ = this.procCode_ + ' label text'; this.updateDisplay_(); this.focusLastEditor_(); }; /** * Externally-visible function to add a boolean argument to the procedure * declaration. * @public */ Blockly.ScratchBlocks.ProcedureUtils.addBooleanExternal = function() { Blockly.WidgetDiv.hide(true); this.procCode_ = this.procCode_ + ' %b'; this.displayNames_.push('boolean'); this.argumentIds_.push(Blockly.utils.genUid()); this.argumentDefaults_.push('todo'); this.updateDisplay_(); this.focusLastEditor_(); }; /** * Externally-visible function to add a string/number argument to the procedure * declaration. * @public */ Blockly.ScratchBlocks.ProcedureUtils.addStringNumberExternal = function() { Blockly.WidgetDiv.hide(true); this.procCode_ = this.procCode_ + ' %s'; this.displayNames_.push('number or text'); this.argumentIds_.push(Blockly.utils.genUid()); this.argumentDefaults_.push('todo'); this.updateDisplay_(); this.focusLastEditor_(); }; /** * Externally-visible function to get the warp on procedure declaration. * @return {boolean} The value of the warp_ property. * @public */ Blockly.ScratchBlocks.ProcedureUtils.getWarp = function() { return this.warp_; }; /** * Externally-visible function to set the warp on procedure declaration. * @param {boolean} warp The value of the warp_ property. * @public */ Blockly.ScratchBlocks.ProcedureUtils.setWarp = function(warp) { this.warp_ = warp; }; /** * Callback to remove a field, only for the declaration block. * @param {Blockly.Field} field The field being removed. * @public */ Blockly.ScratchBlocks.ProcedureUtils.removeFieldCallback = function(field) { // Do not delete if there is only one input if (this.inputList.length === 1) { return; } var inputNameToRemove = null; for (var n = 0; n < this.inputList.length; n++) { var input = this.inputList[n]; if (input.connection) { var target = input.connection.targetBlock(); if (target.getField(field.name) == field) { inputNameToRemove = input.name; } } else { for (var j = 0; j < input.fieldRow.length; j++) { if (input.fieldRow[j] == field) { inputNameToRemove = input.name; } } } } if (inputNameToRemove) { Blockly.WidgetDiv.hide(true); this.removeInput(inputNameToRemove); this.onChangeFn(); this.updateDisplay_(); } }; /** * Callback to pass removeField up to the declaration block from arguments. * @param {Blockly.Field} field The field being removed. 
* @public */ Blockly.ScratchBlocks.ProcedureUtils.removeArgumentCallback_ = function( field) { if (this.parentBlock_ && this.parentBlock_.removeFieldCallback) { this.parentBlock_.removeFieldCallback(field); } }; Blockly.Blocks['procedures_definition'] = { /** * Block for defining a procedure with no return value. * @this Blockly.Block */ init: function() { this.jsonInit({ "message0": "define %1", "args0": [ { "type": "input_statement", "name": "custom_block" } ], "extensions": ["colours_more", "shape_hat", "procedure_def_contextmenu"] }); } }; Blockly.Blocks['procedures_call'] = { /** * Block for calling a procedure with no return value. * @this Blockly.Block */ init: function() { this.jsonInit({ "extensions": ["colours_more", "shape_statement", "procedure_call_contextmenu"] }); this.procCode_ = ''; this.argumentIds_ = []; this.warp_ = false; }, // Shared. getProcCode: Blockly.ScratchBlocks.ProcedureUtils.getProcCode, removeAllInputs_: Blockly.ScratchBlocks.ProcedureUtils.removeAllInputs_, disconnectOldBlocks_: Blockly.ScratchBlocks.ProcedureUtils.disconnectOldBlocks_, deleteShadows_: Blockly.ScratchBlocks.ProcedureUtils.deleteShadows_, createAllInputs_: Blockly.ScratchBlocks.ProcedureUtils.createAllInputs_, updateDisplay_: Blockly.ScratchBlocks.ProcedureUtils.updateDisplay_, // Exist on all three blocks, but have different implementations. mutationToDom: Blockly.ScratchBlocks.ProcedureUtils.callerMutationToDom, domToMutation: Blockly.ScratchBlocks.ProcedureUtils.callerDomToMutation, populateArgument_: Blockly.ScratchBlocks.ProcedureUtils.populateArgumentOnCaller_, addProcedureLabel_: Blockly.ScratchBlocks.ProcedureUtils.addLabelField_, // Only exists on the external caller. attachShadow_: Blockly.ScratchBlocks.ProcedureUtils.attachShadow_, buildShadowDom_: Blockly.ScratchBlocks.ProcedureUtils.buildShadowDom_ }; Blockly.Blocks['procedures_prototype'] = { /** * Block for calling a procedure with no return value, for rendering inside * define block. * @this Blockly.Block */ init: function() { this.jsonInit({ "extensions": ["colours_more", "shape_statement"] }); /* Data known about the procedure. */ this.procCode_ = ''; this.displayNames_ = []; this.argumentIds_ = []; this.argumentDefaults_ = []; this.warp_ = false; }, // Shared. getProcCode: Blockly.ScratchBlocks.ProcedureUtils.getProcCode, removeAllInputs_: Blockly.ScratchBlocks.ProcedureUtils.removeAllInputs_, disconnectOldBlocks_: Blockly.ScratchBlocks.ProcedureUtils.disconnectOldBlocks_, deleteShadows_: Blockly.ScratchBlocks.ProcedureUtils.deleteShadows_, createAllInputs_: Blockly.ScratchBlocks.ProcedureUtils.createAllInputs_, updateDisplay_: Blockly.ScratchBlocks.ProcedureUtils.updateDisplay_, // Exist on all three blocks, but have different implementations. mutationToDom: Blockly.ScratchBlocks.ProcedureUtils.definitionMutationToDom, domToMutation: Blockly.ScratchBlocks.ProcedureUtils.definitionDomToMutation, populateArgument_: Blockly.ScratchBlocks.ProcedureUtils.populateArgumentOnPrototype_, addProcedureLabel_: Blockly.ScratchBlocks.ProcedureUtils.addLabelField_, // Only exists on procedures_prototype. createArgumentReporter_: Blockly.ScratchBlocks.ProcedureUtils.createArgumentReporter_ }; Blockly.Blocks['procedures_declaration'] = { /** * The root block in the procedure declaration editor. * @this Blockly.Block */ init: function() { this.jsonInit({ "extensions": ["colours_more", "shape_statement"] }); /* Data known about the procedure. 
*/ this.procCode_ = ''; this.displayNames_ = []; this.argumentIds_ = []; this.argumentDefaults_ = []; this.warp_ = false; }, // Shared. getProcCode: Blockly.ScratchBlocks.ProcedureUtils.getProcCode, removeAllInputs_: Blockly.ScratchBlocks.ProcedureUtils.removeAllInputs_, disconnectOldBlocks_: Blockly.ScratchBlocks.ProcedureUtils.disconnectOldBlocks_, deleteShadows_: Blockly.ScratchBlocks.ProcedureUtils.deleteShadows_, createAllInputs_: Blockly.ScratchBlocks.ProcedureUtils.createAllInputs_, updateDisplay_: Blockly.ScratchBlocks.ProcedureUtils.updateDisplay_, // Exist on all three blocks, but have different implementations. mutationToDom: Blockly.ScratchBlocks.ProcedureUtils.definitionMutationToDom, domToMutation: Blockly.ScratchBlocks.ProcedureUtils.definitionDomToMutation, populateArgument_: Blockly.ScratchBlocks.ProcedureUtils.populateArgumentOnDeclaration_, addProcedureLabel_: Blockly.ScratchBlocks.ProcedureUtils.addLabelEditor_, // Exist on declaration and arguments editors, with different implementations. removeFieldCallback: Blockly.ScratchBlocks.ProcedureUtils.removeFieldCallback, // Only exist on procedures_declaration. createArgumentEditor_: Blockly.ScratchBlocks.ProcedureUtils.createArgumentEditor_, focusLastEditor_: Blockly.ScratchBlocks.ProcedureUtils.focusLastEditor_, getWarp: Blockly.ScratchBlocks.ProcedureUtils.getWarp, setWarp: Blockly.ScratchBlocks.ProcedureUtils.setWarp, addLabelExternal: Blockly.ScratchBlocks.ProcedureUtils.addLabelExternal, addBooleanExternal: Blockly.ScratchBlocks.ProcedureUtils.addBooleanExternal, addStringNumberExternal: Blockly.ScratchBlocks.ProcedureUtils.addStringNumberExternal, onChangeFn: Blockly.ScratchBlocks.ProcedureUtils.updateDeclarationProcCode_ }; Blockly.Blocks['argument_reporter_boolean'] = { init: function() { this.jsonInit({ "message0": " %1", "args0": [ { "type": "field_label_serializable", "name": "VALUE", "text": "" } ], "extensions": ["colours_more", "output_boolean"] }); } }; Blockly.Blocks['argument_reporter_string_number'] = { init: function() { this.jsonInit({ "message0": " %1", "args0": [ { "type": "field_label_serializable", "name": "VALUE", "text": "" } ], "extensions": ["colours_more", "output_number", "output_string"] }); } }; Blockly.Blocks['argument_editor_boolean'] = { init: function() { this.jsonInit({ "message0": " %1", "args0": [ { "type": "field_input_removable", "name": "TEXT", "text": "foo" } ], "colour": Blockly.Colours.textField, "colourSecondary": Blockly.Colours.textField, "colourTertiary": Blockly.Colours.textField, "extensions": ["output_boolean"] }); }, // Exist on declaration and arguments editors, with different implementations. removeFieldCallback: Blockly.ScratchBlocks.ProcedureUtils.removeArgumentCallback_ }; Blockly.Blocks['argument_editor_string_number'] = { init: function() { this.jsonInit({ "message0": " %1", "args0": [ { "type": "field_input_removable", "name": "TEXT", "text": "foo" } ], "colour": Blockly.Colours.textField, "colourSecondary": Blockly.Colours.textField, "colourTertiary": Blockly.Colours.textField, "extensions": ["output_number", "output_string"] }); }, // Exist on declaration and arguments editors, with different implementations. removeFieldCallback: Blockly.ScratchBlocks.ProcedureUtils.removeArgumentCallback_ };
1
9,070
Is this actually a lint-related change? (see the note after this record)
LLK-scratch-blocks
js
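The patch in this record only removes the backslash before `%` in the split regex. In a JavaScript regular expression without the `u` flag, `\%` is an identity escape that matches a literal `%`, so the change is behaviour-neutral and is the kind of redundant escape a linter typically flags (e.g. ESLint's no-useless-escape rule); that is presumably the answer to the question above. As a small cross-check, written in Go to keep this document's added examples in one language: Go's RE2 treats escaped punctuation the same way, though it does not support the `(?=...)` lookahead used in the original pattern, so only the escape equivalence is shown. The sample string below is illustrative, not taken from the record.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Editor's illustration, not part of the original record.
	// Both patterns match a non-backslash character followed by a literal
	// '%' and one of n, b, or s; the escape before '%' is redundant.
	escaped := regexp.MustCompile(`[^\\]\%[nbs]`)
	plain := regexp.MustCompile(`[^\\]%[nbs]`)

	sample := "move %n steps %s"
	fmt.Println(escaped.FindAllString(sample, -1)) // prints: [ %n  %s]
	fmt.Println(plain.FindAllString(sample, -1))   // prints: [ %n  %s]
}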
@@ -15,6 +15,7 @@ module Blacklight::Document autoload :ActiveModelShim, 'blacklight/document/active_model_shim' autoload :SchemaOrg, 'blacklight/document/schema_org' + autoload :CacheKey, 'blacklight/document/cache_key' autoload :DublinCore, 'blacklight/document/dublin_core' autoload :Email, 'blacklight/document/email' autoload :SemanticFields, 'blacklight/document/semantic_fields'
1
## ## # = Introduction # Blacklight::Document is the module with logic for a class representing # an individual document returned from Solr results. It can be added in to any # local class you want, but in default Blacklight a SolrDocument class is # provided for you which is pretty much a blank class "include"ing # Blacklight::Document. # # Blacklight::Document provides some DefaultFinders. # # It also provides support for Document Extensions, which advertise supported # transformation formats. # module Blacklight::Document autoload :ActiveModelShim, 'blacklight/document/active_model_shim' autoload :SchemaOrg, 'blacklight/document/schema_org' autoload :DublinCore, 'blacklight/document/dublin_core' autoload :Email, 'blacklight/document/email' autoload :SemanticFields, 'blacklight/document/semantic_fields' autoload :Sms, 'blacklight/document/sms' autoload :Extensions, 'blacklight/document/extensions' autoload :Export, 'blacklight/document/export' extend ActiveSupport::Concern include Blacklight::Document::SchemaOrg include Blacklight::Document::SemanticFields include Blacklight::Document::Export extend Deprecation included do extend ActiveModel::Naming include Blacklight::Document::Extensions end attr_reader :response, :_source alias_method :solr_response, :response def initialize(source_doc={}, response=nil) @_source = source_doc.with_indifferent_access @response = response apply_extensions end # the wrapper method to the @_source object. # If a method is missing, it gets sent to @_source # with all of the original params and block def method_missing(m, *args, &b) if _source_responds_to?(m) _source.send(m, *args, &b) else super end end def respond_to_missing? *args _source_responds_to?(*args) || super end # Helper method to check if value/multi-values exist for a given key. # The value can be a string, or a RegExp # Multiple "values" can be given; only one needs to match. # # Example: # doc.has?(:location_facet) # doc.has?(:location_facet, 'Clemons') # doc.has?(:id, 'h009', /^u/i) def has?(k, *values) if !key?(k) false elsif values.empty? self[k].present? else Array(values).any? do |expected| Array(self[k]).any? do |actual| case expected when Regexp actual =~ expected else actual == expected end end end end end alias_method :has_field?, :has? def key? k _source.key? k end alias_method :has_key?, :key? # helper # key is the name of the field # opts is a hash with the following valid keys: # - :sep - a string used for joining multivalued field values # - :default - a value to return when the key doesn't exist # if :sep is nil and the field is a multivalued field, the array is returned def get(key, opts={:sep=>', ', :default=>nil}) val = fetch(key, opts[:default]) if val.is_a?(Array) and opts[:sep] Deprecation.warn(Blacklight::Solr::Document, "#{self.class}#get with a :sep option is deprecated; use #[] or #fetch and join the values using e.g. Array#to_sentence") unless opts[:sep].nil? val.join(opts[:sep]) else val end end deprecation_deprecate get: "Use #[] or #fetch instead" def fetch key, *default if key? key self[key] elsif default.empty? and !block_given? raise KeyError.new("key not found \"#{key}\"") else (yield(self) if block_given?) || default.first end end def first key Array(self[key]).first end def to_partial_path 'catalog/document' end def has_highlight_field? k false end def highlight_field k nil end ## # Implementations that support More-Like-This should override this method # to return an array of documents that are like this one. 
def more_like_this [] end # Certain class-level methods needed for the document-specific # extendability architecture module ClassMethods attr_writer :unique_key def unique_key @unique_key ||= 'id' end end private def _source_responds_to? *args _source && self != _source && _source.respond_to?(*args) end end
1
5961
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
projectblacklight-blacklight
rb
@@ -226,6 +226,12 @@ def opt_str_param(obj, param_name, default=None): return default if obj is None else obj +def opt_nonempty_str_param(obj, param_name, default=None): + if obj is not None and not isinstance(obj, string_types): + raise_with_traceback(_param_type_mismatch_exception(obj, str, param_name)) + return default if obj is None or obj == '' else obj + + def bool_param(obj, param_name): if not isinstance(obj, bool): raise_with_traceback(_param_type_mismatch_exception(obj, bool, param_name))
1
import inspect from future.utils import raise_with_traceback from six import string_types class CheckError(Exception): pass class ParameterCheckError(CheckError): pass class ElementCheckError(CheckError): pass class NotImplementedCheckError(CheckError): pass def _param_type_mismatch_exception(obj, ttype, param_name): if isinstance(ttype, tuple): type_names = sorted([t.__name__ for t in ttype]) return ParameterCheckError( 'Param "{name}" is not one of {type_names}. Got {obj} which is type {obj_type}.'.format( name=param_name, obj=repr(obj), type_names=type_names, obj_type=type(obj) ) ) else: return ParameterCheckError( 'Param "{name}" is not a {type}. Got {obj} which is type {obj_type}.'.format( name=param_name, obj=repr(obj), type=ttype.__name__, obj_type=type(obj) ) ) def _not_type_param_subclass_mismatch_exception(obj, param_name): return ParameterCheckError( 'Param "{name}" was supposed to be a type. Got {obj} instead of type {obj_type}'.format( name=param_name, obj=repr(obj), obj_type=type(obj) ) ) def _param_subclass_mismatch_exception(obj, superclass, param_name): return ParameterCheckError( 'Param "{name}" is a type but not a subclass of {superclass}. Got {obj} instead'.format( name=param_name, superclass=superclass, obj=obj ) ) def _type_mismatch_error(obj, ttype, desc=None): if desc: return CheckError( 'Object {obj} is not a {type}. Got {obj} with type {obj_type}. Desc: {desc}'.format( obj=repr(obj), type=ttype.__name__, obj_type=type(obj), desc=desc ) ) else: return CheckError( 'Object {obj} is not a {type}. Got {obj} with type {obj_type}.'.format( obj=repr(obj), type=ttype.__name__, obj_type=type(obj) ) ) def _not_callable_exception(obj, param_name): return ParameterCheckError( 'Param "{name}" is not callable. Got {obj} with type {obj_type}.'.format( name=param_name, obj=repr(obj), obj_type=type(obj) ) ) def _param_invariant_exception(param_name, desc): return ParameterCheckError( 'Invariant violation for parameter {param_name}. Description: {desc}'.format( param_name=param_name, desc=desc ) ) def failed(desc): if not _is_str(desc): raise_with_traceback(CheckError('desc argument must be a string')) raise_with_traceback(CheckError('Failure condition: {desc}'.format(desc=desc))) def not_implemented(desc): if not _is_str(desc): raise_with_traceback(CheckError('desc argument must be a string')) raise_with_traceback(NotImplementedCheckError('Not implemented: {desc}'.format(desc=desc))) def inst(obj, ttype, desc=None): if not isinstance(obj, ttype): raise_with_traceback(_type_mismatch_error(obj, ttype, desc)) return obj def is_callable(obj, desc=None): if not callable(obj): if desc: raise_with_traceback( CheckError( 'Must be callable. Got {obj}. Description: {desc}'.format( obj=repr(obj), desc=desc ) ) ) else: raise_with_traceback( CheckError( 'Must be callable. Got {obj}. Description: {desc}'.format(obj=obj, desc=desc) ) ) return obj def not_none_param(obj, param_name): if obj is None: raise_with_traceback( _param_invariant_exception( param_name, 'Param {param_name} cannot be none'.format(param_name=param_name) ) ) return obj def invariant(condition, desc=None): if not condition: if desc: raise_with_traceback( CheckError('Invariant failed. 
Description: {desc}'.format(desc=desc)) ) else: raise_with_traceback(CheckError('Invariant failed.')) return True def param_invariant(condition, param_name, desc=None): if not condition: raise_with_traceback(_param_invariant_exception(param_name, desc)) def inst_param(obj, param_name, ttype): if not isinstance(obj, ttype): raise_with_traceback(_param_type_mismatch_exception(obj, ttype, param_name)) return obj def opt_inst_param(obj, param_name, ttype, default=None): if obj is not None and not isinstance(obj, ttype): raise_with_traceback(_param_type_mismatch_exception(obj, ttype, param_name)) return default if obj is None else obj def callable_param(obj, param_name): if not callable(obj): raise_with_traceback(_not_callable_exception(obj, param_name)) return obj def opt_callable_param(obj, param_name, default=None): if obj is not None and not callable(obj): raise_with_traceback(_not_callable_exception(obj, param_name)) return default if obj is None else obj def int_param(obj, param_name): if not isinstance(obj, int): raise_with_traceback(_param_type_mismatch_exception(obj, int, param_name)) return obj def int_value_param(obj, value, param_name): if not isinstance(obj, int): raise_with_traceback(_param_type_mismatch_exception(obj, int, param_name)) if obj != value: raise_with_traceback( _param_invariant_exception(param_name, "Should be equal to {value}".format(value=value)) ) return obj def opt_int_param(obj, param_name): if obj is not None and not isinstance(obj, int): raise_with_traceback(_param_type_mismatch_exception(obj, int, param_name)) return obj def float_param(obj, param_name): if not isinstance(obj, float): raise_with_traceback(_param_type_mismatch_exception(obj, float, param_name)) return obj def opt_float_param(obj, param_name): if obj is not None and not isinstance(obj, float): raise_with_traceback(_param_type_mismatch_exception(obj, float, param_name)) return obj def _is_str(obj): return isinstance(obj, string_types) def str_param(obj, param_name): if not _is_str(obj): raise_with_traceback(_param_type_mismatch_exception(obj, str, param_name)) return obj def opt_str_param(obj, param_name, default=None): if obj is not None and not isinstance(obj, string_types): raise_with_traceback(_param_type_mismatch_exception(obj, str, param_name)) return default if obj is None else obj def bool_param(obj, param_name): if not isinstance(obj, bool): raise_with_traceback(_param_type_mismatch_exception(obj, bool, param_name)) return obj def opt_bool_param(obj, param_name, default=None): if obj is not None and not isinstance(obj, bool): raise_with_traceback(_param_type_mismatch_exception(obj, bool, param_name)) return default if obj is None else obj def is_list(obj_list, of_type=None, desc=None): if not isinstance(obj_list, list): raise_with_traceback(_type_mismatch_error(obj_list, list, desc)) if not of_type: return obj_list return _check_list_items(obj_list, of_type) def list_param(obj_list, param_name, of_type=None): if not isinstance(obj_list, list): raise_with_traceback(_param_type_mismatch_exception(obj_list, list, param_name)) if not of_type: return obj_list return _check_list_items(obj_list, of_type) def tuple_param(obj, param_name): if not isinstance(obj, tuple): raise_with_traceback(_param_type_mismatch_exception(obj, tuple, param_name)) return obj def opt_tuple_param(obj, param_name, default=None): if obj is not None and not isinstance(obj, tuple): raise_with_traceback(_param_type_mismatch_exception(obj, tuple, param_name)) return default if obj is None else obj def 
_check_list_items(obj_list, of_type): if of_type is str: of_type = string_types for obj in obj_list: if not isinstance(obj, of_type): raise_with_traceback( CheckError( 'Member of list mismatches type. Expected {of_type}. Got {obj_repr} of type ' '{obj_type}.'.format(of_type=of_type, obj_repr=repr(obj), obj_type=type(obj)) ) ) return obj_list def opt_list_param(obj_list, param_name, of_type=None): '''Ensures argument obj_list is a list or None; in the latter case, instantiates an empty list and returns it. If the of_type argument is provided, also ensures that list items conform to the type specified by of_type. ''' if obj_list is not None and not isinstance(obj_list, list): raise_with_traceback(_param_type_mismatch_exception(obj_list, list, param_name)) if not obj_list: return [] if not of_type: return obj_list return _check_list_items(obj_list, of_type) def _check_key_value_types(obj, key_type, value_type, key_check=isinstance, value_check=isinstance): '''Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types specified by key_type, value_type. ''' if not isinstance(obj, dict): raise_with_traceback(_type_mismatch_error(obj, dict)) if key_type is str: key_type = string_types if value_type is str: value_type = string_types for key, value in obj.items(): if key_type and not key_check(key, key_type): raise_with_traceback( CheckError( 'Key in dictionary mismatches type. Expected {key_type}. Got {obj_repr}'.format( key_type=repr(key_type), obj_repr=repr(key) ) ) ) if value_type and not value_check(value, value_type): raise_with_traceback( CheckError( 'Value in dictionary mismatches expected type for key {key}. Expected value ' 'of type {vtype}. Got value {value} of type {obj_type}.'.format( vtype=repr(value_type), obj_type=type(value), key=key, value=value ) ) ) return obj def dict_param(obj, param_name, key_type=None, value_type=None): '''Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise returns obj. ''' if not isinstance(obj, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) if not (key_type or value_type): return obj return _check_key_value_types(obj, key_type, value_type) def opt_dict_param(obj, param_name, key_type=None, value_type=None, value_class=None): '''Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty dictionary. 
''' if obj is not None and not isinstance(obj, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) if not obj: return {} if value_class: return _check_key_value_types(obj, key_type, value_type=value_class, value_check=issubclass) return _check_key_value_types(obj, key_type, value_type) def _check_two_dim_key_value_types(obj, key_type, param_name, value_type): _check_key_value_types(obj, key_type, dict) # check level one for level_two_dict in obj.values(): if not isinstance(level_two_dict, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) _check_key_value_types(level_two_dict, key_type, value_type) # check level two return obj def two_dim_dict_param(obj, param_name, key_type=string_types, value_type=None): if not isinstance(obj, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) return _check_two_dim_key_value_types(obj, key_type, param_name, value_type) def opt_two_dim_dict_param(obj, param_name, key_type=string_types, value_type=None): if obj is not None and not isinstance(obj, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) if not obj: return {} return _check_two_dim_key_value_types(obj, key_type, param_name, value_type) def type_param(obj, param_name): if not isinstance(obj, type): raise_with_traceback(_not_type_param_subclass_mismatch_exception(obj, param_name)) return obj def opt_type_param(obj, param_name, default=None): if obj is not None and not isinstance(obj, type): raise_with_traceback(_not_type_param_subclass_mismatch_exception(obj, param_name)) return obj if obj is not None else default def subclass_param(obj, param_name, superclass): type_param(obj, param_name) if not issubclass(obj, superclass): raise_with_traceback(_param_subclass_mismatch_exception(obj, superclass, param_name)) return obj def _element_check_error(key, value, ddict, ttype): return ElementCheckError( 'Value {value} from key {key} is not a {ttype}. 
Dict: {ddict}'.format( key=key, value=repr(value), ddict=repr(ddict), ttype=repr(ttype) ) ) def generator(obj): if not inspect.isgenerator(obj): raise ParameterCheckError( 'Not a generator (return value of function that yields) Got {obj} instead'.format( obj=obj ) ) return obj def opt_generator(obj): if obj is not None and not inspect.isgenerator(obj): raise ParameterCheckError( 'Not a generator (return value of function that yields) Got {obj} instead'.format( obj=obj ) ) return obj def generator_param(obj, param_name): if not inspect.isgenerator(obj): raise ParameterCheckError( ( 'Param "{name}" is not a generator (return value of function that yields) Got ' '{obj} instead' ).format(name=param_name, obj=obj) ) return obj def opt_generator_param(obj, param_name): if obj is not None and not inspect.isgenerator(obj): raise ParameterCheckError( ( 'Param "{name}" is not a generator (return value of function that yields) Got ' '{obj} instead' ).format(name=param_name, obj=obj) ) return obj def list_elem(ddict, key): dict_param(ddict, 'ddict') str_param(key, 'key') value = ddict[key] if not isinstance(value, list): raise_with_traceback(_element_check_error(key, value, ddict, list)) return value def opt_list_elem(ddict, key): dict_param(ddict, 'ddict') str_param(key, 'key') value = ddict.get(key) if value is None: return [] if not isinstance(value, list): raise_with_traceback(_element_check_error(key, value, ddict, list)) return value def dict_elem(ddict, key): dict_param(ddict, 'ddict') str_param(key, 'key') if key not in ddict: raise_with_traceback( CheckError('{key} not present in dictionary {ddict}'.format(key=key, ddict=ddict)) ) value = ddict[key] if not isinstance(value, dict): raise_with_traceback(_element_check_error(key, value, ddict, dict)) return value def opt_dict_elem(ddict, key): dict_param(ddict, 'ddict') str_param(key, 'key') value = ddict.get(key) if value is None: return {} if not isinstance(value, dict): raise_with_traceback(_element_check_error(key, value, ddict, list)) return value def bool_elem(ddict, key): dict_param(ddict, 'ddict') str_param(key, 'key') value = ddict[key] if not isinstance(value, bool): raise_with_traceback(_element_check_error(key, value, ddict, bool)) return value def opt_str_elem(ddict, key): dict_param(ddict, 'ddict') str_param(key, 'key') value = ddict.get(key) if value is None: return None if not _is_str(value): raise_with_traceback(_element_check_error(key, value, ddict, str)) return value def str_elem(ddict, key): dict_param(ddict, 'ddict') str_param(key, 'key') value = ddict[key] if not _is_str(value): raise_with_traceback(_element_check_error(key, value, ddict, str)) return value
1
13153
what does this do that str_param does not?
dagster-io-dagster
py
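The patch above answers the reviewer's question "what does this do that str_param does not?": opt_nonempty_str_param applies the same type check as opt_str_param, but it also treats the empty string like None and falls back to the default. A minimal Python sketch of that difference follows; the plain TypeError stands in for the project's _param_type_mismatch_exception / raise_with_traceback machinery, and isinstance(obj, str) stands in for the six string_types check.

def _type_error(obj, param_name):
    return TypeError('Param "{0}" is not a str. Got {1!r}.'.format(param_name, obj))


def opt_str_param(obj, param_name, default=None):
    # Accepts None or any string, including the empty string.
    if obj is not None and not isinstance(obj, str):
        raise _type_error(obj, param_name)
    return default if obj is None else obj


def opt_nonempty_str_param(obj, param_name, default=None):
    # Same type check, but an empty string also falls back to the default.
    if obj is not None and not isinstance(obj, str):
        raise _type_error(obj, param_name)
    return default if obj is None or obj == '' else obj


assert opt_str_param('', 'name', default='fallback') == ''
assert opt_nonempty_str_param('', 'name', default='fallback') == 'fallback'
assert opt_nonempty_str_param('x', 'name', default='fallback') == 'x'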
@@ -39,6 +39,11 @@ func GRPCDialOption(api string) grpc.DialOption { return grpc.WithUserAgent(userAgentString(api)) } +// AzureUserAgentPrefix returns a prefix that is used to set Azure SDK User-Agent to help with diagnostics. +func AzureUserAgentPrefix() string { + return fmt.Sprintf("%s/%s", prefix, version) +} + func userAgentString(api string) string { return fmt.Sprintf("%s/%s/%s", prefix, api, version) }
1
// Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package useragent includes constants and utilities for setting the User-Agent
// for Go Cloud connections to GCP.
package useragent // import "gocloud.dev/internal/useragent"

import (
	"fmt"
	"net/http"

	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

const (
	prefix  = "go-cloud"
	version = "0.9.0"
)

// ClientOption returns an option.ClientOption that sets a Go Cloud User-Agent.
func ClientOption(api string) option.ClientOption {
	return option.WithUserAgent(userAgentString(api))
}

// GRPCDialOption returns a grpc.DialOption that sets a Go Cloud User-Agent.
func GRPCDialOption(api string) grpc.DialOption {
	return grpc.WithUserAgent(userAgentString(api))
}

func userAgentString(api string) string {
	return fmt.Sprintf("%s/%s/%s", prefix, api, version)
}

// userAgentTransport wraps an http.RoundTripper, adding a User-Agent header
// to each request.
type userAgentTransport struct {
	base http.RoundTripper
	api  string
}

func (t *userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone the request to avoid mutating it.
	newReq := *req
	newReq.Header = make(http.Header)
	for k, vv := range req.Header {
		newReq.Header[k] = vv
	}
	// Append to the User-Agent string to preserve other information.
	newReq.Header.Set("User-Agent", req.UserAgent()+" "+userAgentString(t.api))
	return t.base.RoundTrip(&newReq)
}

// HTTPClient wraps client and appends a Go Cloud string to the User-Agent
// header for all requests.
func HTTPClient(client *http.Client, api string) *http.Client {
	c := *client
	c.Transport = &userAgentTransport{base: c.Transport, api: api}
	return &c
}
1
13502
For GCP we included the API name (see below) so you can distinguish between uses other than `blob` once they exist, WDYT?
google-go-cloud
go
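The go-cloud review comment above is about the shape of the User-Agent string: userAgentString produces "go-cloud/<api>/<version>", so traffic can be attributed to a specific API, while the proposed AzureUserAgentPrefix drops the API component and yields only "go-cloud/<version>". The short sketch below is written in Python purely to illustrate the two string shapes; it is not the Go Cloud code.

PREFIX = 'go-cloud'
VERSION = '0.9.0'


def user_agent(api=None):
    # With an API name the consumer is distinguishable (the GCP style);
    # without one, only the library and version are visible (the proposed Azure prefix).
    if api:
        return '{0}/{1}/{2}'.format(PREFIX, api, VERSION)
    return '{0}/{1}'.format(PREFIX, VERSION)


assert user_agent('blob') == 'go-cloud/blob/0.9.0'
assert user_agent() == 'go-cloud/0.9.0'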
@@ -1,6 +1,14 @@ import os import pickle +from io import BytesIO + +try: + import boto3 + import botocore +except ImportError: + pass + from dagster import check, seven from dagster.utils import mkdir_p
1
import os
import pickle

from dagster import check, seven
from dagster.utils import mkdir_p

from .execution_context import SystemPipelineExecutionContext
from .types.runtime import RuntimeType


class ObjectStore:
    pass


class FileSystemObjectStore(ObjectStore):
    def __init__(self, run_id):
        check.str_param(run_id, 'run_id')
        self.root = os.path.join(
            seven.get_system_temp_directory(), 'dagster', 'runs', run_id, 'files'
        )

    def set_object(self, obj, context, runtime_type, paths):  # pylint: disable=unused-argument
        check.inst_param(context, 'context', SystemPipelineExecutionContext)
        check.inst_param(runtime_type, 'runtime_type', RuntimeType)
        check.list_param(paths, 'paths', of_type=str)
        check.param_invariant(len(paths) > 0, 'paths')

        if len(paths) > 1:
            target_dir = os.path.join(self.root, *paths[:-1])
            mkdir_p(target_dir)
            target_path = os.path.join(target_dir, paths[-1])
        else:
            check.invariant(len(paths) == 1)
            # A single-element path lives directly under the root directory.
            target_path = os.path.join(self.root, paths[0])

        check.invariant(not os.path.exists(target_path))
        with open(target_path, 'wb') as ff:
            # Hardcode pickle for now
            pickle.dump(obj, ff)

    def get_object(self, context, runtime_type, paths):  # pylint: disable=unused-argument
        check.list_param(paths, 'paths', of_type=str)
        check.param_invariant(len(paths) > 0, 'paths')
        target_path = os.path.join(self.root, *paths)
        with open(target_path, 'rb') as ff:
            return pickle.load(ff)

    def has_object(self, _cxt, paths):
        target_path = os.path.join(self.root, *paths)
        return os.path.exists(target_path)
1
12704
what would you think about doing this include on-demand within S3ObjectStore methods and then failing in a loud way?
dagster-io-dagster
py
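The reviewer's suggestion on the record above is to move the boto3 import out of the module-level try/except (which silently swallows the ImportError) and into the S3-backed store itself, failing loudly at the point of use. A hypothetical sketch of that shape follows; the S3ObjectStore constructor, the bucket/key layout, and the _import_boto3 helper are illustrative assumptions, not dagster's actual implementation.

import pickle


def _import_boto3():
    # Fail loudly, at the point of use, if the optional dependency is missing.
    try:
        import boto3
    except ImportError:
        raise ImportError(
            'boto3 is required to use S3ObjectStore; install it with "pip install boto3"'
        )
    return boto3


class S3ObjectStore:
    # Hypothetical S3-backed counterpart to the FileSystemObjectStore above.
    def __init__(self, run_id, bucket):
        self.bucket = bucket
        self.root = 'dagster/runs/{0}/files'.format(run_id)
        self.s3 = _import_boto3().client('s3')

    def set_object(self, obj, paths):
        key = '/'.join([self.root] + list(paths))
        self.s3.put_object(Bucket=self.bucket, Key=key, Body=pickle.dumps(obj))

    def get_object(self, paths):
        key = '/'.join([self.root] + list(paths))
        return pickle.loads(self.s3.get_object(Bucket=self.bucket, Key=key)['Body'].read())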
@@ -2777,6 +2777,14 @@ void nano::active_transactions::confirm_frontiers (nano::transaction const & tra size_t elections_count (0); for (auto i (node.store.latest_begin (transaction_a, next_frontier_account)), n (node.store.latest_end ()); i != n && elections_count < max_elections; ++i) { + { + std::lock_guard<std::mutex> guard (mutex); + if (stopped) + { + break; + } + } + nano::account_info info (i->second); if (info.block_count != info.confirmation_height) {
1
#include <nano/node/node.hpp> #include <nano/crypto_lib/random_pool.hpp> #include <nano/lib/interface.h> #include <nano/lib/timer.hpp> #include <nano/lib/utility.hpp> #include <nano/node/common.hpp> #include <nano/rpc/rpc.hpp> #include <algorithm> #include <cstdlib> #include <future> #include <numeric> #include <sstream> #include <boost/polymorphic_cast.hpp> #include <boost/property_tree/json_parser.hpp> double constexpr nano::node::price_max; double constexpr nano::node::free_cutoff; size_t constexpr nano::active_transactions::max_broadcast_queue; size_t constexpr nano::block_arrival::arrival_size_min; std::chrono::seconds constexpr nano::block_arrival::arrival_time_min; namespace nano { extern unsigned char nano_bootstrap_weights_live[]; extern size_t nano_bootstrap_weights_live_size; extern unsigned char nano_bootstrap_weights_beta[]; extern size_t nano_bootstrap_weights_beta_size; } nano::network::network (nano::node & node_a, uint16_t port_a) : buffer_container (node_a.stats, nano::network::buffer_size, 4096), // 2Mb receive buffer resolver (node_a.io_ctx), node (node_a), udp_channels (node_a, port_a), disconnect_observer ([]() {}) { boost::thread::attributes attrs; nano::thread_attributes::set (attrs); for (size_t i = 0; i < node.config.network_threads; ++i) { packet_processing_threads.push_back (boost::thread (attrs, [this]() { nano::thread_role::set (nano::thread_role::name::packet_processing); try { udp_channels.process_packets (); } catch (boost::system::error_code & ec) { this->node.logger.try_log (FATAL_LOG_PREFIX, ec.message ()); release_assert (false); } catch (std::error_code & ec) { this->node.logger.try_log (FATAL_LOG_PREFIX, ec.message ()); release_assert (false); } catch (std::runtime_error & err) { this->node.logger.try_log (FATAL_LOG_PREFIX, err.what ()); release_assert (false); } catch (...) 
{ this->node.logger.try_log (FATAL_LOG_PREFIX, "Unknown exception"); release_assert (false); } if (this->node.config.logging.network_packet_logging ()) { this->node.logger.try_log ("Exiting packet processing thread"); } })); } } nano::network::~network () { for (auto & thread : packet_processing_threads) { thread.join (); } } void nano::network::start () { ongoing_cleanup (); udp_channels.start (); } void nano::network::stop () { udp_channels.stop (); resolver.cancel (); buffer_container.stop (); } void nano::network::send_keepalive (nano::transport::channel const & channel_a) { nano::keepalive message; udp_channels.random_fill (message.peers); channel_a.send (message); } void nano::node::keepalive (std::string const & address_a, uint16_t port_a) { auto node_l (shared_from_this ()); network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (address_a, std::to_string (port_a)), [node_l, address_a, port_a](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) { if (!ec) { for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i) { auto endpoint (nano::transport::map_endpoint_to_v6 (i->endpoint ())); nano::transport::channel_udp channel (node_l->network.udp_channels, endpoint); node_l->network.send_keepalive (channel); } } else { node_l->logger.try_log (boost::str (boost::format ("Error resolving address: %1%:%2%: %3%") % address_a % port_a % ec.message ())); } }); } void nano::network::send_node_id_handshake (nano::endpoint const & endpoint_a, boost::optional<nano::uint256_union> const & query, boost::optional<nano::uint256_union> const & respond_to) { boost::optional<std::pair<nano::account, nano::signature>> response (boost::none); if (respond_to) { response = std::make_pair (node.node_id.pub, nano::sign_message (node.node_id.prv, node.node_id.pub, *respond_to)); assert (!nano::validate_message (response->first, *respond_to, response->second)); } nano::node_id_handshake message (query, response); if (node.config.logging.network_node_id_handshake_logging ()) { node.logger.try_log (boost::str (boost::format ("Node ID handshake sent with node ID %1% to %2%: query %3%, respond_to %4% (signature %5%)") % node.node_id.pub.to_account () % endpoint_a % (query ? query->to_string () : std::string ("[none]")) % (respond_to ? respond_to->to_string () : std::string ("[none]")) % (response ? 
response->second.to_string () : std::string ("[none]")))); } nano::transport::channel_udp channel (udp_channels, endpoint_a); channel.send (message); } template <typename T> bool confirm_block (nano::transaction const & transaction_a, nano::node & node_a, T & list_a, std::shared_ptr<nano::block> block_a, bool also_publish) { bool result (false); if (node_a.config.enable_voting) { auto hash (block_a->hash ()); // Search in cache auto votes (node_a.votes_cache.find (hash)); if (votes.empty ()) { // Generate new vote node_a.wallets.foreach_representative (transaction_a, [&result, &list_a, &node_a, &transaction_a, &hash](nano::public_key const & pub_a, nano::raw_key const & prv_a) { result = true; auto vote (node_a.store.vote_generate (transaction_a, pub_a, prv_a, std::vector<nano::block_hash> (1, hash))); nano::confirm_ack confirm (vote); auto vote_bytes = confirm.to_bytes (); for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j) { j->get ().send_buffer (vote_bytes, nano::stat::detail::confirm_ack); } node_a.votes_cache.add (vote); }); } else { // Send from cache for (auto & vote : votes) { nano::confirm_ack confirm (vote); auto vote_bytes = confirm.to_bytes (); for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j) { j->get ().send_buffer (vote_bytes, nano::stat::detail::confirm_ack); } } } // Republish if required if (also_publish) { nano::publish publish (block_a); auto publish_bytes (publish.to_bytes ()); for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j) { j->get ().send_buffer (publish_bytes, nano::stat::detail::publish); } } } return result; } bool confirm_block (nano::transaction const & transaction_a, nano::node & node_a, nano::transport::channel const & channel_a, std::shared_ptr<nano::block> block_a, bool also_publish) { std::array<std::reference_wrapper<nano::transport::channel const>, 1> endpoints = { channel_a }; auto result (confirm_block (transaction_a, node_a, endpoints, std::move (block_a), also_publish)); return result; } void nano::network::confirm_hashes (nano::transaction const & transaction_a, nano::transport::channel const & channel_a, std::vector<nano::block_hash> blocks_bundle_a) { if (node.config.enable_voting) { node.wallets.foreach_representative (transaction_a, [this, &blocks_bundle_a, &channel_a, &transaction_a](nano::public_key const & pub_a, nano::raw_key const & prv_a) { auto vote (this->node.store.vote_generate (transaction_a, pub_a, prv_a, blocks_bundle_a)); nano::confirm_ack confirm (vote); std::shared_ptr<std::vector<uint8_t>> bytes (new std::vector<uint8_t>); { nano::vectorstream stream (*bytes); confirm.serialize (stream); } channel_a.send_buffer (bytes, nano::stat::detail::confirm_ack); this->node.votes_cache.add (vote); }); } } bool nano::network::send_votes_cache (nano::transport::channel const & channel_a, nano::block_hash const & hash_a) { // Search in cache auto votes (node.votes_cache.find (hash_a)); // Send from cache for (auto & vote : votes) { nano::confirm_ack confirm (vote); auto vote_bytes = confirm.to_bytes (); channel_a.send_buffer (vote_bytes, nano::stat::detail::confirm_ack); } // Returns true if votes were sent bool result (!votes.empty ()); return result; } void nano::network::flood_message (nano::message const & message_a) { auto list (node.network.udp_channels.list_fanout ()); for (auto i (list.begin ()), n (list.end ()); i != n; ++i) { (*i)->send (message_a); } } void nano::network::flood_block_batch (std::deque<std::shared_ptr<nano::block>> blocks_a, unsigned delay_a) { auto block (blocks_a.front ()); 
blocks_a.pop_front (); flood_block (block); if (!blocks_a.empty ()) { std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + std::rand () % delay_a), [node_w, blocks_a, delay_a]() { if (auto node_l = node_w.lock ()) { node_l->network.flood_block_batch (blocks_a, delay_a); } }); } } void nano::network::broadcast_confirm_req (std::shared_ptr<nano::block> block_a) { auto list (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> (node.rep_crawler.representative_endpoints (std::numeric_limits<size_t>::max ()))); if (list->empty () || node.rep_crawler.total_weight () < node.config.online_weight_minimum.number ()) { // broadcast request to all peers (with max limit 2 * sqrt (peers count)) auto peers (node.network.udp_channels.list (std::min (static_cast<size_t> (100), 2 * node.network.size_sqrt ()))); list->clear (); for (auto & peer : peers) { list->push_back (peer); } } /* * In either case (broadcasting to all representatives, or broadcasting to * all peers because there are not enough connected representatives), * limit each instance to a single random up-to-32 selection. The invoker * of "broadcast_confirm_req" will be responsible for calling it again * if the votes for a block have not arrived in time. */ const size_t max_endpoints = 32; random_pool::shuffle (list->begin (), list->end ()); if (list->size () > max_endpoints) { list->erase (list->begin () + max_endpoints, list->end ()); } broadcast_confirm_req_base (block_a, list, 0); } void nano::network::broadcast_confirm_req_base (std::shared_ptr<nano::block> block_a, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>> endpoints_a, unsigned delay_a, bool resumption) { const size_t max_reps = 10; if (!resumption && node.config.logging.network_logging ()) { node.logger.try_log (boost::str (boost::format ("Broadcasting confirm req for block %1% to %2% representatives") % block_a->hash ().to_string () % endpoints_a->size ())); } auto count (0); while (!endpoints_a->empty () && count < max_reps) { nano::confirm_req req (block_a); endpoints_a->back ()->send (req); endpoints_a->pop_back (); count++; } if (!endpoints_a->empty ()) { delay_a += std::rand () % broadcast_interval_ms; std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), [node_w, block_a, endpoints_a, delay_a]() { if (auto node_l = node_w.lock ()) { node_l->network.broadcast_confirm_req_base (block_a, endpoints_a, delay_a, true); } }); } } void nano::network::broadcast_confirm_req_batch (std::unordered_map<std::shared_ptr<nano::transport::channel>, std::vector<std::pair<nano::block_hash, nano::block_hash>>> request_bundle_a, unsigned delay_a, bool resumption) { const size_t max_reps = 10; if (!resumption && node.config.logging.network_logging ()) { node.logger.try_log (boost::str (boost::format ("Broadcasting batch confirm req to %1% representatives") % request_bundle_a.size ())); } auto count (0); while (!request_bundle_a.empty () && count < max_reps) { auto j (request_bundle_a.begin ()); count++; std::vector<std::pair<nano::block_hash, nano::block_hash>> roots_hashes; // Limit max request size hash + root to 6 pairs while (roots_hashes.size () <= confirm_req_hashes_max && !j->second.empty ()) { roots_hashes.push_back (j->second.back ()); j->second.pop_back (); } nano::confirm_req req (roots_hashes); j->first->send (req); if (j->second.empty ()) { request_bundle_a.erase (j); } 
} if (!request_bundle_a.empty ()) { std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), [node_w, request_bundle_a, delay_a]() { if (auto node_l = node_w.lock ()) { node_l->network.broadcast_confirm_req_batch (request_bundle_a, delay_a + 50, true); } }); } } void nano::network::broadcast_confirm_req_batch (std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> deque_a, unsigned delay_a) { auto pair (deque_a.front ()); deque_a.pop_front (); auto block (pair.first); // confirm_req to representatives auto endpoints (pair.second); if (!endpoints->empty ()) { broadcast_confirm_req_base (block, endpoints, delay_a); } /* Continue while blocks remain Broadcast with random delay between delay_a & 2*delay_a */ if (!deque_a.empty ()) { std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + std::rand () % delay_a), [node_w, deque_a, delay_a]() { if (auto node_l = node_w.lock ()) { node_l->network.broadcast_confirm_req_batch (deque_a, delay_a); } }); } } namespace { class network_message_visitor : public nano::message_visitor { public: network_message_visitor (nano::node & node_a, std::shared_ptr<nano::transport::channel> channel_a) : node (node_a), channel (channel_a) { } void keepalive (nano::keepalive const & message_a) override { if (node.config.logging.network_keepalive_logging ()) { node.logger.try_log (boost::str (boost::format ("Received keepalive message from %1%") % channel->to_string ())); } node.stats.inc (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in); node.network.merge_peers (message_a.peers); } void publish (nano::publish const & message_a) override { if (node.config.logging.network_message_logging ()) { node.logger.try_log (boost::str (boost::format ("Publish message from %1% for %2%") % channel->to_string () % message_a.block->hash ().to_string ())); } node.stats.inc (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in); if (!node.block_processor.full ()) { node.process_active (message_a.block); } node.active.publish (message_a.block); } void confirm_req (nano::confirm_req const & message_a) override { if (node.config.logging.network_message_logging ()) { if (!message_a.roots_hashes.empty ()) { node.logger.try_log (boost::str (boost::format ("Confirm_req message from %1% for hashes:roots %2%") % channel->to_string () % message_a.roots_string ())); } else { node.logger.try_log (boost::str (boost::format ("Confirm_req message from %1% for %2%") % channel->to_string () % message_a.block->hash ().to_string ())); } } node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_req, nano::stat::dir::in); // Don't load nodes with disabled voting if (node.config.enable_voting && node.wallets.reps_count) { if (message_a.block != nullptr) { auto hash (message_a.block->hash ()); if (!node.network.send_votes_cache (*channel, hash)) { auto transaction (node.store.tx_begin_read ()); auto successor (node.ledger.successor (transaction, message_a.block->qualified_root ())); if (successor != nullptr) { auto same_block (successor->hash () == hash); confirm_block (transaction, node, std::cref (*channel), std::move (successor), !same_block); } } } else if (!message_a.roots_hashes.empty ()) { auto transaction (node.store.tx_begin_read ()); std::vector<nano::block_hash> blocks_bundle; for (auto & 
root_hash : message_a.roots_hashes) { if (!node.network.send_votes_cache (*channel, root_hash.first) && node.store.block_exists (transaction, root_hash.first)) { blocks_bundle.push_back (root_hash.first); } else { nano::block_hash successor (0); // Search for block root successor = node.store.block_successor (transaction, root_hash.second); // Search for account root if (successor.is_zero () && node.store.account_exists (transaction, root_hash.second)) { nano::account_info info; auto error (node.store.account_get (transaction, root_hash.second, info)); assert (!error); successor = info.open_block; } if (!successor.is_zero ()) { if (!node.network.send_votes_cache (*channel, successor)) { blocks_bundle.push_back (successor); } auto successor_block (node.store.block_get (transaction, successor)); assert (successor_block != nullptr); nano::publish publish (successor_block); channel->send (publish); } } } if (!blocks_bundle.empty ()) { node.network.confirm_hashes (transaction, *channel, blocks_bundle); } } } } void confirm_ack (nano::confirm_ack const & message_a) override { if (node.config.logging.network_message_logging ()) { node.logger.try_log (boost::str (boost::format ("Received confirm_ack message from %1% for %2%sequence %3%") % channel->to_string () % message_a.vote->hashes_string () % std::to_string (message_a.vote->sequence))); } node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::in); for (auto & vote_block : message_a.vote->blocks) { if (!vote_block.which ()) { auto block (boost::get<std::shared_ptr<nano::block>> (vote_block)); if (!node.block_processor.full ()) { node.process_active (block); } node.active.publish (block); } } node.vote_processor.vote (message_a.vote, channel); } void bulk_pull (nano::bulk_pull const &) override { assert (false); } void bulk_pull_account (nano::bulk_pull_account const &) override { assert (false); } void bulk_push (nano::bulk_push const &) override { assert (false); } void frontier_req (nano::frontier_req const &) override { assert (false); } void node_id_handshake (nano::node_id_handshake const & message_a) override { node.stats.inc (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in); } nano::node & node; std::shared_ptr<nano::transport::channel> channel; }; } // Send keepalives to all the peers we've been notified of void nano::network::merge_peers (std::array<nano::endpoint, 8> const & peers_a) { for (auto i (peers_a.begin ()), j (peers_a.end ()); i != j; ++i) { if (!udp_channels.reachout (*i, node.config.allow_local_peers)) { nano::transport::channel_udp channel (node.network.udp_channels, *i); send_keepalive (channel); } } } bool nano::operation::operator> (nano::operation const & other_a) const { return wakeup > other_a.wakeup; } nano::alarm::alarm (boost::asio::io_context & io_ctx_a) : io_ctx (io_ctx_a), thread ([this]() { nano::thread_role::set (nano::thread_role::name::alarm); run (); }) { } nano::alarm::~alarm () { add (std::chrono::steady_clock::now (), nullptr); thread.join (); } void nano::alarm::run () { std::unique_lock<std::mutex> lock (mutex); auto done (false); while (!done) { if (!operations.empty ()) { auto & operation (operations.top ()); if (operation.function) { if (operation.wakeup <= std::chrono::steady_clock::now ()) { io_ctx.post (operation.function); operations.pop (); } else { auto wakeup (operation.wakeup); condition.wait_until (lock, wakeup); } } else { done = true; } } else { condition.wait (lock); } } } void nano::alarm::add 
(std::chrono::steady_clock::time_point const & wakeup_a, std::function<void()> const & operation) { { std::lock_guard<std::mutex> lock (mutex); operations.push (nano::operation ({ wakeup_a, operation })); } condition.notify_all (); } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (alarm & alarm, const std::string & name) { auto composite = std::make_unique<seq_con_info_composite> (name); size_t count = 0; { std::lock_guard<std::mutex> guard (alarm.mutex); count = alarm.operations.size (); } auto sizeof_element = sizeof (decltype (alarm.operations)::value_type); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "operations", count, sizeof_element })); return composite; } } bool nano::node_init::error () const { return block_store_init || wallet_init || wallets_store_init; } nano::vote_processor::vote_processor (nano::node & node_a) : node (node_a), started (false), stopped (false), active (false), thread ([this]() { nano::thread_role::set (nano::thread_role::name::vote_processing); process_loop (); }) { std::unique_lock<std::mutex> lock (mutex); while (!started) { condition.wait (lock); } } void nano::vote_processor::process_loop () { std::chrono::steady_clock::time_point start_time, end_time; std::chrono::steady_clock::duration elapsed_time; std::chrono::milliseconds elapsed_time_ms; uint64_t elapsed_time_ms_int; bool log_this_iteration; std::unique_lock<std::mutex> lock (mutex); started = true; lock.unlock (); condition.notify_all (); lock.lock (); while (!stopped) { if (!votes.empty ()) { std::deque<std::pair<std::shared_ptr<nano::vote>, std::shared_ptr<nano::transport::channel>>> votes_l; votes_l.swap (votes); log_this_iteration = false; if (node.config.logging.network_logging () && votes_l.size () > 50) { /* * Only log the timing information for this iteration if * there are a sufficient number of items for it to be relevant */ log_this_iteration = true; start_time = std::chrono::steady_clock::now (); } active = true; lock.unlock (); verify_votes (votes_l); { std::unique_lock<std::mutex> active_single_lock (node.active.mutex); auto transaction (node.store.tx_begin_read ()); uint64_t count (1); for (auto & i : votes_l) { vote_blocking (transaction, i.first, i.second, true); // Free active_transactions mutex each 100 processed votes if (count % 100 == 0) { active_single_lock.unlock (); active_single_lock.lock (); } count++; } } lock.lock (); active = false; lock.unlock (); condition.notify_all (); lock.lock (); if (log_this_iteration) { end_time = std::chrono::steady_clock::now (); elapsed_time = end_time - start_time; elapsed_time_ms = std::chrono::duration_cast<std::chrono::milliseconds> (elapsed_time); elapsed_time_ms_int = elapsed_time_ms.count (); if (elapsed_time_ms_int >= 100) { /* * If the time spent was less than 100ms then * the results are probably not useful as well, * so don't spam the logs. 
*/ node.logger.try_log (boost::str (boost::format ("Processed %1% votes in %2% milliseconds (rate of %3% votes per second)") % votes_l.size () % elapsed_time_ms_int % ((votes_l.size () * 1000ULL) / elapsed_time_ms_int))); } } } else { condition.wait (lock); } } } void nano::vote_processor::vote (std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> channel_a) { std::unique_lock<std::mutex> lock (mutex); if (!stopped) { bool process (false); /* Random early delection levels Always process votes for test network (process = true) Stop processing with max 144 * 1024 votes */ if (!node.network_params.network.is_test_network ()) { // Level 0 (< 0.1%) if (votes.size () < 96 * 1024) { process = true; } // Level 1 (0.1-1%) else if (votes.size () < 112 * 1024) { process = (representatives_1.find (vote_a->account) != representatives_1.end ()); } // Level 2 (1-5%) else if (votes.size () < 128 * 1024) { process = (representatives_2.find (vote_a->account) != representatives_2.end ()); } // Level 3 (> 5%) else if (votes.size () < 144 * 1024) { process = (representatives_3.find (vote_a->account) != representatives_3.end ()); } } else { // Process for test network process = true; } if (process) { votes.push_back (std::make_pair (vote_a, channel_a)); lock.unlock (); condition.notify_all (); lock.lock (); } else { node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_overflow); } } } void nano::vote_processor::verify_votes (std::deque<std::pair<std::shared_ptr<nano::vote>, std::shared_ptr<nano::transport::channel>>> & votes_a) { auto size (votes_a.size ()); std::vector<unsigned char const *> messages; messages.reserve (size); std::vector<nano::uint256_union> hashes; hashes.reserve (size); std::vector<size_t> lengths (size, sizeof (nano::uint256_union)); std::vector<unsigned char const *> pub_keys; pub_keys.reserve (size); std::vector<unsigned char const *> signatures; signatures.reserve (size); std::vector<int> verifications; verifications.resize (size); for (auto & vote : votes_a) { hashes.push_back (vote.first->hash ()); messages.push_back (hashes.back ().bytes.data ()); pub_keys.push_back (vote.first->account.bytes.data ()); signatures.push_back (vote.first->signature.bytes.data ()); } nano::signature_check_set check = { size, messages.data (), lengths.data (), pub_keys.data (), signatures.data (), verifications.data () }; node.checker.verify (check); std::remove_reference_t<decltype (votes_a)> result; auto i (0); for (auto & vote : votes_a) { assert (verifications[i] == 1 || verifications[i] == 0); if (verifications[i] == 1) { result.push_back (vote); } ++i; } votes_a.swap (result); } // node.active.mutex lock required nano::vote_code nano::vote_processor::vote_blocking (nano::transaction const & transaction_a, std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> channel_a, bool validated) { assert (!node.active.mutex.try_lock ()); auto result (nano::vote_code::invalid); if (validated || !vote_a->validate ()) { auto max_vote (node.store.vote_max (transaction_a, vote_a)); result = nano::vote_code::replay; if (!node.active.vote (vote_a, true)) { result = nano::vote_code::vote; } switch (result) { case nano::vote_code::vote: node.observers.vote.notify (transaction_a, vote_a, channel_a); case nano::vote_code::replay: // This tries to assist rep nodes that have lost track of their highest sequence number by replaying our highest known vote back to them // Only do this if the sequence number is significantly different to account for network reordering 
// Amplify attack considerations: We're sending out a confirm_ack in response to a confirm_ack for no net traffic increase if (max_vote->sequence > vote_a->sequence + 10000) { nano::confirm_ack confirm (max_vote); channel_a->send_buffer (confirm.to_bytes (), nano::stat::detail::confirm_ack); } break; case nano::vote_code::invalid: assert (false); break; } } std::string status; switch (result) { case nano::vote_code::invalid: status = "Invalid"; node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_invalid); break; case nano::vote_code::replay: status = "Replay"; node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_replay); break; case nano::vote_code::vote: status = "Vote"; node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_valid); break; } if (node.config.logging.vote_logging ()) { node.logger.try_log (boost::str (boost::format ("Vote from: %1% sequence: %2% block(s): %3%status: %4%") % vote_a->account.to_account () % std::to_string (vote_a->sequence) % vote_a->hashes_string () % status)); } return result; } void nano::vote_processor::stop () { { std::lock_guard<std::mutex> lock (mutex); stopped = true; } condition.notify_all (); if (thread.joinable ()) { thread.join (); } } void nano::vote_processor::flush () { std::unique_lock<std::mutex> lock (mutex); while (active || !votes.empty ()) { condition.wait (lock); } } void nano::vote_processor::calculate_weights () { std::unique_lock<std::mutex> lock (mutex); if (!stopped) { representatives_1.clear (); representatives_2.clear (); representatives_3.clear (); auto supply (node.online_reps.online_stake ()); auto transaction (node.store.tx_begin_read ()); for (auto i (node.store.representation_begin (transaction)), n (node.store.representation_end ()); i != n; ++i) { nano::account representative (i->first); auto weight (node.ledger.weight (transaction, representative)); if (weight > supply / 1000) // 0.1% or above (level 1) { representatives_1.insert (representative); if (weight > supply / 100) // 1% or above (level 2) { representatives_2.insert (representative); if (weight > supply / 20) // 5% or above (level 3) { representatives_3.insert (representative); } } } } } } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (node_observers & node_observers, const std::string & name) { auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (collect_seq_con_info (node_observers.blocks, "blocks")); composite->add_component (collect_seq_con_info (node_observers.wallet, "wallet")); composite->add_component (collect_seq_con_info (node_observers.vote, "vote")); composite->add_component (collect_seq_con_info (node_observers.account_balance, "account_balance")); composite->add_component (collect_seq_con_info (node_observers.endpoint, "endpoint")); composite->add_component (collect_seq_con_info (node_observers.disconnect, "disconnect")); return composite; } std::unique_ptr<seq_con_info_component> collect_seq_con_info (vote_processor & vote_processor, const std::string & name) { size_t votes_count = 0; size_t representatives_1_count = 0; size_t representatives_2_count = 0; size_t representatives_3_count = 0; { std::lock_guard<std::mutex> (vote_processor.mutex); votes_count = vote_processor.votes.size (); representatives_1_count = vote_processor.representatives_1.size (); representatives_2_count = vote_processor.representatives_2.size (); representatives_3_count = vote_processor.representatives_3.size (); } auto composite = 
std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "votes", votes_count, sizeof (decltype (vote_processor.votes)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_1", representatives_1_count, sizeof (decltype (vote_processor.representatives_1)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_2", representatives_2_count, sizeof (decltype (vote_processor.representatives_2)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_3", representatives_3_count, sizeof (decltype (vote_processor.representatives_3)::value_type) })); return composite; } std::unique_ptr<seq_con_info_component> collect_seq_con_info (rep_crawler & rep_crawler, const std::string & name) { size_t count = 0; { std::lock_guard<std::mutex> guard (rep_crawler.active_mutex); count = rep_crawler.active.size (); } auto sizeof_element = sizeof (decltype (rep_crawler.active)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "active", count, sizeof_element })); return composite; } std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_processor & block_processor, const std::string & name) { size_t state_blocks_count = 0; size_t blocks_count = 0; size_t blocks_hashes_count = 0; size_t forced_count = 0; size_t rolled_back_count = 0; { std::lock_guard<std::mutex> guard (block_processor.mutex); state_blocks_count = block_processor.state_blocks.size (); blocks_count = block_processor.blocks.size (); blocks_hashes_count = block_processor.blocks_hashes.size (); forced_count = block_processor.forced.size (); rolled_back_count = block_processor.rolled_back.size (); } auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "state_blocks", state_blocks_count, sizeof (decltype (block_processor.state_blocks)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", blocks_count, sizeof (decltype (block_processor.blocks)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks_hashes", blocks_hashes_count, sizeof (decltype (block_processor.blocks_hashes)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "forced", forced_count, sizeof (decltype (block_processor.forced)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "rolled_back", rolled_back_count, sizeof (decltype (block_processor.rolled_back)::value_type) })); composite->add_component (collect_seq_con_info (block_processor.generator, "generator")); return composite; } } nano::node::node (nano::node_init & init_a, boost::asio::io_context & io_ctx_a, uint16_t peering_port_a, boost::filesystem::path const & application_path_a, nano::alarm & alarm_a, nano::logging const & logging_a, nano::work_pool & work_a) : node (init_a, io_ctx_a, application_path_a, alarm_a, nano::node_config (peering_port_a, logging_a), work_a) { } nano::node::node (nano::node_init & init_a, boost::asio::io_context & io_ctx_a, boost::filesystem::path const & application_path_a, nano::alarm & alarm_a, nano::node_config const & config_a, nano::work_pool & work_a, nano::node_flags flags_a, 
bool delay_frontier_confirmation_height_updating) : io_ctx (io_ctx_a), config (config_a), flags (flags_a), alarm (alarm_a), work (work_a), logger (config_a.logging.min_time_between_log_output), store_impl (std::make_unique<nano::mdb_store> (init_a.block_store_init, config.logging, application_path_a / "data.ldb", config_a.lmdb_max_dbs, !flags.disable_unchecked_drop, flags.sideband_batch_size)), store (*store_impl), wallets_store_impl (std::make_unique<nano::mdb_wallets_store> (init_a.wallets_store_init, application_path_a / "wallets.ldb", config_a.lmdb_max_dbs)), wallets_store (*wallets_store_impl), gap_cache (*this), ledger (store, stats, config.epoch_block_link, config.epoch_block_signer), checker (config.signature_checker_threads), network (*this, config.peering_port), bootstrap_initiator (*this), bootstrap (io_ctx_a, config.peering_port, *this), application_path (application_path_a), wallets (init_a.wallet_init, *this), port_mapping (*this), vote_processor (*this), rep_crawler (*this), warmed_up (0), block_processor (*this), block_processor_thread ([this]() { nano::thread_role::set (nano::thread_role::name::block_processing); this->block_processor.process_blocks (); }), online_reps (*this, config.online_weight_minimum.number ()), stats (config.stat_config), vote_uniquer (block_uniquer), active (*this, delay_frontier_confirmation_height_updating), startup_time (std::chrono::steady_clock::now ()) { if (config.websocket_config.enabled) { auto endpoint_l (nano::tcp_endpoint (config.websocket_config.address, config.websocket_config.port)); websocket_server = std::make_shared<nano::websocket::listener> (*this, endpoint_l); this->websocket_server->run (); } wallets.observer = [this](bool active) { observers.wallet.notify (active); }; network.channel_observer = [this](std::shared_ptr<nano::transport::channel> channel_a) { observers.endpoint.notify (channel_a); }; network.disconnect_observer = [this]() { observers.disconnect.notify (); }; if (!config.callback_address.empty ()) { observers.blocks.add ([this](std::shared_ptr<nano::block> block_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a) { if (this->block_arrival.recent (block_a->hash ())) { auto node_l (shared_from_this ()); background ([node_l, block_a, account_a, amount_a, is_state_send_a]() { boost::property_tree::ptree event; event.add ("account", account_a.to_account ()); event.add ("hash", block_a->hash ().to_string ()); std::string block_text; block_a->serialize_json (block_text); event.add ("block", block_text); event.add ("amount", amount_a.to_string_dec ()); if (is_state_send_a) { event.add ("is_send", is_state_send_a); event.add ("subtype", "send"); } // Subtype field else if (block_a->type () == nano::block_type::state) { if (block_a->link ().is_zero ()) { event.add ("subtype", "change"); } else if (amount_a == 0 && !node_l->ledger.epoch_link.is_zero () && node_l->ledger.is_epoch_link (block_a->link ())) { event.add ("subtype", "epoch"); } else { event.add ("subtype", "receive"); } } std::stringstream ostream; boost::property_tree::write_json (ostream, event); ostream.flush (); auto body (std::make_shared<std::string> (ostream.str ())); auto address (node_l->config.callback_address); auto port (node_l->config.callback_port); auto target (std::make_shared<std::string> (node_l->config.callback_target)); auto resolver (std::make_shared<boost::asio::ip::tcp::resolver> (node_l->io_ctx)); resolver->async_resolve (boost::asio::ip::tcp::resolver::query (address, std::to_string (port)), [node_l, 
address, port, target, body, resolver](boost::system::error_code const & ec, boost::asio::ip::tcp::resolver::iterator i_a) { if (!ec) { node_l->do_rpc_callback (i_a, address, port, target, body, resolver); } else { if (node_l->config.logging.callback_logging ()) { node_l->logger.always_log (boost::str (boost::format ("Error resolving callback: %1%:%2%: %3%") % address % port % ec.message ())); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); } }); }); } }); } if (websocket_server) { observers.blocks.add ([this](std::shared_ptr<nano::block> block_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a) { if (this->block_arrival.recent (block_a->hash ())) { std::string subtype; if (is_state_send_a) { subtype = "send"; } else if (block_a->type () == nano::block_type::state) { if (block_a->link ().is_zero ()) { subtype = "change"; } else if (amount_a == 0 && !this->ledger.epoch_link.is_zero () && this->ledger.is_epoch_link (block_a->link ())) { subtype = "epoch"; } else { subtype = "receive"; } } nano::websocket::message_builder builder; auto msg (builder.block_confirmed (block_a, account_a, amount_a, subtype)); this->websocket_server->broadcast (msg); } }); } observers.endpoint.add ([this](std::shared_ptr<nano::transport::channel> channel_a) { this->network.send_keepalive (*channel_a); }); observers.vote.add ([this](nano::transaction const & transaction, std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> channel_a) { this->gap_cache.vote (vote_a); this->online_reps.observe (vote_a->account); nano::uint128_t rep_weight; nano::uint128_t min_rep_weight; { rep_weight = ledger.weight (transaction, vote_a->account); min_rep_weight = online_reps.online_stake () / 1000; } if (rep_weight > min_rep_weight) { bool rep_crawler_exists (false); for (auto hash : *vote_a) { if (this->rep_crawler.exists (hash)) { rep_crawler_exists = true; break; } } if (rep_crawler_exists) { // We see a valid non-replay vote for a block we requested, this node is probably a representative if (this->rep_crawler.response (channel_a, vote_a->account, rep_weight)) { logger.try_log (boost::str (boost::format ("Found a representative at %1%") % channel_a->to_string ())); // Rebroadcasting all active votes to new representative auto blocks (this->active.list_blocks (true)); for (auto i (blocks.begin ()), n (blocks.end ()); i != n; ++i) { if (*i != nullptr) { nano::confirm_req req (*i); channel_a->send (req); } } } } } }); if (NANO_VERSION_PATCH == 0) { logger.always_log ("Node starting, version: ", NANO_MAJOR_MINOR_VERSION); } else { logger.always_log ("Node starting, version: ", NANO_MAJOR_MINOR_RC_VERSION); } logger.always_log (boost::str (boost::format ("Work pool running %1% threads") % work.threads.size ())); if (!init_a.error ()) { if (config.logging.node_lifetime_tracing ()) { logger.always_log ("Constructing node"); } nano::genesis genesis; auto transaction (store.tx_begin_write ()); if (store.latest_begin (transaction) == store.latest_end ()) { // Store was empty meaning we just created it, add the genesis block store.initialize (transaction, genesis); } if (!store.block_exists (transaction, genesis.hash ())) { logger.always_log ("Genesis block not found. Make sure the node network ID is correct."); std::exit (1); } node_id = nano::keypair (store.get_node_id (transaction)); logger.always_log ("Node ID: ", node_id.pub.to_account ()); } const uint8_t * weight_buffer = network_params.network.is_live_network () ? 
nano_bootstrap_weights_live : nano_bootstrap_weights_beta; size_t weight_size = network_params.network.is_live_network () ? nano_bootstrap_weights_live_size : nano_bootstrap_weights_beta_size; if (network_params.network.is_live_network () || network_params.network.is_beta_network ()) { nano::bufferstream weight_stream ((const uint8_t *)weight_buffer, weight_size); nano::uint128_union block_height; if (!nano::try_read (weight_stream, block_height)) { auto max_blocks = (uint64_t)block_height.number (); auto transaction (store.tx_begin_read ()); if (ledger.store.block_count (transaction).sum () < max_blocks) { ledger.bootstrap_weight_max_blocks = max_blocks; while (true) { nano::account account; if (nano::try_read (weight_stream, account.bytes)) { break; } nano::amount weight; if (nano::try_read (weight_stream, weight.bytes)) { break; } logger.always_log ("Using bootstrap rep weight: ", account.to_account (), " -> ", weight.format_balance (Mxrb_ratio, 0, true), " XRB"); ledger.bootstrap_weights[account] = weight.number (); } } } } } nano::node::~node () { if (config.logging.node_lifetime_tracing ()) { logger.always_log ("Destructing node"); } stop (); } void nano::node::do_rpc_callback (boost::asio::ip::tcp::resolver::iterator i_a, std::string const & address, uint16_t port, std::shared_ptr<std::string> target, std::shared_ptr<std::string> body, std::shared_ptr<boost::asio::ip::tcp::resolver> resolver) { if (i_a != boost::asio::ip::tcp::resolver::iterator{}) { auto node_l (shared_from_this ()); auto sock (std::make_shared<boost::asio::ip::tcp::socket> (node_l->io_ctx)); sock->async_connect (i_a->endpoint (), [node_l, target, body, sock, address, port, i_a, resolver](boost::system::error_code const & ec) mutable { if (!ec) { auto req (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ()); req->method (boost::beast::http::verb::post); req->target (*target); req->version (11); req->insert (boost::beast::http::field::host, address); req->insert (boost::beast::http::field::content_type, "application/json"); req->body () = *body; req->prepare_payload (); boost::beast::http::async_write (*sock, *req, [node_l, sock, address, port, req, i_a, target, body, resolver](boost::system::error_code const & ec, size_t bytes_transferred) mutable { if (!ec) { auto sb (std::make_shared<boost::beast::flat_buffer> ()); auto resp (std::make_shared<boost::beast::http::response<boost::beast::http::string_body>> ()); boost::beast::http::async_read (*sock, *sb, *resp, [node_l, sb, resp, sock, address, port, i_a, target, body, resolver](boost::system::error_code const & ec, size_t bytes_transferred) mutable { if (!ec) { if (resp->result () == boost::beast::http::status::ok) { node_l->stats.inc (nano::stat::type::http_callback, nano::stat::detail::initiate, nano::stat::dir::out); } else { if (node_l->config.logging.callback_logging ()) { node_l->logger.try_log (boost::str (boost::format ("Callback to %1%:%2% failed with status: %3%") % address % port % resp->result ())); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); } } else { if (node_l->config.logging.callback_logging ()) { node_l->logger.try_log (boost::str (boost::format ("Unable complete callback: %1%:%2%: %3%") % address % port % ec.message ())); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); }; }); } else { if (node_l->config.logging.callback_logging ()) { node_l->logger.try_log (boost::str (boost::format ("Unable to send 
callback: %1%:%2%: %3%") % address % port % ec.message ())); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); } }); } else { if (node_l->config.logging.callback_logging ()) { node_l->logger.try_log (boost::str (boost::format ("Unable to connect to callback address: %1%:%2%: %3%") % address % port % ec.message ())); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); ++i_a; node_l->do_rpc_callback (i_a, address, port, target, body, resolver); } }); } } bool nano::node::copy_with_compaction (boost::filesystem::path const & destination_file) { return !mdb_env_copy2 (boost::polymorphic_downcast<nano::mdb_store *> (store_impl.get ())->env.environment, destination_file.string ().c_str (), MDB_CP_COMPACT); } void nano::node::process_fork (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a) { auto root (block_a->root ()); if (!store.block_exists (transaction_a, block_a->type (), block_a->hash ()) && store.root_exists (transaction_a, block_a->root ())) { std::shared_ptr<nano::block> ledger_block (ledger.forked_block (transaction_a, *block_a)); if (ledger_block && !ledger.block_confirmed (transaction_a, ledger_block->hash ())) { std::weak_ptr<nano::node> this_w (shared_from_this ()); if (!active.start (ledger_block, [this_w, root](std::shared_ptr<nano::block>) { if (auto this_l = this_w.lock ()) { auto attempt (this_l->bootstrap_initiator.current_attempt ()); if (attempt && attempt->mode == nano::bootstrap_mode::legacy) { auto transaction (this_l->store.tx_begin_read ()); auto account (this_l->ledger.store.frontier_get (transaction, root)); if (!account.is_zero ()) { attempt->requeue_pull (nano::pull_info (account, root, root)); } else if (this_l->ledger.store.account_exists (transaction, root)) { attempt->requeue_pull (nano::pull_info (root, nano::block_hash (0), nano::block_hash (0))); } } } })) { logger.always_log (boost::str (boost::format ("Resolving fork between our block: %1% and block %2% both with root %3%") % ledger_block->hash ().to_string () % block_a->hash ().to_string () % block_a->root ().to_string ())); network.broadcast_confirm_req (ledger_block); } } } } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (node & node, const std::string & name) { auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (collect_seq_con_info (node.alarm, "alarm")); composite->add_component (collect_seq_con_info (node.work, "work")); composite->add_component (collect_seq_con_info (node.gap_cache, "gap_cache")); composite->add_component (collect_seq_con_info (node.ledger, "ledger")); composite->add_component (collect_seq_con_info (node.active, "active")); composite->add_component (collect_seq_con_info (node.bootstrap_initiator, "bootstrap_initiator")); composite->add_component (collect_seq_con_info (node.bootstrap, "bootstrap")); composite->add_component (node.network.udp_channels.collect_seq_con_info ("udp_channels")); composite->add_component (collect_seq_con_info (node.observers, "observers")); composite->add_component (collect_seq_con_info (node.wallets, "wallets")); composite->add_component (collect_seq_con_info (node.vote_processor, "vote_processor")); composite->add_component (collect_seq_con_info (node.rep_crawler, "rep_crawler")); composite->add_component (collect_seq_con_info (node.block_processor, "block_processor")); composite->add_component (collect_seq_con_info (node.block_arrival, 
"block_arrival")); composite->add_component (collect_seq_con_info (node.online_reps, "online_reps")); composite->add_component (collect_seq_con_info (node.votes_cache, "votes_cache")); composite->add_component (collect_seq_con_info (node.block_uniquer, "block_uniquer")); composite->add_component (collect_seq_con_info (node.vote_uniquer, "vote_uniquer")); return composite; } } nano::gap_cache::gap_cache (nano::node & node_a) : node (node_a) { } void nano::gap_cache::add (nano::transaction const & transaction_a, nano::block_hash const & hash_a, std::chrono::steady_clock::time_point time_point_a) { std::lock_guard<std::mutex> lock (mutex); auto existing (blocks.get<1> ().find (hash_a)); if (existing != blocks.get<1> ().end ()) { blocks.get<1> ().modify (existing, [time_point_a](nano::gap_information & info) { info.arrival = time_point_a; }); } else { blocks.insert ({ time_point_a, hash_a, std::unordered_set<nano::account> () }); if (blocks.size () > max) { blocks.get<0> ().erase (blocks.get<0> ().begin ()); } } } void nano::gap_cache::vote (std::shared_ptr<nano::vote> vote_a) { std::lock_guard<std::mutex> lock (mutex); auto transaction (node.store.tx_begin_read ()); for (auto hash : *vote_a) { auto existing (blocks.get<1> ().find (hash)); if (existing != blocks.get<1> ().end ()) { auto is_new (false); blocks.get<1> ().modify (existing, [&](nano::gap_information & info) { is_new = info.voters.insert (vote_a->account).second; }); if (is_new) { uint128_t tally; for (auto & voter : existing->voters) { tally += node.ledger.weight (transaction, voter); } bool start_bootstrap (false); if (!node.flags.disable_lazy_bootstrap) { if (tally >= node.config.online_weight_minimum.number ()) { start_bootstrap = true; } } else if (!node.flags.disable_legacy_bootstrap && tally > bootstrap_threshold (transaction)) { start_bootstrap = true; } if (start_bootstrap) { auto node_l (node.shared ()); auto now (std::chrono::steady_clock::now ()); node.alarm.add (node_l->network_params.network.is_test_network () ? 
now + std::chrono::milliseconds (5) : now + std::chrono::seconds (5), [node_l, hash]() { auto transaction (node_l->store.tx_begin_read ()); if (!node_l->store.block_exists (transaction, hash)) { if (!node_l->bootstrap_initiator.in_progress ()) { node_l->logger.try_log (boost::str (boost::format ("Missing block %1% which has enough votes to warrant lazy bootstrapping it") % hash.to_string ())); } if (!node_l->flags.disable_lazy_bootstrap) { node_l->bootstrap_initiator.bootstrap_lazy (hash); } else if (!node_l->flags.disable_legacy_bootstrap) { node_l->bootstrap_initiator.bootstrap (); } } }); } } } } } nano::uint128_t nano::gap_cache::bootstrap_threshold (nano::transaction const & transaction_a) { auto result ((node.online_reps.online_stake () / 256) * node.config.bootstrap_fraction_numerator); return result; } size_t nano::gap_cache::size () { std::lock_guard<std::mutex> lock (mutex); return blocks.size (); } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (gap_cache & gap_cache, const std::string & name) { auto count = gap_cache.size (); auto sizeof_element = sizeof (decltype (gap_cache.blocks)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", count, sizeof_element })); return composite; } } void nano::node::process_active (std::shared_ptr<nano::block> incoming) { block_arrival.add (incoming->hash ()); block_processor.add (incoming, nano::seconds_since_epoch ()); } nano::process_return nano::node::process (nano::block const & block_a) { auto transaction (store.tx_begin_write ()); auto result (ledger.process (transaction, block_a)); return result; } void nano::node::start () { network.start (); add_initial_peers (); if (!flags.disable_legacy_bootstrap) { ongoing_bootstrap (); } else if (!flags.disable_unchecked_cleanup) { ongoing_unchecked_cleanup (); } ongoing_store_flush (); rep_crawler.start (); ongoing_rep_calculation (); ongoing_peer_store (); ongoing_online_weight_calculation_queue (); if (!flags.disable_bootstrap_listener) { bootstrap.start (); } if (!flags.disable_backup) { backup_wallet (); } search_pending (); if (!flags.disable_wallet_bootstrap) { // Delay to start wallet lazy bootstrap auto this_l (shared ()); alarm.add (std::chrono::steady_clock::now () + std::chrono::minutes (1), [this_l]() { this_l->bootstrap_wallet (); }); } port_mapping.start (); } void nano::node::stop () { logger.always_log ("Node stopping"); block_processor.stop (); if (block_processor_thread.joinable ()) { block_processor_thread.join (); } vote_processor.stop (); active.stop (); network.stop (); if (websocket_server) { websocket_server->stop (); } bootstrap_initiator.stop (); bootstrap.stop (); port_mapping.stop (); checker.stop (); wallets.stop (); } void nano::node::keepalive_preconfigured (std::vector<std::string> const & peers_a) { for (auto i (peers_a.begin ()), n (peers_a.end ()); i != n; ++i) { keepalive (*i, network_params.network.default_node_port); } } nano::block_hash nano::node::latest (nano::account const & account_a) { auto transaction (store.tx_begin_read ()); return ledger.latest (transaction, account_a); } nano::uint128_t nano::node::balance (nano::account const & account_a) { auto transaction (store.tx_begin_read ()); return ledger.account_balance (transaction, account_a); } std::shared_ptr<nano::block> nano::node::block (nano::block_hash const & hash_a) { auto transaction (store.tx_begin_read ()); return store.block_get (transaction, 
hash_a); } std::pair<nano::uint128_t, nano::uint128_t> nano::node::balance_pending (nano::account const & account_a) { std::pair<nano::uint128_t, nano::uint128_t> result; auto transaction (store.tx_begin_read ()); result.first = ledger.account_balance (transaction, account_a); result.second = ledger.account_pending (transaction, account_a); return result; } nano::uint128_t nano::node::weight (nano::account const & account_a) { auto transaction (store.tx_begin_read ()); return ledger.weight (transaction, account_a); } nano::account nano::node::representative (nano::account const & account_a) { auto transaction (store.tx_begin_read ()); nano::account_info info; nano::account result (0); if (!store.account_get (transaction, account_a, info)) { result = info.rep_block; } return result; } void nano::node::ongoing_rep_calculation () { auto now (std::chrono::steady_clock::now ()); vote_processor.calculate_weights (); std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (now + std::chrono::minutes (10), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_rep_calculation (); } }); } void nano::node::ongoing_bootstrap () { auto next_wakeup (300); if (warmed_up < 3) { // Re-attempt bootstrapping more aggressively on startup next_wakeup = 5; if (!bootstrap_initiator.in_progress () && !network.empty ()) { ++warmed_up; } } bootstrap_initiator.bootstrap (); std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (next_wakeup), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_bootstrap (); } }); } void nano::node::ongoing_store_flush () { { auto transaction (store.tx_begin_write ()); store.flush (transaction); } std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (5), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_store_flush (); } }); } void nano::node::ongoing_peer_store () { network.udp_channels.store_all (*this); std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + network_params.node.peer_interval, [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_peer_store (); } }); } void nano::node::backup_wallet () { auto transaction (wallets.tx_begin_read ()); for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n; ++i) { boost::system::error_code error_chmod; auto backup_path (application_path / "backup"); boost::filesystem::create_directories (backup_path); nano::set_secure_perm_directory (backup_path, error_chmod); i->second->store.write_backup (transaction, backup_path / (i->first.to_string () + ".json")); } auto this_l (shared ()); alarm.add (std::chrono::steady_clock::now () + network_params.node.backup_interval, [this_l]() { this_l->backup_wallet (); }); } void nano::node::search_pending () { // Reload wallets from disk wallets.reload (); // Search pending wallets.search_pending_all (); auto this_l (shared ()); alarm.add (std::chrono::steady_clock::now () + network_params.node.search_pending_interval, [this_l]() { this_l->search_pending (); }); } void nano::node::bootstrap_wallet () { std::deque<nano::account> accounts; { std::lock_guard<std::mutex> lock (wallets.mutex); auto transaction (wallets.tx_begin_read ()); for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n && accounts.size () < 128; ++i) { auto & wallet (*i->second); std::lock_guard<std::recursive_mutex> wallet_lock (wallet.store.mutex); for (auto j 
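// Gather at most 128 accounts across all local wallets to seed wallet lazy bootstrapping.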
(wallet.store.begin (transaction)), m (wallet.store.end ()); j != m && accounts.size () < 128; ++j) { nano::account account (j->first); accounts.push_back (account); } } } bootstrap_initiator.bootstrap_wallet (accounts); } void nano::node::unchecked_cleanup () { std::deque<nano::unchecked_key> cleaning_list; // Collect old unchecked keys { auto now (nano::seconds_since_epoch ()); auto transaction (store.tx_begin_read ()); // Max 128k records to clean, max 2 minutes reading to prevent slow i/o systems start issues for (auto i (store.unchecked_begin (transaction)), n (store.unchecked_end ()); i != n && cleaning_list.size () < 128 * 1024 && nano::seconds_since_epoch () - now < 120; ++i) { nano::unchecked_key key (i->first); nano::unchecked_info info (i->second); if ((now - info.modified) > config.unchecked_cutoff_time.count ()) { cleaning_list.push_back (key); } } } // Delete old unchecked keys in batches while (!cleaning_list.empty ()) { size_t deleted_count (0); auto transaction (store.tx_begin_write ()); while (deleted_count++ < 2 * 1024 && !cleaning_list.empty ()) { auto key (cleaning_list.front ()); cleaning_list.pop_front (); store.unchecked_del (transaction, key); } } } void nano::node::ongoing_unchecked_cleanup () { if (!bootstrap_initiator.in_progress ()) { unchecked_cleanup (); } auto this_l (shared ()); alarm.add (std::chrono::steady_clock::now () + network_params.node.unchecked_cleaning_interval, [this_l]() { this_l->ongoing_unchecked_cleanup (); }); } int nano::node::price (nano::uint128_t const & balance_a, int amount_a) { assert (balance_a >= amount_a * nano::Gxrb_ratio); auto balance_l (balance_a); double result (0.0); for (auto i (0); i < amount_a; ++i) { balance_l -= nano::Gxrb_ratio; auto balance_scaled ((balance_l / nano::Mxrb_ratio).convert_to<double> ()); auto units (balance_scaled / 1000.0); auto unit_price (((free_cutoff - units) / free_cutoff) * price_max); result += std::min (std::max (0.0, unit_price), price_max); } return static_cast<int> (result * 100.0); } namespace { class work_request { public: work_request (boost::asio::io_context & io_ctx_a, boost::asio::ip::address address_a, uint16_t port_a) : address (address_a), port (port_a), socket (io_ctx_a) { } boost::asio::ip::address address; uint16_t port; boost::beast::flat_buffer buffer; boost::beast::http::response<boost::beast::http::string_body> response; boost::asio::ip::tcp::socket socket; }; class distributed_work : public std::enable_shared_from_this<distributed_work> { public: distributed_work (std::shared_ptr<nano::node> const & node_a, nano::block_hash const & root_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) : distributed_work (1, node_a, root_a, callback_a, difficulty_a) { assert (node_a != nullptr); } distributed_work (unsigned int backoff_a, std::shared_ptr<nano::node> const & node_a, nano::block_hash const & root_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) : callback (callback_a), backoff (backoff_a), node (node_a), root (root_a), need_resolve (node_a->config.work_peers), difficulty (difficulty_a) { assert (node_a != nullptr); completed.clear (); } void start () { if (need_resolve.empty ()) { start_work (); } else { auto current (need_resolve.back ()); need_resolve.pop_back (); auto this_l (shared_from_this ()); boost::system::error_code ec; auto parsed_address (boost::asio::ip::address_v6::from_string (current.first, ec)); if (!ec) { outstanding[parsed_address] = current.second; start (); } else { node->network.resolver.async_resolve 
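// Resolve the work peer's host name; every endpoint it resolves to is added to the outstanding set before moving on to the next configured peer.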
(boost::asio::ip::udp::resolver::query (current.first, std::to_string (current.second)), [current, this_l](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) { if (!ec) { for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i) { auto endpoint (i->endpoint ()); this_l->outstanding[endpoint.address ()] = endpoint.port (); } } else { this_l->node->logger.try_log (boost::str (boost::format ("Error resolving work peer: %1%:%2%: %3%") % current.first % current.second % ec.message ())); } this_l->start (); }); } } } void start_work () { if (!outstanding.empty ()) { auto this_l (shared_from_this ()); std::lock_guard<std::mutex> lock (mutex); for (auto const & i : outstanding) { auto host (i.first); auto service (i.second); node->background ([this_l, host, service]() { auto connection (std::make_shared<work_request> (this_l->node->io_ctx, host, service)); connection->socket.async_connect (nano::tcp_endpoint (host, service), [this_l, connection](boost::system::error_code const & ec) { if (!ec) { std::string request_string; { boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", this_l->root.to_string ()); request.put ("difficulty", nano::to_string_hex (this_l->difficulty)); std::stringstream ostream; boost::property_tree::write_json (ostream, request); request_string = ostream.str (); } auto request (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ()); request->method (boost::beast::http::verb::post); request->target ("/"); request->version (11); request->body () = request_string; request->prepare_payload (); boost::beast::http::async_write (connection->socket, *request, [this_l, connection, request](boost::system::error_code const & ec, size_t bytes_transferred) { if (!ec) { boost::beast::http::async_read (connection->socket, connection->buffer, connection->response, [this_l, connection](boost::system::error_code const & ec, size_t bytes_transferred) { if (!ec) { if (connection->response.result () == boost::beast::http::status::ok) { this_l->success (connection->response.body (), connection->address); } else { this_l->node->logger.try_log (boost::str (boost::format ("Work peer responded with an error %1% %2%: %3%") % connection->address % connection->port % connection->response.result ())); this_l->failure (connection->address); } } else { this_l->node->logger.try_log (boost::str (boost::format ("Unable to read from work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ())); this_l->failure (connection->address); } }); } else { this_l->node->logger.try_log (boost::str (boost::format ("Unable to write to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ())); this_l->failure (connection->address); } }); } else { this_l->node->logger.try_log (boost::str (boost::format ("Unable to connect to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ())); this_l->failure (connection->address); } }); }); } } else { handle_failure (true); } } void stop () { auto this_l (shared_from_this ()); std::lock_guard<std::mutex> lock (mutex); for (auto const & i : outstanding) { auto host (i.first); node->background ([this_l, host]() { std::string request_string; { boost::property_tree::ptree request; request.put ("action", "work_cancel"); request.put ("hash", this_l->root.to_string ()); std::stringstream ostream; boost::property_tree::write_json 
(ostream, request); request_string = ostream.str (); } boost::beast::http::request<boost::beast::http::string_body> request; request.method (boost::beast::http::verb::post); request.target ("/"); request.version (11); request.body () = request_string; request.prepare_payload (); auto socket (std::make_shared<boost::asio::ip::tcp::socket> (this_l->node->io_ctx)); boost::beast::http::async_write (*socket, request, [socket](boost::system::error_code const & ec, size_t bytes_transferred) { }); }); } outstanding.clear (); } void success (std::string const & body_a, boost::asio::ip::address const & address) { auto last (remove (address)); std::stringstream istream (body_a); try { boost::property_tree::ptree result; boost::property_tree::read_json (istream, result); auto work_text (result.get<std::string> ("work")); uint64_t work; if (!nano::from_string_hex (work_text, work)) { uint64_t result_difficulty (0); if (!nano::work_validate (root, work, &result_difficulty) && result_difficulty >= difficulty) { set_once (work); stop (); } else { node->logger.try_log (boost::str (boost::format ("Incorrect work response from %1% for root %2% with diffuculty %3%: %4%") % address % root.to_string () % nano::to_string_hex (difficulty) % work_text)); handle_failure (last); } } else { node->logger.try_log (boost::str (boost::format ("Work response from %1% wasn't a number: %2%") % address % work_text)); handle_failure (last); } } catch (...) { node->logger.try_log (boost::str (boost::format ("Work response from %1% wasn't parsable: %2%") % address % body_a)); handle_failure (last); } } void set_once (uint64_t work_a) { if (!completed.test_and_set ()) { callback (work_a); } } void failure (boost::asio::ip::address const & address) { auto last (remove (address)); handle_failure (last); } void handle_failure (bool last) { if (last) { if (!completed.test_and_set ()) { if (node->config.work_threads != 0 || node->work.opencl) { auto callback_l (callback); // clang-format off node->work.generate (root, [callback_l](boost::optional<uint64_t> const & work_a) { callback_l (work_a.value ()); }, difficulty); // clang-format on } else { if (backoff == 1 && node->config.logging.work_generation_time ()) { node->logger.try_log ("Work peer(s) failed to generate work for root ", root.to_string (), ", retrying..."); } auto now (std::chrono::steady_clock::now ()); auto root_l (root); auto callback_l (callback); std::weak_ptr<nano::node> node_w (node); auto next_backoff (std::min (backoff * 2, (unsigned int)60 * 5)); // clang-format off node->alarm.add (now + std::chrono::seconds (backoff), [ node_w, root_l, callback_l, next_backoff, difficulty = difficulty ] { if (auto node_l = node_w.lock ()) { auto work_generation (std::make_shared<distributed_work> (next_backoff, node_l, root_l, callback_l, difficulty)); work_generation->start (); } }); // clang-format on } } } } bool remove (boost::asio::ip::address const & address) { std::lock_guard<std::mutex> lock (mutex); outstanding.erase (address); return outstanding.empty (); } std::function<void(uint64_t)> callback; unsigned int backoff; // in seconds std::shared_ptr<nano::node> node; nano::block_hash root; std::mutex mutex; std::map<boost::asio::ip::address, uint16_t> outstanding; std::vector<std::pair<std::string, uint16_t>> need_resolve; std::atomic_flag completed; uint64_t difficulty; }; } void nano::node::work_generate_blocking (nano::block & block_a) { work_generate_blocking (block_a, network_params.network.publish_threshold); } void nano::node::work_generate_blocking 
(nano::block & block_a, uint64_t difficulty_a) { block_a.block_work_set (work_generate_blocking (block_a.root (), difficulty_a)); } void nano::node::work_generate (nano::uint256_union const & hash_a, std::function<void(uint64_t)> callback_a) { work_generate (hash_a, callback_a, network_params.network.publish_threshold); } void nano::node::work_generate (nano::uint256_union const & hash_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) { auto work_generation (std::make_shared<distributed_work> (shared (), hash_a, callback_a, difficulty_a)); work_generation->start (); } uint64_t nano::node::work_generate_blocking (nano::uint256_union const & block_a) { return work_generate_blocking (block_a, network_params.network.publish_threshold); } uint64_t nano::node::work_generate_blocking (nano::uint256_union const & hash_a, uint64_t difficulty_a) { std::promise<uint64_t> promise; std::future<uint64_t> future = promise.get_future (); // clang-format off work_generate (hash_a, [&promise](uint64_t work_a) { promise.set_value (work_a); }, difficulty_a); // clang-format on return future.get (); } void nano::node::add_initial_peers () { auto transaction (store.tx_begin_read ()); for (auto i (store.peers_begin (transaction)), n (store.peers_end ()); i != n; ++i) { nano::endpoint endpoint (boost::asio::ip::address_v6 (i->first.address_bytes ()), i->first.port ()); if (!network.udp_channels.reachout (endpoint, config.allow_local_peers)) { auto channel (std::make_shared<nano::transport::channel_udp> (network.udp_channels, endpoint)); network.send_keepalive (*channel); rep_crawler.query (channel); } } } void nano::node::block_confirm (std::shared_ptr<nano::block> block_a) { active.start (block_a); network.broadcast_confirm_req (block_a); // Calculate votes for local representatives if (config.enable_voting && active.active (*block_a)) { block_processor.generator.add (block_a->hash ()); } } nano::uint128_t nano::node::delta () { auto result ((online_reps.online_stake () / 100) * config.online_weight_quorum); return result; } void nano::node::ongoing_online_weight_calculation_queue () { std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + (std::chrono::seconds (network_params.node.weight_period)), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_online_weight_calculation (); } }); } void nano::node::ongoing_online_weight_calculation () { online_reps.sample (); ongoing_online_weight_calculation_queue (); } namespace { class confirmed_visitor : public nano::block_visitor { public: confirmed_visitor (nano::transaction const & transaction_a, nano::node & node_a, std::shared_ptr<nano::block> block_a, nano::block_hash const & hash_a) : transaction (transaction_a), node (node_a), block (block_a), hash (hash_a) { } virtual ~confirmed_visitor () = default; void scan_receivable (nano::account const & account_a) { for (auto i (node.wallets.items.begin ()), n (node.wallets.items.end ()); i != n; ++i) { auto wallet (i->second); auto transaction_l (node.wallets.tx_begin_read ()); if (wallet->store.exists (transaction_l, account_a)) { nano::account representative; nano::pending_info pending; representative = wallet->store.representative (transaction_l); auto error (node.store.pending_get (transaction, nano::pending_key (account_a, hash), pending)); if (!error) { auto node_l (node.shared ()); auto amount (pending.amount.number ()); wallet->receive_async (block, representative, amount, [](std::shared_ptr<nano::block>) {}); } else { if 
(!node.store.block_exists (transaction, hash)) { node.logger.try_log (boost::str (boost::format ("Confirmed block is missing: %1%") % hash.to_string ())); assert (false && "Confirmed block is missing"); } else { node.logger.try_log (boost::str (boost::format ("Block %1% has already been received") % hash.to_string ())); } } } } } void state_block (nano::state_block const & block_a) override { scan_receivable (block_a.hashables.link); } void send_block (nano::send_block const & block_a) override { scan_receivable (block_a.hashables.destination); } void receive_block (nano::receive_block const &) override { } void open_block (nano::open_block const &) override { } void change_block (nano::change_block const &) override { } nano::transaction const & transaction; nano::node & node; std::shared_ptr<nano::block> block; nano::block_hash const & hash; }; } void nano::node::receive_confirmed (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a, nano::block_hash const & hash_a) { confirmed_visitor visitor (transaction_a, *this, block_a, hash_a); block_a->visit (visitor); } void nano::node::process_confirmed (std::shared_ptr<nano::block> block_a, uint8_t iteration) { auto hash (block_a->hash ()); if (ledger.block_exists (block_a->type (), hash)) { add_confirmation_heights (hash); auto transaction (store.tx_begin_read ()); receive_confirmed (transaction, block_a, hash); auto account (ledger.account (transaction, hash)); auto amount (ledger.amount (transaction, hash)); bool is_state_send (false); nano::account pending_account (0); if (auto state = dynamic_cast<nano::state_block *> (block_a.get ())) { is_state_send = ledger.is_send (transaction, *state); pending_account = state->hashables.link; } if (auto send = dynamic_cast<nano::send_block *> (block_a.get ())) { pending_account = send->hashables.destination; } observers.blocks.notify (block_a, account, amount, is_state_send); if (amount > 0) { observers.account_balance.notify (account, false); if (!pending_account.is_zero ()) { observers.account_balance.notify (pending_account, true); } } } // Limit to 0.5 * 20 = 10 seconds (more than max block_processor::process_batch finish time) else if (iteration < 20) { iteration++; std::weak_ptr<nano::node> node_w (shared ()); alarm.add (std::chrono::steady_clock::now () + network_params.node.process_confirmed_interval, [node_w, block_a, iteration]() { if (auto node_l = node_w.lock ()) { node_l->process_confirmed (block_a, iteration); } }); } } void nano::node::process_message (nano::message const & message_a, std::shared_ptr<nano::transport::channel> channel_a) { network_message_visitor visitor (*this, channel_a); message_a.visit (visitor); } nano::endpoint nano::network::endpoint () { return udp_channels.get_local_endpoint (); } void nano::network::cleanup (std::chrono::steady_clock::time_point const & cutoff_a) { node.network.udp_channels.purge (cutoff_a); if (node.network.empty ()) { disconnect_observer (); } } void nano::network::ongoing_cleanup () { cleanup (std::chrono::steady_clock::now () - node.network_params.node.cutoff); std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + node.network_params.node.period, [node_w]() { if (auto node_l = node_w.lock ()) { node_l->network.ongoing_cleanup (); } }); } size_t nano::network::size () const { return udp_channels.size (); } size_t nano::network::size_sqrt () const { return (static_cast<size_t> (std::ceil (std::sqrt (size ())))); } bool nano::network::empty () const { return size () == 
0; } bool nano::block_arrival::add (nano::block_hash const & hash_a) { std::lock_guard<std::mutex> lock (mutex); auto now (std::chrono::steady_clock::now ()); auto inserted (arrival.insert (nano::block_arrival_info{ now, hash_a })); auto result (!inserted.second); return result; } bool nano::block_arrival::recent (nano::block_hash const & hash_a) { std::lock_guard<std::mutex> lock (mutex); auto now (std::chrono::steady_clock::now ()); while (arrival.size () > arrival_size_min && arrival.begin ()->arrival + arrival_time_min < now) { arrival.erase (arrival.begin ()); } return arrival.get<1> ().find (hash_a) != arrival.get<1> ().end (); } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_arrival & block_arrival, const std::string & name) { size_t count = 0; { std::lock_guard<std::mutex> guard (block_arrival.mutex); count = block_arrival.arrival.size (); } auto sizeof_element = sizeof (decltype (block_arrival.arrival)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "arrival", count, sizeof_element })); return composite; } } nano::online_reps::online_reps (nano::node & node_a, nano::uint128_t minimum_a) : node (node_a), minimum (minimum_a) { auto transaction (node.ledger.store.tx_begin_read ()); online = trend (transaction); } void nano::online_reps::observe (nano::account const & rep_a) { auto transaction (node.ledger.store.tx_begin_read ()); if (node.ledger.weight (transaction, rep_a) > 0) { std::lock_guard<std::mutex> lock (mutex); reps.insert (rep_a); } } void nano::online_reps::sample () { auto transaction (node.ledger.store.tx_begin_write ()); // Discard oldest entries while (node.ledger.store.online_weight_count (transaction) >= node.network_params.node.max_weight_samples) { auto oldest (node.ledger.store.online_weight_begin (transaction)); assert (oldest != node.ledger.store.online_weight_end ()); node.ledger.store.online_weight_del (transaction, oldest->first); } // Calculate current active rep weight nano::uint128_t current; std::unordered_set<nano::account> reps_copy; { std::lock_guard<std::mutex> lock (mutex); reps_copy.swap (reps); } for (auto & i : reps_copy) { current += node.ledger.weight (transaction, i); } node.ledger.store.online_weight_put (transaction, std::chrono::system_clock::now ().time_since_epoch ().count (), current); auto trend_l (trend (transaction)); std::lock_guard<std::mutex> lock (mutex); online = trend_l; } nano::uint128_t nano::online_reps::trend (nano::transaction & transaction_a) { std::vector<nano::uint128_t> items; items.reserve (node.network_params.node.max_weight_samples + 1); items.push_back (minimum); for (auto i (node.ledger.store.online_weight_begin (transaction_a)), n (node.ledger.store.online_weight_end ()); i != n; ++i) { items.push_back (i->second.number ()); } // Pick median value for our target vote weight auto median_idx = items.size () / 2; nth_element (items.begin (), items.begin () + median_idx, items.end ()); return nano::uint128_t{ items[median_idx] }; } nano::uint128_t nano::online_reps::online_stake () { std::lock_guard<std::mutex> lock (mutex); return std::max (online, minimum); } std::vector<nano::account> nano::online_reps::list () { std::vector<nano::account> result; std::lock_guard<std::mutex> lock (mutex); for (auto & i : reps) { result.push_back (i); } return result; } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (online_reps & online_reps, const 
std::string & name) { size_t count = 0; { std::lock_guard<std::mutex> guard (online_reps.mutex); count = online_reps.reps.size (); } auto sizeof_element = sizeof (decltype (online_reps.reps)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "reps", count, sizeof_element })); return composite; } } std::shared_ptr<nano::node> nano::node::shared () { return shared_from_this (); } nano::election_vote_result::election_vote_result (bool replay_a, bool processed_a) { replay = replay_a; processed = processed_a; } nano::election::election (nano::node & node_a, std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) : confirmation_action (confirmation_action_a), node (node_a), election_start (std::chrono::steady_clock::now ()), status ({ block_a, 0 }), confirmed (false), stopped (false), announcements (0) { last_votes.insert (std::make_pair (node.network_params.ledger.not_an_account (), nano::vote_info{ std::chrono::steady_clock::now (), 0, block_a->hash () })); blocks.insert (std::make_pair (block_a->hash (), block_a)); update_dependent (); } void nano::election::compute_rep_votes (nano::transaction const & transaction_a) { if (node.config.enable_voting) { node.wallets.foreach_representative (transaction_a, [this, &transaction_a](nano::public_key const & pub_a, nano::raw_key const & prv_a) { auto vote (this->node.store.vote_generate (transaction_a, pub_a, prv_a, status.winner)); this->node.vote_processor.vote (vote, std::make_shared<nano::transport::channel_udp> (this->node.network.udp_channels, this->node.network.endpoint ())); }); } } void nano::election::confirm_once () { if (!confirmed.exchange (true)) { status.election_end = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()); status.election_duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now () - election_start); auto winner_l (status.winner); auto node_l (node.shared ()); auto confirmation_action_l (confirmation_action); node.background ([node_l, winner_l, confirmation_action_l]() { node_l->process_confirmed (winner_l); confirmation_action_l (winner_l); }); } } void nano::election::stop () { stopped = true; } bool nano::election::have_quorum (nano::tally_t const & tally_a, nano::uint128_t tally_sum) const { bool result = false; if (tally_sum >= node.config.online_weight_minimum.number ()) { auto i (tally_a.begin ()); auto first (i->first); ++i; auto second (i != tally_a.end () ?
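// 'second' is the runner-up tally (0 when there is a single candidate); quorum requires the leader to exceed it by more than delta ().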
i->first : 0); auto delta_l (node.delta ()); result = tally_a.begin ()->first > (second + delta_l); } return result; } nano::tally_t nano::election::tally (nano::transaction const & transaction_a) { std::unordered_map<nano::block_hash, nano::uint128_t> block_weights; for (auto vote_info : last_votes) { block_weights[vote_info.second.hash] += node.ledger.weight (transaction_a, vote_info.first); } last_tally = block_weights; nano::tally_t result; for (auto item : block_weights) { auto block (blocks.find (item.first)); if (block != blocks.end ()) { result.insert (std::make_pair (item.second, block->second)); } } return result; } void nano::election::confirm_if_quorum (nano::transaction const & transaction_a) { auto tally_l (tally (transaction_a)); assert (!tally_l.empty ()); auto winner (tally_l.begin ()); auto block_l (winner->second); status.tally = winner->first; nano::uint128_t sum (0); for (auto & i : tally_l) { sum += i.first; } if (sum >= node.config.online_weight_minimum.number () && block_l->hash () != status.winner->hash ()) { auto node_l (node.shared ()); node_l->block_processor.force (block_l); status.winner = block_l; update_dependent (); node_l->active.adjust_difficulty (block_l->hash ()); } if (have_quorum (tally_l, sum)) { if (node.config.logging.vote_logging () || blocks.size () > 1) { log_votes (tally_l); } confirm_once (); } } void nano::election::log_votes (nano::tally_t const & tally_a) const { std::stringstream tally; tally << boost::str (boost::format ("\nVote tally for root %1%") % status.winner->root ().to_string ()); for (auto i (tally_a.begin ()), n (tally_a.end ()); i != n; ++i) { tally << boost::str (boost::format ("\nBlock %1% weight %2%") % i->second->hash ().to_string () % i->first.convert_to<std::string> ()); } for (auto i (last_votes.begin ()), n (last_votes.end ()); i != n; ++i) { tally << boost::str (boost::format ("\n%1% %2%") % i->first.to_account () % i->second.hash.to_string ()); } node.logger.try_log (tally.str ()); } nano::election_vote_result nano::election::vote (nano::account rep, uint64_t sequence, nano::block_hash block_hash) { // see republish_vote documentation for an explanation of these rules auto transaction (node.store.tx_begin_read ()); auto replay (false); auto supply (node.online_reps.online_stake ()); auto weight (node.ledger.weight (transaction, rep)); auto should_process (false); if (node.network_params.network.is_test_network () || weight > supply / 1000) // 0.1% or above { unsigned int cooldown; if (weight < supply / 100) // 0.1% to 1% { cooldown = 15; } else if (weight < supply / 20) // 1% to 5% { cooldown = 5; } else // 5% or above { cooldown = 1; } auto last_vote_it (last_votes.find (rep)); if (last_vote_it == last_votes.end ()) { should_process = true; } else { auto last_vote (last_vote_it->second); if (last_vote.sequence < sequence || (last_vote.sequence == sequence && last_vote.hash < block_hash)) { if (last_vote.time <= std::chrono::steady_clock::now () - std::chrono::seconds (cooldown)) { should_process = true; } } else { replay = true; } } if (should_process) { last_votes[rep] = { std::chrono::steady_clock::now (), sequence, block_hash }; if (!confirmed) { confirm_if_quorum (transaction); } } } return nano::election_vote_result (replay, should_process); } bool nano::node::validate_block_by_previous (nano::transaction const & transaction, std::shared_ptr<nano::block> block_a) { bool result (false); nano::account account; if (!block_a->previous ().is_zero ()) { if (store.block_exists (transaction, block_a->previous ())) { 
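// The previous block is in the store, so the signing account can be taken from the ledger; a missing previous means the block cannot be validated yet.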
account = ledger.account (transaction, block_a->previous ()); } else { result = true; } } else { account = block_a->root (); } if (!result && block_a->type () == nano::block_type::state) { std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a)); nano::amount prev_balance (0); if (!block_l->hashables.previous.is_zero ()) { if (store.block_exists (transaction, block_l->hashables.previous)) { prev_balance = ledger.balance (transaction, block_l->hashables.previous); } else { result = true; } } if (!result) { if (block_l->hashables.balance == prev_balance && !ledger.epoch_link.is_zero () && ledger.is_epoch_link (block_l->hashables.link)) { account = ledger.epoch_signer; } } } if (!result && (account.is_zero () || nano::validate_message (account, block_a->hash (), block_a->block_signature ()))) { result = true; } return result; } bool nano::election::publish (std::shared_ptr<nano::block> block_a) { auto result (false); if (blocks.size () >= 10) { if (last_tally[block_a->hash ()] < node.online_reps.online_stake () / 10) { result = true; } } if (!result) { auto transaction (node.store.tx_begin_read ()); result = node.validate_block_by_previous (transaction, block_a); if (!result) { if (blocks.find (block_a->hash ()) == blocks.end ()) { blocks.insert (std::make_pair (block_a->hash (), block_a)); confirm_if_quorum (transaction); node.network.flood_block (block_a); } } } return result; } size_t nano::election::last_votes_size () { std::lock_guard<std::mutex> lock (node.active.mutex); return last_votes.size (); } void nano::active_transactions::confirm_frontiers (nano::transaction const & transaction_a) { // Limit maximum count of elections to start bool representative (node.config.enable_voting && node.wallets.reps_count > 0); /* Check less frequently for non-representative nodes ~15 minutes for non-representative nodes, 3 minutes for representatives */ int representative_factor = representative ? 3 * 60 : 15 * 60; // Decrease check time for test network int test_network_factor = node.network_params.network.is_test_network () ? 1000 : 1; if (std::chrono::steady_clock::now () >= next_frontier_check) { size_t max_elections (max_broadcast_queue / 4); size_t elections_count (0); for (auto i (node.store.latest_begin (transaction_a, next_frontier_account)), n (node.store.latest_end ()); i != n && elections_count < max_elections; ++i) { nano::account_info info (i->second); if (info.block_count != info.confirmation_height) { auto block (node.store.block_get (transaction_a, info.head)); if (!start (block)) { ++elections_count; // Calculate votes for local representatives if (representative) { node.block_processor.generator.add (block->hash ()); } } // Update next account next_frontier_account = i->first.number () + 1; } } // 4 times slower check if all frontiers were confirmed int fully_confirmed_factor = (elections_count <= max_elections) ? 4 : 1; // Calculate next check time next_frontier_check = std::chrono::steady_clock::now () + std::chrono::seconds ((representative_factor * fully_confirmed_factor) / test_network_factor); // Set next account to 0 if all frontiers were confirmed next_frontier_account = (elections_count <= max_elections) ? 
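// Otherwise resume the frontier scan from where the election cap stopped it on the next pass.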
0 : next_frontier_account; } } void nano::election::update_dependent () { assert (!node.active.mutex.try_lock ()); std::vector<nano::block_hash> blocks_search; auto hash (status.winner->hash ()); auto previous (status.winner->previous ()); if (!previous.is_zero ()) { blocks_search.push_back (previous); } auto source (status.winner->source ()); if (!source.is_zero () && source != previous) { blocks_search.push_back (source); } auto link (status.winner->link ()); if (!link.is_zero () && !node.ledger.is_epoch_link (link) && link != previous) { blocks_search.push_back (link); } for (auto & block_search : blocks_search) { auto existing (node.active.blocks.find (block_search)); if (existing != node.active.blocks.end () && !existing->second->confirmed && !existing->second->stopped) { if (existing->second->dependent_blocks.find (hash) == existing->second->dependent_blocks.end ()) { existing->second->dependent_blocks.insert (hash); } } } } void nano::active_transactions::request_confirm (std::unique_lock<std::mutex> & lock_a) { std::unordered_set<nano::qualified_root> inactive; auto transaction (node.store.tx_begin_read ()); unsigned unconfirmed_count (0); unsigned unconfirmed_announcements (0); std::unordered_map<std::shared_ptr<nano::transport::channel>, std::vector<std::pair<nano::block_hash, nano::block_hash>>> requests_bundle; std::deque<std::shared_ptr<nano::block>> rebroadcast_bundle; std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> confirm_req_bundle; auto roots_size (roots.size ()); for (auto i (roots.get<1> ().begin ()), n (roots.get<1> ().end ()); i != n; ++i) { auto root (i->root); auto election_l (i->election); if ((election_l->confirmed || election_l->stopped) && election_l->announcements >= announcement_min - 1) { if (election_l->confirmed) { confirmed.push_back (election_l->status); if (confirmed.size () > election_history_size) { confirmed.pop_front (); } } inactive.insert (root); } else { if (election_l->announcements > announcement_long) { ++unconfirmed_count; unconfirmed_announcements += election_l->announcements; // Log votes for very long unconfirmed elections if (election_l->announcements % 50 == 1) { auto tally_l (election_l->tally (transaction)); election_l->log_votes (tally_l); } /* Escalation for long unconfirmed elections Start new elections for previous block & source if there are less than 100 active elections */ if (election_l->announcements % announcement_long == 1 && roots_size < 100 && !node.network_params.network.is_test_network ()) { std::shared_ptr<nano::block> previous; auto previous_hash (election_l->status.winner->previous ()); if (!previous_hash.is_zero ()) { previous = node.store.block_get (transaction, previous_hash); if (previous != nullptr) { add (std::move (previous)); } } /* If previous block not existing/not commited yet, block_source can cause segfault for state blocks So source check can be done only if previous != nullptr or previous is 0 (open account) */ if (previous_hash.is_zero () || previous != nullptr) { auto source_hash (node.ledger.block_source (transaction, *election_l->status.winner)); if (!source_hash.is_zero ()) { auto source (node.store.block_get (transaction, source_hash)); if (source != nullptr) { add (std::move (source)); } } } election_l->update_dependent (); } } if (election_l->announcements < announcement_long || election_l->announcements % announcement_long == 1) { if (node.ledger.could_fit (transaction, *election_l->status.winner)) { // Broadcast winner 
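// Re-publish the current winner while the rebroadcast queue still has room (bounded by max_broadcast_queue).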
if (rebroadcast_bundle.size () < max_broadcast_queue) { rebroadcast_bundle.push_back (election_l->status.winner); } } else { if (election_l->announcements != 0) { election_l->stop (); } } } if (election_l->announcements % 4 == 1) { auto rep_channels (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ()); auto reps (node.rep_crawler.representatives (std::numeric_limits<size_t>::max ())); // Add all rep endpoints that haven't already voted. We use a set since multiple // reps may exist on an endpoint. std::unordered_set<std::shared_ptr<nano::transport::channel>> channels; for (auto & rep : reps) { if (election_l->last_votes.find (rep.account) == election_l->last_votes.end ()) { channels.insert (rep.channel); if (node.config.logging.vote_logging ()) { node.logger.try_log ("Representative did not respond to confirm_req, retrying: ", rep.account.to_account ()); } } } rep_channels->insert (rep_channels->end (), channels.begin (), channels.end ()); if ((!rep_channels->empty () && node.rep_crawler.total_weight () > node.config.online_weight_minimum.number ()) || roots_size > 5) { // broadcast_confirm_req_base modifies reps, so we clone it once to avoid aliasing if (!node.network_params.network.is_test_network ()) { if (confirm_req_bundle.size () < max_broadcast_queue) { confirm_req_bundle.push_back (std::make_pair (election_l->status.winner, rep_channels)); } } else { for (auto & rep : *rep_channels) { auto rep_request (requests_bundle.find (rep)); auto block (election_l->status.winner); auto root_hash (std::make_pair (block->hash (), block->root ())); if (rep_request == requests_bundle.end ()) { if (requests_bundle.size () < max_broadcast_queue) { std::vector<std::pair<nano::block_hash, nano::block_hash>> insert_vector = { root_hash }; requests_bundle.insert (std::make_pair (rep, insert_vector)); } } else if (rep_request->second.size () < max_broadcast_queue * nano::network::confirm_req_hashes_max) { rep_request->second.push_back (root_hash); } } } } else { if (!node.network_params.network.is_test_network ()) { auto deque_l (node.network.udp_channels.random_set (100)); auto vec (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ()); for (auto i : deque_l) { vec->push_back (i); } confirm_req_bundle.push_back (std::make_pair (election_l->status.winner, vec)); } else { for (auto & rep : *rep_channels) { auto rep_request (requests_bundle.find (rep)); auto block (election_l->status.winner); auto root_hash (std::make_pair (block->hash (), block->root ())); if (rep_request == requests_bundle.end ()) { std::vector<std::pair<nano::block_hash, nano::block_hash>> insert_vector = { root_hash }; requests_bundle.insert (std::make_pair (rep, insert_vector)); } else { rep_request->second.push_back (root_hash); } } } } } } ++election_l->announcements; } lock_a.unlock (); // Rebroadcast unconfirmed blocks if (!rebroadcast_bundle.empty ()) { node.network.flood_block_batch (rebroadcast_bundle); } // Batch confirmation request if (!node.network_params.network.is_live_network () && !requests_bundle.empty ()) { node.network.broadcast_confirm_req_batch (requests_bundle, 50); } //confirm_req broadcast if (!confirm_req_bundle.empty ()) { node.network.broadcast_confirm_req_batch (confirm_req_bundle); } // Confirm frontiers confirm_frontiers (transaction); lock_a.lock (); // Erase inactive elections for (auto i (inactive.begin ()), n (inactive.end ()); i != n; ++i) { auto root_it (roots.find (*i)); assert (root_it != roots.end ()); for (auto & block : root_it->election->blocks) 
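// Drop every block of the finished election from the hash -> election index before erasing its root.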
{ auto erased (blocks.erase (block.first)); (void)erased; assert (erased == 1); } for (auto & dependent_block : root_it->election->dependent_blocks) { adjust_difficulty (dependent_block); } roots.erase (*i); } update_active_difficulty (); if (unconfirmed_count > 0) { node.logger.try_log (boost::str (boost::format ("%1% blocks have been unconfirmed averaging %2% announcements") % unconfirmed_count % (unconfirmed_announcements / unconfirmed_count))); } } void nano::active_transactions::request_loop () { std::unique_lock<std::mutex> lock (mutex); started = true; lock.unlock (); condition.notify_all (); lock.lock (); while (!stopped) { request_confirm (lock); const auto extra_delay (std::min (roots.size (), max_broadcast_queue) * node.network.broadcast_interval_ms * 2); condition.wait_for (lock, std::chrono::milliseconds (node.network_params.network.request_interval_ms + extra_delay)); } } void nano::active_transactions::stop () { std::unique_lock<std::mutex> lock (mutex); while (!started) { condition.wait (lock); } stopped = true; lock.unlock (); condition.notify_all (); if (thread.joinable ()) { thread.join (); } lock.lock (); roots.clear (); } bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) { std::lock_guard<std::mutex> lock (mutex); return add (block_a, confirmation_action_a); } bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) { auto error (true); if (!stopped) { auto root (block_a->qualified_root ()); auto existing (roots.find (root)); if (existing == roots.end ()) { auto election (std::make_shared<nano::election> (node, block_a, confirmation_action_a)); uint64_t difficulty (0); auto error (nano::work_validate (*block_a, &difficulty)); release_assert (!error); roots.insert (nano::conflict_info{ root, difficulty, difficulty, election }); blocks.insert (std::make_pair (block_a->hash (), election)); adjust_difficulty (block_a->hash ()); } error = existing != roots.end (); } return error; } // Validate a vote and apply it to the current election if one exists bool nano::active_transactions::vote (std::shared_ptr<nano::vote> vote_a, bool single_lock) { std::shared_ptr<nano::election> election; bool replay (false); bool processed (false); { std::unique_lock<std::mutex> lock; if (!single_lock) { lock = std::unique_lock<std::mutex> (mutex); } for (auto vote_block : vote_a->blocks) { nano::election_vote_result result; if (vote_block.which ()) { auto block_hash (boost::get<nano::block_hash> (vote_block)); auto existing (blocks.find (block_hash)); if (existing != blocks.end ()) { result = existing->second->vote (vote_a->account, vote_a->sequence, block_hash); } } else { auto block (boost::get<std::shared_ptr<nano::block>> (vote_block)); auto existing (roots.find (block->qualified_root ())); if (existing != roots.end ()) { result = existing->election->vote (vote_a->account, vote_a->sequence, block->hash ()); } } replay = replay || result.replay; processed = processed || result.processed; } } if (processed) { node.network.flood_vote (vote_a); } return replay; } bool nano::active_transactions::active (nano::block const & block_a) { std::lock_guard<std::mutex> lock (mutex); return roots.find (block_a.qualified_root ()) != roots.end (); } void nano::active_transactions::update_difficulty (nano::block const & block_a) { std::lock_guard<std::mutex> lock (mutex); auto existing (roots.find 
(block_a.qualified_root ())); if (existing != roots.end ()) { uint64_t difficulty; auto error (nano::work_validate (block_a, &difficulty)); assert (!error); if (difficulty > existing->difficulty) { roots.modify (existing, [difficulty](nano::conflict_info & info_a) { info_a.difficulty = difficulty; }); adjust_difficulty (block_a.hash ()); } } } void nano::active_transactions::adjust_difficulty (nano::block_hash const & hash_a) { assert (!mutex.try_lock ()); std::deque<std::pair<nano::block_hash, int64_t>> remaining_blocks; remaining_blocks.emplace_back (hash_a, 0); std::unordered_set<nano::block_hash> processed_blocks; std::vector<std::pair<nano::qualified_root, int64_t>> elections_list; uint128_t sum (0); while (!remaining_blocks.empty ()) { auto const & item (remaining_blocks.front ()); auto hash (item.first); auto level (item.second); if (processed_blocks.find (hash) == processed_blocks.end ()) { auto existing (blocks.find (hash)); if (existing != blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->status.winner->hash () == hash) { auto previous (existing->second->status.winner->previous ()); if (!previous.is_zero ()) { remaining_blocks.emplace_back (previous, level + 1); } auto source (existing->second->status.winner->source ()); if (!source.is_zero () && source != previous) { remaining_blocks.emplace_back (source, level + 1); } auto link (existing->second->status.winner->link ()); if (!link.is_zero () && !node.ledger.is_epoch_link (link) && link != previous) { remaining_blocks.emplace_back (link, level + 1); } for (auto & dependent_block : existing->second->dependent_blocks) { remaining_blocks.emplace_back (dependent_block, level - 1); } processed_blocks.insert (hash); nano::qualified_root root (previous, existing->second->status.winner->root ()); auto existing_root (roots.find (root)); if (existing_root != roots.end ()) { sum += existing_root->difficulty; elections_list.emplace_back (root, level); } } } remaining_blocks.pop_front (); } if (elections_list.size () > 1) { uint64_t average (static_cast<uint64_t> (sum / elections_list.size ())); // Potential overflow check uint64_t divider (1); if (elections_list.size () > 1000000 && (average - node.network_params.network.publish_threshold) > elections_list.size ()) { divider = ((average - node.network_params.network.publish_threshold) / elections_list.size ()) + 1; } // Set adjusted difficulty for (auto & item : elections_list) { auto existing_root (roots.find (item.first)); uint64_t difficulty_a (average + (item.second / divider)); roots.modify (existing_root, [difficulty_a](nano::conflict_info & info_a) { info_a.adjusted_difficulty = difficulty_a; }); } } // Set adjusted difficulty equals to difficulty else if (elections_list.size () == 1) { auto existing_root (roots.find (elections_list.begin ()->first)); if (existing_root->difficulty != existing_root->adjusted_difficulty) { roots.modify (existing_root, [](nano::conflict_info & info_a) { info_a.adjusted_difficulty = info_a.difficulty; }); } } } void nano::active_transactions::update_active_difficulty () { assert (!mutex.try_lock ()); uint64_t difficulty (node.network_params.network.publish_threshold); if (!roots.empty ()) { uint128_t min = roots.get<1> ().begin ()->adjusted_difficulty; assert (min >= node.network_params.network.publish_threshold); uint128_t max = (--roots.get<1> ().end ())->adjusted_difficulty; assert (max >= node.network_params.network.publish_threshold); difficulty = static_cast<uint64_t> ((min + max) / 2); } assert 
(difficulty >= node.network_params.network.publish_threshold); difficulty_cb.push_front (difficulty); auto sum = std::accumulate (node.active.difficulty_cb.begin (), node.active.difficulty_cb.end (), uint128_t (0)); difficulty = static_cast<uint64_t> (sum / difficulty_cb.size ()); assert (difficulty >= node.network_params.network.publish_threshold); active_difficulty.store (difficulty); } // List of active blocks in elections std::deque<std::shared_ptr<nano::block>> nano::active_transactions::list_blocks (bool single_lock) { std::deque<std::shared_ptr<nano::block>> result; std::unique_lock<std::mutex> lock; if (!single_lock) { lock = std::unique_lock<std::mutex> (mutex); } for (auto i (roots.begin ()), n (roots.end ()); i != n; ++i) { result.push_back (i->election->status.winner); } return result; } std::deque<nano::election_status> nano::active_transactions::list_confirmed () { std::lock_guard<std::mutex> lock (mutex); return confirmed; } void nano::active_transactions::erase (nano::block const & block_a) { std::lock_guard<std::mutex> lock (mutex); if (roots.find (block_a.qualified_root ()) != roots.end ()) { roots.erase (block_a.qualified_root ()); node.logger.try_log (boost::str (boost::format ("Election erased for block block %1% root %2%") % block_a.hash ().to_string () % block_a.root ().to_string ())); } } bool nano::active_transactions::empty () { std::lock_guard<std::mutex> lock (mutex); return roots.empty (); } size_t nano::active_transactions::size () { std::lock_guard<std::mutex> lock (mutex); return roots.size (); } nano::active_transactions::active_transactions (nano::node & node_a, bool delay_frontier_confirmation_height_updating) : node (node_a), difficulty_cb (20, node.network_params.network.publish_threshold), active_difficulty (node.network_params.network.publish_threshold), next_frontier_check (std::chrono::steady_clock::now () + (delay_frontier_confirmation_height_updating ? 
std::chrono::seconds (60) : std::chrono::seconds (0))), started (false), stopped (false), thread ([this]() { nano::thread_role::set (nano::thread_role::name::request_loop); request_loop (); }) { std::unique_lock<std::mutex> lock (mutex); while (!started) { condition.wait (lock); } } nano::active_transactions::~active_transactions () { stop (); } bool nano::active_transactions::publish (std::shared_ptr<nano::block> block_a) { std::lock_guard<std::mutex> lock (mutex); auto existing (roots.find (block_a->qualified_root ())); auto result (true); if (existing != roots.end ()) { result = existing->election->publish (block_a); if (!result) { blocks.insert (std::make_pair (block_a->hash (), existing->election)); } } return result; } void nano::active_transactions::confirm_block (nano::block_hash const & hash_a) { std::lock_guard<std::mutex> lock (mutex); auto existing (blocks.find (hash_a)); if (existing != blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->status.winner->hash () == hash_a) { existing->second->confirm_once (); } } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (active_transactions & active_transactions, const std::string & name) { size_t roots_count = 0; size_t blocks_count = 0; size_t confirmed_count = 0; { std::lock_guard<std::mutex> guard (active_transactions.mutex); roots_count = active_transactions.roots.size (); blocks_count = active_transactions.blocks.size (); confirmed_count = active_transactions.confirmed.size (); } auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "roots", roots_count, sizeof (decltype (active_transactions.roots)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", blocks_count, sizeof (decltype (active_transactions.blocks)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "confirmed", confirmed_count, sizeof (decltype (active_transactions.confirmed)::value_type) })); return composite; } } /** * For all the blocks below this height which have been implicitly confirmed check if they * are open/receive blocks, and if so follow the source blocks and iteratively repeat to genesis. 
*/ void nano::node::add_confirmation_heights (nano::block_hash const & hash_a) { auto transaction (store.tx_begin_write ()); std::stack<nano::block_hash, std::vector<nano::block_hash>> open_receive_blocks; auto current = hash_a; nano::genesis genesis; do { if (!open_receive_blocks.empty ()) { current = open_receive_blocks.top (); open_receive_blocks.pop (); } auto hash (current); auto block_height (store.block_account_height (transaction, hash)); assert (block_height >= 0); nano::account_info account_info; nano::account account (ledger.account (transaction, hash)); release_assert (!store.account_get (transaction, account, account_info)); auto confirmation_height = account_info.confirmation_height; if (block_height > confirmation_height) { account_info.confirmation_height = block_height; store.account_put (transaction, account, account_info); // Get the difference and check if any of these are recieve blocks auto num_confirmed_blocks = block_height - confirmation_height; // Start from the most recent one and work our way through for (uint64_t i = 0; i < num_confirmed_blocks && !current.is_zero (); ++i) { auto block (store.block_get (transaction, current)); if (block != nullptr) { // Confirm blocks back active.confirm_block (current); // First check legacy receive/open if (block->type () == nano::block_type::receive || (block->type () == nano::block_type::open && current != genesis.hash ())) { open_receive_blocks.push (block->source ()); } else { // Then check state blocks auto state = std::dynamic_pointer_cast<nano::state_block> (block); if (state != nullptr) { nano::block_hash previous (state->hashables.previous); if (!previous.is_zero ()) { if (state->hashables.balance.number () >= ledger.balance (transaction, previous) && !state->hashables.link.is_zero () && !ledger.is_epoch_link (state->hashables.link)) { open_receive_blocks.push (state->hashables.link); } } // State open blocks are always receive or epoch else if (!ledger.is_epoch_link (state->hashables.link)) { open_receive_blocks.push (state->hashables.link); } } } current = block->previous (); } } } } while (!open_receive_blocks.empty ()); } int nano::node::store_version () { auto transaction (store.tx_begin_read ()); return store.version_get (transaction); } nano::inactive_node::inactive_node (boost::filesystem::path const & path_a, uint16_t peering_port_a) : path (path_a), io_context (std::make_shared<boost::asio::io_context> ()), alarm (*io_context), work (1), peering_port (peering_port_a) { boost::system::error_code error_chmod; /* * @warning May throw a filesystem exception */ boost::filesystem::create_directories (path); nano::set_secure_perm_directory (path, error_chmod); logging.max_size = std::numeric_limits<std::uintmax_t>::max (); logging.init (path); node = std::make_shared<nano::node> (init, *io_context, peering_port, path, alarm, logging, work); } nano::inactive_node::~inactive_node () { node->stop (); } nano::message_buffer_manager::message_buffer_manager (nano::stat & stats_a, size_t size, size_t count) : stats (stats_a), free (count), full (count), slab (size * count), entries (count), stopped (false) { assert (count > 0); assert (size > 0); auto slab_data (slab.data ()); auto entry_data (entries.data ()); for (auto i (0); i < count; ++i, ++entry_data) { *entry_data = { slab_data + i * size, 0, nano::endpoint () }; free.push_back (entry_data); } } nano::message_buffer * nano::message_buffer_manager::allocate () { std::unique_lock<std::mutex> lock (mutex); while (!stopped && free.empty () && full.empty ()) { 
stats.inc (nano::stat::type::udp, nano::stat::detail::blocking, nano::stat::dir::in); condition.wait (lock); } nano::message_buffer * result (nullptr); if (!free.empty ()) { result = free.front (); free.pop_front (); } if (result == nullptr && !full.empty ()) { result = full.front (); full.pop_front (); stats.inc (nano::stat::type::udp, nano::stat::detail::overflow, nano::stat::dir::in); } release_assert (result || stopped); return result; } void nano::message_buffer_manager::enqueue (nano::message_buffer * data_a) { assert (data_a != nullptr); { std::lock_guard<std::mutex> lock (mutex); full.push_back (data_a); } condition.notify_all (); } nano::message_buffer * nano::message_buffer_manager::dequeue () { std::unique_lock<std::mutex> lock (mutex); while (!stopped && full.empty ()) { condition.wait (lock); } nano::message_buffer * result (nullptr); if (!full.empty ()) { result = full.front (); full.pop_front (); } return result; } void nano::message_buffer_manager::release (nano::message_buffer * data_a) { assert (data_a != nullptr); { std::lock_guard<std::mutex> lock (mutex); free.push_back (data_a); } condition.notify_all (); } void nano::message_buffer_manager::stop () { { std::lock_guard<std::mutex> lock (mutex); stopped = true; } condition.notify_all (); }
1
15,364
It may be better to make `stopped` atomic; then the loop can be just `for (auto i (node.store.latest_begin (transaction_a, next_frontier_account)), n (node.store.latest_end ()); i != n && !stopped && elections_count < max_elections; ++i)`
nanocurrency-nano-node
cpp
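The review comment in the record above suggests turning `stopped` into an atomic flag so the frontier-confirmation loop can test it without holding the class mutex. Below is a minimal, self-contained C++ sketch of that idea; the class, member, and parameter names (`confirm_frontiers`, `max_elections`, the iterator pair) are stand-ins modelled on the quoted snippet, not the actual nano node sources.

```cpp
#include <atomic>
#include <cstddef>

// Sketch only: mirrors the shape of nano::active_transactions, not its real API.
class active_transactions_sketch
{
public:
	std::atomic<bool> stopped{ false }; // atomic, so readers need no mutex

	void stop ()
	{
		stopped.store (true); // may be called concurrently from another thread
	}

	template <typename Iterator>
	void confirm_frontiers (Iterator begin, Iterator end, std::size_t max_elections)
	{
		std::size_t elections_count (0);
		// The flag is read directly in the loop condition; no lock is taken per iteration.
		for (auto i (begin); i != end && !stopped.load () && elections_count < max_elections; ++i)
		{
			++elections_count;
		}
	}
};

int main ()
{
	active_transactions_sketch active;
	int frontiers[3] = { 1, 2, 3 };
	active.confirm_frontiers (frontiers, frontiers + 3, 2); // stops after two elections
	active.stop (); // any loop still running observes the flag on its next check
}
```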
@@ -165,7 +165,7 @@ folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> GraphStorageClient::ad evb, std::move(requests), [=](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::AddEdgesRequest& r) { - return useToss ? client->future_addEdgesAtomic(r) : client->future_addEdges(r); + return useToss ? client->future_chainAddEdges(r) : client->future_addEdges(r); }); }
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "clients/storage/GraphStorageClient.h" #include "common/base/Base.h" namespace nebula { namespace storage { folly::SemiFuture<StorageRpcResponse<cpp2::GetNeighborsResponse>> GraphStorageClient::getNeighbors( GraphSpaceID space, SessionID session, ExecutionPlanID plan, std::vector<std::string> colNames, const std::vector<Row>& vertices, const std::vector<EdgeType>& edgeTypes, cpp2::EdgeDirection edgeDirection, const std::vector<cpp2::StatProp>* statProps, const std::vector<cpp2::VertexProp>* vertexProps, const std::vector<cpp2::EdgeProp>* edgeProps, const std::vector<cpp2::Expr>* expressions, bool dedup, bool random, const std::vector<cpp2::OrderBy>& orderBy, int64_t limit, const Expression* filter, folly::EventBase* evb) { auto cbStatus = getIdFromRow(space, false); if (!cbStatus.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::GetNeighborsResponse>>( std::runtime_error(cbStatus.status().toString())); } auto status = clusterIdsToHosts(space, vertices, std::move(cbStatus).value()); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::GetNeighborsResponse>>( std::runtime_error(status.status().toString())); } auto& clusters = status.value(); auto common = makeRequestCommon(session, plan); std::unordered_map<HostAddr, cpp2::GetNeighborsRequest> requests; for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_column_names(colNames); req.set_parts(std::move(c.second)); req.set_common(common); cpp2::TraverseSpec spec; spec.set_edge_types(edgeTypes); spec.set_edge_direction(edgeDirection); spec.set_dedup(dedup); spec.set_random(random); if (statProps != nullptr) { spec.set_stat_props(*statProps); } if (vertexProps != nullptr) { spec.set_vertex_props(*vertexProps); } if (edgeProps != nullptr) { spec.set_edge_props(*edgeProps); } if (expressions != nullptr) { spec.set_expressions(*expressions); } if (!orderBy.empty()) { spec.set_order_by(orderBy); } spec.set_limit(limit); if (filter != nullptr) { spec.set_filter(filter->encode()); } req.set_traverse_spec(std::move(spec)); } return collectResponse( evb, std::move(requests), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::GetNeighborsRequest& r) { return client->future_getNeighbors(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> GraphStorageClient::addVertices( GraphSpaceID space, SessionID session, ExecutionPlanID plan, std::vector<cpp2::NewVertex> vertices, std::unordered_map<TagID, std::vector<std::string>> propNames, bool ifNotExists, folly::EventBase* evb) { auto cbStatus = getIdFromNewVertex(space); if (!cbStatus.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(cbStatus.status().toString())); } auto status = clusterIdsToHosts(space, std::move(vertices), std::move(cbStatus).value()); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(status.status().toString())); } auto& clusters = status.value(); std::unordered_map<HostAddr, cpp2::AddVerticesRequest> requests; auto common = makeRequestCommon(session, plan); for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_if_not_exists(ifNotExists); req.set_parts(std::move(c.second)); req.set_prop_names(propNames); req.set_common(common); } 
VLOG(3) << "requests size " << requests.size(); return collectResponse( evb, std::move(requests), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::AddVerticesRequest& r) { return client->future_addVertices(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> GraphStorageClient::addEdges( GraphSpaceID space, SessionID session, ExecutionPlanID plan, std::vector<cpp2::NewEdge> edges, std::vector<std::string> propNames, bool ifNotExists, folly::EventBase* evb, bool useToss) { auto cbStatus = getIdFromNewEdge(space); if (!cbStatus.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(cbStatus.status().toString())); } auto status = clusterIdsToHosts(space, std::move(edges), std::move(cbStatus).value()); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(status.status().toString())); } auto& clusters = status.value(); std::unordered_map<HostAddr, cpp2::AddEdgesRequest> requests; auto common = makeRequestCommon(session, plan); for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_if_not_exists(ifNotExists); req.set_parts(std::move(c.second)); req.set_prop_names(propNames); req.set_common(common); } return collectResponse( evb, std::move(requests), [=](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::AddEdgesRequest& r) { return useToss ? client->future_addEdgesAtomic(r) : client->future_addEdges(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::GetPropResponse>> GraphStorageClient::getProps( GraphSpaceID space, SessionID session, ExecutionPlanID plan, const DataSet& input, const std::vector<cpp2::VertexProp>* vertexProps, const std::vector<cpp2::EdgeProp>* edgeProps, const std::vector<cpp2::Expr>* expressions, bool dedup, const std::vector<cpp2::OrderBy>& orderBy, int64_t limit, const Expression* filter, folly::EventBase* evb) { auto cbStatus = getIdFromRow(space, edgeProps != nullptr); if (!cbStatus.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::GetPropResponse>>( std::runtime_error(cbStatus.status().toString())); } auto status = clusterIdsToHosts(space, input.rows, std::move(cbStatus).value()); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::GetPropResponse>>( std::runtime_error(status.status().toString())); } auto& clusters = status.value(); std::unordered_map<HostAddr, cpp2::GetPropRequest> requests; auto common = makeRequestCommon(session, plan); for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); req.set_dedup(dedup); if (vertexProps != nullptr) { req.set_vertex_props(*vertexProps); } if (edgeProps != nullptr) { req.set_edge_props(*edgeProps); } if (expressions != nullptr) { req.set_expressions(*expressions); } if (!orderBy.empty()) { req.set_order_by(orderBy); } req.set_limit(limit); if (filter != nullptr) { req.set_filter(filter->encode()); } req.set_common(common); } return collectResponse(evb, std::move(requests), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::GetPropRequest& r) { return client->future_getProps(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> GraphStorageClient::deleteEdges( GraphSpaceID space, SessionID session, ExecutionPlanID plan, std::vector<cpp2::EdgeKey> edges, folly::EventBase* evb) { auto cbStatus = getIdFromEdgeKey(space); if (!cbStatus.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( 
std::runtime_error(cbStatus.status().toString())); } auto status = clusterIdsToHosts(space, std::move(edges), std::move(cbStatus).value()); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(status.status().toString())); } auto& clusters = status.value(); std::unordered_map<HostAddr, cpp2::DeleteEdgesRequest> requests; auto common = makeRequestCommon(session, plan); for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); req.set_common(common); } return collectResponse( evb, std::move(requests), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::DeleteEdgesRequest& r) { return client->future_deleteEdges(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> GraphStorageClient::deleteVertices( GraphSpaceID space, SessionID session, ExecutionPlanID plan, std::vector<Value> ids, folly::EventBase* evb) { auto cbStatus = getIdFromValue(space); if (!cbStatus.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(cbStatus.status().toString())); } auto status = clusterIdsToHosts(space, std::move(ids), std::move(cbStatus).value()); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(status.status().toString())); } auto& clusters = status.value(); std::unordered_map<HostAddr, cpp2::DeleteVerticesRequest> requests; auto common = makeRequestCommon(session, plan); for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); req.set_common(common); } return collectResponse( evb, std::move(requests), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::DeleteVerticesRequest& r) { return client->future_deleteVertices(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::ExecResponse>> GraphStorageClient::deleteTags( GraphSpaceID space, SessionID session, ExecutionPlanID plan, std::vector<cpp2::DelTags> delTags, folly::EventBase* evb) { auto cbStatus = getIdFromDelTags(space); if (!cbStatus.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(cbStatus.status().toString())); } auto status = clusterIdsToHosts(space, std::move(delTags), std::move(cbStatus).value()); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::ExecResponse>>( std::runtime_error(status.status().toString())); } auto& clusters = status.value(); std::unordered_map<HostAddr, cpp2::DeleteTagsRequest> requests; auto common = makeRequestCommon(session, plan); for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); req.set_common(common); } return collectResponse( evb, std::move(requests), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::DeleteTagsRequest& r) { return client->future_deleteTags(r); }); } folly::Future<StatusOr<storage::cpp2::UpdateResponse>> GraphStorageClient::updateVertex( GraphSpaceID space, SessionID session, ExecutionPlanID plan, Value vertexId, TagID tagId, std::vector<cpp2::UpdatedProp> updatedProps, bool insertable, std::vector<std::string> returnProps, std::string condition, folly::EventBase* evb) { auto cbStatus = getIdFromValue(space); if (!cbStatus.ok()) { return folly::makeFuture<StatusOr<storage::cpp2::UpdateResponse>>(cbStatus.status()); } std::pair<HostAddr, cpp2::UpdateVertexRequest> request; DCHECK(!!metaClient_); auto status = 
metaClient_->partsNum(space); if (!status.ok()) { return Status::Error("Space not found, spaceid: %d", space); } auto numParts = status.value(); status = metaClient_->partId(numParts, std::move(cbStatus).value()(vertexId)); if (!status.ok()) { return folly::makeFuture<StatusOr<storage::cpp2::UpdateResponse>>(status.status()); } auto part = status.value(); auto host = this->getLeader(space, part); if (!host.ok()) { return folly::makeFuture<StatusOr<storage::cpp2::UpdateResponse>>(host.status()); } request.first = std::move(host).value(); cpp2::UpdateVertexRequest req; req.set_space_id(space); req.set_vertex_id(vertexId); req.set_tag_id(tagId); req.set_part_id(part); req.set_updated_props(std::move(updatedProps)); req.set_return_props(std::move(returnProps)); req.set_insertable(insertable); req.set_common(makeRequestCommon(session, plan)); if (condition.size() > 0) { req.set_condition(std::move(condition)); } request.second = std::move(req); return getResponse( evb, std::move(request), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::UpdateVertexRequest& r) { return client->future_updateVertex(r); }); } folly::Future<StatusOr<storage::cpp2::UpdateResponse>> GraphStorageClient::updateEdge( GraphSpaceID space, SessionID session, ExecutionPlanID plan, storage::cpp2::EdgeKey edgeKey, std::vector<cpp2::UpdatedProp> updatedProps, bool insertable, std::vector<std::string> returnProps, std::string condition, folly::EventBase* evb) { auto cbStatus = getIdFromEdgeKey(space); if (!cbStatus.ok()) { return folly::makeFuture<StatusOr<storage::cpp2::UpdateResponse>>(cbStatus.status()); } std::pair<HostAddr, cpp2::UpdateEdgeRequest> request; DCHECK(!!metaClient_); auto status = metaClient_->partsNum(space); if (!status.ok()) { return Status::Error("Space not found, spaceid: %d", space); } auto numParts = status.value(); status = metaClient_->partId(numParts, std::move(cbStatus).value()(edgeKey)); if (!status.ok()) { return folly::makeFuture<StatusOr<storage::cpp2::UpdateResponse>>(status.status()); } auto part = status.value(); auto host = this->getLeader(space, part); if (!host.ok()) { return folly::makeFuture<StatusOr<storage::cpp2::UpdateResponse>>(host.status()); } request.first = std::move(host).value(); cpp2::UpdateEdgeRequest req; req.set_space_id(space); req.set_edge_key(edgeKey); req.set_part_id(part); req.set_updated_props(std::move(updatedProps)); req.set_return_props(std::move(returnProps)); req.set_insertable(insertable); req.set_common(makeRequestCommon(session, plan)); if (condition.size() > 0) { req.set_condition(std::move(condition)); } request.second = std::move(req); return getResponse(evb, std::move(request), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::UpdateEdgeRequest& r) { return client->future_updateEdge(r); }); } folly::Future<StatusOr<cpp2::GetUUIDResp>> GraphStorageClient::getUUID(GraphSpaceID space, const std::string& name, folly::EventBase* evb) { std::pair<HostAddr, cpp2::GetUUIDReq> request; DCHECK(!!metaClient_); auto status = metaClient_->partsNum(space); if (!status.ok()) { return Status::Error("Space not found, spaceid: %d", space); } auto numParts = status.value(); status = metaClient_->partId(numParts, name); if (!status.ok()) { return folly::makeFuture<StatusOr<cpp2::GetUUIDResp>>(status.status()); } auto part = status.value(); auto host = this->getLeader(space, part); if (!host.ok()) { return folly::makeFuture<StatusOr<storage::cpp2::GetUUIDResp>>(host.status()); } request.first = std::move(host).value(); cpp2::GetUUIDReq req; 
req.set_space_id(space); req.set_part_id(part); req.set_name(name); request.second = std::move(req); return getResponse(evb, std::move(request), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::GetUUIDReq& r) { return client->future_getUUID(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::LookupIndexResp>> GraphStorageClient::lookupIndex( GraphSpaceID space, SessionID session, ExecutionPlanID plan, const std::vector<storage::cpp2::IndexQueryContext>& contexts, bool isEdge, int32_t tagOrEdge, const std::vector<std::string>& returnCols, folly::EventBase* evb) { // TODO(sky) : instead of isEdge and tagOrEdge to nebula::cpp2::SchemaID for graph layer. auto status = getHostParts(space); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::LookupIndexResp>>( std::runtime_error(status.status().toString())); } nebula::cpp2::SchemaID schemaId; if (isEdge) { schemaId.set_edge_type(tagOrEdge); } else { schemaId.set_tag_id(tagOrEdge); } auto& clusters = status.value(); std::unordered_map<HostAddr, cpp2::LookupIndexRequest> requests; auto common = makeRequestCommon(session, plan); for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); req.set_return_columns(returnCols); cpp2::IndexSpec spec; spec.set_contexts(contexts); spec.set_schema_id(schemaId); req.set_indices(spec); req.set_common(common); } return collectResponse( evb, std::move(requests), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::LookupIndexRequest& r) { return client->future_lookupIndex(r); }); } folly::SemiFuture<StorageRpcResponse<cpp2::GetNeighborsResponse>> GraphStorageClient::lookupAndTraverse(GraphSpaceID space, SessionID session, ExecutionPlanID plan, cpp2::IndexSpec indexSpec, cpp2::TraverseSpec traverseSpec, folly::EventBase* evb) { auto status = getHostParts(space); if (!status.ok()) { return folly::makeFuture<StorageRpcResponse<cpp2::GetNeighborsResponse>>( std::runtime_error(status.status().toString())); } auto& clusters = status.value(); std::unordered_map<HostAddr, cpp2::LookupAndTraverseRequest> requests; auto common = makeRequestCommon(session, plan); for (auto& c : clusters) { auto& host = c.first; auto& req = requests[host]; req.set_space_id(space); req.set_parts(std::move(c.second)); req.set_indices(indexSpec); req.set_traverse_spec(traverseSpec); req.set_common(common); } return collectResponse( evb, std::move(requests), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::LookupAndTraverseRequest& r) { return client->future_lookupAndTraverse(r); }); } folly::Future<StatusOr<cpp2::ScanEdgeResponse>> GraphStorageClient::scanEdge( cpp2::ScanEdgeRequest req, folly::EventBase* evb) { std::pair<HostAddr, cpp2::ScanEdgeRequest> request; auto host = this->getLeader(req.get_space_id(), req.get_part_id()); if (!host.ok()) { return folly::makeFuture<StatusOr<cpp2::ScanEdgeResponse>>(host.status()); } request.first = std::move(host).value(); request.second = std::move(req); return getResponse(evb, std::move(request), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::ScanEdgeRequest& r) { return client->future_scanEdge(r); }); } folly::Future<StatusOr<cpp2::ScanVertexResponse>> GraphStorageClient::scanVertex( cpp2::ScanVertexRequest req, folly::EventBase* evb) { std::pair<HostAddr, cpp2::ScanVertexRequest> request; auto host = this->getLeader(req.get_space_id(), req.get_part_id()); if (!host.ok()) { return folly::makeFuture<StatusOr<cpp2::ScanVertexResponse>>(host.status()); } 
request.first = std::move(host).value(); request.second = std::move(req); return getResponse(evb, std::move(request), [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::ScanVertexRequest& r) { return client->future_scanVertex(r); }); } StatusOr<std::function<const VertexID&(const Row&)>> GraphStorageClient::getIdFromRow( GraphSpaceID space, bool isEdgeProps) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { return vidTypeStatus.status(); } auto vidType = std::move(vidTypeStatus).value(); std::function<const VertexID&(const Row&)> cb; if (vidType == meta::cpp2::PropertyType::INT64) { if (isEdgeProps) { cb = [](const Row& r) -> const VertexID& { // The first column has to be the src, the thrid column has to be the // dst DCHECK_EQ(Value::Type::INT, r.values[0].type()); DCHECK_EQ(Value::Type::INT, r.values[3].type()); auto& mutableR = const_cast<Row&>(r); mutableR.values[0] = Value(std::string(reinterpret_cast<const char*>(&r.values[0].getInt()), 8)); mutableR.values[3] = Value(std::string(reinterpret_cast<const char*>(&r.values[3].getInt()), 8)); return mutableR.values[0].getStr(); }; } else { cb = [](const Row& r) -> const VertexID& { // The first column has to be the vid DCHECK_EQ(Value::Type::INT, r.values[0].type()); auto& mutableR = const_cast<Row&>(r); mutableR.values[0] = Value(std::string(reinterpret_cast<const char*>(&r.values[0].getInt()), 8)); return mutableR.values[0].getStr(); }; } } else if (vidType == meta::cpp2::PropertyType::FIXED_STRING) { cb = [](const Row& r) -> const VertexID& { // The first column has to be the vid DCHECK_EQ(Value::Type::STRING, r.values[0].type()); return r.values[0].getStr(); }; } else { return Status::Error("Only support integer/string type vid."); } return cb; } StatusOr<std::function<const VertexID&(const cpp2::NewVertex&)>> GraphStorageClient::getIdFromNewVertex(GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { return vidTypeStatus.status(); } auto vidType = std::move(vidTypeStatus).value(); std::function<const VertexID&(const cpp2::NewVertex&)> cb; if (vidType == meta::cpp2::PropertyType::INT64) { cb = [](const cpp2::NewVertex& v) -> const VertexID& { DCHECK_EQ(Value::Type::INT, v.get_id().type()); auto& mutableV = const_cast<cpp2::NewVertex&>(v); mutableV.set_id(Value(std::string(reinterpret_cast<const char*>(&v.get_id().getInt()), 8))); return mutableV.get_id().getStr(); }; } else if (vidType == meta::cpp2::PropertyType::FIXED_STRING) { cb = [](const cpp2::NewVertex& v) -> const VertexID& { DCHECK_EQ(Value::Type::STRING, v.get_id().type()); return v.get_id().getStr(); }; } else { return Status::Error("Only support integer/string type vid."); } return cb; } StatusOr<std::function<const VertexID&(const cpp2::NewEdge&)>> GraphStorageClient::getIdFromNewEdge( GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { return vidTypeStatus.status(); } auto vidType = std::move(vidTypeStatus).value(); std::function<const VertexID&(const cpp2::NewEdge&)> cb; if (vidType == meta::cpp2::PropertyType::INT64) { cb = [](const cpp2::NewEdge& e) -> const VertexID& { DCHECK_EQ(Value::Type::INT, e.get_key().get_src().type()); DCHECK_EQ(Value::Type::INT, e.get_key().get_dst().type()); auto& mutableE = const_cast<cpp2::NewEdge&>(e); (*mutableE.key_ref()) .src_ref() .emplace(Value( std::string(reinterpret_cast<const char*>(&e.get_key().get_src().getInt()), 8))); (*mutableE.key_ref()) .dst_ref() .emplace(Value( 
std::string(reinterpret_cast<const char*>(&e.get_key().get_dst().getInt()), 8))); return mutableE.get_key().get_src().getStr(); }; } else if (vidType == meta::cpp2::PropertyType::FIXED_STRING) { cb = [](const cpp2::NewEdge& e) -> const VertexID& { DCHECK_EQ(Value::Type::STRING, e.get_key().get_src().type()); DCHECK_EQ(Value::Type::STRING, e.get_key().get_dst().type()); return e.get_key().get_src().getStr(); }; } else { return Status::Error("Only support integer/string type vid."); } return cb; } StatusOr<std::function<const VertexID&(const cpp2::EdgeKey&)>> GraphStorageClient::getIdFromEdgeKey( GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { return vidTypeStatus.status(); } auto vidType = std::move(vidTypeStatus).value(); std::function<const VertexID&(const cpp2::EdgeKey&)> cb; if (vidType == meta::cpp2::PropertyType::INT64) { cb = [](const cpp2::EdgeKey& eKey) -> const VertexID& { DCHECK_EQ(Value::Type::INT, eKey.get_src().type()); DCHECK_EQ(Value::Type::INT, eKey.get_dst().type()); auto& mutableEK = const_cast<cpp2::EdgeKey&>(eKey); mutableEK.set_src( Value(std::string(reinterpret_cast<const char*>(&eKey.get_src().getInt()), 8))); mutableEK.set_dst( Value(std::string(reinterpret_cast<const char*>(&eKey.get_dst().getInt()), 8))); return mutableEK.get_src().getStr(); }; } else if (vidType == meta::cpp2::PropertyType::FIXED_STRING) { cb = [](const cpp2::EdgeKey& eKey) -> const VertexID& { DCHECK_EQ(Value::Type::STRING, eKey.get_src().type()); DCHECK_EQ(Value::Type::STRING, eKey.get_dst().type()); return eKey.get_src().getStr(); }; } else { return Status::Error("Only support integer/string type vid."); } return cb; } StatusOr<std::function<const VertexID&(const Value&)>> GraphStorageClient::getIdFromValue( GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { return vidTypeStatus.status(); } auto vidType = std::move(vidTypeStatus).value(); std::function<const VertexID&(const Value&)> cb; if (vidType == meta::cpp2::PropertyType::INT64) { cb = [](const Value& v) -> const VertexID& { DCHECK_EQ(Value::Type::INT, v.type()); auto& mutableV = const_cast<Value&>(v); mutableV = Value(std::string(reinterpret_cast<const char*>(&v.getInt()), 8)); return mutableV.getStr(); }; } else if (vidType == meta::cpp2::PropertyType::FIXED_STRING) { cb = [](const Value& v) -> const VertexID& { DCHECK_EQ(Value::Type::STRING, v.type()); return v.getStr(); }; } else { return Status::Error("Only support integer/string type vid."); } return cb; } StatusOr<std::function<const VertexID&(const cpp2::DelTags&)>> GraphStorageClient::getIdFromDelTags( GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { return vidTypeStatus.status(); } auto vidType = std::move(vidTypeStatus).value(); std::function<const VertexID&(const cpp2::DelTags&)> cb; if (vidType == meta::cpp2::PropertyType::INT64) { cb = [](const cpp2::DelTags& delTags) -> const VertexID& { const auto& vId = delTags.get_id(); DCHECK_EQ(Value::Type::INT, vId.type()); auto& mutableV = const_cast<Value&>(vId); mutableV = Value(std::string(reinterpret_cast<const char*>(&vId.getInt()), 8)); return mutableV.getStr(); }; } else if (vidType == meta::cpp2::PropertyType::FIXED_STRING) { cb = [](const cpp2::DelTags& delTags) -> const VertexID& { const auto& vId = delTags.get_id(); DCHECK_EQ(Value::Type::STRING, vId.type()); return vId.getStr(); }; } else { return Status::Error("Only support integer/string type 
vid."); } return cb; } cpp2::RequestCommon GraphStorageClient::makeRequestCommon(SessionID sessionId, ExecutionPlanID planId) { cpp2::RequestCommon common; common.set_session_id(sessionId); common.set_plan_id(planId); return common; } } // namespace storage } // namespace nebula
1
30,350
In the `src/storage/InternalStorageServiceHandler.h` file, shouldn't the client used for `client->future_chainAddEdges` be an InterStorageServiceAsyncClient?
vesoft-inc-nebula
cpp
@@ -22,6 +22,9 @@ import ( "strconv" "strings" + "github.com/prometheus/prometheus/pkg/rulefmt" + thanostypes "github.com/thanos-io/thanos/pkg/store/storepb" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" namespacelabeler "github.com/prometheus-operator/prometheus-operator/pkg/namespace-labeler"
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "context" "fmt" "reflect" "sort" "strconv" "strings" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" namespacelabeler "github.com/prometheus-operator/prometheus-operator/pkg/namespace-labeler" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/ghodss/yaml" "github.com/go-kit/log/level" "github.com/pkg/errors" ) const labelPrometheusName = "prometheus-name" // The maximum `Data` size of a ConfigMap seems to differ between // environments. This is probably due to different meta data sizes which count // into the overall maximum size of a ConfigMap. Thereby lets leave a // large buffer. var maxConfigMapDataSize = int(float64(v1.MaxSecretSize) * 0.5) func (c *Operator) createOrUpdateRuleConfigMaps(ctx context.Context, p *monitoringv1.Prometheus) ([]string, error) { cClient := c.kclient.CoreV1().ConfigMaps(p.Namespace) namespaces, err := c.selectRuleNamespaces(p) if err != nil { return nil, err } newRules, err := c.selectRules(p, namespaces) if err != nil { return nil, err } currentConfigMapList, err := cClient.List(ctx, prometheusRulesConfigMapSelector(p.Name)) if err != nil { return nil, err } currentConfigMaps := currentConfigMapList.Items currentRules := map[string]string{} for _, cm := range currentConfigMaps { for ruleFileName, ruleFile := range cm.Data { currentRules[ruleFileName] = ruleFile } } equal := reflect.DeepEqual(newRules, currentRules) if equal && len(currentConfigMaps) != 0 { level.Debug(c.logger).Log( "msg", "no PrometheusRule changes", "namespace", p.Namespace, "prometheus", p.Name, ) currentConfigMapNames := []string{} for _, cm := range currentConfigMaps { currentConfigMapNames = append(currentConfigMapNames, cm.Name) } return currentConfigMapNames, nil } newConfigMaps, err := makeRulesConfigMaps(p, newRules) if err != nil { return nil, errors.Wrap(err, "failed to make rules ConfigMaps") } newConfigMapNames := []string{} for _, cm := range newConfigMaps { newConfigMapNames = append(newConfigMapNames, cm.Name) } if len(currentConfigMaps) == 0 { level.Debug(c.logger).Log( "msg", "no PrometheusRule configmap found, creating new one", "namespace", p.Namespace, "prometheus", p.Name, ) for _, cm := range newConfigMaps { _, err = cClient.Create(ctx, &cm, metav1.CreateOptions{}) if err != nil { return nil, errors.Wrapf(err, "failed to create ConfigMap '%v'", cm.Name) } } return newConfigMapNames, nil } // Simply deleting old ConfigMaps and creating new ones for now. Could be // replaced by logic that only deletes obsolete ConfigMaps in the future. 
for _, cm := range currentConfigMaps { err := cClient.Delete(ctx, cm.Name, metav1.DeleteOptions{}) if err != nil { return nil, errors.Wrapf(err, "failed to delete current ConfigMap '%v'", cm.Name) } } level.Debug(c.logger).Log( "msg", "updating PrometheusRule", "namespace", p.Namespace, "prometheus", p.Name, ) for _, cm := range newConfigMaps { _, err = cClient.Create(ctx, &cm, metav1.CreateOptions{}) if err != nil { return nil, errors.Wrapf(err, "failed to create new ConfigMap '%v'", cm.Name) } } return newConfigMapNames, nil } func prometheusRulesConfigMapSelector(prometheusName string) metav1.ListOptions { return metav1.ListOptions{LabelSelector: fmt.Sprintf("%v=%v", labelPrometheusName, prometheusName)} } func (c *Operator) selectRuleNamespaces(p *monitoringv1.Prometheus) ([]string, error) { namespaces := []string{} // If 'RuleNamespaceSelector' is nil, only check own namespace. if p.Spec.RuleNamespaceSelector == nil { namespaces = append(namespaces, p.Namespace) } else { ruleNamespaceSelector, err := metav1.LabelSelectorAsSelector(p.Spec.RuleNamespaceSelector) if err != nil { return namespaces, errors.Wrap(err, "convert rule namespace label selector to selector") } namespaces, err = c.listMatchingNamespaces(ruleNamespaceSelector) if err != nil { return nil, err } } level.Debug(c.logger).Log( "msg", "selected RuleNamespaces", "namespaces", strings.Join(namespaces, ","), "namespace", p.Namespace, "prometheus", p.Name, ) return namespaces, nil } func (c *Operator) selectRules(p *monitoringv1.Prometheus, namespaces []string) (map[string]string, error) { rules := map[string]string{} ruleSelector, err := metav1.LabelSelectorAsSelector(p.Spec.RuleSelector) if err != nil { return rules, errors.Wrap(err, "convert rule label selector to selector") } nsLabeler := namespacelabeler.New( p.Spec.EnforcedNamespaceLabel, p.Spec.PrometheusRulesExcludedFromEnforce, true, ) for _, ns := range namespaces { var marshalErr error err := c.ruleInfs.ListAllByNamespace(ns, ruleSelector, func(obj interface{}) { promRule := obj.(*monitoringv1.PrometheusRule).DeepCopy() if err := nsLabeler.EnforceNamespaceLabel(promRule); err != nil { marshalErr = err return } content, err := generateContent(promRule.Spec) if err != nil { marshalErr = err return } rules[fmt.Sprintf("%v-%v.yaml", promRule.Namespace, promRule.Name)] = content }) if err != nil { return nil, err } if marshalErr != nil { return nil, marshalErr } } ruleNames := []string{} for name := range rules { ruleNames = append(ruleNames, name) } level.Debug(c.logger).Log( "msg", "selected Rules", "rules", strings.Join(ruleNames, ","), "namespace", p.Namespace, "prometheus", p.Name, ) if pKey, ok := c.keyFunc(p); ok { c.metrics.SetSelectedResources(pKey, monitoringv1.PrometheusRuleKind, len(rules)) c.metrics.SetRejectedResources(pKey, monitoringv1.PrometheusRuleKind, 0) } return rules, nil } func generateContent(promRule monitoringv1.PrometheusRuleSpec) (string, error) { content, err := yaml.Marshal(promRule) if err != nil { return "", errors.Wrap(err, "failed to marshal content") } return string(content), nil } // makeRulesConfigMaps takes a Prometheus configuration and rule files and // returns a list of Kubernetes ConfigMaps to be later on mounted into the // Prometheus instance. // If the total size of rule files exceeds the Kubernetes ConfigMap limit, // they are split up via the simple first-fit [1] bin packing algorithm. In the // future this can be replaced by a more sophisticated algorithm, but for now // simplicity should be sufficient. 
// [1] https://en.wikipedia.org/wiki/Bin_packing_problem#First-fit_algorithm func makeRulesConfigMaps(p *monitoringv1.Prometheus, ruleFiles map[string]string) ([]v1.ConfigMap, error) { //check if none of the rule files is too large for a single ConfigMap for filename, file := range ruleFiles { if len(file) > maxConfigMapDataSize { return nil, errors.Errorf( "rule file '%v' is too large for a single Kubernetes ConfigMap", filename, ) } } buckets := []map[string]string{ {}, } currBucketIndex := 0 // To make bin packing algorithm deterministic, sort ruleFiles filenames and // iterate over filenames instead of ruleFiles map (not deterministic). fileNames := []string{} for n := range ruleFiles { fileNames = append(fileNames, n) } sort.Strings(fileNames) for _, filename := range fileNames { // If rule file doesn't fit into current bucket, create new bucket. if bucketSize(buckets[currBucketIndex])+len(ruleFiles[filename]) > maxConfigMapDataSize { buckets = append(buckets, map[string]string{}) currBucketIndex++ } buckets[currBucketIndex][filename] = ruleFiles[filename] } ruleFileConfigMaps := []v1.ConfigMap{} for i, bucket := range buckets { cm := makeRulesConfigMap(p, bucket) cm.Name = cm.Name + "-" + strconv.Itoa(i) ruleFileConfigMaps = append(ruleFileConfigMaps, cm) } return ruleFileConfigMaps, nil } func bucketSize(bucket map[string]string) int { totalSize := 0 for _, v := range bucket { totalSize += len(v) } return totalSize } func makeRulesConfigMap(p *monitoringv1.Prometheus, ruleFiles map[string]string) v1.ConfigMap { boolTrue := true labels := map[string]string{labelPrometheusName: p.Name} for k, v := range managedByOperatorLabels { labels[k] = v } return v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: prometheusRuleConfigMapName(p.Name), Labels: labels, OwnerReferences: []metav1.OwnerReference{ { APIVersion: p.APIVersion, BlockOwnerDeletion: &boolTrue, Controller: &boolTrue, Kind: p.Kind, Name: p.Name, UID: p.UID, }, }, }, Data: ruleFiles, } } func prometheusRuleConfigMapName(prometheusName string) string { return "prometheus-" + prometheusName + "-rulefiles" }
1
16,495
`go.sum` needs to be updated for this. Please run `go mod tidy`
prometheus-operator-prometheus-operator
go
@@ -28,6 +28,17 @@ const ( MachineFinalizer = "awsmachine.infrastructure.cluster.x-k8s.io" ) +// SecretBackend defines variants for backend secret storage. +type SecretBackend string + +var ( + // SecretBackendSSMParameterStore defines AWS Systems Manager Parameter Store as the secret backend + SecretBackendSSMParameterStore = SecretBackend("ssm-parameter-store") + + // SecretBackendSecretsManager defines AWS Secrets Manager as the secret backend + SecretBackendSecretsManager = SecretBackend("secrets-manager") +) + // AWSMachineSpec defines the desired state of AWSMachine type AWSMachineSpec struct { // ProviderID is the unique identifier as specified by the cloud provider.
1
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/errors" ) const ( // MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before // removing it from the apiserver. MachineFinalizer = "awsmachine.infrastructure.cluster.x-k8s.io" ) // AWSMachineSpec defines the desired state of AWSMachine type AWSMachineSpec struct { // ProviderID is the unique identifier as specified by the cloud provider. ProviderID *string `json:"providerID,omitempty"` // AMI is the reference to the AMI from which to create the machine instance. AMI AWSResourceReference `json:"ami,omitempty"` // ImageLookupFormat is the AMI naming format to look up the image for this // machine It will be ignored if an explicit AMI is set. Supports // substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and // kubernetes version, respectively. The BaseOS will be the value in // ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as // defined by the packages produced by kubernetes/release without v as a // prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default // image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up // searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a // Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See // also: https://golang.org/pkg/text/template/ // +optional ImageLookupFormat string `json:"imageLookupFormat,omitempty"` // ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set. ImageLookupOrg string `json:"imageLookupOrg,omitempty"` // ImageLookupBaseOS is the name of the base operating system to use for // image lookup the AMI is not set. ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"` // InstanceType is the type of instance to create. Example: m4.xlarge InstanceType string `json:"instanceType,omitempty"` // AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the // AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the // AWSMachine's value takes precedence. // +optional AdditionalTags Tags `json:"additionalTags,omitempty"` // IAMInstanceProfile is a name of an IAM instance profile to assign to the instance // +optional IAMInstanceProfile string `json:"iamInstanceProfile,omitempty"` // PublicIP specifies whether the instance should get a public IP. // Precedence for this setting is as follows: // 1. This field if set // 2. Cluster/flavor setting // 3. Subnet default // +optional PublicIP *bool `json:"publicIP,omitempty"` // AdditionalSecurityGroups is an array of references to security groups that should be applied to the // instance. These security groups would be set in addition to any security groups defined // at the cluster level or in the actuator. 
// +optional AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"` // FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. // For this infrastructure provider, the ID is equivalent to an AWS Availability Zone. // If multiple subnets are matched for the availability zone, the first one returned is picked. FailureDomain *string `json:"failureDomain,omitempty"` // Subnet is a reference to the subnet to use for this instance. If not specified, // the cluster subnet will be used. // +optional Subnet *AWSResourceReference `json:"subnet,omitempty"` // SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) // +optional SSHKeyName *string `json:"sshKeyName,omitempty"` // RootVolume encapsulates the configuration options for the root volume // +optional RootVolume *Volume `json:"rootVolume,omitempty"` // Configuration options for the non root storage volumes. // +optional NonRootVolumes []*Volume `json:"nonRootVolumes,omitempty"` // NetworkInterfaces is a list of ENIs to associate with the instance. // A maximum of 2 may be specified. // +optional // +kubebuilder:validation:MaxItems=2 NetworkInterfaces []string `json:"networkInterfaces,omitempty"` // UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance. // cloud-init has built-in support for gzip-compressed user data // user data stored in aws secret manager is always gzip-compressed. // // +optional UncompressedUserData *bool `json:"uncompressedUserData,omitempty"` // CloudInit defines options related to the bootstrapping systems where // CloudInit is used. // +optional CloudInit CloudInit `json:"cloudInit,omitempty"` // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances. // +optional SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` // Tenancy indicates if instance should run on shared or single-tenant hardware. // +optional // +kubebuilder:validation:Enum:=default;dedicated;host Tenancy string `json:"tenancy,omitempty"` } // CloudInit defines options related to the bootstrapping systems where // CloudInit is used. type CloudInit struct { // InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager // to ensure privacy of userdata. // By default, a cloud-init boothook shell script is prepended to download // the userdata from Secrets Manager and additionally delete the secret. InsecureSkipSecretsManager bool `json:"insecureSkipSecretsManager,omitempty"` // SecretCount is the number of secrets used to form the complete secret // +optional SecretCount int32 `json:"secretCount,omitempty"` // SecretPrefix is the prefix for the secret name. This is stored // temporarily, and deleted when the machine registers as a node against // the workload cluster. // +optional SecretPrefix string `json:"secretPrefix,omitempty"` } // AWSMachineStatus defines the observed state of AWSMachine type AWSMachineStatus struct { // Ready is true when the provider resource is ready. // +optional Ready bool `json:"ready"` // Addresses contains the AWS instance associated addresses. Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` // InstanceState is the state of the AWS instance for this machine. 
// +optional InstanceState *InstanceState `json:"instanceState,omitempty"` // FailureReason will be set in the event that there is a terminal problem // reconciling the Machine and will contain a succinct value suitable // for machine interpretation. // // This field should not be set for transitive errors that a controller // faces that are expected to be fixed automatically over // time (like service outages), but instead indicate that something is // fundamentally wrong with the Machine's spec or the configuration of // the controller, and that manual intervention is required. Examples // of terminal errors would be invalid combinations of settings in the // spec, values that are unsupported by the controller, or the // responsible controller itself being critically misconfigured. // // Any transient errors that occur during the reconciliation of Machines // can be added as events to the Machine object and/or logged in the // controller's output. // +optional FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` // FailureMessage will be set in the event that there is a terminal problem // reconciling the Machine and will contain a more verbose string suitable // for logging and human consumption. // // This field should not be set for transitive errors that a controller // faces that are expected to be fixed automatically over // time (like service outages), but instead indicate that something is // fundamentally wrong with the Machine's spec or the configuration of // the controller, and that manual intervention is required. Examples // of terminal errors would be invalid combinations of settings in the // spec, values that are unsupported by the controller, or the // responsible controller itself being critically misconfigured. // // Any transient errors that occur during the reconciliation of Machines // can be added as events to the Machine object and/or logged in the // controller's output. // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions defines current service state of the AWSMachine. 
// +optional Conditions clusterv1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:resource:path=awsmachines,scope=Namespaced,categories=cluster-api // +kubebuilder:storageversion // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSMachine belongs" // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.instanceState",description="EC2 instance state" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status" // +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".spec.providerID",description="EC2 instance ID" // +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns with this AWSMachine" // AWSMachine is the Schema for the awsmachines API type AWSMachine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec AWSMachineSpec `json:"spec,omitempty"` Status AWSMachineStatus `json:"status,omitempty"` } func (r *AWSMachine) GetConditions() clusterv1.Conditions { return r.Status.Conditions } func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) { r.Status.Conditions = conditions } // +kubebuilder:object:root=true // AWSMachineList contains a list of AWSMachine type AWSMachineList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []AWSMachine `json:"items"` } func init() { SchemeBuilder.Register(&AWSMachine{}, &AWSMachineList{}) }
1
16,848
Not sure if this was discussed previously, but is there a specific use case in mind for having this as an API field rather than a configuration option on the controller manager? In other words, would one ever want to choose different backends for individual Clusters/Machines rather than making it a global configuration for an instance of the infrastructure provider?
kubernetes-sigs-cluster-api-provider-aws
go
@@ -40,9 +40,11 @@ namespace OpenTelemetry.Metrics var options = new OtlpExporterOptions(); configure?.Invoke(options); - var metricExporter = new OtlpMetricsExporter(options); - var metricReader = new PeriodicExportingMetricReader(metricExporter, options.MetricExportIntervalMilliseconds); - return builder.AddMetricReader(metricReader); + // var metricExporter = new OtlpMetricsExporter(options); + // var metricReader = new PeriodicExportingMetricReader(metricExporter, options.MetricExportIntervalMilliseconds); + // return builder.AddMetricReader(metricReader); + + return builder; } } }
1
// <copyright file="OtlpMetricExporterHelperExtensions.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using OpenTelemetry.Exporter; namespace OpenTelemetry.Metrics { /// <summary> /// Extension methods to simplify registering of the OpenTelemetry Protocol (OTLP) exporter. /// </summary> public static class OtlpMetricExporterHelperExtensions { /// <summary> /// Adds OpenTelemetry Protocol (OTLP) exporter to the MeterProvider. /// </summary> /// <param name="builder"><see cref="MeterProviderBuilder"/> builder to use.</param> /// <param name="configure">Exporter configuration options.</param> /// <returns>The instance of <see cref="MeterProviderBuilder"/> to chain the calls.</returns> public static MeterProviderBuilder AddOtlpExporter(this MeterProviderBuilder builder, Action<OtlpExporterOptions> configure = null) { if (builder == null) { throw new ArgumentNullException(nameof(builder)); } var options = new OtlpExporterOptions(); configure?.Invoke(options); var metricExporter = new OtlpMetricsExporter(options); var metricReader = new PeriodicExportingMetricReader(metricExporter, options.MetricExportIntervalMilliseconds); return builder.AddMetricReader(metricReader); } } }
1
21,286
Same thing with the OtlpExporter
open-telemetry-opentelemetry-dotnet
.cs
@@ -70,7 +70,6 @@ const char * jx_operator_string( jx_operator_t type ) case JX_OP_NOT: return " not "; // note that the closing bracket/paren is in jx_print_subexpr case JX_OP_LOOKUP: return "["; - case JX_OP_CALL: return "("; case JX_OP_SLICE: return ":"; default: return "???"; }
1
/* Copyright (C) 2015- The University of Notre Dame This software is distributed under the GNU General Public License. See the file COPYING for details. */ #include "jx_print.h" #include "jx_parse.h" #include <assert.h> #include <ctype.h> void jx_comprehension_print(struct jx_comprehension *comp, buffer_t *b) { if (!comp) return; buffer_putstring(b, " for "); buffer_putstring(b, comp->variable); buffer_putstring(b, " in "); jx_print_buffer(comp->elements, b); if (comp->condition) { buffer_putstring(b, " if "); jx_print_buffer(comp->condition, b); } jx_comprehension_print(comp->next, b); } static void jx_pair_print( struct jx_pair *pair, buffer_t *b ) { if(!pair) return; jx_print_buffer(pair->key,b); buffer_putstring(b,":"); jx_print_buffer(pair->value,b); if(pair->next) { buffer_putstring(b,","); jx_pair_print(pair->next,b); } } static void jx_item_print( struct jx_item *item, buffer_t *b ) { if(!item) return; jx_print_buffer(item->value, b); jx_comprehension_print(item->comp, b); if(item->next) { buffer_putstring(b,","); jx_item_print(item->next,b); } } const char * jx_operator_string( jx_operator_t type ) { switch(type) { case JX_OP_EQ: return "=="; case JX_OP_NE: return "!="; case JX_OP_LT: return "<"; case JX_OP_LE: return "<="; case JX_OP_GT: return ">"; case JX_OP_GE: return ">="; case JX_OP_ADD: return "+"; case JX_OP_SUB: return "-"; case JX_OP_MUL: return "*"; case JX_OP_DIV: return "/"; case JX_OP_MOD: return "%"; case JX_OP_AND: return " and "; case JX_OP_OR: return " or "; case JX_OP_NOT: return " not "; // note that the closing bracket/paren is in jx_print_subexpr case JX_OP_LOOKUP: return "["; case JX_OP_CALL: return "("; case JX_OP_SLICE: return ":"; default: return "???"; } } void jx_escape_string( const char *s, buffer_t *b ) { if(!s) return; buffer_putstring(b,"\""); while(*s) { switch(*s) { case '\"': buffer_putstring(b,"\\\""); break; case '\'': buffer_putstring(b,"\\\'"); break; case '\\': buffer_putstring(b,"\\\\"); break; case '\b': buffer_putstring(b,"\\b"); break; case '\f': buffer_putstring(b,"\\f"); break; case '\n': buffer_putstring(b,"\\n"); break; case '\r': buffer_putstring(b,"\\r"); break; case '\t': buffer_putstring(b,"\\t"); break; default: if(isprint(*s)) { buffer_printf(b,"%c",*s); } else { buffer_printf(b,"\\u%04x",(int)*s); } break; } s++; } buffer_putstring(b,"\""); } void jx_print_subexpr( struct jx *j, jx_operator_t parent, buffer_t *b ) { if(!j) return; int do_parens = 0; if(j->type==JX_OPERATOR && jx_operator_precedence(parent) < jx_operator_precedence(j->u.oper.type)) { do_parens = 1; } else { do_parens = 0; } if(do_parens) buffer_putstring(b,"("); jx_print_buffer(j,b); if(do_parens) buffer_putstring(b,")"); } void jx_print_args( struct jx *j, buffer_t *b ) { if(!jx_istype(j, JX_ARRAY)) return; jx_item_print(j->u.items, b); } void jx_print_buffer( struct jx *j, buffer_t *b ) { if(!j) return; switch(j->type) { case JX_NULL: buffer_putstring(b,"null"); break; case JX_DOUBLE: buffer_printf(b,"%g",j->u.double_value); break; case JX_BOOLEAN: buffer_printf(b,"%s",j->u.boolean_value ? 
"true" : "false"); break; case JX_INTEGER: buffer_printf(b,"%lld",(long long)j->u.integer_value); break; case JX_SYMBOL: buffer_printf(b,"%s",j->u.symbol_name); break; case JX_STRING: jx_escape_string(j->u.string_value,b); break; case JX_ARRAY: buffer_putstring(b,"["); jx_item_print(j->u.items,b); buffer_putstring(b,"]"); break; case JX_OBJECT: buffer_putstring(b,"{"); jx_pair_print(j->u.pairs,b); buffer_putstring(b,"}"); break; case JX_OPERATOR: jx_print_subexpr(j->u.oper.left,j->u.oper.type,b); buffer_putstring(b,jx_operator_string(j->u.oper.type)); if (j->u.oper.type == JX_OP_CALL) { jx_item_print(j->u.oper.right->u.items, b); buffer_putstring(b, ")"); } else { jx_print_subexpr( j->u.oper.right, j->u.oper.type, b); } if(j->u.oper.type==JX_OP_LOOKUP) buffer_putstring(b,"]"); break; case JX_FUNCTION: buffer_putstring(b, j->u.func.name); break; case JX_ERROR: buffer_putstring(b,"Error"); jx_print_buffer(j->u.err, b); break; } } void jx_print_stream( struct jx *j, FILE *file ) { buffer_t buffer; buffer_init(&buffer); jx_print_buffer(j,&buffer); fprintf(file,"%s",buffer_tostring(&buffer)); buffer_free(&buffer); } void jx_print_link( struct jx *j, struct link *l, time_t stoptime ) { buffer_t buffer; buffer_init(&buffer); jx_print_buffer(j,&buffer); size_t len; const char *str = buffer_tolstring(&buffer, &len); link_write(l,str,len,stoptime); buffer_free(&buffer); } char * jx_print_string( struct jx *j ) { buffer_t buffer; char *str; buffer_init(&buffer); jx_print_buffer(j,&buffer); buffer_dup(&buffer,&str); buffer_free(&buffer); return str; }
1
14,050
Should not be removed.
cooperative-computing-lab-cctools
c
@@ -1,17 +1,12 @@ -[ 'foss', 'puppet', 'ezbake', 'module' ].each do |lib| - require "beaker/dsl/install_utils/#{lib}_utils" -end require "beaker/dsl/install_utils/pe_defaults" +require 'beaker-puppet' module Beaker module DSL # Collection of installation methods and support module InstallUtils - include DSL::InstallUtils::PuppetUtils include DSL::InstallUtils::PEDefaults - include DSL::InstallUtils::FOSSUtils - include DSL::InstallUtils::ModuleUtils - include DSL::InstallUtils::EZBakeUtils + include BeakerPuppet::InstallUtils end end end
1
[ 'foss', 'puppet', 'ezbake', 'module' ].each do |lib| require "beaker/dsl/install_utils/#{lib}_utils" end require "beaker/dsl/install_utils/pe_defaults" module Beaker module DSL # Collection of installation methods and support module InstallUtils include DSL::InstallUtils::PuppetUtils include DSL::InstallUtils::PEDefaults include DSL::InstallUtils::FOSSUtils include DSL::InstallUtils::ModuleUtils include DSL::InstallUtils::EZBakeUtils end end end
1
14,978
Not a blocker for anything, but should this be moved to beaker-pe? Is that ticketed anywhere?
voxpupuli-beaker
rb
@@ -349,7 +349,9 @@ public class EventFiringWebDriver implements WebDriver, JavascriptExecutor, Take } public void submit() { + dispatcher.beforeClickOn(element, driver); element.submit(); + dispatcher.afterClickOn(element, driver); } public void sendKeys(CharSequence... keysToSend) {
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.support.events; import org.openqa.selenium.Alert; import org.openqa.selenium.Beta; import org.openqa.selenium.By; import org.openqa.selenium.Cookie; import org.openqa.selenium.Dimension; import org.openqa.selenium.JavascriptExecutor; import org.openqa.selenium.OutputType; import org.openqa.selenium.Point; import org.openqa.selenium.TakesScreenshot; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebDriverException; import org.openqa.selenium.WebElement; import org.openqa.selenium.interactions.HasInputDevices; import org.openqa.selenium.interactions.HasTouchScreen; import org.openqa.selenium.interactions.Keyboard; import org.openqa.selenium.interactions.Mouse; import org.openqa.selenium.interactions.TouchScreen; import org.openqa.selenium.interactions.internal.Coordinates; import org.openqa.selenium.internal.Locatable; import org.openqa.selenium.internal.WrapsDriver; import org.openqa.selenium.internal.WrapsElement; import org.openqa.selenium.logging.Logs; import org.openqa.selenium.support.events.internal.EventFiringKeyboard; import org.openqa.selenium.support.events.internal.EventFiringMouse; import org.openqa.selenium.support.events.internal.EventFiringTouch; import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; /** * A wrapper around an arbitrary {@link WebDriver} instance which supports registering of a * {@link WebDriverEventListener}, e&#46;g&#46; for logging purposes. 
*/ public class EventFiringWebDriver implements WebDriver, JavascriptExecutor, TakesScreenshot, WrapsDriver, HasInputDevices, HasTouchScreen { private final WebDriver driver; private final List<WebDriverEventListener> eventListeners = new ArrayList<>(); private final WebDriverEventListener dispatcher = (WebDriverEventListener) Proxy .newProxyInstance( WebDriverEventListener.class.getClassLoader(), new Class[] {WebDriverEventListener.class}, new InvocationHandler() { public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { try { for (WebDriverEventListener eventListener : eventListeners) { method.invoke(eventListener, args); } return null; } catch (InvocationTargetException e){ throw e.getTargetException(); } } } ); public EventFiringWebDriver(final WebDriver driver) { Class<?>[] allInterfaces = extractInterfaces(driver); this.driver = (WebDriver) Proxy.newProxyInstance( WebDriverEventListener.class.getClassLoader(), allInterfaces, new InvocationHandler() { public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { if ("getWrappedDriver".equals(method.getName())) { return driver; } try { return method.invoke(driver, args); } catch (InvocationTargetException e) { dispatcher.onException(e.getTargetException(), driver); throw e.getTargetException(); } } } ); } private Class<?>[] extractInterfaces(Object object) { Set<Class<?>> allInterfaces = new HashSet<>(); allInterfaces.add(WrapsDriver.class); if (object instanceof WebElement) { allInterfaces.add(WrapsElement.class); } extractInterfaces(allInterfaces, object.getClass()); return allInterfaces.toArray(new Class<?>[allInterfaces.size()]); } private void extractInterfaces(Set<Class<?>> addTo, Class<?> clazz) { if (Object.class.equals(clazz)) { return; // Done } Class<?>[] classes = clazz.getInterfaces(); addTo.addAll(Arrays.asList(classes)); extractInterfaces(addTo, clazz.getSuperclass()); } /** * @param eventListener the event listener to register * @return this for method chaining. */ public EventFiringWebDriver register(WebDriverEventListener eventListener) { eventListeners.add(eventListener); return this; } /** * @param eventListener the event listener to unregister * @return this for method chaining. 
*/ public EventFiringWebDriver unregister(WebDriverEventListener eventListener) { eventListeners.remove(eventListener); return this; } public WebDriver getWrappedDriver() { if (driver instanceof WrapsDriver) { return ((WrapsDriver) driver).getWrappedDriver(); } else { return driver; } } public void get(String url) { dispatcher.beforeNavigateTo(url, driver); driver.get(url); dispatcher.afterNavigateTo(url, driver); } public String getCurrentUrl() { return driver.getCurrentUrl(); } public String getTitle() { return driver.getTitle(); } public List<WebElement> findElements(By by) { dispatcher.beforeFindBy(by, null, driver); List<WebElement> temp = driver.findElements(by); dispatcher.afterFindBy(by, null, driver); List<WebElement> result = new ArrayList<>(temp.size()); for (WebElement element : temp) { result.add(createWebElement(element)); } return result; } public WebElement findElement(By by) { dispatcher.beforeFindBy(by, null, driver); WebElement temp = driver.findElement(by); dispatcher.afterFindBy(by, null, driver); return createWebElement(temp); } public String getPageSource() { return driver.getPageSource(); } public void close() { driver.close(); } public void quit() { driver.quit(); } public Set<String> getWindowHandles() { return driver.getWindowHandles(); } public String getWindowHandle() { return driver.getWindowHandle(); } public Object executeScript(String script, Object... args) { if (driver instanceof JavascriptExecutor) { dispatcher.beforeScript(script, driver); Object[] usedArgs = unpackWrappedArgs(args); Object result = ((JavascriptExecutor) driver).executeScript(script, usedArgs); dispatcher.afterScript(script, driver); return result; } throw new UnsupportedOperationException( "Underlying driver instance does not support executing javascript"); } public Object executeAsyncScript(String script, Object... args) { if (driver instanceof JavascriptExecutor) { dispatcher.beforeScript(script, driver); Object[] usedArgs = unpackWrappedArgs(args); Object result = ((JavascriptExecutor) driver).executeAsyncScript(script, usedArgs); dispatcher.afterScript(script, driver); return result; } throw new UnsupportedOperationException( "Underlying driver instance does not support executing javascript"); } private Object[] unpackWrappedArgs(Object... 
args) { // Walk the args: the various drivers expect unpacked versions of the elements Object[] usedArgs = new Object[args.length]; for (int i = 0; i < args.length; i++) { usedArgs[i] = unpackWrappedElement(args[i]); } return usedArgs; } private Object unpackWrappedElement(Object arg) { if (arg instanceof List<?>) { List<?> aList = (List<?>) arg; List<Object> toReturn = new ArrayList<>(); for (Object anAList : aList) { toReturn.add(unpackWrappedElement(anAList)); } return toReturn; } else if (arg instanceof Map<?, ?>) { Map<?, ?> aMap = (Map<?, ?>) arg; Map<Object, Object> toReturn = new HashMap<>(); for (Object key : aMap.keySet()) { toReturn.put(key, unpackWrappedElement(aMap.get(key))); } return toReturn; } else if (arg instanceof EventFiringWebElement) { return ((EventFiringWebElement) arg).getWrappedElement(); } else { return arg; } } public <X> X getScreenshotAs(OutputType<X> target) throws WebDriverException { if (driver instanceof TakesScreenshot) { return ((TakesScreenshot) driver).getScreenshotAs(target); } throw new UnsupportedOperationException( "Underlying driver instance does not support taking screenshots"); } public TargetLocator switchTo() { return new EventFiringTargetLocator(driver.switchTo()); } public Navigation navigate() { return new EventFiringNavigation(driver.navigate()); } public Options manage() { return new EventFiringOptions(driver.manage()); } private WebElement createWebElement(WebElement from) { return new EventFiringWebElement(from); } public Keyboard getKeyboard() { if (driver instanceof HasInputDevices) { return new EventFiringKeyboard(driver, dispatcher); } else { throw new UnsupportedOperationException("Underlying driver does not implement advanced" + " user interactions yet."); } } public Mouse getMouse() { if (driver instanceof HasInputDevices) { return new EventFiringMouse(driver, dispatcher); } else { throw new UnsupportedOperationException("Underlying driver does not implement advanced" + " user interactions yet."); } } public TouchScreen getTouch() { if (driver instanceof HasTouchScreen) { return new EventFiringTouch(driver, dispatcher); } else { throw new UnsupportedOperationException("Underlying driver does not implement advanced" + " user interactions yet."); } } private class EventFiringWebElement implements WebElement, WrapsElement, WrapsDriver, Locatable { private final WebElement element; private final WebElement underlyingElement; private EventFiringWebElement(final WebElement element) { this.element = (WebElement) Proxy.newProxyInstance( WebDriverEventListener.class.getClassLoader(), extractInterfaces(element), new InvocationHandler() { public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { if (method.getName().equals("getWrappedElement")) { return element; } try { return method.invoke(element, args); } catch (InvocationTargetException e) { dispatcher.onException(e.getTargetException(), driver); throw e.getTargetException(); } } } ); this.underlyingElement = element; } public void click() { dispatcher.beforeClickOn(element, driver); element.click(); dispatcher.afterClickOn(element, driver); } public void submit() { element.submit(); } public void sendKeys(CharSequence... 
keysToSend) { dispatcher.beforeChangeValueOf(element, driver); element.sendKeys(keysToSend); dispatcher.afterChangeValueOf(element, driver); } public void clear() { dispatcher.beforeChangeValueOf(element, driver); element.clear(); dispatcher.afterChangeValueOf(element, driver); } public String getTagName() { return element.getTagName(); } public String getAttribute(String name) { return element.getAttribute(name); } public boolean isSelected() { return element.isSelected(); } public boolean isEnabled() { return element.isEnabled(); } public String getText() { return element.getText(); } public boolean isDisplayed() { return element.isDisplayed(); } public Point getLocation() { return element.getLocation(); } public Dimension getSize() { return element.getSize(); } public String getCssValue(String propertyName) { return element.getCssValue(propertyName); } public WebElement findElement(By by) { dispatcher.beforeFindBy(by, element, driver); WebElement temp = element.findElement(by); dispatcher.afterFindBy(by, element, driver); return createWebElement(temp); } public List<WebElement> findElements(By by) { dispatcher.beforeFindBy(by, element, driver); List<WebElement> temp = element.findElements(by); dispatcher.afterFindBy(by, element, driver); List<WebElement> result = new ArrayList<>(temp.size()); for (WebElement element : temp) { result.add(createWebElement(element)); } return result; } public WebElement getWrappedElement() { return underlyingElement; } @Override public boolean equals(Object obj) { if (!(obj instanceof WebElement)) { return false; } WebElement other = (WebElement) obj; if (other instanceof WrapsElement) { other = ((WrapsElement) other).getWrappedElement(); } return underlyingElement.equals(other); } @Override public int hashCode() { return underlyingElement.hashCode(); } @Override public String toString() { return underlyingElement.toString(); } public WebDriver getWrappedDriver() { return driver; } public Coordinates getCoordinates() { return ((Locatable) underlyingElement).getCoordinates(); } public <X> X getScreenshotAs(OutputType<X> outputType) throws WebDriverException { return element.getScreenshotAs(outputType); } } private class EventFiringNavigation implements Navigation { private final WebDriver.Navigation navigation; EventFiringNavigation(Navigation navigation) { this.navigation = navigation; } public void to(String url) { dispatcher.beforeNavigateTo(url, driver); navigation.to(url); dispatcher.afterNavigateTo(url, driver); } public void to(URL url) { to(String.valueOf(url)); } public void back() { dispatcher.beforeNavigateBack(driver); navigation.back(); dispatcher.afterNavigateBack(driver); } public void forward() { dispatcher.beforeNavigateForward(driver); navigation.forward(); dispatcher.afterNavigateForward(driver); } public void refresh() { navigation.refresh(); } } private class EventFiringOptions implements Options { private Options options; private EventFiringOptions(Options options) { this.options = options; } public Logs logs() { return options.logs(); } public void addCookie(Cookie cookie) { options.addCookie(cookie); } public void deleteCookieNamed(String name) { options.deleteCookieNamed(name); } public void deleteCookie(Cookie cookie) { options.deleteCookie(cookie); } public void deleteAllCookies() { options.deleteAllCookies(); } public Set<Cookie> getCookies() { return options.getCookies(); } public Cookie getCookieNamed(String name) { return options.getCookieNamed(name); } public Timeouts timeouts() { return new 
EventFiringTimeouts(options.timeouts()); } public ImeHandler ime() { throw new UnsupportedOperationException("Driver does not support IME interactions"); } @Beta public Window window() { return new EventFiringWindow(options.window()); } } private class EventFiringTimeouts implements Timeouts { private final Timeouts timeouts; EventFiringTimeouts(Timeouts timeouts) { this.timeouts = timeouts; } public Timeouts implicitlyWait(long time, TimeUnit unit) { timeouts.implicitlyWait(time, unit); return this; } public Timeouts setScriptTimeout(long time, TimeUnit unit) { timeouts.setScriptTimeout(time, unit); return this; } public Timeouts pageLoadTimeout(long time, TimeUnit unit) { timeouts.pageLoadTimeout(time, unit); return this; } } private class EventFiringTargetLocator implements TargetLocator { private TargetLocator targetLocator; private EventFiringTargetLocator(TargetLocator targetLocator) { this.targetLocator = targetLocator; } public WebDriver frame(int frameIndex) { return targetLocator.frame(frameIndex); } public WebDriver frame(String frameName) { return targetLocator.frame(frameName); } public WebDriver frame(WebElement frameElement) { return targetLocator.frame(frameElement); } public WebDriver parentFrame() { return targetLocator.parentFrame(); } public WebDriver window(String windowName) { return targetLocator.window(windowName); } public WebDriver defaultContent() { return targetLocator.defaultContent(); } public WebElement activeElement() { return targetLocator.activeElement(); } public Alert alert() { return targetLocator.alert(); } } @Beta private class EventFiringWindow implements Window { private final Window window; EventFiringWindow(Window window) { this.window = window; } public void setSize(Dimension targetSize) { window.setSize(targetSize); } public void setPosition(Point targetLocation) { window.setPosition(targetLocation); } public Dimension getSize() { return window.getSize(); } public Point getPosition() { return window.getPosition(); } public void maximize() { window.maximize(); } } }
1
12,685
This shouldn't be beforeClickOn, but rather a beforeSubmit added to WebDriverEventListener. Since submit() does not synthesize 'click' events, firing the click listeners here isn't accurate.
SeleniumHQ-selenium
java
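A minimal sketch of what the reviewer is suggesting, under the assumption that submit-specific hooks (beforeSubmit/afterSubmit) were added to WebDriverEventListener; those methods are not part of the interface shown above, so the names are hypothetical:

```java
// Hypothetical additions to WebDriverEventListener (not in the interface above):
//   void beforeSubmit(WebElement element, WebDriver driver);
//   void afterSubmit(WebElement element, WebDriver driver);

// Inside EventFiringWebElement, dispatch submit-specific events instead of
// reusing the click hooks, since submit() does not synthesize click events.
public void submit() {
  dispatcher.beforeSubmit(element, driver);
  element.submit();
  dispatcher.afterSubmit(element, driver);
}
```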
@@ -119,7 +119,7 @@ module Ncr if self.pending? self.currently_awaiting_approvers.first.email_address else - self.approving_official.email_address + self.approving_official ? self.approving_official.email_address : self.system_approver_emails.first end end
1
require 'csv' module Ncr # Make sure all table names use 'ncr_XXX' def self.table_name_prefix 'ncr_' end EXPENSE_TYPES = %w(BA60 BA61 BA80) BUILDING_NUMBERS = YAML.load_file("#{Rails.root}/config/data/ncr/building_numbers.yml") class WorkOrder < ActiveRecord::Base # must define before include PurchaseCardMixin def self.purchase_amount_column_name :amount end include ValueHelper include ProposalDelegate include PurchaseCardMixin # This is a hack to be able to attribute changes to the correct user. This attribute needs to be set explicitly, then the update comment will use them as the "commenter". Defaults to the requester. attr_accessor :modifier after_initialize :set_defaults before_validation :normalize_values before_update :record_changes validates :cl_number, format: { with: /\ACL\d{7}\z/, message: "must start with 'CL', followed by seven numbers" }, allow_blank: true validates :expense_type, inclusion: {in: EXPENSE_TYPES}, presence: true validates :function_code, format: { with: /\APG[A-Z0-9]{3}\z/, message: "must start with 'PG', followed by three letters or numbers" }, allow_blank: true validates :project_title, presence: true validates :vendor, presence: true validates :building_number, presence: true validates :rwa_number, presence: true, if: :ba80? validates :rwa_number, format: { with: /\A[a-zA-Z][0-9]{7}\z/, message: "must be one letter followed by 7 numbers" }, allow_blank: true validates :soc_code, format: { with: /\A[A-Z0-9]{3}\z/, message: "must be three letters or numbers" }, allow_blank: true def set_defaults self.direct_pay ||= false self.not_to_exceed ||= false self.emergency ||= false end # For budget attributes, converts empty strings to `nil`, so that the request isn't shown as being modified when the fields appear in the edit form. def normalize_values if self.cl_number.present? self.cl_number = self.cl_number.upcase self.cl_number.prepend('CL') unless self.cl_number.start_with?('CL') else self.cl_number = nil end if self.function_code.present? self.function_code.upcase! self.function_code.prepend('PG') unless self.function_code.start_with?('PG') else self.function_code = nil end if self.soc_code.present? self.soc_code.upcase! else self.soc_code = nil end end def approver_email_frozen? approval = self.individual_approvals.first approval && !approval.actionable? end def approver_changed?(approval_email) self.approving_official && self.approving_official.email_address != approval_email end # Check the approvers, accounting for frozen approving official def approvers_emails(selected_approving_official_email) emails = self.system_approver_emails if self.approver_email_frozen? emails.unshift(self.approving_official.email_address) else emails.unshift(selected_approving_official_email) end emails end def setup_approvals_and_observers(selected_approving_official_email) emails = self.approvers_emails(selected_approving_official_email) if self.emergency emails.each{|e| self.add_observer(e)} # skip state machine self.proposal.update(status: 'approved') else original_approvers = self.proposal.individual_approvals.non_pending.map(&:user) self.force_approvers(emails) self.notify_removed_approvers(original_approvers) end end def approving_official self.approvers.first end # the highest approver on the stack, pending preferred if status indicates def current_approver_email_address if self.pending? 
self.currently_awaiting_approvers.first.email_address else self.approving_official.email_address end end def email_approvers Dispatcher.on_proposal_update(self.proposal, self.modifier) end # Ignore values in certain fields if they aren't relevant. May want to # split these into different models def self.relevant_fields(expense_type) fields = [:description, :amount, :expense_type, :vendor, :not_to_exceed, :building_number, :org_code, :direct_pay, :cl_number, :function_code, :soc_code] case expense_type when 'BA61' fields << :emergency when 'BA80' fields.concat([:rwa_number, :code]) end fields end def relevant_fields Ncr::WorkOrder.relevant_fields(self.expense_type) end # Methods for Client Data interface def fields_for_display attributes = self.relevant_fields attributes.map{|key| [WorkOrder.human_attribute_name(key), self[key]]} end # will return nil if the `org_code` is blank or not present in Organization list def organization # TODO reference by `code` rather than storing the whole thing code = (self.org_code || '').split(' ', 2)[0] Ncr::Organization.find(code) end def ba80? self.expense_type == 'BA80' end def public_identifier "FY" + self.fiscal_year.to_s.rjust(2, "0") + "-#{self.proposal.id}" end def total_price self.amount || 0.0 end # may be replaced with paper-trail or similar at some point def version self.updated_at.to_i end def name self.project_title end def system_approver_emails results = [] if %w(BA60 BA61).include?(self.expense_type) unless self.organization.try(:whsc?) results << self.class.ba61_tier1_budget_mailbox end results << self.class.ba61_tier2_budget_mailbox else # BA80 if self.organization.try(:ool?) results << self.class.ool_ba80_budget_mailbox else results << self.class.ba80_budget_mailbox end end results end def self.ba61_tier1_budget_mailbox self.approver_with_role('BA61_tier1_budget_approver') end def self.ba61_tier2_budget_mailbox self.approver_with_role('BA61_tier2_budget_approver') end def self.approver_with_role(role_name) users = User.with_role(role_name).where(client_slug: 'ncr') if users.empty? fail "Missing User with role #{role_name} -- did you run rake db:migrate and rake db:seed?" end users.first.email_address end def self.ba80_budget_mailbox self.approver_with_role('BA80_budget_approver') end def self.ool_ba80_budget_mailbox self.approver_with_role('OOL_BA80_budget_approver') end def org_id self.organization.try(:code) end def building_id regex = /\A(\w{8}) .*\z/ if self.building_number && regex.match(self.building_number) regex.match(self.building_number)[1] else self.building_number end end def as_json super.merge(org_id: self.org_id, building_id: self.building_id) end def fiscal_year year = self.created_at.nil? ? Time.zone.now.year : self.created_at.year month = self.created_at.nil? ? Time.zone.now.month : self.created_at.month if month >= 10 year += 1 end year % 100 # convert to two-digit end protected # TODO move to Proposal model def record_changes changed_attributes = self.changed_attributes.except(:updated_at) comment_texts = [] bullet = changed_attributes.length > 1 ? '- ' : '' changed_attributes.each do |key, value| former = property_to_s(self.send(key + "_was")) value = property_to_s(self[key]) property_name = WorkOrder.human_attribute_name(key) comment_texts << WorkOrder.update_comment_format(property_name, value, bullet, former) end if !comment_texts.empty? if self.approved? 
comment_texts << "_Modified post-approval_" end self.proposal.comments.create( comment_text: comment_texts.join("\n"), update_comment: true, user: self.modifier || self.requester ) end end def self.update_comment_format key, value, bullet, former=nil from = former ? "from #{former} " : '' "#{bullet}*#{key}* was changed " + from + "to #{value}" end # Generally shouldn't be called directly as it doesn't account for # emergencies, or notify removed approvers def force_approvers(emails) individuals = emails.map do |email| user = User.for_email(email) # Reuse existing approvals, if present self.proposal.existing_approval_for(user) || Approvals::Individual.new(user: user) end self.proposal.root_approval = Approvals::Serial.new(child_approvals: individuals) end def notify_removed_approvers(original_approvers) current_approvers = self.proposal.individual_approvals.non_pending.map(&:user) removed_approvers_to_notify = original_approvers - current_approvers Dispatcher.on_approver_removal(self.proposal, removed_approvers_to_notify) end end end
1
14,517
this ternary operator is hiding an `if/else` within an `if/else` - any chance we could move the logic ELSEwhere? (see what I did there -- ?? :100: )
18F-C2
rb
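One possible way to address this comment, as a sketch only: extract the else-branch into a small helper (the helper name below is hypothetical), so neither branch of current_approver_email_address hides another conditional:

```ruby
def current_approver_email_address
  if self.pending?
    self.currently_awaiting_approvers.first.email_address
  else
    final_approver_email_address
  end
end

# Hypothetical helper: falls back to the first system approver when no
# approving official has been assigned yet.
def final_approver_email_address
  if self.approving_official
    self.approving_official.email_address
  else
    self.system_approver_emails.first
  end
end
```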
@@ -4,6 +4,10 @@ package net.sourceforge.pmd.lang.java.typeresolution.typedefinition; +import net.sourceforge.pmd.annotation.InternalApi; + +@Deprecated +@InternalApi public interface TypeDefinition { /** * Get the raw Class type of the definition.
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.java.typeresolution.typedefinition; public interface TypeDefinition { /** * Get the raw Class type of the definition. * * @return Raw Class type. */ Class<?> getType(); }
1
16,467
Is this really internal? Or will the API be different in PMD 7.0.0? Well, the interface doesn't offer much functionality anyway...
pmd-pmd
java
@@ -86,8 +86,12 @@ class Interface(param.Parameterized): datatype = None + # Denotes whether the interface expects gridded data gridded = False + # Denotes whether the interface expects multiple ragged arrays + multi = False + @classmethod def register(cls, interface): cls.interfaces[interface.datatype] = interface
1
import param import numpy as np from ..element import Element from ..ndmapping import OrderedDict from .. import util class iloc(object): """ iloc is small wrapper object that allows row, column based indexing into a Dataset using the ``.iloc`` property. It supports the usual numpy and pandas iloc indexing semantics including integer indices, slices, lists and arrays of values. For more information see the ``Dataset.iloc`` property docstring. """ def __init__(self, dataset): self.dataset = dataset def __getitem__(self, index): index = util.wrap_tuple(index) if len(index) == 1: index = (index[0], slice(None)) elif len(index) > 2: raise IndexError('Tabular index not understood, index ' 'must be at most length 2.') rows, cols = index if rows is Ellipsis: rows = slice(None) data = self.dataset.interface.iloc(self.dataset, (rows, cols)) kdims = self.dataset.kdims vdims = self.dataset.vdims if np.isscalar(data): return data elif cols == slice(None): pass else: if isinstance(cols, slice): dims = self.dataset.dimensions()[index[1]] elif np.isscalar(cols): dims = [self.dataset.get_dimension(cols)] else: dims = [self.dataset.get_dimension(d) for d in cols] kdims = [d for d in dims if d in kdims] vdims = [d for d in dims if d in vdims] datatype = [dt for dt in self.dataset.datatype if dt in Interface.interfaces and not Interface.interfaces[dt].gridded] if not datatype: datatype = ['dataframe', 'dictionary'] return self.dataset.clone(data, kdims=kdims, vdims=vdims, datatype=datatype) class ndloc(object): """ ndloc is a small wrapper object that allows ndarray-like indexing for gridded Datasets using the ``.ndloc`` property. It supports the standard NumPy ndarray indexing semantics including integer indices, slices, lists and arrays of values. For more information see the ``Dataset.ndloc`` property docstring. """ def __init__(self, dataset): self.dataset = dataset def __getitem__(self, indices): ds = self.dataset indices = util.wrap_tuple(indices) if not ds.interface.gridded: raise IndexError('Cannot use ndloc on non nd-dimensional datastructure') selected = self.dataset.interface.ndloc(ds, indices) if np.isscalar(selected): return selected params = {} if hasattr(ds, 'bounds'): params['bounds'] = None return self.dataset.clone(selected, datatype=[ds.interface.datatype]+ds.datatype, **params) class Interface(param.Parameterized): interfaces = {} datatype = None gridded = False @classmethod def register(cls, interface): cls.interfaces[interface.datatype] = interface @classmethod def cast(cls, dataset, datatype=None, cast_type=None): """ Given a list of Dataset objects, cast them to the specified datatype (by default the format matching the current interface) with the given cast_type (if specified). 
""" if len({type(c) for c in dataset}) > 1 and cast_type is None: raise Exception("Please supply the common cast type") if datatype is None: datatype = cls.datatype unchanged = all({c.interface==cls for c in dataset}) if unchanged and cast_type is None: return dataset elif unchanged: return [cast_type(co, **dict(util.get_param_values(co))) for co in dataset] return [co.clone(co.columns(), datatype=[datatype], new_type=cast_type) for co in dataset] @classmethod def initialize(cls, eltype, data, kdims, vdims, datatype=None): # Process params and dimensions if isinstance(data, Element): pvals = util.get_param_values(data) kdims = pvals.get('kdims') if kdims is None else kdims vdims = pvals.get('vdims') if vdims is None else vdims if datatype is None: datatype = eltype.datatype # Process Element data if (hasattr(data, 'interface') and issubclass(data.interface, Interface)): if data.interface.datatype in datatype: data = data.data elif data.interface.gridded: gridded = OrderedDict([(kd.name, data.dimension_values(kd.name, expanded=False)) for kd in data.kdims]) for vd in data.vdims: gridded[vd.name] = data.dimension_values(vd, flat=False) data = tuple(gridded.values()) else: data = tuple(data.columns().values()) elif isinstance(data, Element): data = tuple(data.dimension_values(d) for d in kdims+vdims) elif isinstance(data, util.generator_types): data = list(data) # Set interface priority order prioritized = [cls.interfaces[p] for p in datatype if p in cls.interfaces] head = [intfc for intfc in prioritized if type(data) in intfc.types] if head: # Prioritize interfaces which have matching types prioritized = head + [el for el in prioritized if el != head[0]] # Iterate over interfaces until one can interpret the input for interface in prioritized: try: (data, dims, extra_kws) = interface.init(eltype, data, kdims, vdims) break except Exception: pass else: raise ValueError("None of the available storage backends " "were able to support the supplied data format.") return data, interface, dims, extra_kws @classmethod def validate(cls, dataset): not_found = [d for d in dataset.dimensions(label='name') if d not in dataset.data] if not_found: raise ValueError("Supplied data does not contain specified " "dimensions, the following dimensions were " "not found: %s" % repr(not_found)) @classmethod def expanded(cls, arrays): return not any(array.shape not in [arrays[0].shape, (1,)] for array in arrays[1:]) @classmethod def select_mask(cls, dataset, selection): """ Given a Dataset object and a dictionary with dimension keys and selection keys (i.e tuple ranges, slices, sets, lists or literals) return a boolean mask over the rows in the Dataset object that have been selected. """ mask = np.ones(len(dataset), dtype=np.bool) for dim, k in selection.items(): if isinstance(k, tuple): k = slice(*k) arr = cls.values(dataset, dim) if isinstance(k, slice): if k.start is not None: mask &= k.start <= arr if k.stop is not None: mask &= arr < k.stop elif isinstance(k, (set, list)): iter_slcs = [] for ik in k: iter_slcs.append(arr == ik) mask &= np.logical_or.reduce(iter_slcs) elif callable(k): mask &= k(arr) else: index_mask = arr == k if dataset.ndims == 1 and np.sum(index_mask) == 0: data_index = np.argmin(np.abs(arr - k)) mask = np.zeros(len(dataset), dtype=np.bool) mask[data_index] = True else: mask &= index_mask return mask @classmethod def indexed(cls, dataset, selection): """ Given a Dataset object and selection to be applied returns boolean to indicate whether a scalar value has been indexed. 
""" selected = list(selection.keys()) all_scalar = all((not isinstance(sel, (tuple, slice, set, list)) and not callable(sel)) for sel in selection.values()) all_kdims = all(d in selected for d in dataset.kdims) return all_scalar and all_kdims @classmethod def range(cls, dataset, dimension): column = dataset.dimension_values(dimension) if dataset.get_dimension_type(dimension) is np.datetime64: return column.min(), column.max() else: try: return (np.nanmin(column), np.nanmax(column)) except TypeError: column.sort() return column[0], column[-1] @classmethod def concatenate(cls, dataset, datatype=None): """ Utility function to concatenate a list of Column objects, returning a new Dataset object. Note that this is unlike the .concat method which only concatenates the data. """ if len(set(type(c) for c in dataset)) != 1: raise Exception("All inputs must be same type in order to concatenate") interfaces = set(c.interface for c in dataset) if len(interfaces)!=1 and datatype is None: raise Exception("Please specify the concatenated datatype") elif len(interfaces)!=1: interface = cls.interfaces[datatype] else: interface = interfaces.pop() concat_data = interface.concat(dataset) return dataset[0].clone(concat_data) @classmethod def reduce(cls, dataset, reduce_dims, function, **kwargs): kdims = [kdim for kdim in dataset.kdims if kdim not in reduce_dims] return cls.aggregate(dataset, kdims, function, **kwargs) @classmethod def array(cls, dataset, dimensions): return Element.array(dataset, dimensions) @classmethod def dframe(cls, dataset, dimensions): return Element.dframe(dataset, dimensions) @classmethod def columns(cls, dataset, dimensions): return Element.columns(dataset, dimensions) @classmethod def shape(cls, dataset): return dataset.data.shape @classmethod def length(cls, dataset): return len(dataset.data) @classmethod def nonzero(cls, dataset): return bool(cls.length(dataset)) @classmethod def redim(cls, dataset, dimensions): return dataset.data
1
18,994
Does it have to be arrays? Isn't it ragged 'data' (i.e. multiple elements of different lengths)?
holoviz-holoviews
py
@@ -399,6 +399,7 @@ func (oi *OVFImporter) setUpImportWorkflow() (*daisy.Workflow, error) { if err != nil { return nil, fmt.Errorf("error parsing workflow %q: %v", ovfImportWorkflow, err) } + workflow.ForceCleanupOnError = true return workflow, nil }
1
// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ovfimporter import ( "context" "fmt" "os" "path" "path/filepath" "strconv" "strings" "cloud.google.com/go/storage" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/domain" computeutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/compute" daisyutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/daisy" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/logging" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/param" pathutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/path" storageutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/storage" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/daisycommon" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/daisy_utils" ovfdomain "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/domain" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/gce_utils" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_import_params" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_utils" "github.com/GoogleCloudPlatform/compute-image-tools/daisy" daisycompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" "github.com/vmware/govmomi/ovf" "google.golang.org/api/compute/v1" "google.golang.org/api/iterator" "google.golang.org/api/option" ) const ( ovfWorkflowDir = "daisy_workflows/ovf_import/" ovfImportWorkflow = ovfWorkflowDir + "import_ovf.wf.json" ) const ( //Alpha represents alpha release track Alpha = "alpha" //Beta represents beta release track Beta = "beta" //GA represents GA release track GA = "ga" ) // OVFImporter is responsible for importing OVF into GCE type OVFImporter struct { ctx context.Context storageClient domain.StorageClientInterface computeClient daisycompute.Client tarGcsExtractor domain.TarGcsExtractorInterface mgce domain.MetadataGCEInterface ovfDescriptorLoader ovfdomain.OvfDescriptorLoaderInterface bucketIteratorCreator domain.BucketIteratorCreatorInterface Logger logging.LoggerInterface zoneValidator domain.ZoneValidatorInterface gcsPathToClean string workflowPath string buildID string diskInfos *[]ovfutils.DiskInfo params *ovfimportparams.OVFImportParams imageLocation string } // NewOVFImporter creates an OVF importer, including automatically populating dependencies, // such as compute/storage clients. 
func NewOVFImporter(params *ovfimportparams.OVFImportParams) (*OVFImporter, error) { ctx := context.Background() logger := logging.NewLogger("[import-ovf]") storageClient, err := storageutils.NewStorageClient(ctx, logger, "") if err != nil { return nil, err } computeClient, err := createComputeClient(&ctx, params) if err != nil { return nil, err } tarGcsExtractor := storageutils.NewTarGcsExtractor(ctx, storageClient, logger) buildID := os.Getenv("BUILD_ID") if buildID == "" { buildID = pathutils.RandString(5) } workingDirOVFImportWorkflow := toWorkingDir(ovfImportWorkflow, params) bic := &storageutils.BucketIteratorCreator{} ovfImporter := &OVFImporter{ctx: ctx, storageClient: storageClient, computeClient: computeClient, tarGcsExtractor: tarGcsExtractor, workflowPath: workingDirOVFImportWorkflow, buildID: buildID, ovfDescriptorLoader: ovfutils.NewOvfDescriptorLoader(storageClient), mgce: &computeutils.MetadataGCE{}, bucketIteratorCreator: bic, Logger: logger, zoneValidator: &computeutils.ZoneValidator{ComputeClient: computeClient}, params: params} return ovfImporter, nil } func (oi *OVFImporter) buildDaisyVars( translateWorkflowPath string, bootDiskGcsPath string, machineType string, region string) map[string]string { varMap := map[string]string{} varMap["instance_name"] = strings.ToLower(oi.params.InstanceNames) if translateWorkflowPath != "" { varMap["translate_workflow"] = translateWorkflowPath varMap["install_gce_packages"] = strconv.FormatBool(!oi.params.NoGuestEnvironment) } if bootDiskGcsPath != "" { varMap["boot_disk_file"] = bootDiskGcsPath } if oi.params.Network != "" { varMap["network"] = fmt.Sprintf("global/networks/%v", oi.params.Network) } if oi.params.Subnet != "" { varMap["subnet"] = fmt.Sprintf("regions/%v/subnetworks/%v", region, oi.params.Subnet) } if machineType != "" { varMap["machine_type"] = machineType } if oi.params.Description != "" { varMap["description"] = oi.params.Description } if oi.params.PrivateNetworkIP != "" { varMap["private_network_ip"] = oi.params.PrivateNetworkIP } if oi.params.NetworkTier != "" { varMap["network_tier"] = oi.params.NetworkTier } return varMap } func (oi *OVFImporter) updateInstance(w *daisy.Workflow) { instance := (*w.Steps["create-instance"].CreateInstances)[0] instance.CanIpForward = oi.params.CanIPForward instance.DeletionProtection = oi.params.DeletionProtection if instance.Scheduling == nil { instance.Scheduling = &compute.Scheduling{} } if oi.params.NoRestartOnFailure { vFalse := false instance.Scheduling.AutomaticRestart = &vFalse } if oi.params.NodeAffinities != nil { instance.Scheduling.NodeAffinities = oi.params.NodeAffinities } } func toWorkingDir(dir string, params *ovfimportparams.OVFImportParams) string { wd, err := filepath.Abs(filepath.Dir(params.CurrentExecutablePath)) if err == nil { return path.Join(wd, dir) } return dir } // creates a new Daisy Compute client func createComputeClient(ctx *context.Context, params *ovfimportparams.OVFImportParams) (daisycompute.Client, error) { computeOptions := []option.ClientOption{option.WithCredentialsFile(params.Oauth)} if params.Ce != "" { computeOptions = append(computeOptions, option.WithEndpoint(params.Ce)) } computeClient, err := daisycompute.NewClient(*ctx, computeOptions...) 
if err != nil { return nil, err } return computeClient, nil } func (oi *OVFImporter) getProject() (string, error) { return param.GetProjectID(oi.mgce, oi.params.Project) } func (oi *OVFImporter) getZone(project string) (string, error) { if oi.params.Zone != "" { if err := oi.zoneValidator.ZoneValid(project, oi.params.Zone); err != nil { return "", err } return oi.params.Zone, nil } if !oi.mgce.OnGCE() { return "", fmt.Errorf("zone cannot be determined because build is not running on GCE") } // determine zone based on the zone Cloud Build is running in zone, err := oi.mgce.Zone() if err != nil || zone == "" { return "", fmt.Errorf("can't infer zone: %v", err) } return zone, nil } func (oi *OVFImporter) getRegion(zone string) (string, error) { zoneSplits := strings.Split(zone, "-") if len(zoneSplits) < 2 { return "", fmt.Errorf("%v is not a valid zone", zone) } return strings.Join(zoneSplits[:len(zoneSplits)-1], "-"), nil } // Returns OVF GCS bucket and object path (director). If ovaOvaGcsPath is pointing to an OVA file, // it extracts it to a temporary GCS folder and returns it's path. func (oi *OVFImporter) getOvfGcsPath(tmpGcsPath string) (string, bool, error) { ovfOvaGcsPathLowered := strings.ToLower(oi.params.OvfOvaGcsPath) var ovfGcsPath string var shouldCleanUp bool var err error if strings.HasSuffix(ovfOvaGcsPathLowered, ".ova") { ovfGcsPath = pathutils.JoinURL(tmpGcsPath, "ovf") oi.Logger.Log( fmt.Sprintf("Extracting %v OVA archive to %v", oi.params.OvfOvaGcsPath, ovfGcsPath)) err = oi.tarGcsExtractor.ExtractTarToGcs(oi.params.OvfOvaGcsPath, ovfGcsPath) shouldCleanUp = true } else if strings.HasSuffix(ovfOvaGcsPathLowered, ".ovf") { // OvfOvaGcsPath is pointing to OVF descriptor, no need to unpack, just extract directory path. ovfGcsPath = (oi.params.OvfOvaGcsPath)[0 : strings.LastIndex(oi.params.OvfOvaGcsPath, "/")+1] } else { ovfGcsPath = oi.params.OvfOvaGcsPath } // assume OvfOvaGcsPath is a GCS folder for the whole OVF package return pathutils.ToDirectoryURL(ovfGcsPath), shouldCleanUp, err } func (oi *OVFImporter) createScratchBucketBucket(project string, region string) error { safeProjectName := strings.Replace(project, "google", "elgoog", -1) safeProjectName = strings.Replace(safeProjectName, ":", "-", -1) if strings.HasPrefix(safeProjectName, "goog") { safeProjectName = strings.Replace(safeProjectName, "goog", "ggoo", 1) } bucket := strings.ToLower(safeProjectName + "-ovf-import-bkt-" + region) it := oi.bucketIteratorCreator.CreateBucketIterator(oi.ctx, oi.storageClient, project) for itBucketAttrs, err := it.Next(); err != iterator.Done; itBucketAttrs, err = it.Next() { if err != nil { return err } if itBucketAttrs.Name == bucket { oi.params.ScratchBucketGcsPath = fmt.Sprintf("gs://%v/", bucket) return nil } } oi.Logger.Log(fmt.Sprintf("Creating scratch bucket `%v` in %v region", bucket, region)) if err := oi.storageClient.CreateBucket( bucket, project, &storage.BucketAttrs{Name: bucket, Location: region}); err != nil { return err } oi.params.ScratchBucketGcsPath = fmt.Sprintf("gs://%v/", bucket) return nil } func (oi *OVFImporter) buildTmpGcsPath(project string, region string) (string, error) { if oi.params.ScratchBucketGcsPath == "" { if err := oi.createScratchBucketBucket(project, region); err != nil { return "", err } } return pathutils.JoinURL(oi.params.ScratchBucketGcsPath, fmt.Sprintf("ovf-import-%v", oi.buildID)), nil } func (oi *OVFImporter) modifyWorkflowPostValidate(w *daisy.Workflow) { w.LogWorkflowInfo("Cloud Build ID: %s", oi.buildID) rl := 
&daisyutils.ResourceLabeler{ BuildID: oi.buildID, UserLabels: oi.params.UserLabels, BuildIDLabelKey: "gce-ovf-import-build-id", ImageLocation: oi.imageLocation, InstanceLabelKeyRetriever: func(instance *daisy.Instance) string { if strings.ToLower(oi.params.InstanceNames) == instance.Name { return "gce-ovf-import" } return "gce-ovf-import-tmp" }, DiskLabelKeyRetriever: func(disk *daisy.Disk) string { return "gce-ovf-import-tmp" }, ImageLabelKeyRetriever: func(imageName string) string { return "gce-ovf-import-tmp" }} rl.LabelResources(w) daisyutils.UpdateAllInstanceNoExternalIP(w, oi.params.NoExternalIP) } func (oi *OVFImporter) modifyWorkflowPreValidate(w *daisy.Workflow) { daisyovfutils.AddDiskImportSteps(w, (*oi.diskInfos)[1:]) oi.updateInstance(w) } func (oi *OVFImporter) getMachineType( ovfDescriptor *ovf.Envelope, project string, zone string) (string, error) { machineTypeProvider := ovfgceutils.MachineTypeProvider{ OvfDescriptor: ovfDescriptor, MachineType: oi.params.MachineType, ComputeClient: oi.computeClient, Project: project, Zone: zone, } return machineTypeProvider.GetMachineType() } func (oi *OVFImporter) setUpImportWorkflow() (*daisy.Workflow, error) { if err := ovfimportparams.ValidateAndParseParams(oi.params); err != nil { return nil, err } var ( project string zone string region string err error ) if project, err = param.GetProjectID(oi.mgce, oi.params.Project); err != nil { return nil, err } if zone, err = oi.getZone(project); err != nil { return nil, err } if region, err = oi.getRegion(zone); err != nil { return nil, err } if err := validateReleaseTrack(oi.params.ReleaseTrack); err != nil { return nil, err } if oi.params.ReleaseTrack == Alpha || oi.params.ReleaseTrack == Beta { oi.imageLocation = region } tmpGcsPath, err := oi.buildTmpGcsPath(project, region) if err != nil { return nil, err } ovfGcsPath, shouldCleanup, err := oi.getOvfGcsPath(tmpGcsPath) if shouldCleanup { oi.gcsPathToClean = ovfGcsPath } if err != nil { return nil, err } ovfDescriptor, diskInfos, err := ovfutils.GetOVFDescriptorAndDiskPaths( oi.ovfDescriptorLoader, ovfGcsPath) if err != nil { return nil, err } oi.diskInfos = &diskInfos var osIDValue string if oi.params.OsID == "" { if osIDValue, err = ovfutils.GetOSId(ovfDescriptor); err != nil { return nil, err } oi.Logger.Log( fmt.Sprintf("Found valid osType in OVF descriptor, importing VM with `%v` as OS.", osIDValue)) } else if err = daisyutils.ValidateOS(oi.params.OsID); err != nil { return nil, err } else { osIDValue = oi.params.OsID } translateWorkflowPath := "../image_import/" + daisyutils.GetTranslateWorkflowPath(osIDValue) machineTypeStr, err := oi.getMachineType(ovfDescriptor, project, zone) if err != nil { return nil, err } oi.Logger.Log(fmt.Sprintf("Will create instance of `%v` machine type.", machineTypeStr)) varMap := oi.buildDaisyVars(translateWorkflowPath, diskInfos[0].FilePath, machineTypeStr, region) workflow, err := daisycommon.ParseWorkflow(oi.workflowPath, varMap, project, zone, oi.params.ScratchBucketGcsPath, oi.params.Oauth, oi.params.Timeout, oi.params.Ce, oi.params.GcsLogsDisabled, oi.params.CloudLogsDisabled, oi.params.StdoutLogsDisabled) if err != nil { return nil, fmt.Errorf("error parsing workflow %q: %v", ovfImportWorkflow, err) } return workflow, nil } func validateReleaseTrack(releaseTrack string) error { if releaseTrack != "" && releaseTrack != Alpha && releaseTrack != Beta && releaseTrack != GA { return fmt.Errorf("invalid value for release-track flag: %v", releaseTrack) } return nil } // Import runs OVF import func (oi 
*OVFImporter) Import() error { oi.Logger.Log("Starting OVF import workflow.") w, err := oi.setUpImportWorkflow() if err != nil { oi.Logger.Log(err.Error()) return err } if err := w.RunWithModifiers(oi.ctx, oi.modifyWorkflowPreValidate, oi.modifyWorkflowPostValidate); err != nil { oi.Logger.Log(err.Error()) return err } oi.Logger.Log("OVF import workflow finished successfully.") return nil } // CleanUp performs clean up of any temporary resources or connections used for OVF import func (oi *OVFImporter) CleanUp() { oi.Logger.Log("Cleaning up.") if oi.storageClient != nil { if oi.gcsPathToClean != "" { err := oi.storageClient.DeleteGcsPath(oi.gcsPathToClean) if err != nil { oi.Logger.Log( fmt.Sprintf("couldn't delete GCS path %v: %v", oi.gcsPathToClean, err.Error())) } } err := oi.storageClient.Close() if err != nil { oi.Logger.Log(fmt.Sprintf("couldn't close storage client: %v", err.Error())) } } }
1
9,368
As I understand it, this flag only affects the "NoCleanup" disk. If so, we should give it a clearer name. I'm asking because I plan to add another flag that forces cleanup of the other, non-NoCleanup disks.
GoogleCloudPlatform-compute-image-tools
go
@@ -40,7 +40,7 @@ import org.apache.spark.sql.util.CaseInsensitiveStringMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class SparkStreamingWrite extends SparkBatchWrite implements StreamingWrite { +class SparkStreamingWrite extends BaseBatchWrite implements StreamingWrite { private static final Logger LOG = LoggerFactory.getLogger(SparkStreamingWrite.class); private static final String QUERY_ID_PROPERTY = "spark.sql.streaming.queryId";
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.source; import java.util.Map; import org.apache.iceberg.AppendFiles; import org.apache.iceberg.DataFile; import org.apache.iceberg.OverwriteFiles; import org.apache.iceberg.Schema; import org.apache.iceberg.Snapshot; import org.apache.iceberg.SnapshotUpdate; import org.apache.iceberg.Table; import org.apache.iceberg.encryption.EncryptionManager; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.io.FileIO; import org.apache.spark.broadcast.Broadcast; import org.apache.spark.sql.connector.write.PhysicalWriteInfo; import org.apache.spark.sql.connector.write.WriterCommitMessage; import org.apache.spark.sql.connector.write.streaming.StreamingDataWriterFactory; import org.apache.spark.sql.connector.write.streaming.StreamingWrite; import org.apache.spark.sql.types.StructType; import org.apache.spark.sql.util.CaseInsensitiveStringMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class SparkStreamingWrite extends SparkBatchWrite implements StreamingWrite { private static final Logger LOG = LoggerFactory.getLogger(SparkStreamingWrite.class); private static final String QUERY_ID_PROPERTY = "spark.sql.streaming.queryId"; private static final String EPOCH_ID_PROPERTY = "spark.sql.streaming.epochId"; private final boolean truncateBatches; private final String queryId; SparkStreamingWrite(Table table, Broadcast<FileIO> io, Broadcast<EncryptionManager> encryptionManager, CaseInsensitiveStringMap options, boolean truncateBatches, String queryId, String applicationId, String wapId, Schema writeSchema, StructType dsSchema) { super( table, io, encryptionManager, options, false, truncateBatches, Expressions.alwaysTrue(), applicationId, wapId, writeSchema, dsSchema); this.truncateBatches = truncateBatches; this.queryId = queryId; } @Override public StreamingDataWriterFactory createStreamingWriterFactory(PhysicalWriteInfo info) { // the writer factory works for both batch and streaming return createBatchWriterFactory(info); } @Override public void commit(long epochId, WriterCommitMessage[] messages) { LOG.info("Committing epoch {} for query {} in {} mode", epochId, queryId, truncateBatches ? 
"complete" : "append"); table().refresh(); Long lastCommittedEpochId = getLastCommittedEpochId(); if (lastCommittedEpochId != null && epochId <= lastCommittedEpochId) { LOG.info("Skipping epoch {} for query {} as it was already committed", epochId, queryId); return; } if (truncateBatches) { OverwriteFiles overwriteFiles = table().newOverwrite(); overwriteFiles.overwriteByRowFilter(Expressions.alwaysTrue()); int numFiles = 0; for (DataFile file : files(messages)) { overwriteFiles.addFile(file); numFiles++; } commit(overwriteFiles, epochId, numFiles, "streaming complete overwrite"); } else { AppendFiles append = table().newFastAppend(); int numFiles = 0; for (DataFile file : files(messages)) { append.appendFile(file); numFiles++; } commit(append, epochId, numFiles, "streaming append"); } } private <T> void commit(SnapshotUpdate<T> snapshotUpdate, long epochId, int numFiles, String description) { snapshotUpdate.set(QUERY_ID_PROPERTY, queryId); snapshotUpdate.set(EPOCH_ID_PROPERTY, Long.toString(epochId)); commitOperation(snapshotUpdate, numFiles, description); } @Override public void abort(long epochId, WriterCommitMessage[] messages) { abort(messages); } private Long getLastCommittedEpochId() { Snapshot snapshot = table().currentSnapshot(); Long lastCommittedEpochId = null; while (snapshot != null) { Map<String, String> summary = snapshot.summary(); String snapshotQueryId = summary.get(QUERY_ID_PROPERTY); if (queryId.equals(snapshotQueryId)) { lastCommittedEpochId = Long.valueOf(summary.get(EPOCH_ID_PROPERTY)); break; } Long parentSnapshotId = snapshot.parentId(); snapshot = parentSnapshotId != null ? table().snapshot(parentSnapshotId) : null; } return lastCommittedEpochId; } }
1
27,469
In my changes for `RequiresDistributionAndOrdering`, this class went away and was replaced by an inner class. I think that pattern worked well. Maybe we could do that before this change to reduce the number of changes here.
apache-iceberg
java
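To illustrate the nested-class pattern the reviewer refers to, here is a minimal Java sketch. The outer and inner class names are hypothetical; only the StreamingWrite method signatures are taken from the code above, and this is not Iceberg's actual implementation:

import org.apache.spark.sql.connector.write.PhysicalWriteInfo;
import org.apache.spark.sql.connector.write.WriterCommitMessage;
import org.apache.spark.sql.connector.write.streaming.StreamingDataWriterFactory;
import org.apache.spark.sql.connector.write.streaming.StreamingWrite;

// Hypothetical outer batch-write class; only the shape of the pattern matters here.
class SparkWriteSketch {

  // Callers ask the batch write for its streaming flavour instead of
  // constructing a separate public SparkStreamingWrite class.
  StreamingWrite asStreamingWrite(String queryId, boolean truncateBatches) {
    return new StreamWrite(queryId, truncateBatches);
  }

  // The inner class can reuse the outer write's fields and helpers directly,
  // so there is no second top-level type to keep in sync.
  private class StreamWrite implements StreamingWrite {
    private final String queryId;
    private final boolean truncateBatches;

    StreamWrite(String queryId, boolean truncateBatches) {
      this.queryId = queryId;
      this.truncateBatches = truncateBatches;
    }

    @Override
    public StreamingDataWriterFactory createStreamingWriterFactory(PhysicalWriteInfo info) {
      // Sketch only: a real implementation would delegate to the batch writer factory.
      throw new UnsupportedOperationException("sketch");
    }

    @Override
    public void commit(long epochId, WriterCommitMessage[] messages) {
      // Sketch only: append or overwrite the files for this epoch, recording
      // queryId and epochId in the snapshot so replayed epochs can be skipped.
    }

    @Override
    public void abort(long epochId, WriterCommitMessage[] messages) {
      // Sketch only: clean up any files written for this epoch.
    }
  }
}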
@@ -95,10 +95,10 @@ const std::unordered_map<std::string, ItemParseAttributes_t> ItemParseAttributes {"magicpointspercent", ITEM_PARSE_MAGICPOINTSPERCENT}, {"criticalhitchance", ITEM_PARSE_CRITICALHITCHANCE}, {"criticalhitamount", ITEM_PARSE_CRITICALHITAMOUNT}, - {"hitpointsleechchance", ITEM_PARSE_HITPOINTSLEECHCHANCE}, - {"hitpointsleechamount", ITEM_PARSE_HITPOINTSLEECHAMOUNT}, - {"manapointsleechchance", ITEM_PARSE_MANAPOINTSLEECHCHANCE}, - {"manapointsleechamount", ITEM_PARSE_MANAPOINTSLEECHAMOUNT}, + {"lifeleechchance", ITEM_PARSE_LIFELEECHCHANCE}, + {"lifeleechamount", ITEM_PARSE_LIFELEECHAMOUNT}, + {"manaleechchance", ITEM_PARSE_MANALEECHCHANCE}, + {"manaleechamount", ITEM_PARSE_MANALEECHAMOUNT}, {"fieldabsorbpercentenergy", ITEM_PARSE_FIELDABSORBPERCENTENERGY}, {"fieldabsorbpercentfire", ITEM_PARSE_FIELDABSORBPERCENTFIRE}, {"fieldabsorbpercentpoison", ITEM_PARSE_FIELDABSORBPERCENTPOISON},
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2018 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "items.h" #include "spells.h" #include "movement.h" #include "weapons.h" #include "pugicast.h" extern MoveEvents* g_moveEvents; extern Weapons* g_weapons; const std::unordered_map<std::string, ItemParseAttributes_t> ItemParseAttributesMap = { {"type", ITEM_PARSE_TYPE}, {"description", ITEM_PARSE_DESCRIPTION}, {"runespellname", ITEM_PARSE_RUNESPELLNAME}, {"weight", ITEM_PARSE_WEIGHT}, {"showcount", ITEM_PARSE_SHOWCOUNT}, {"armor", ITEM_PARSE_ARMOR}, {"defense", ITEM_PARSE_DEFENSE}, {"extradef", ITEM_PARSE_EXTRADEF}, {"attack", ITEM_PARSE_ATTACK}, {"rotateto", ITEM_PARSE_ROTATETO}, {"moveable", ITEM_PARSE_MOVEABLE}, {"movable", ITEM_PARSE_MOVEABLE}, {"blockprojectile", ITEM_PARSE_BLOCKPROJECTILE}, {"allowpickupable", ITEM_PARSE_PICKUPABLE}, {"pickupable", ITEM_PARSE_PICKUPABLE}, {"forceserialize", ITEM_PARSE_FORCESERIALIZE}, {"forcesave", ITEM_PARSE_FORCESERIALIZE}, {"floorchange", ITEM_PARSE_FLOORCHANGE}, {"corpsetype", ITEM_PARSE_CORPSETYPE}, {"containersize", ITEM_PARSE_CONTAINERSIZE}, {"fluidsource", ITEM_PARSE_FLUIDSOURCE}, {"readable", ITEM_PARSE_READABLE}, {"writeable", ITEM_PARSE_WRITEABLE}, {"maxtextlen", ITEM_PARSE_MAXTEXTLEN}, {"writeonceitemid", ITEM_PARSE_WRITEONCEITEMID}, {"weapontype", ITEM_PARSE_WEAPONTYPE}, {"slottype", ITEM_PARSE_SLOTTYPE}, {"ammotype", ITEM_PARSE_AMMOTYPE}, {"shoottype", ITEM_PARSE_SHOOTTYPE}, {"effect", ITEM_PARSE_EFFECT}, {"range", ITEM_PARSE_RANGE}, {"stopduration", ITEM_PARSE_STOPDURATION}, {"decayto", ITEM_PARSE_DECAYTO}, {"transformequipto", ITEM_PARSE_TRANSFORMEQUIPTO}, {"transformdeequipto", ITEM_PARSE_TRANSFORMDEEQUIPTO}, {"duration", ITEM_PARSE_DURATION}, {"showduration", ITEM_PARSE_SHOWDURATION}, {"charges", ITEM_PARSE_CHARGES}, {"showcharges", ITEM_PARSE_SHOWCHARGES}, {"showattributes", ITEM_PARSE_SHOWATTRIBUTES}, {"hitchance", ITEM_PARSE_HITCHANCE}, {"maxhitchance", ITEM_PARSE_MAXHITCHANCE}, {"invisible", ITEM_PARSE_INVISIBLE}, {"speed", ITEM_PARSE_SPEED}, {"healthgain", ITEM_PARSE_HEALTHGAIN}, {"healthticks", ITEM_PARSE_HEALTHTICKS}, {"managain", ITEM_PARSE_MANAGAIN}, {"manaticks", ITEM_PARSE_MANATICKS}, {"manashield", ITEM_PARSE_MANASHIELD}, {"skillsword", ITEM_PARSE_SKILLSWORD}, {"skillaxe", ITEM_PARSE_SKILLAXE}, {"skillclub", ITEM_PARSE_SKILLCLUB}, {"skilldist", ITEM_PARSE_SKILLDIST}, {"skillfish", ITEM_PARSE_SKILLFISH}, {"skillshield", ITEM_PARSE_SKILLSHIELD}, {"skillfist", ITEM_PARSE_SKILLFIST}, {"maxhitpoints", ITEM_PARSE_MAXHITPOINTS}, {"maxhitpointspercent", ITEM_PARSE_MAXHITPOINTSPERCENT}, {"maxmanapoints", ITEM_PARSE_MAXMANAPOINTS}, {"maxmanapointspercent", ITEM_PARSE_MAXMANAPOINTSPERCENT}, {"magicpoints", ITEM_PARSE_MAGICPOINTS}, {"magiclevelpoints", ITEM_PARSE_MAGICPOINTS}, 
{"magicpointspercent", ITEM_PARSE_MAGICPOINTSPERCENT}, {"criticalhitchance", ITEM_PARSE_CRITICALHITCHANCE}, {"criticalhitamount", ITEM_PARSE_CRITICALHITAMOUNT}, {"hitpointsleechchance", ITEM_PARSE_HITPOINTSLEECHCHANCE}, {"hitpointsleechamount", ITEM_PARSE_HITPOINTSLEECHAMOUNT}, {"manapointsleechchance", ITEM_PARSE_MANAPOINTSLEECHCHANCE}, {"manapointsleechamount", ITEM_PARSE_MANAPOINTSLEECHAMOUNT}, {"fieldabsorbpercentenergy", ITEM_PARSE_FIELDABSORBPERCENTENERGY}, {"fieldabsorbpercentfire", ITEM_PARSE_FIELDABSORBPERCENTFIRE}, {"fieldabsorbpercentpoison", ITEM_PARSE_FIELDABSORBPERCENTPOISON}, {"fieldabsorbpercentearth", ITEM_PARSE_FIELDABSORBPERCENTPOISON}, {"absorbpercentall", ITEM_PARSE_ABSORBPERCENTALL}, {"absorbpercentallelements", ITEM_PARSE_ABSORBPERCENTALL}, {"absorbpercentelements", ITEM_PARSE_ABSORBPERCENTELEMENTS}, {"absorbpercentmagic", ITEM_PARSE_ABSORBPERCENTMAGIC}, {"absorbpercentenergy", ITEM_PARSE_ABSORBPERCENTENERGY}, {"absorbpercentfire", ITEM_PARSE_ABSORBPERCENTFIRE}, {"absorbpercentpoison", ITEM_PARSE_ABSORBPERCENTPOISON}, {"absorbpercentearth", ITEM_PARSE_ABSORBPERCENTPOISON}, {"absorbpercentice", ITEM_PARSE_ABSORBPERCENTICE}, {"absorbpercentholy", ITEM_PARSE_ABSORBPERCENTHOLY}, {"absorbpercentdeath", ITEM_PARSE_ABSORBPERCENTDEATH}, {"absorbpercentlifedrain", ITEM_PARSE_ABSORBPERCENTLIFEDRAIN}, {"absorbpercentmanadrain", ITEM_PARSE_ABSORBPERCENTMANADRAIN}, {"absorbpercentdrown", ITEM_PARSE_ABSORBPERCENTDROWN}, {"absorbpercentphysical", ITEM_PARSE_ABSORBPERCENTPHYSICAL}, {"absorbpercenthealing", ITEM_PARSE_ABSORBPERCENTHEALING}, {"absorbpercentundefined", ITEM_PARSE_ABSORBPERCENTUNDEFINED}, {"suppressdrunk", ITEM_PARSE_SUPPRESSDRUNK}, {"suppressenergy", ITEM_PARSE_SUPPRESSENERGY}, {"suppressfire", ITEM_PARSE_SUPPRESSFIRE}, {"suppresspoison", ITEM_PARSE_SUPPRESSPOISON}, {"suppressdrown", ITEM_PARSE_SUPPRESSDROWN}, {"suppressphysical", ITEM_PARSE_SUPPRESSPHYSICAL}, {"suppressfreeze", ITEM_PARSE_SUPPRESSFREEZE}, {"suppressdazzle", ITEM_PARSE_SUPPRESSDAZZLE}, {"suppresscurse", ITEM_PARSE_SUPPRESSCURSE}, {"field", ITEM_PARSE_FIELD}, {"replaceable", ITEM_PARSE_REPLACEABLE}, {"partnerdirection", ITEM_PARSE_PARTNERDIRECTION}, {"leveldoor", ITEM_PARSE_LEVELDOOR}, {"maletransformto", ITEM_PARSE_MALETRANSFORMTO}, {"malesleeper", ITEM_PARSE_MALETRANSFORMTO}, {"femaletransformto", ITEM_PARSE_FEMALETRANSFORMTO}, {"femalesleeper", ITEM_PARSE_FEMALETRANSFORMTO}, {"transformto", ITEM_PARSE_TRANSFORMTO}, {"destroyto", ITEM_PARSE_DESTROYTO}, {"elementice", ITEM_PARSE_ELEMENTICE}, {"elementearth", ITEM_PARSE_ELEMENTEARTH}, {"elementfire", ITEM_PARSE_ELEMENTFIRE}, {"elementenergy", ITEM_PARSE_ELEMENTENERGY}, {"walkstack", ITEM_PARSE_WALKSTACK}, {"blocking", ITEM_PARSE_BLOCKING}, {"allowdistread", ITEM_PARSE_ALLOWDISTREAD}, }; const std::unordered_map<std::string, ItemTypes_t> ItemTypesMap = { {"key", ITEM_TYPE_KEY}, {"magicfield", ITEM_TYPE_MAGICFIELD}, {"container", ITEM_TYPE_CONTAINER}, {"depot", ITEM_TYPE_DEPOT}, {"mailbox", ITEM_TYPE_MAILBOX}, {"trashholder", ITEM_TYPE_TRASHHOLDER}, {"teleport", ITEM_TYPE_TELEPORT}, {"door", ITEM_TYPE_DOOR}, {"bed", ITEM_TYPE_BED}, {"rune", ITEM_TYPE_RUNE}, }; const std::unordered_map<std::string, tileflags_t> TileStatesMap = { {"down", TILESTATE_FLOORCHANGE_DOWN}, {"north", TILESTATE_FLOORCHANGE_NORTH}, {"south", TILESTATE_FLOORCHANGE_SOUTH}, {"southalt", TILESTATE_FLOORCHANGE_SOUTH_ALT}, {"west", TILESTATE_FLOORCHANGE_WEST}, {"east", TILESTATE_FLOORCHANGE_EAST}, {"eastalt", TILESTATE_FLOORCHANGE_EAST_ALT}, }; const std::unordered_map<std::string, 
RaceType_t> RaceTypesMap = { {"venom", RACE_VENOM}, {"blood", RACE_BLOOD}, {"undead", RACE_UNDEAD}, {"fire", RACE_FIRE}, {"energy", RACE_ENERGY}, }; const std::unordered_map<std::string, WeaponType_t> WeaponTypesMap = { {"sword", WEAPON_SWORD}, {"club", WEAPON_CLUB}, {"axe", WEAPON_AXE}, {"shield", WEAPON_SHIELD}, {"distance", WEAPON_DISTANCE}, {"wand", WEAPON_WAND}, {"ammunition", WEAPON_AMMO}, }; const std::unordered_map<std::string, FluidTypes_t> FluidTypesMap = { {"water", FLUID_WATER }, {"blood", FLUID_BLOOD}, {"beer", FLUID_BEER}, {"slime", FLUID_SLIME}, {"lemonade", FLUID_LEMONADE}, {"milk", FLUID_MILK }, {"mana", FLUID_MANA }, {"life", FLUID_LIFE }, {"oil", FLUID_OIL }, {"urine", FLUID_URINE }, {"coconut", FLUID_COCONUTMILK }, {"wine", FLUID_WINE }, {"mud", FLUID_MUD }, {"fruitjuice", FLUID_FRUITJUICE }, {"lava", FLUID_LAVA }, {"rum", FLUID_RUM }, {"swamp", FLUID_SWAMP }, {"tea", FLUID_TEA }, {"mead", FLUID_MEAD }, }; Items::Items() { items.reserve(30000); nameToItems.reserve(30000); } void Items::clear() { items.clear(); reverseItemMap.clear(); nameToItems.clear(); } bool Items::reload() { clear(); loadFromOtb("data/items/items.otb"); if (!loadFromXml()) { return false; } g_moveEvents->reload(); g_weapons->reload(); g_weapons->loadDefaults(); return true; } constexpr auto OTBI = OTB::Identifier{{'O','T', 'B', 'I'}}; bool Items::loadFromOtb(const std::string& file) { OTB::Loader loader{file, OTBI}; auto& root = loader.parseTree(); PropStream props; if (loader.getProps(root, props)) { //4 byte flags //attributes //0x01 = version data uint32_t flags; if (!props.read<uint32_t>(flags)) { return false; } uint8_t attr; if (!props.read<uint8_t>(attr)) { return false; } if (attr == ROOT_ATTR_VERSION) { uint16_t datalen; if (!props.read<uint16_t>(datalen)) { return false; } if (datalen != sizeof(VERSIONINFO)) { return false; } VERSIONINFO vi; if (!props.read(vi)) { return false; } majorVersion = vi.dwMajorVersion; //items otb format file version minorVersion = vi.dwMinorVersion; //client version buildNumber = vi.dwBuildNumber; //revision } } if (majorVersion == 0xFFFFFFFF) { std::cout << "[Warning - Items::loadFromOtb] items.otb using generic client version." << std::endl; } else if (majorVersion != 3) { std::cout << "Old version detected, a newer version of items.otb is required." << std::endl; return false; } else if (minorVersion < CLIENT_VERSION_1098) { std::cout << "A newer version of items.otb is required." 
<< std::endl; return false; } for (auto& itemNode : root.children) { PropStream stream; if (!loader.getProps(itemNode, stream)) { return false; } uint32_t flags; if (!stream.read<uint32_t>(flags)) { return false; } uint16_t serverId = 0; uint16_t clientId = 0; uint16_t speed = 0; uint16_t wareId = 0; uint8_t lightLevel = 0; uint8_t lightColor = 0; uint8_t alwaysOnTopOrder = 0; uint8_t attrib; while (stream.read<uint8_t>(attrib)) { uint16_t datalen; if (!stream.read<uint16_t>(datalen)) { return false; } switch (attrib) { case ITEM_ATTR_SERVERID: { if (datalen != sizeof(uint16_t)) { return false; } if (!stream.read<uint16_t>(serverId)) { return false; } if (serverId > 30000 && serverId < 30100) { serverId -= 30000; } break; } case ITEM_ATTR_CLIENTID: { if (datalen != sizeof(uint16_t)) { return false; } if (!stream.read<uint16_t>(clientId)) { return false; } break; } case ITEM_ATTR_SPEED: { if (datalen != sizeof(uint16_t)) { return false; } if (!stream.read<uint16_t>(speed)) { return false; } break; } case ITEM_ATTR_LIGHT2: { if (datalen != sizeof(lightBlock2)) { return false; } lightBlock2 lb2; if (!stream.read(lb2)) { return false; } lightLevel = static_cast<uint8_t>(lb2.lightLevel); lightColor = static_cast<uint8_t>(lb2.lightColor); break; } case ITEM_ATTR_TOPORDER: { if (datalen != sizeof(uint8_t)) { return false; } if (!stream.read<uint8_t>(alwaysOnTopOrder)) { return false; } break; } case ITEM_ATTR_WAREID: { if (datalen != sizeof(uint16_t)) { return false; } if (!stream.read<uint16_t>(wareId)) { return false; } break; } default: { //skip unknown attributes if (!stream.skip(datalen)) { return false; } break; } } } reverseItemMap.emplace(clientId, serverId); // store the found item if (serverId >= items.size()) { items.resize(serverId + 1); } ItemType& iType = items[serverId]; iType.group = static_cast<itemgroup_t>(itemNode.type); switch (itemNode.type) { case ITEM_GROUP_CONTAINER: iType.type = ITEM_TYPE_CONTAINER; break; case ITEM_GROUP_DOOR: //not used iType.type = ITEM_TYPE_DOOR; break; case ITEM_GROUP_MAGICFIELD: //not used iType.type = ITEM_TYPE_MAGICFIELD; break; case ITEM_GROUP_TELEPORT: //not used iType.type = ITEM_TYPE_TELEPORT; break; case ITEM_GROUP_NONE: case ITEM_GROUP_GROUND: case ITEM_GROUP_SPLASH: case ITEM_GROUP_FLUID: case ITEM_GROUP_CHARGES: case ITEM_GROUP_DEPRECATED: break; default: return false; } iType.blockSolid = hasBitSet(FLAG_BLOCK_SOLID, flags); iType.blockProjectile = hasBitSet(FLAG_BLOCK_PROJECTILE, flags); iType.blockPathFind = hasBitSet(FLAG_BLOCK_PATHFIND, flags); iType.hasHeight = hasBitSet(FLAG_HAS_HEIGHT, flags); iType.useable = hasBitSet(FLAG_USEABLE, flags); iType.pickupable = hasBitSet(FLAG_PICKUPABLE, flags); iType.moveable = hasBitSet(FLAG_MOVEABLE, flags); iType.stackable = hasBitSet(FLAG_STACKABLE, flags); iType.alwaysOnTop = hasBitSet(FLAG_ALWAYSONTOP, flags); iType.isVertical = hasBitSet(FLAG_VERTICAL, flags); iType.isHorizontal = hasBitSet(FLAG_HORIZONTAL, flags); iType.isHangable = hasBitSet(FLAG_HANGABLE, flags); iType.allowDistRead = hasBitSet(FLAG_ALLOWDISTREAD, flags); iType.rotatable = hasBitSet(FLAG_ROTATABLE, flags); iType.canReadText = hasBitSet(FLAG_READABLE, flags); iType.lookThrough = hasBitSet(FLAG_LOOKTHROUGH, flags); iType.isAnimation = hasBitSet(FLAG_ANIMATION, flags); // iType.walkStack = !hasBitSet(FLAG_FULLTILE, flags); iType.forceUse = hasBitSet(FLAG_FORCEUSE, flags); iType.id = serverId; iType.clientId = clientId; iType.speed = speed; iType.lightLevel = lightLevel; iType.lightColor = lightColor; iType.wareId = wareId; 
iType.alwaysOnTopOrder = alwaysOnTopOrder; } items.shrink_to_fit(); return true; } bool Items::loadFromXml() { pugi::xml_document doc; pugi::xml_parse_result result = doc.load_file("data/items/items.xml"); if (!result) { printXMLError("Error - Items::loadFromXml", "data/items/items.xml", result); return false; } for (auto itemNode : doc.child("items").children()) { pugi::xml_attribute idAttribute = itemNode.attribute("id"); if (idAttribute) { parseItemNode(itemNode, pugi::cast<uint16_t>(idAttribute.value())); continue; } pugi::xml_attribute fromIdAttribute = itemNode.attribute("fromid"); if (!fromIdAttribute) { std::cout << "[Warning - Items::loadFromXml] No item id found" << std::endl; continue; } pugi::xml_attribute toIdAttribute = itemNode.attribute("toid"); if (!toIdAttribute) { std::cout << "[Warning - Items::loadFromXml] fromid (" << fromIdAttribute.value() << ") without toid" << std::endl; continue; } uint16_t id = pugi::cast<uint16_t>(fromIdAttribute.value()); uint16_t toId = pugi::cast<uint16_t>(toIdAttribute.value()); while (id <= toId) { parseItemNode(itemNode, id++); } } buildInventoryList(); return true; } void Items::buildInventoryList() { inventory.reserve(items.size()); for (const auto& type: items) { if (type.weaponType != WEAPON_NONE || type.ammoType != AMMO_NONE || type.attack != 0 || type.defense != 0 || type.extraDefense != 0 || type.armor != 0 || type.slotPosition & SLOTP_NECKLACE || type.slotPosition & SLOTP_RING || type.slotPosition & SLOTP_AMMO || type.slotPosition & SLOTP_FEET || type.slotPosition & SLOTP_HEAD || type.slotPosition & SLOTP_ARMOR || type.slotPosition & SLOTP_LEGS) { inventory.push_back(type.clientId); } } inventory.shrink_to_fit(); std::sort(inventory.begin(), inventory.end()); } void Items::parseItemNode(const pugi::xml_node& itemNode, uint16_t id) { if (id > 30000 && id < 30100) { id -= 30000; if (id >= items.size()) { items.resize(id + 1); } ItemType& iType = items[id]; iType.id = id; } ItemType& it = getItemType(id); if (it.id == 0) { return; } it.name = itemNode.attribute("name").as_string(); nameToItems.insert({ asLowerCaseString(it.name), id }); pugi::xml_attribute articleAttribute = itemNode.attribute("article"); if (articleAttribute) { it.article = articleAttribute.as_string(); } pugi::xml_attribute pluralAttribute = itemNode.attribute("plural"); if (pluralAttribute) { it.pluralName = pluralAttribute.as_string(); } Abilities& abilities = it.getAbilities(); for (auto attributeNode : itemNode.children()) { pugi::xml_attribute keyAttribute = attributeNode.attribute("key"); if (!keyAttribute) { continue; } pugi::xml_attribute valueAttribute = attributeNode.attribute("value"); if (!valueAttribute) { continue; } std::string tmpStrValue = asLowerCaseString(keyAttribute.as_string()); auto parseAttribute = ItemParseAttributesMap.find(tmpStrValue); if (parseAttribute != ItemParseAttributesMap.end()) { ItemParseAttributes_t parseType = parseAttribute->second; switch (parseType) { case ITEM_PARSE_TYPE: { tmpStrValue = asLowerCaseString(valueAttribute.as_string()); auto it2 = ItemTypesMap.find(tmpStrValue); if (it2 != ItemTypesMap.end()) { it.type = it2->second; if (it.type == ITEM_TYPE_CONTAINER) { it.group = ITEM_GROUP_CONTAINER; } } else { std::cout << "[Warning - Items::parseItemNode] Unknown type: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_DESCRIPTION: { it.description = valueAttribute.as_string(); break; } case ITEM_PARSE_RUNESPELLNAME: { it.runeSpellName = valueAttribute.as_string(); break; } case ITEM_PARSE_WEIGHT: 
{ it.weight = pugi::cast<uint32_t>(valueAttribute.value()); break; } case ITEM_PARSE_SHOWCOUNT: { it.showCount = valueAttribute.as_bool(); break; } case ITEM_PARSE_ARMOR: { it.armor = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_DEFENSE: { it.defense = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_EXTRADEF: { it.extraDefense = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_ATTACK: { it.attack = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_ROTATETO: { it.rotateTo = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MOVEABLE: { it.moveable = valueAttribute.as_bool(); break; } case ITEM_PARSE_BLOCKPROJECTILE: { it.blockProjectile = valueAttribute.as_bool(); break; } case ITEM_PARSE_PICKUPABLE: { it.allowPickupable = valueAttribute.as_bool(); break; } case ITEM_PARSE_FORCESERIALIZE: { it.forceSerialize = valueAttribute.as_bool(); break; } case ITEM_PARSE_FLOORCHANGE: { tmpStrValue = asLowerCaseString(valueAttribute.as_string()); auto it2 = TileStatesMap.find(tmpStrValue); if (it2 != TileStatesMap.end()) { it.floorChange = it2->second; } else { std::cout << "[Warning - Items::parseItemNode] Unknown floorChange: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_CORPSETYPE: { tmpStrValue = asLowerCaseString(valueAttribute.as_string()); auto it2 = RaceTypesMap.find(tmpStrValue); if (it2 != RaceTypesMap.end()) { it.corpseType = it2->second; } else { std::cout << "[Warning - Items::parseItemNode] Unknown corpseType: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_CONTAINERSIZE: { it.maxItems = pugi::cast<uint16_t>(valueAttribute.value()); break; } case ITEM_PARSE_FLUIDSOURCE: { tmpStrValue = asLowerCaseString(valueAttribute.as_string()); auto it2 = FluidTypesMap.find(tmpStrValue); if (it2 != FluidTypesMap.end()) { it.fluidSource = it2->second; } else { std::cout << "[Warning - Items::parseItemNode] Unknown fluidSource: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_READABLE: { it.canReadText = valueAttribute.as_bool(); break; } case ITEM_PARSE_WRITEABLE: { it.canWriteText = valueAttribute.as_bool(); it.canReadText = it.canWriteText; break; } case ITEM_PARSE_MAXTEXTLEN: { it.maxTextLen = pugi::cast<uint16_t>(valueAttribute.value()); break; } case ITEM_PARSE_WRITEONCEITEMID: { it.writeOnceItemId = pugi::cast<uint16_t>(valueAttribute.value()); break; } case ITEM_PARSE_WEAPONTYPE: { tmpStrValue = asLowerCaseString(valueAttribute.as_string()); auto it2 = WeaponTypesMap.find(tmpStrValue); if (it2 != WeaponTypesMap.end()) { it.weaponType = it2->second; } else { std::cout << "[Warning - Items::parseItemNode] Unknown weaponType: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_SLOTTYPE: { tmpStrValue = asLowerCaseString(valueAttribute.as_string()); if (tmpStrValue == "head") { it.slotPosition |= SLOTP_HEAD; } else if (tmpStrValue == "body") { it.slotPosition |= SLOTP_ARMOR; } else if (tmpStrValue == "legs") { it.slotPosition |= SLOTP_LEGS; } else if (tmpStrValue == "feet") { it.slotPosition |= SLOTP_FEET; } else if (tmpStrValue == "backpack") { it.slotPosition |= SLOTP_BACKPACK; } else if (tmpStrValue == "two-handed") { it.slotPosition |= SLOTP_TWO_HAND; } else if (tmpStrValue == "right-hand") { it.slotPosition &= ~SLOTP_LEFT; } else if (tmpStrValue == "left-hand") { it.slotPosition &= ~SLOTP_RIGHT; } else if (tmpStrValue == "necklace") { it.slotPosition |= SLOTP_NECKLACE; } else if (tmpStrValue == 
"ring") { it.slotPosition |= SLOTP_RING; } else if (tmpStrValue == "ammo") { it.slotPosition |= SLOTP_AMMO; } else if (tmpStrValue == "hand") { it.slotPosition |= SLOTP_HAND; } else { std::cout << "[Warning - Items::parseItemNode] Unknown slotType: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_AMMOTYPE: { it.ammoType = getAmmoType(asLowerCaseString(valueAttribute.as_string())); if (it.ammoType == AMMO_NONE) { std::cout << "[Warning - Items::parseItemNode] Unknown ammoType: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_SHOOTTYPE: { ShootType_t shoot = getShootType(asLowerCaseString(valueAttribute.as_string())); if (shoot != CONST_ANI_NONE) { it.shootType = shoot; } else { std::cout << "[Warning - Items::parseItemNode] Unknown shootType: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_EFFECT: { MagicEffectClasses effect = getMagicEffect(asLowerCaseString(valueAttribute.as_string())); if (effect != CONST_ME_NONE) { it.magicEffect = effect; } else { std::cout << "[Warning - Items::parseItemNode] Unknown effect: " << valueAttribute.as_string() << std::endl; } break; } case ITEM_PARSE_RANGE: { it.shootRange = pugi::cast<uint16_t>(valueAttribute.value()); break; } case ITEM_PARSE_STOPDURATION: { it.stopTime = valueAttribute.as_bool(); break; } case ITEM_PARSE_DECAYTO: { it.decayTo = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_TRANSFORMEQUIPTO: { it.transformEquipTo = pugi::cast<uint16_t>(valueAttribute.value()); break; } case ITEM_PARSE_TRANSFORMDEEQUIPTO: { it.transformDeEquipTo = pugi::cast<uint16_t>(valueAttribute.value()); break; } case ITEM_PARSE_DURATION: { it.decayTime = pugi::cast<uint32_t>(valueAttribute.value()); break; } case ITEM_PARSE_SHOWDURATION: { it.showDuration = valueAttribute.as_bool(); break; } case ITEM_PARSE_CHARGES: { it.charges = pugi::cast<uint32_t>(valueAttribute.value()); break; } case ITEM_PARSE_SHOWCHARGES: { it.showCharges = valueAttribute.as_bool(); break; } case ITEM_PARSE_SHOWATTRIBUTES: { it.showAttributes = valueAttribute.as_bool(); break; } case ITEM_PARSE_HITCHANCE: { it.hitChance = std::min<int8_t>(100, std::max<int8_t>(-100, pugi::cast<int16_t>(valueAttribute.value()))); break; } case ITEM_PARSE_MAXHITCHANCE: { it.maxHitChance = std::min<uint32_t>(100, pugi::cast<uint32_t>(valueAttribute.value())); break; } case ITEM_PARSE_INVISIBLE: { abilities.invisible = valueAttribute.as_bool(); break; } case ITEM_PARSE_SPEED: { abilities.speed = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_HEALTHGAIN: { abilities.regeneration = true; abilities.healthGain = pugi::cast<uint32_t>(valueAttribute.value()); break; } case ITEM_PARSE_HEALTHTICKS: { abilities.regeneration = true; abilities.healthTicks = pugi::cast<uint32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MANAGAIN: { abilities.regeneration = true; abilities.manaGain = pugi::cast<uint32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MANATICKS: { abilities.regeneration = true; abilities.manaTicks = pugi::cast<uint32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MANASHIELD: { abilities.manaShield = valueAttribute.as_bool(); break; } case ITEM_PARSE_SKILLSWORD: { abilities.skills[SKILL_SWORD] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_SKILLAXE: { abilities.skills[SKILL_AXE] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_SKILLCLUB: { abilities.skills[SKILL_CLUB] = pugi::cast<int32_t>(valueAttribute.value()); break; } case 
ITEM_PARSE_SKILLDIST: { abilities.skills[SKILL_DISTANCE] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_SKILLFISH: { abilities.skills[SKILL_FISHING] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_SKILLSHIELD: { abilities.skills[SKILL_SHIELD] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_SKILLFIST: { abilities.skills[SKILL_FIST] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MAXHITPOINTS: { abilities.stats[STAT_MAXHITPOINTS] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MAXHITPOINTSPERCENT: { abilities.statsPercent[STAT_MAXHITPOINTS] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MAXMANAPOINTS: { abilities.stats[STAT_MAXMANAPOINTS] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MAXMANAPOINTSPERCENT: { abilities.statsPercent[STAT_MAXMANAPOINTS] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MAGICPOINTS: { abilities.stats[STAT_MAGICPOINTS] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MAGICPOINTSPERCENT: { abilities.statsPercent[STAT_MAGICPOINTS] = pugi::cast<int32_t>(valueAttribute.value()); break; } case ITEM_PARSE_FIELDABSORBPERCENTENERGY: { abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_FIELDABSORBPERCENTFIRE: { abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_FIELDABSORBPERCENTPOISON: { abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTALL: { int16_t value = pugi::cast<int16_t>(valueAttribute.value()); for (auto& i : abilities.absorbPercent) { i += value; } break; } case ITEM_PARSE_ABSORBPERCENTELEMENTS: { int16_t value = pugi::cast<int16_t>(valueAttribute.value()); abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += value; abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += value; abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += value; abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += value; break; } case ITEM_PARSE_ABSORBPERCENTMAGIC: { int16_t value = pugi::cast<int16_t>(valueAttribute.value()); abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += value; abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += value; abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += value; abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += value; abilities.absorbPercent[combatTypeToIndex(COMBAT_HOLYDAMAGE)] += value; abilities.absorbPercent[combatTypeToIndex(COMBAT_DEATHDAMAGE)] += value; break; } case ITEM_PARSE_ABSORBPERCENTENERGY: { abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTFIRE: { abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTPOISON: { abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTICE: { abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTHOLY: { 
abilities.absorbPercent[combatTypeToIndex(COMBAT_HOLYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTDEATH: { abilities.absorbPercent[combatTypeToIndex(COMBAT_DEATHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTLIFEDRAIN: { abilities.absorbPercent[combatTypeToIndex(COMBAT_LIFEDRAIN)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTMANADRAIN: { abilities.absorbPercent[combatTypeToIndex(COMBAT_MANADRAIN)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTDROWN: { abilities.absorbPercent[combatTypeToIndex(COMBAT_DROWNDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTPHYSICAL: { abilities.absorbPercent[combatTypeToIndex(COMBAT_PHYSICALDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTHEALING: { abilities.absorbPercent[combatTypeToIndex(COMBAT_HEALING)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ABSORBPERCENTUNDEFINED: { abilities.absorbPercent[combatTypeToIndex(COMBAT_UNDEFINEDDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value()); break; } case ITEM_PARSE_SUPPRESSDRUNK: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_DRUNK; } break; } case ITEM_PARSE_SUPPRESSENERGY: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_ENERGY; } break; } case ITEM_PARSE_SUPPRESSFIRE: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_FIRE; } break; } case ITEM_PARSE_SUPPRESSPOISON: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_POISON; } break; } case ITEM_PARSE_SUPPRESSDROWN: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_DROWN; } break; } case ITEM_PARSE_SUPPRESSPHYSICAL: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_BLEEDING; } break; } case ITEM_PARSE_SUPPRESSFREEZE: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_FREEZING; } break; } case ITEM_PARSE_SUPPRESSDAZZLE: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_DAZZLED; } break; } case ITEM_PARSE_SUPPRESSCURSE: { if (valueAttribute.as_bool()) { abilities.conditionSuppressions |= CONDITION_CURSED; } break; } case ITEM_PARSE_FIELD: { it.group = ITEM_GROUP_MAGICFIELD; it.type = ITEM_TYPE_MAGICFIELD; CombatType_t combatType = COMBAT_NONE; ConditionDamage* conditionDamage = nullptr; tmpStrValue = asLowerCaseString(valueAttribute.as_string()); if (tmpStrValue == "fire") { conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_FIRE); combatType = COMBAT_FIREDAMAGE; } else if (tmpStrValue == "energy") { conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_ENERGY); combatType = COMBAT_ENERGYDAMAGE; } else if (tmpStrValue == "poison") { conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_POISON); combatType = COMBAT_EARTHDAMAGE; } else if (tmpStrValue == "drown") { conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_DROWN); combatType = COMBAT_DROWNDAMAGE; } else if (tmpStrValue == "physical") { conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_BLEEDING); combatType = COMBAT_PHYSICALDAMAGE; } else { std::cout << "[Warning - Items::parseItemNode] Unknown field value: " << valueAttribute.as_string() << std::endl; } if (combatType != COMBAT_NONE) { it.combatType = 
combatType; it.conditionDamage.reset(conditionDamage); for (auto subAttributeNode : attributeNode.children()) { uint32_t ticks = 0; int32_t damage = 0; int32_t start = 0; int32_t count = 1; pugi::xml_attribute subKeyAttribute = subAttributeNode.attribute("key"); if (!subKeyAttribute) { continue; } pugi::xml_attribute subValueAttribute = subAttributeNode.attribute("value"); if (!subValueAttribute) { continue; } tmpStrValue = asLowerCaseString(subKeyAttribute.as_string()); if (tmpStrValue == "ticks") { ticks = pugi::cast<uint32_t>(subValueAttribute.value()); } else if (tmpStrValue == "count") { count = std::max<int32_t>(1, pugi::cast<int32_t>(subValueAttribute.value())); } else if (tmpStrValue == "start") { start = std::max<int32_t>(0, pugi::cast<int32_t>(subValueAttribute.value())); } else if (tmpStrValue == "damage") { damage = -pugi::cast<int32_t>(subValueAttribute.value()); if (start > 0) { std::list<int32_t> damageList; ConditionDamage::generateDamageList(damage, start, damageList); for (int32_t damageValue : damageList) { conditionDamage->addDamage(1, ticks, -damageValue); } start = 0; } else { conditionDamage->addDamage(count, ticks, damage); } } } conditionDamage->setParam(CONDITION_PARAM_FIELD, 1); if (conditionDamage->getTotalDamage() > 0) { conditionDamage->setParam(CONDITION_PARAM_FORCEUPDATE, 1); } } break; } case ITEM_PARSE_REPLACEABLE: { it.replaceable = valueAttribute.as_bool(); break; } case ITEM_PARSE_PARTNERDIRECTION: { it.bedPartnerDir = getDirection(valueAttribute.as_string()); break; } case ITEM_PARSE_LEVELDOOR: { it.levelDoor = pugi::cast<uint32_t>(valueAttribute.value()); break; } case ITEM_PARSE_MALETRANSFORMTO: { uint16_t value = pugi::cast<uint16_t>(valueAttribute.value()); it.transformToOnUse[PLAYERSEX_MALE] = value; ItemType& other = getItemType(value); if (other.transformToFree == 0) { other.transformToFree = it.id; } if (it.transformToOnUse[PLAYERSEX_FEMALE] == 0) { it.transformToOnUse[PLAYERSEX_FEMALE] = value; } break; } case ITEM_PARSE_FEMALETRANSFORMTO: { uint16_t value = pugi::cast<uint16_t>(valueAttribute.value()); it.transformToOnUse[PLAYERSEX_FEMALE] = value; ItemType& other = getItemType(value); if (other.transformToFree == 0) { other.transformToFree = it.id; } if (it.transformToOnUse[PLAYERSEX_MALE] == 0) { it.transformToOnUse[PLAYERSEX_MALE] = value; } break; } case ITEM_PARSE_TRANSFORMTO: { it.transformToFree = pugi::cast<uint16_t>(valueAttribute.value()); break; } case ITEM_PARSE_DESTROYTO: { it.destroyTo = pugi::cast<uint16_t>(valueAttribute.value()); break; } case ITEM_PARSE_ELEMENTICE: { abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value()); abilities.elementType = COMBAT_ICEDAMAGE; break; } case ITEM_PARSE_ELEMENTEARTH: { abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value()); abilities.elementType = COMBAT_EARTHDAMAGE; break; } case ITEM_PARSE_ELEMENTFIRE: { abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value()); abilities.elementType = COMBAT_FIREDAMAGE; break; } case ITEM_PARSE_ELEMENTENERGY: { abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value()); abilities.elementType = COMBAT_ENERGYDAMAGE; break; } case ITEM_PARSE_WALKSTACK: { it.walkStack = valueAttribute.as_bool(); break; } case ITEM_PARSE_BLOCKING: { it.blockSolid = valueAttribute.as_bool(); break; } case ITEM_PARSE_ALLOWDISTREAD: { it.allowDistRead = booleanString(valueAttribute.as_string()); break; } default: { // It should not ever get to here, only if you add a new key to the map and don't configure a case for it. 
std::cout << "[Warning - Items::parseItemNode] Not configured key value: " << keyAttribute.as_string() << std::endl; break; } } } else { std::cout << "[Warning - Items::parseItemNode] Unknown key value: " << keyAttribute.as_string() << std::endl; } } //check bed items if ((it.transformToFree != 0 || it.transformToOnUse[PLAYERSEX_FEMALE] != 0 || it.transformToOnUse[PLAYERSEX_MALE] != 0) && it.type != ITEM_TYPE_BED) { std::cout << "[Warning - Items::parseItemNode] Item " << it.id << " is not set as a bed-type" << std::endl; } } ItemType& Items::getItemType(size_t id) { if (id < items.size()) { return items[id]; } return items.front(); } const ItemType& Items::getItemType(size_t id) const { if (id < items.size()) { return items[id]; } return items.front(); } const ItemType& Items::getItemIdByClientId(uint16_t spriteId) const { auto it = reverseItemMap.find(spriteId); if (it != reverseItemMap.end()) { return getItemType(it->second); } return items.front(); } uint16_t Items::getItemIdByName(const std::string& name) { auto result = nameToItems.find(asLowerCaseString(name)); if (result == nameToItems.end()) return 0; return result->second; }
1
15,418
Shouldn't this be called `hp` instead of `life`?
otland-forgottenserver
cpp
@@ -106,7 +106,7 @@ public class BaseServer<T extends BaseServer> implements Server<T> { FilterHolder filterHolder = servletContextHandler.addFilter(CrossOriginFilter.class, "/*", EnumSet .of(DispatcherType.REQUEST)); - filterHolder.setInitParameter("allowedOrigins", "*"); + filterHolder.setInitParameter("allowedMethods", "GET,POST,PUT,DELETE,HEAD"); // Warning user LOG.warning("You have enabled CORS requests from any host. "
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.server; import org.eclipse.jetty.servlet.FilterHolder; import org.eclipse.jetty.servlets.CrossOriginFilter; import org.openqa.selenium.WebDriverException; import org.openqa.selenium.net.NetworkUtils; import org.openqa.selenium.net.PortProber; import org.openqa.selenium.remote.http.HttpHandler; import org.eclipse.jetty.security.ConstraintMapping; import org.eclipse.jetty.security.ConstraintSecurityHandler; import org.eclipse.jetty.server.Connector; import org.eclipse.jetty.server.HttpConfiguration; import org.eclipse.jetty.server.HttpConnectionFactory; import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.servlet.ServletContextHandler; import org.eclipse.jetty.servlet.ServletHolder; import org.eclipse.jetty.util.log.JavaUtilLog; import org.eclipse.jetty.util.log.Log; import org.eclipse.jetty.util.security.Constraint; import org.eclipse.jetty.util.thread.QueuedThreadPool; import javax.servlet.DispatcherType; import javax.servlet.Servlet; import java.io.UncheckedIOException; import java.net.BindException; import java.net.MalformedURLException; import java.net.URL; import java.util.EnumSet; import java.util.Objects; import java.util.logging.Logger; import static java.util.concurrent.TimeUnit.SECONDS; public class BaseServer<T extends BaseServer> implements Server<T> { private static final Logger LOG = Logger.getLogger(BaseServer.class.getName()); private static final int MAX_SHUTDOWN_RETRIES = 8; private final org.eclipse.jetty.server.Server server; private final ServletContextHandler servletContextHandler; private final URL url; private HttpHandler handler; public BaseServer(BaseServerOptions options) { int port = options.getPort() == 0 ? 
PortProber.findFreePort() : options.getPort(); String host = options.getHostname().orElseGet(() -> { try { return new NetworkUtils().getNonLoopbackAddressOfThisMachine(); } catch (WebDriverException ignored) { return "localhost"; } }); try { this.url = new URL("http", host, port, ""); } catch (MalformedURLException e) { throw new UncheckedIOException(e); } Log.setLog(new JavaUtilLog()); this.server = new org.eclipse.jetty.server.Server( new QueuedThreadPool(options.getMaxServerThreads())); this.servletContextHandler = new ServletContextHandler(ServletContextHandler.SECURITY); ConstraintSecurityHandler securityHandler = (ConstraintSecurityHandler) servletContextHandler.getSecurityHandler(); Constraint disableTrace = new Constraint(); disableTrace.setName("Disable TRACE"); disableTrace.setAuthenticate(true); ConstraintMapping disableTraceMapping = new ConstraintMapping(); disableTraceMapping.setConstraint(disableTrace); disableTraceMapping.setMethod("TRACE"); disableTraceMapping.setPathSpec("/"); securityHandler.addConstraintMapping(disableTraceMapping); Constraint enableOther = new Constraint(); enableOther.setName("Enable everything but TRACE"); ConstraintMapping enableOtherMapping = new ConstraintMapping(); enableOtherMapping.setConstraint(enableOther); enableOtherMapping.setMethodOmissions(new String[]{"TRACE"}); enableOtherMapping.setPathSpec("/"); securityHandler.addConstraintMapping(enableOtherMapping); // Allow CORS: Whether the Selenium server should allow web browser connections from any host if (options.getAllowCORS()) { FilterHolder filterHolder = servletContextHandler.addFilter(CrossOriginFilter.class, "/*", EnumSet .of(DispatcherType.REQUEST)); filterHolder.setInitParameter("allowedOrigins", "*"); // Warning user LOG.warning("You have enabled CORS requests from any host. " + "Be careful not to visit sites which could maliciously " + "try to start Selenium sessions on your machine"); } server.setHandler(servletContextHandler); HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSecureScheme("https"); ServerConnector http = new ServerConnector(server, new HttpConnectionFactory(httpConfig)); options.getHostname().ifPresent(http::setHost); http.setPort(getUrl().getPort()); http.setIdleTimeout(500000); server.setConnectors(new Connector[]{http}); } @Override public void addServlet(Class<? extends Servlet> servlet, String pathSpec) { if (server.isRunning()) { throw new IllegalStateException("You may not add a servlet to a running server"); } servletContextHandler.addServlet( Objects.requireNonNull(servlet), Objects.requireNonNull(pathSpec)); } @Override public void addServlet(Servlet servlet, String pathSpec) { if (server.isRunning()) { throw new IllegalStateException("You may not add a servlet to a running server"); } servletContextHandler.addServlet( new ServletHolder(Objects.requireNonNull(servlet)), Objects.requireNonNull(pathSpec)); } @Override public T setHandler(HttpHandler handler) { if (server.isRunning()) { throw new IllegalStateException("You may not add a handler to a running server"); } this.handler = Objects.requireNonNull(handler, "Handler to use must be set."); return (T) this; } @Override public boolean isStarted() { return server.isStarted(); } @Override public T start() { try { // If there are no routes, we've done something terribly wrong. 
if (handler == null) { throw new IllegalStateException("There must be at least one route specified"); } addServlet(new HttpHandlerServlet(handler.with(new WrapExceptions().andThen(new AddWebDriverSpecHeaders()))), "/*"); server.start(); PortProber.waitForPortUp(getUrl().getPort(), 10, SECONDS); //noinspection unchecked return (T) this; } catch (Exception e) { try { stop(); } catch (Exception ignore) { } if (e instanceof BindException) { LOG.severe(String.format( "Port %s is busy, please choose a free port and specify it using -port option", getUrl().getPort())); } if (e instanceof RuntimeException) { throw (RuntimeException) e; } throw new RuntimeException(e); } } @Override public void stop() { int numTries = 0; Exception shutDownException = null; // shut down the jetty server (try try again) while (numTries <= MAX_SHUTDOWN_RETRIES) { numTries++; try { server.stop(); // If we reached here stop didn't throw an exception, so we can assume success. return; } catch (Exception ex) { // org.openqa.jetty.jetty.Server.stop() throws Exception shutDownException = ex; // If Exception is thrown we try to stop the jetty server again } } // This is bad!! Jetty didn't shutdown. throw new RuntimeException(shutDownException); } @Override public URL getUrl() { return url; } }
1
17,024
Because the default value of allowedOrigins is * (all origins), it isn't necessary to set it again at all.
SeleniumHQ-selenium
java
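To illustrate the reviewer's point, here is a minimal Java sketch assuming Jetty's CrossOriginFilter defaults as documented (allowedOrigins already defaults to "*", while allowedMethods defaults to "GET,POST,HEAD"); the helper class name is hypothetical:

import java.util.EnumSet;
import javax.servlet.DispatcherType;

import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlets.CrossOriginFilter;

// Hypothetical helper showing the filter relying on its built-in defaults.
class CorsSetupSketch {

  static void enableCors(ServletContextHandler handler) {
    FilterHolder cors = handler.addFilter(
        CrossOriginFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST));

    // No setInitParameter call for "allowedOrigins": the filter already allows
    // every origin by default, which is why setting it to "*" again is redundant.
    // "allowedMethods" is the parameter that actually needs widening, since its
    // default does not include PUT or DELETE.
    cors.setInitParameter("allowedMethods", "GET,POST,PUT,DELETE,HEAD");
  }
}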
@@ -5757,4 +5757,3 @@ bool Game::reload(ReloadTypes_t reloadType) } return true; } -
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2019 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "pugicast.h" #include "actions.h" #include "bed.h" #include "configmanager.h" #include "creature.h" #include "creatureevent.h" #include "databasetasks.h" #include "events.h" #include "game.h" #include "globalevent.h" #include "iologindata.h" #include "iomarket.h" #include "items.h" #include "monster.h" #include "movement.h" #include "scheduler.h" #include "server.h" #include "spells.h" #include "talkaction.h" #include "weapons.h" #include "script.h" extern ConfigManager g_config; extern Actions* g_actions; extern Chat* g_chat; extern TalkActions* g_talkActions; extern Spells* g_spells; extern Vocations g_vocations; extern GlobalEvents* g_globalEvents; extern CreatureEvents* g_creatureEvents; extern Events* g_events; extern Monsters g_monsters; extern MoveEvents* g_moveEvents; extern Weapons* g_weapons; extern Scripts* g_scripts; Game::Game() { offlineTrainingWindow.defaultEnterButton = 1; offlineTrainingWindow.defaultEscapeButton = 0; offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD); offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE); offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB); offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DISTANCE); offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL_MAGLEVEL); offlineTrainingWindow.buttons.emplace_back("Okay", offlineTrainingWindow.defaultEnterButton); offlineTrainingWindow.buttons.emplace_back("Cancel", offlineTrainingWindow.defaultEscapeButton); offlineTrainingWindow.priority = true; } Game::~Game() { for (const auto& it : guilds) { delete it.second; } } void Game::start(ServiceManager* manager) { serviceManager = manager; updateWorldTime(); if (g_config.getBoolean(ConfigManager::DEFAULT_WORLD_LIGHT)) { g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this))); } g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0))); g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this))); } GameState_t Game::getGameState() const { return gameState; } void Game::setWorldType(WorldType_t type) { worldType = type; } void Game::setGameState(GameState_t newState) { if (gameState == GAME_STATE_SHUTDOWN) { return; //this cannot be stopped } if (gameState == newState) { return; } gameState = newState; switch (newState) { case GAME_STATE_INIT: { groups.load(); g_chat->load(); map.spawns.startup(); raids.loadFromXml(); raids.startup(); quests.loadFromXml(); mounts.loadFromXml(); loadMotdNum(); 
loadPlayersRecord(); g_globalEvents->startup(); break; } case GAME_STATE_SHUTDOWN: { g_globalEvents->execute(GLOBALEVENT_SHUTDOWN); //kick all players that are still online auto it = players.begin(); while (it != players.end()) { it->second->kickPlayer(true); it = players.begin(); } saveMotdNum(); saveGameState(); g_dispatcher.addTask( createTask(std::bind(&Game::shutdown, this))); g_scheduler.stop(); g_databaseTasks.stop(); g_dispatcher.stop(); break; } case GAME_STATE_CLOSED: { /* kick all players without the CanAlwaysLogin flag */ auto it = players.begin(); while (it != players.end()) { if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) { it->second->kickPlayer(true); it = players.begin(); } else { ++it; } } saveGameState(); break; } default: break; } } void Game::saveGameState() { if (gameState == GAME_STATE_NORMAL) { setGameState(GAME_STATE_MAINTAIN); } std::cout << "Saving server..." << std::endl; for (const auto& it : players) { it.second->loginPosition = it.second->getPosition(); IOLoginData::savePlayer(it.second); } Map::save(); g_databaseTasks.flush(); if (gameState == GAME_STATE_MAINTAIN) { setGameState(GAME_STATE_NORMAL); } } bool Game::loadMainMap(const std::string& filename) { Monster::despawnRange = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRANGE); Monster::despawnRadius = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRADIUS); return map.loadMap("data/world/" + filename + ".otbm", true); } void Game::loadMap(const std::string& path) { map.loadMap(path, false); } Cylinder* Game::internalGetCylinder(Player* player, const Position& pos) const { if (pos.x != 0xFFFF) { return map.getTile(pos); } //container if (pos.y & 0x40) { uint8_t from_cid = pos.y & 0x0F; return player->getContainerByID(from_cid); } //inventory return player; } Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId, stackPosType_t type) const { if (pos.x != 0xFFFF) { Tile* tile = map.getTile(pos); if (!tile) { return nullptr; } Thing* thing; switch (type) { case STACKPOS_LOOK: { return tile->getTopVisibleThing(player); } case STACKPOS_MOVE: { Item* item = tile->getTopDownItem(); if (item && item->isMoveable()) { thing = item; } else { thing = tile->getTopVisibleCreature(player); } break; } case STACKPOS_USEITEM: { thing = tile->getUseItem(index); break; } case STACKPOS_TOPDOWN_ITEM: { thing = tile->getTopDownItem(); break; } case STACKPOS_USETARGET: { thing = tile->getTopVisibleCreature(player); if (!thing) { thing = tile->getUseItem(index); } break; } default: { thing = nullptr; break; } } if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { //do extra checks here if the thing is accessable if (thing && thing->getItem()) { if (tile->hasProperty(CONST_PROP_ISVERTICAL)) { if (player->getPosition().x + 1 == tile->getPosition().x) { thing = nullptr; } } else { // horizontal if (player->getPosition().y + 1 == tile->getPosition().y) { thing = nullptr; } } } } return thing; } //container if (pos.y & 0x40) { uint8_t fromCid = pos.y & 0x0F; Container* parentContainer = player->getContainerByID(fromCid); if (!parentContainer) { return nullptr; } if (parentContainer->getID() == ITEM_BROWSEFIELD) { Tile* tile = parentContainer->getTile(); if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { if (tile->hasProperty(CONST_PROP_ISVERTICAL)) { if (player->getPosition().x + 1 == tile->getPosition().x) { return nullptr; } } else { // horizontal if (player->getPosition().y + 1 == tile->getPosition().y) { return nullptr; } } } } uint8_t slot = pos.z; return 
parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot); } else if (pos.y == 0 && pos.z == 0) { const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return nullptr; } int32_t subType; if (it.isFluidContainer() && index < static_cast<int32_t>(sizeof(reverseFluidMap) / sizeof(uint8_t))) { subType = reverseFluidMap[index]; } else { subType = -1; } return findItemOfType(player, it.id, true, subType); } //inventory slots_t slot = static_cast<slots_t>(pos.y); if (slot == CONST_SLOT_STORE_INBOX) { return player->getStoreInbox(); } return player->getInventoryItem(slot); } void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos) { pos.x = 0; pos.y = 0; pos.z = 0; stackpos = 0; Cylinder* topParent = item->getTopParent(); if (topParent) { if (Player* player = dynamic_cast<Player*>(topParent)) { pos.x = 0xFFFF; Container* container = dynamic_cast<Container*>(item->getParent()); if (container) { pos.y = static_cast<uint16_t>(0x40) | static_cast<uint16_t>(player->getContainerID(container)); pos.z = container->getThingIndex(item); stackpos = pos.z; } else { pos.y = player->getThingIndex(item); stackpos = pos.y; } } else if (Tile* tile = topParent->getTile()) { pos = tile->getPosition(); stackpos = tile->getThingIndex(item); } } } Creature* Game::getCreatureByID(uint32_t id) { if (id <= Player::playerAutoID) { return getPlayerByID(id); } else if (id <= Monster::monsterAutoID) { return getMonsterByID(id); } else if (id <= Npc::npcAutoID) { return getNpcByID(id); } return nullptr; } Monster* Game::getMonsterByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = monsters.find(id); if (it == monsters.end()) { return nullptr; } return it->second; } Npc* Game::getNpcByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = npcs.find(id); if (it == npcs.end()) { return nullptr; } return it->second; } Player* Game::getPlayerByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = players.find(id); if (it == players.end()) { return nullptr; } return it->second; } Creature* Game::getCreatureByName(const std::string& s) { if (s.empty()) { return nullptr; } const std::string& lowerCaseName = asLowerCaseString(s); auto m_it = mappedPlayerNames.find(lowerCaseName); if (m_it != mappedPlayerNames.end()) { return m_it->second; } for (const auto& it : npcs) { if (lowerCaseName == asLowerCaseString(it.second->getName())) { return it.second; } } for (const auto& it : monsters) { if (lowerCaseName == asLowerCaseString(it.second->getName())) { return it.second; } } return nullptr; } Npc* Game::getNpcByName(const std::string& s) { if (s.empty()) { return nullptr; } const char* npcName = s.c_str(); for (const auto& it : npcs) { if (strcasecmp(npcName, it.second->getName().c_str()) == 0) { return it.second; } } return nullptr; } Player* Game::getPlayerByName(const std::string& s) { if (s.empty()) { return nullptr; } auto it = mappedPlayerNames.find(asLowerCaseString(s)); if (it == mappedPlayerNames.end()) { return nullptr; } return it->second; } Player* Game::getPlayerByGUID(const uint32_t& guid) { if (guid == 0) { return nullptr; } auto it = mappedPlayerGuids.find(guid); if (it == mappedPlayerGuids.end()) { return nullptr; } return it->second; } ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player) { size_t strlen = s.length(); if (strlen == 0 || strlen > 20) { return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE; } if (s.back() == '~') { const std::string& query = asLowerCaseString(s.substr(0, strlen - 1)); std::string 
result; ReturnValue ret = wildcardTree.findOne(query, result); if (ret != RETURNVALUE_NOERROR) { return ret; } player = getPlayerByName(result); } else { player = getPlayerByName(s); } if (!player) { return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE; } return RETURNVALUE_NOERROR; } Player* Game::getPlayerByAccount(uint32_t acc) { for (const auto& it : players) { if (it.second->getAccount() == acc) { return it.second; } } return nullptr; } bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/) { if (creature->getParent() != nullptr) { return false; } if (!map.placeCreature(pos, creature, extendedPos, forced)) { return false; } creature->incrementReferenceCounter(); creature->setID(); creature->addList(); return true; } bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/) { if (!internalPlaceCreature(creature, pos, extendedPos, forced)) { return false; } SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true); for (Creature* spectator : spectators) { if (Player* tmpPlayer = spectator->getPlayer()) { tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true); } } for (Creature* spectator : spectators) { spectator->onCreatureAppear(creature, true); } creature->getParent()->postAddNotification(creature, nullptr, 0); addCreatureCheck(creature); creature->onPlacedCreature(); return true; } bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/) { if (creature->isRemoved()) { return false; } Tile* tile = creature->getTile(); std::vector<int32_t> oldStackPosVector; SpectatorVec spectators; map.getSpectators(spectators, tile->getPosition(), true); for (Creature* spectator : spectators) { if (Player* player = spectator->getPlayer()) { oldStackPosVector.push_back(player->canSeeCreature(creature) ? 
tile->getClientIndexOfCreature(player, creature) : -1); } } tile->removeCreature(creature); const Position& tilePosition = tile->getPosition(); //send to client size_t i = 0; for (Creature* spectator : spectators) { if (Player* player = spectator->getPlayer()) { player->sendRemoveTileCreature(creature, tilePosition, oldStackPosVector[i++]); } } //event method for (Creature* spectator : spectators) { spectator->onRemoveCreature(creature, isLogout); } creature->getParent()->postRemoveNotification(creature, nullptr, 0); creature->removeList(); creature->setRemoved(); ReleaseCreature(creature); removeCreatureCheck(creature); for (Creature* summon : creature->summons) { summon->setSkillLoss(false); removeCreature(summon); } return true; } void Game::executeDeath(uint32_t creatureId) { Creature* creature = getCreatureByID(creatureId); if (creature && !creature->isRemoved()) { creature->onDeath(); } } void Game::playerMoveThing(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count) { Player* player = getPlayerByID(playerId); if (!player) { return; } uint8_t fromIndex = 0; if (fromPos.x == 0xFFFF) { if (fromPos.y & 0x40) { fromIndex = fromPos.z; } else { fromIndex = static_cast<uint8_t>(fromPos.y); } } else { fromIndex = fromStackPos; } Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (Creature* movingCreature = thing->getCreature()) { Tile* tile = map.getTile(toPos); if (!tile) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) { SchedulerTask* task = createSchedulerTask(1000, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreature->getPosition(), tile->getPosition())); player->setNextActionTask(task); } else { playerMoveCreature(player, movingCreature, movingCreature->getPosition(), tile); } } else if (thing->getItem()) { Cylinder* toCylinder = internalGetCylinder(player, toPos); if (!toCylinder) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, thing->getItem(), toCylinder); } } void Game::playerMoveCreatureByID(uint32_t playerId, uint32_t movingCreatureId, const Position& movingCreatureOrigPos, const Position& toPos) { Player* player = getPlayerByID(playerId); if (!player) { return; } Creature* movingCreature = getCreatureByID(movingCreatureId); if (!movingCreature) { return; } Tile* toTile = map.getTile(toPos); if (!toTile) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } playerMoveCreature(player, movingCreature, movingCreatureOrigPos, toTile); } void Game::playerMoveCreature(Player* player, Creature* movingCreature, const Position& movingCreatureOrigPos, Tile* toTile) { if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition())); player->setNextActionTask(task); return; } if (movingCreature->isMovementBlocked()) { player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE); return; } player->setNextActionTask(nullptr); if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) { //need to walk to the creature first before moving it std::vector<Direction> 
listDir; if (player->getPathTo(movingCreatureOrigPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(1500, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition())); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) || (movingCreature->isInGhostMode() && !player->isAccessPlayer())) { player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE); return; } //check throw distance const Position& movingCreaturePos = movingCreature->getPosition(); const Position& toPos = toTile->getPosition(); if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) { player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH); return; } if (player != movingCreature) { if (toTile->hasFlag(TILESTATE_BLOCKPATH)) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } else { if (CreatureVector* tileCreatures = toTile->getCreatures()) { for (Creature* tileCreature : *tileCreatures) { if (!tileCreature->isInGhostMode()) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } } } Npc* movingNpc = movingCreature->getNpc(); if (movingNpc && !Spawns::isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } } } if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) { return; } ReturnValue ret = internalMoveCreature(*movingCreature, *toTile); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); } } ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/) { creature->setLastPosition(creature->getPosition()); const Position& currentPos = creature->getPosition(); Position destPos = getNextPosition(direction, currentPos); Player* player = creature->getPlayer(); bool diagonalMovement = (direction & DIRECTION_DIAGONAL_MASK) != 0; if (player && !diagonalMovement) { //try go up if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) { Tile* tmpTile = map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1); if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) { tmpTile = map.getTile(destPos.x, destPos.y, destPos.getZ() - 1); if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID)) { flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE; if (!tmpTile->hasFlag(TILESTATE_FLOORCHANGE)) { player->setDirection(direction); destPos.z--; } } } } //try go down if (currentPos.z != 7 && currentPos.z == destPos.z) { Tile* tmpTile = map.getTile(destPos.x, destPos.y, destPos.z); if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) { tmpTile = map.getTile(destPos.x, destPos.y, destPos.z + 1); if (tmpTile && 
tmpTile->hasHeight(3)) { flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE; player->setDirection(direction); destPos.z++; } } } } Tile* toTile = map.getTile(destPos); if (!toTile) { return RETURNVALUE_NOTPOSSIBLE; } return internalMoveCreature(*creature, *toTile, flags); } ReturnValue Game::internalMoveCreature(Creature& creature, Tile& toTile, uint32_t flags /*= 0*/) { //check if we can move the creature to the destination ReturnValue ret = toTile.queryAdd(0, creature, 1, flags); if (ret != RETURNVALUE_NOERROR) { return ret; } map.moveCreature(creature, toTile); if (creature.getParent() != &toTile) { return RETURNVALUE_NOERROR; } int32_t index = 0; Item* toItem = nullptr; Tile* subCylinder = nullptr; Tile* toCylinder = &toTile; Tile* fromCylinder = nullptr; uint32_t n = 0; while ((subCylinder = toCylinder->queryDestination(index, creature, &toItem, flags)) != toCylinder) { map.moveCreature(creature, *subCylinder); if (creature.getParent() != subCylinder) { //could happen if a script move the creature fromCylinder = nullptr; break; } fromCylinder = toCylinder; toCylinder = subCylinder; flags = 0; //to prevent infinite loop if (++n >= MAP_MAX_LAYERS) { break; } } if (fromCylinder) { const Position& fromPosition = fromCylinder->getPosition(); const Position& toPosition = toCylinder->getPosition(); if (fromPosition.z != toPosition.z && (fromPosition.x != toPosition.x || fromPosition.y != toPosition.y)) { Direction dir = getDirectionTo(fromPosition, toPosition); if ((dir & DIRECTION_DIAGONAL_MASK) == 0) { internalCreatureTurn(&creature, dir); } } } return RETURNVALUE_NOERROR; } void Game::playerMoveItemByPlayerID(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count) { Player* player = getPlayerByID(playerId); if (!player) { return; } playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, nullptr, nullptr); } void Game::playerMoveItem(Player* player, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count, Item* item, Cylinder* toCylinder) { if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), fromPos, spriteId, fromStackPos, toPos, count)); player->setNextActionTask(task); return; } player->setNextActionTask(nullptr); if (item == nullptr) { uint8_t fromIndex = 0; if (fromPos.x == 0xFFFF) { if (fromPos.y & 0x40) { fromIndex = fromPos.z; } else { fromIndex = static_cast<uint8_t>(fromPos.y); } } else { fromIndex = fromStackPos; } Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE); if (!thing || !thing->getItem()) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } item = thing->getItem(); } if (item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Cylinder* fromCylinder = internalGetCylinder(player, fromPos); if (fromCylinder == nullptr) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (toCylinder == nullptr) { toCylinder = internalGetCylinder(player, toPos); if (toCylinder == nullptr) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } if (!item->isPushable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE); return; } const Position& playerPos = player->getPosition(); const Position& mapFromPos = fromCylinder->getTile()->getPosition(); if (playerPos.z != 
mapFromPos.z) { player->sendCancelMessage(playerPos.z > mapFromPos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS); return; } if (!Position::areInRange<1, 1>(playerPos, mapFromPos)) { //need to walk to the item first before using it std::vector<Direction> listDir; if (player->getPathTo(item->getPosition(), listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), fromPos, spriteId, fromStackPos, toPos, count)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } const Tile* toCylinderTile = toCylinder->getTile(); const Position& mapToPos = toCylinderTile->getPosition(); //hangable item specific code if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { //destination supports hangable objects so need to move there first bool vertical = toCylinderTile->hasProperty(CONST_PROP_ISVERTICAL); if (vertical) { if (playerPos.x + 1 == mapToPos.x) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } else { // horizontal if (playerPos.y + 1 == mapToPos.y) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) { Position walkPos = mapToPos; if (vertical) { walkPos.x++; } else { walkPos.y++; } Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && Position::areInRange<1, 1>(mapFromPos, playerPos) && !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) { //need to pickup the item first Item* moveItem = nullptr; ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem, 0, player, nullptr, &fromPos, &toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::vector<Direction> listDir; if (player->getPathTo(walkPos, listDir, 0, 0, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), itemPos, spriteId, itemStackPos, toPos, count)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } } if ((Position::getDistanceX(playerPos, mapToPos) > item->getThrowRange()) || (Position::getDistanceY(playerPos, mapToPos) > item->getThrowRange()) || (Position::getDistanceZ(mapFromPos, mapToPos) * 4 > item->getThrowRange())) { player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH); return; } if (!canThrowObjectTo(mapFromPos, mapToPos)) { player->sendCancelMessage(RETURNVALUE_CANNOTTHROW); return; } uint8_t toIndex = 0; if (toPos.x == 0xFFFF) { if (toPos.y & 0x40) { toIndex = toPos.z; } else { toIndex = static_cast<uint8_t>(toPos.y); } } ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player, nullptr, &fromPos, &toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); } } ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index, Item* item, uint32_t count, Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/, const Position* fromPos 
/*= nullptr*/, const Position* toPos/*= nullptr*/) { Player* actorPlayer = actor ? actor->getPlayer() : nullptr; if (actorPlayer && fromPos && toPos) { if (!g_events->eventPlayerOnMoveItem(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder)) { return RETURNVALUE_NOTPOSSIBLE; } } Tile* fromTile = fromCylinder->getTile(); if (fromTile) { auto it = browseFields.find(fromTile); if (it != browseFields.end() && it->second == fromCylinder) { fromCylinder = fromTile; } } Item* toItem = nullptr; Cylinder* subCylinder; int floorN = 0; while ((subCylinder = toCylinder->queryDestination(index, *item, &toItem, flags)) != toCylinder) { toCylinder = subCylinder; flags = 0; //to prevent infinite loop if (++floorN >= MAP_MAX_LAYERS) { break; } } //destination is the same as the source? if (item == toItem) { return RETURNVALUE_NOERROR; //silently ignore move } //check if we can add this item ReturnValue ret = toCylinder->queryAdd(index, *item, count, flags, actor); if (ret == RETURNVALUE_NEEDEXCHANGE) { //check if we can add it to source cylinder ret = fromCylinder->queryAdd(fromCylinder->getThingIndex(item), *toItem, toItem->getItemCount(), 0); if (ret == RETURNVALUE_NOERROR) { //check how much we can move uint32_t maxExchangeQueryCount = 0; ReturnValue retExchangeMaxCount = fromCylinder->queryMaxCount(INDEX_WHEREEVER, *toItem, toItem->getItemCount(), maxExchangeQueryCount, 0); if (retExchangeMaxCount != RETURNVALUE_NOERROR && maxExchangeQueryCount == 0) { return retExchangeMaxCount; } if (toCylinder->queryRemove(*toItem, toItem->getItemCount(), flags, actor) == RETURNVALUE_NOERROR) { int32_t oldToItemIndex = toCylinder->getThingIndex(toItem); toCylinder->removeThing(toItem, toItem->getItemCount()); fromCylinder->addThing(toItem); if (oldToItemIndex != -1) { toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex); } int32_t newToItemIndex = fromCylinder->getThingIndex(toItem); if (newToItemIndex != -1) { fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex); } ret = toCylinder->queryAdd(index, *item, count, flags); toItem = nullptr; } } } if (ret != RETURNVALUE_NOERROR) { return ret; } //check how much we can move uint32_t maxQueryCount = 0; ReturnValue retMaxCount = toCylinder->queryMaxCount(index, *item, count, maxQueryCount, flags); if (retMaxCount != RETURNVALUE_NOERROR && maxQueryCount == 0) { return retMaxCount; } uint32_t m; if (item->isStackable()) { m = std::min<uint32_t>(count, maxQueryCount); } else { m = maxQueryCount; } Item* moveItem = item; //check if we can remove this item ret = fromCylinder->queryRemove(*item, m, flags, actor); if (ret != RETURNVALUE_NOERROR) { return ret; } if (tradeItem) { if (toCylinder->getItem() == tradeItem) { return RETURNVALUE_NOTENOUGHROOM; } Cylinder* tmpCylinder = toCylinder->getParent(); while (tmpCylinder) { if (tmpCylinder->getItem() == tradeItem) { return RETURNVALUE_NOTENOUGHROOM; } tmpCylinder = tmpCylinder->getParent(); } } //remove the item int32_t itemIndex = fromCylinder->getThingIndex(item); Item* updateItem = nullptr; fromCylinder->removeThing(item, m); //update item(s) if (item->isStackable()) { uint32_t n; if (item->equals(toItem)) { n = std::min<uint32_t>(100 - toItem->getItemCount(), m); toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n); updateItem = toItem; } else { n = 0; } int32_t newCount = m - n; if (newCount > 0) { moveItem = item->clone(); moveItem->setItemCount(newCount); } else { moveItem = nullptr; } if (item->isRemoved()) { ReleaseItem(item); } } //add item 
if (moveItem /*m - n > 0*/) { toCylinder->addThing(index, moveItem); } if (itemIndex != -1) { fromCylinder->postRemoveNotification(item, toCylinder, itemIndex); } if (moveItem) { int32_t moveItemIndex = toCylinder->getThingIndex(moveItem); if (moveItemIndex != -1) { toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex); } } if (updateItem) { int32_t updateItemIndex = toCylinder->getThingIndex(updateItem); if (updateItemIndex != -1) { toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex); } } if (_moveItem) { if (moveItem) { *_moveItem = moveItem; } else { *_moveItem = item; } } //we could not move all, inform the player if (item->isStackable() && maxQueryCount < count) { return retMaxCount; } if (moveItem && moveItem->getDuration() > 0) { if (moveItem->getDecaying() != DECAYING_TRUE) { moveItem->incrementReferenceCounter(); moveItem->setDecaying(DECAYING_TRUE); toDecayItems.push_front(moveItem); } } if (actorPlayer && fromPos && toPos) { g_events->eventPlayerOnItemMoved(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder); } return ret; } ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index /*= INDEX_WHEREEVER*/, uint32_t flags/* = 0*/, bool test/* = false*/) { uint32_t remainderCount = 0; return internalAddItem(toCylinder, item, index, flags, test, remainderCount); } ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index, uint32_t flags, bool test, uint32_t& remainderCount) { if (toCylinder == nullptr || item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } Cylinder* destCylinder = toCylinder; Item* toItem = nullptr; toCylinder = toCylinder->queryDestination(index, *item, &toItem, flags); //check if we can add this item ReturnValue ret = toCylinder->queryAdd(index, *item, item->getItemCount(), flags); if (ret != RETURNVALUE_NOERROR) { return ret; } /* Check if we can move add the whole amount, we do this by checking against the original cylinder, since the queryDestination can return a cylinder that might only hold a part of the full amount. 
*/ uint32_t maxQueryCount = 0; ret = destCylinder->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), maxQueryCount, flags); if (ret != RETURNVALUE_NOERROR) { return ret; } if (test) { return RETURNVALUE_NOERROR; } if (item->isStackable() && item->equals(toItem)) { uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount); uint32_t n = std::min<uint32_t>(100 - toItem->getItemCount(), m); toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n); int32_t count = m - n; if (count > 0) { if (item->getItemCount() != count) { Item* remainderItem = item->clone(); remainderItem->setItemCount(count); if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RETURNVALUE_NOERROR) { ReleaseItem(remainderItem); remainderCount = count; } } else { toCylinder->addThing(index, item); int32_t itemIndex = toCylinder->getThingIndex(item); if (itemIndex != -1) { toCylinder->postAddNotification(item, nullptr, itemIndex); } } } else { //fully merged with toItem, item will be destroyed item->onRemoved(); ReleaseItem(item); int32_t itemIndex = toCylinder->getThingIndex(toItem); if (itemIndex != -1) { toCylinder->postAddNotification(toItem, nullptr, itemIndex); } } } else { toCylinder->addThing(index, item); int32_t itemIndex = toCylinder->getThingIndex(item); if (itemIndex != -1) { toCylinder->postAddNotification(item, nullptr, itemIndex); } } if (item->getDuration() > 0) { item->incrementReferenceCounter(); item->setDecaying(DECAYING_TRUE); toDecayItems.push_front(item); } return RETURNVALUE_NOERROR; } ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/) { Cylinder* cylinder = item->getParent(); if (cylinder == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } Tile* fromTile = cylinder->getTile(); if (fromTile) { auto it = browseFields.find(fromTile); if (it != browseFields.end() && it->second == cylinder) { cylinder = fromTile; } } if (count == -1) { count = item->getItemCount(); } //check if we can remove this item ReturnValue ret = cylinder->queryRemove(*item, count, flags | FLAG_IGNORENOTMOVEABLE); if (ret != RETURNVALUE_NOERROR) { return ret; } if (!item->canRemove()) { return RETURNVALUE_NOTPOSSIBLE; } if (!test) { int32_t index = cylinder->getThingIndex(item); //remove the item cylinder->removeThing(item, count); if (item->isRemoved()) { item->onRemoved(); if (item->canDecay()) { decayItems->remove(item); } ReleaseItem(item); } cylinder->postRemoveNotification(item, nullptr, index); } return RETURNVALUE_NOERROR; } ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= CONST_SLOT_WHEREEVER*/) { uint32_t remainderCount = 0; ReturnValue ret = internalAddItem(player, item, static_cast<int32_t>(slot), 0, false, remainderCount); if (remainderCount != 0) { Item* remainderItem = Item::CreateItem(item->getID(), remainderCount); ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT); if (remaindRet != RETURNVALUE_NOERROR) { ReleaseItem(remainderItem); } } if (ret != RETURNVALUE_NOERROR && dropOnMap) { ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT); } return ret; } Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId, bool depthSearch /*= true*/, int32_t subType /*= -1*/) const { if (cylinder == nullptr) { return nullptr; } std::vector<Container*> containers; for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) { 
Thing* thing = cylinder->getThing(i); if (!thing) { continue; } Item* item = thing->getItem(); if (!item) { continue; } if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) { return item; } if (depthSearch) { Container* container = item->getContainer(); if (container) { containers.push_back(container); } } } size_t i = 0; while (i < containers.size()) { Container* container = containers[i++]; for (Item* item : container->getItemList()) { if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) { return item; } Container* subContainer = item->getContainer(); if (subContainer) { containers.push_back(subContainer); } } } return nullptr; } bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/) { if (cylinder == nullptr) { return false; } if (money == 0) { return true; } std::vector<Container*> containers; std::multimap<uint32_t, Item*> moneyMap; uint64_t moneyCount = 0; for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) { Thing* thing = cylinder->getThing(i); if (!thing) { continue; } Item* item = thing->getItem(); if (!item) { continue; } Container* container = item->getContainer(); if (container) { containers.push_back(container); } else { const uint32_t worth = item->getWorth(); if (worth != 0) { moneyCount += worth; moneyMap.emplace(worth, item); } } } size_t i = 0; while (i < containers.size()) { Container* container = containers[i++]; for (Item* item : container->getItemList()) { Container* tmpContainer = item->getContainer(); if (tmpContainer) { containers.push_back(tmpContainer); } else { const uint32_t worth = item->getWorth(); if (worth != 0) { moneyCount += worth; moneyMap.emplace(worth, item); } } } } if (moneyCount < money) { return false; } for (const auto& moneyEntry : moneyMap) { Item* item = moneyEntry.second; if (moneyEntry.first < money) { internalRemoveItem(item); money -= moneyEntry.first; } else if (moneyEntry.first > money) { const uint32_t worth = moneyEntry.first / item->getItemCount(); const uint32_t removeCount = std::ceil(money / static_cast<double>(worth)); addMoney(cylinder, (worth * removeCount) - money, flags); internalRemoveItem(item, removeCount); break; } else { internalRemoveItem(item); break; } } return true; } void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/) { if (money == 0) { return; } uint32_t crystalCoins = money / 10000; money -= crystalCoins * 10000; while (crystalCoins > 0) { const uint16_t count = std::min<uint32_t>(100, crystalCoins); Item* remaindItem = Item::CreateItem(ITEM_CRYSTAL_COIN, count); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT); } crystalCoins -= count; } uint16_t platinumCoins = money / 100; if (platinumCoins != 0) { Item* remaindItem = Item::CreateItem(ITEM_PLATINUM_COIN, platinumCoins); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT); } money -= platinumCoins * 100; } if (money != 0) { Item* remaindItem = Item::CreateItem(ITEM_GOLD_COIN, money); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT); } } } Item* Game::transformItem(Item* item, uint16_t 
newId, int32_t newCount /*= -1*/)
{
	if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite
		return item;
	}

	Cylinder* cylinder = item->getParent();
	if (cylinder == nullptr) {
		return nullptr;
	}

	Tile* fromTile = cylinder->getTile();
	if (fromTile) {
		auto it = browseFields.find(fromTile);
		if (it != browseFields.end() && it->second == cylinder) {
			cylinder = fromTile;
		}
	}

	int32_t itemIndex = cylinder->getThingIndex(item);
	if (itemIndex == -1) {
		return item;
	}

	if (!item->canTransform()) {
		return item;
	}

	const ItemType& newType = Item::items[newId];
	if (newType.id == 0) {
		return item;
	}

	const ItemType& curType = Item::items[item->getID()];
	if (curType.alwaysOnTop != newType.alwaysOnTop) {
		//This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa)
		//Remove the old, and add the new
		cylinder->removeThing(item, item->getItemCount());
		cylinder->postRemoveNotification(item, cylinder, itemIndex);

		item->setID(newId);
		if (newCount != -1) {
			item->setSubType(newCount);
		}
		cylinder->addThing(item);

		Cylinder* newParent = item->getParent();
		if (newParent == nullptr) {
			ReleaseItem(item);
			return nullptr;
		}

		newParent->postAddNotification(item, cylinder, newParent->getThingIndex(item));
		return item;
	}

	if (curType.type == newType.type) {
		//Both items have the same type so we can safely change id/subtype
		if (newCount == 0 && (item->isStackable() || item->hasAttribute(ITEM_ATTRIBUTE_CHARGES))) {
			if (item->isStackable()) {
				internalRemoveItem(item);
				return nullptr;
			} else {
				int32_t newItemId = newId;
				if (curType.id == newType.id) {
					newItemId = item->getDecayTo();
				}

				if (newItemId < 0) {
					internalRemoveItem(item);
					return nullptr;
				} else if (newItemId != newId) {
					//Replacing the old item with the new while maintaining the old position
					Item* newItem = Item::CreateItem(newItemId, 1);
					if (newItem == nullptr) {
						return nullptr;
					}

					cylinder->replaceThing(itemIndex, newItem);
					cylinder->postAddNotification(newItem, cylinder, itemIndex);

					item->setParent(nullptr);
					cylinder->postRemoveNotification(item, cylinder, itemIndex);
					ReleaseItem(item);
					return newItem;
				} else {
					return transformItem(item, newItemId);
				}
			}
		} else {
			cylinder->postRemoveNotification(item, cylinder, itemIndex);
			uint16_t itemId = item->getID();
			int32_t count = item->getSubType();

			if (curType.id != newType.id) {
				if (newType.group != curType.group) {
					item->setDefaultSubtype();
				}

				itemId = newId;
			}

			if (newCount != -1 && newType.hasSubType()) {
				count = newCount;
			}

			cylinder->updateThing(item, itemId, count);
			cylinder->postAddNotification(item, cylinder, itemIndex);
			return item;
		}
	}

	//Replacing the old item with the new while maintaining the old position
	Item* newItem;
	if (newCount == -1) {
		newItem = Item::CreateItem(newId);
	} else {
		newItem = Item::CreateItem(newId, newCount);
	}

	if (newItem == nullptr) {
		return nullptr;
	}

	cylinder->replaceThing(itemIndex, newItem);
	cylinder->postAddNotification(newItem, cylinder, itemIndex);

	item->setParent(nullptr);
	cylinder->postRemoveNotification(item, cylinder, itemIndex);
	ReleaseItem(item);

	if (newItem->getDuration() > 0) {
		if (newItem->getDecaying() != DECAYING_TRUE) {
			newItem->incrementReferenceCounter();
			newItem->setDecaying(DECAYING_TRUE);
			toDecayItems.push_front(newItem);
		}
	}

	return newItem;
}

ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/)
{
	if (newPos == thing->getPosition()) {
		return RETURNVALUE_NOERROR;
	} else if
(thing->isRemoved()) { return RETURNVALUE_NOTPOSSIBLE; } Tile* toTile = map.getTile(newPos); if (!toTile) { return RETURNVALUE_NOTPOSSIBLE; } if (Creature* creature = thing->getCreature()) { ReturnValue ret = toTile->queryAdd(0, *creature, 1, FLAG_NOLIMIT); if (ret != RETURNVALUE_NOERROR) { return ret; } map.moveCreature(*creature, *toTile, !pushMove); return RETURNVALUE_NOERROR; } else if (Item* item = thing->getItem()) { return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags); } return RETURNVALUE_NOTPOSSIBLE; } Item* searchForItem(Container* container, uint16_t itemId) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { if ((*it)->getID() == itemId) { return *it; } } return nullptr; } slots_t getSlotType(const ItemType& it) { slots_t slot = CONST_SLOT_RIGHT; if (it.weaponType != WeaponType_t::WEAPON_SHIELD) { int32_t slotPosition = it.slotPosition; if (slotPosition & SLOTP_HEAD) { slot = CONST_SLOT_HEAD; } else if (slotPosition & SLOTP_NECKLACE) { slot = CONST_SLOT_NECKLACE; } else if (slotPosition & SLOTP_ARMOR) { slot = CONST_SLOT_ARMOR; } else if (slotPosition & SLOTP_LEGS) { slot = CONST_SLOT_LEGS; } else if (slotPosition & SLOTP_FEET) { slot = CONST_SLOT_FEET; } else if (slotPosition & SLOTP_RING) { slot = CONST_SLOT_RING; } else if (slotPosition & SLOTP_AMMO) { slot = CONST_SLOT_AMMO; } else if (slotPosition & SLOTP_TWO_HAND || slotPosition & SLOTP_LEFT) { slot = CONST_SLOT_LEFT; } } return slot; } //Implementation of player invoked events void Game::playerEquipItem(uint32_t playerId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Item* item = player->getInventoryItem(CONST_SLOT_BACKPACK); if (!item) { return; } Container* backpack = item->getContainer(); if (!backpack) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); slots_t slot = getSlotType(it); Item* slotItem = player->getInventoryItem(slot); Item* equipItem = searchForItem(backpack, it.id); if (slotItem && slotItem->getID() == it.id && (!it.stackable || slotItem->getItemCount() == 100 || !equipItem)) { internalMoveItem(slotItem->getParent(), player, CONST_SLOT_WHEREEVER, slotItem, slotItem->getItemCount(), nullptr); } else if (equipItem) { internalMoveItem(equipItem->getParent(), player, slot, equipItem, equipItem->getItemCount(), nullptr); } } void Game::playerMove(uint32_t playerId, Direction direction) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (player->isMovementBlocked()) { player->sendCancelWalk(); return; } player->resetIdleTime(); player->setNextWalkActionTask(nullptr); player->startAutoWalk(direction); } bool Game::playerBroadcastMessage(Player* player, const std::string& text) const { if (!player->hasFlag(PlayerFlag_CanBroadcast)) { return false; } std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." 
<< std::endl; for (const auto& it : players) { it.second->sendPrivateMessage(player, TALKTYPE_BROADCAST, text); } return true; } void Game::playerCreatePrivateChannel(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player || !player->isPremium()) { return; } ChatChannel* channel = g_chat->createChannel(*player, CHANNEL_PRIVATE); if (!channel || !channel->addUser(*player)) { return; } player->sendCreatePrivateChannel(channel->getId(), channel->getName()); } void Game::playerChannelInvite(uint32_t playerId, const std::string& name) { Player* player = getPlayerByID(playerId); if (!player) { return; } PrivateChatChannel* channel = g_chat->getPrivateChannel(*player); if (!channel) { return; } Player* invitePlayer = getPlayerByName(name); if (!invitePlayer) { return; } if (player == invitePlayer) { return; } channel->invitePlayer(*player, *invitePlayer); } void Game::playerChannelExclude(uint32_t playerId, const std::string& name) { Player* player = getPlayerByID(playerId); if (!player) { return; } PrivateChatChannel* channel = g_chat->getPrivateChannel(*player); if (!channel) { return; } Player* excludePlayer = getPlayerByName(name); if (!excludePlayer) { return; } if (player == excludePlayer) { return; } channel->excludePlayer(*player, *excludePlayer); } void Game::playerRequestChannels(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendChannelsDialog(); } void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId) { Player* player = getPlayerByID(playerId); if (!player) { return; } ChatChannel* channel = g_chat->addUserToChannel(*player, channelId); if (!channel) { return; } const InvitedMap* invitedUsers = channel->getInvitedUsers(); const UsersMap* users; if (!channel->isPublicChannel()) { users = &channel->getUsers(); } else { users = nullptr; } player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers); } void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId) { Player* player = getPlayerByID(playerId); if (!player) { return; } g_chat->removeUserFromChannel(*player, channelId); } void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!IOLoginData::formatPlayerName(receiver)) { player->sendCancelMessage("A player with this name does not exist."); return; } if (player->getName() == receiver) { player->sendCancelMessage("You cannot set up a private message channel with yourself."); return; } player->sendOpenPrivateChannel(receiver); } void Game::playerCloseNpcChannel(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } SpectatorVec spectators; map.getSpectators(spectators, player->getPosition()); for (Creature* spectator : spectators) { if (Npc* npc = spectator->getNpc()) { npc->onPlayerCloseChannel(player); } } } void Game::playerReceivePing(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->receivePing(); } void Game::playerReceivePingBack(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendPingBack(); } void Game::playerAutoWalk(uint32_t playerId, const std::vector<Direction>& listDir) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->resetIdleTime(); player->setNextWalkTask(nullptr); player->startAutoWalk(listDir); } void Game::playerStopAutoWalk(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } 
player->stopWalk(); } void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId, const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0); if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { return; } Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } Position walkToPos = fromPos; ReturnValue ret = g_actions->canUse(player, fromPos); if (ret == RETURNVALUE_NOERROR) { ret = g_actions->canUse(player, toPos, item); if (ret == RETURNVALUE_TOOFARAWAY) { walkToPos = toPos; } } if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) { Item* moveItem = nullptr; ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::vector<Direction> listDir; if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItemEx, this, playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this, playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey); } void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos, uint8_t index, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } bool isHotkey = (pos.x == 0xFFFF && pos.y == 0 && pos.z == 0); if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { return; } Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || item->isUseable() || item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } ReturnValue ret = g_actions->canUse(player, pos); if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { std::vector<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { 
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItem, this, playerId, pos, stackPos, index, spriteId)); player->setNextWalkActionTask(task); return; } ret = RETURNVALUE_THEREISNOWAY; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this, playerId, pos, stackPos, index, spriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItem(player, pos, index, item, isHotkey); } void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Creature* creature = getCreatureByID(creatureId); if (!creature) { return; } if (!Position::areInRange<7, 5, 0>(creature->getPosition(), player->getPosition())) { return; } bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0); if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { if (creature->getPlayer() || isHotkey) { player->sendCancelMessage(RETURNVALUE_DIRECTPLAYERSHOOT); return; } } Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || !item->isUseable() || item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } Position toPos = creature->getPosition(); Position walkToPos = fromPos; ReturnValue ret = g_actions->canUse(player, fromPos); if (ret == RETURNVALUE_NOERROR) { ret = g_actions->canUse(player, toPos, item); if (ret == RETURNVALUE_TOOFARAWAY) { walkToPos = toPos; } } if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) { Item* moveItem = nullptr; ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::vector<Direction> listDir; if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseWithCreature, this, playerId, itemPos, itemStackPos, creatureId, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this, playerId, fromPos, fromStackPos, creatureId, spriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItemEx(player, fromPos, creature->getPosition(), 
creature->getParent()->getThingIndex(creature), item, isHotkey, creature); } void Game::playerCloseContainer(uint32_t playerId, uint8_t cid) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->closeContainer(cid); player->sendCloseContainer(cid); } void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid) { Player* player = getPlayerByID(playerId); if (!player) { return; } Container* container = player->getContainerByID(cid); if (!container) { return; } Container* parentContainer = dynamic_cast<Container*>(container->getRealParent()); if (!parentContainer) { Tile* tile = container->getTile(); if (!tile) { return; } if (!g_events->eventPlayerOnBrowseField(player, tile->getPosition())) { return; } auto it = browseFields.find(tile); if (it == browseFields.end()) { parentContainer = new Container(tile); parentContainer->incrementReferenceCounter(); browseFields[tile] = parentContainer; g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition()))); } else { parentContainer = it->second; } } player->addContainer(cid, parentContainer); player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid)); } void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid) { Player* player = getPlayerByID(playerId); if (!player) { return; } Container* container = player->getContainerByID(cid); if (!container) { return; } player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid)); } void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM); if (!thing) { return; } Item* item = thing->getItem(); if (!item || item->getClientID() != spriteId || !item->isRotatable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) { std::vector<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRotateItem, this, playerId, pos, stackPos, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } uint16_t newId = Item::items[item->getID()].rotateTo; if (newId != 0) { transformItem(item, newId); } } void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text) { Player* player = getPlayerByID(playerId); if (!player) { return; } uint16_t maxTextLength = 0; uint32_t internalWindowTextId = 0; Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength); if (text.length() > maxTextLength || windowTextId != internalWindowTextId) { return; } if (!writeItem || writeItem->isRemoved()) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Cylinder* topParent = writeItem->getTopParent(); Player* owner = dynamic_cast<Player*>(topParent); if (owner && owner != player) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } for (auto creatureEvent : 
player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) { if (!creatureEvent->executeTextEdit(player, writeItem, text)) { player->setWriteItem(nullptr); return; } } if (!text.empty()) { if (writeItem->getText() != text) { writeItem->setText(text); writeItem->setWriter(player->getName()); writeItem->setDate(time(nullptr)); } } else { writeItem->resetText(); writeItem->resetWriter(); writeItem->resetDate(); } uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId; if (newId != 0) { transformItem(writeItem, newId); } player->setWriteItem(nullptr); } void Game::playerBrowseField(uint32_t playerId, const Position& pos) { Player* player = getPlayerByID(playerId); if (!player) { return; } const Position& playerPos = player->getPosition(); if (playerPos.z != pos.z) { player->sendCancelMessage(playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS); return; } if (!Position::areInRange<1, 1>(playerPos, pos)) { std::vector<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind( &Game::playerBrowseField, this, playerId, pos )); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } Tile* tile = map.getTile(pos); if (!tile) { return; } if (!g_events->eventPlayerOnBrowseField(player, pos)) { return; } Container* container; auto it = browseFields.find(tile); if (it == browseFields.end()) { container = new Container(tile); container->incrementReferenceCounter(); browseFields[tile] = container; g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition()))); } else { container = it->second; } uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3)); Container* openContainer = player->getContainerByID(dummyContainerId); if (openContainer) { player->onCloseContainer(openContainer); player->closeContainer(dummyContainerId); } else { player->addContainer(dummyContainerId, container); player->sendContainer(dummyContainerId, container, false, 0); } } void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index) { Player* player = getPlayerByID(playerId); if (!player) { return; } Container* container = player->getContainerByID(containerId); if (!container || !container->hasPagination()) { return; } if ((index % container->capacity()) != 0 || index >= container->size()) { return; } player->setContainerIndex(containerId, index); player->sendContainer(containerId, container, container->hasParent(), index); } void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text) { Player* player = getPlayerByID(playerId); if (!player) { return; } uint32_t internalWindowTextId; uint32_t internalListId; House* house = player->getEditHouse(internalWindowTextId, internalListId); if (house && house->canEditAccessList(internalListId, player) && internalWindowTextId == windowTextId && listId == 0) { house->setAccessList(internalListId, text); } player->setEditHouse(nullptr); } void Game::playerWrapItem(uint32_t playerId, const Position& position, uint8_t stackPos, const uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Thing* thing = internalGetThing(player, position, stackPos, 0, STACKPOS_TOPDOWN_ITEM); if (!thing) { return; } Item* item = thing->getItem(); if (!item || 
item->getClientID() != spriteId || !item->hasAttribute(ITEM_ATTRIBUTE_WRAPID) || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (position.x != 0xFFFF && !Position::areInRange<1, 1, 0>(position, player->getPosition())) { std::vector<Direction> listDir; if (player->getPathTo(position, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerWrapItem, this, playerId, position, stackPos, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } g_events->eventPlayerOnWrapItem(player, item); } void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos, uint32_t tradePlayerId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Player* tradePartner = getPlayerByID(tradePlayerId); if (!tradePartner || tradePartner == player) { player->sendTextMessage(MESSAGE_INFO_DESCR, "Sorry, not possible."); return; } if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) { std::ostringstream ss; ss << tradePartner->getName() << " tells you to move closer."; player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str()); return; } if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) { player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE); return; } Thing* tradeThing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM); if (!tradeThing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* tradeItem = tradeThing->getItem(); if (tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (g_config.getBoolean(ConfigManager::ONLY_INVITED_CAN_MOVE_HOUSE_ITEMS)) { if (HouseTile* houseTile = dynamic_cast<HouseTile*>(tradeItem->getTile())) { House* house = houseTile->getHouse(); if (house && !house->isInvited(player)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } } const Position& playerPosition = player->getPosition(); const Position& tradeItemPosition = tradeItem->getPosition(); if (playerPosition.z != tradeItemPosition.z) { player->sendCancelMessage(playerPosition.z > tradeItemPosition.z ? 
RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS); return; } if (!Position::areInRange<1, 1>(tradeItemPosition, playerPosition)) { std::vector<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir)))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRequestTrade, this, playerId, pos, stackPos, tradePlayerId, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } Container* tradeItemContainer = tradeItem->getContainer(); if (tradeItemContainer) { for (const auto& it : tradeItems) { Item* item = it.first; if (tradeItem == item) { player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded."); return; } if (tradeItemContainer->isHoldingItem(item)) { player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded."); return; } Container* container = item->getContainer(); if (container && container->isHoldingItem(tradeItem)) { player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded."); return; } } } else { for (const auto& it : tradeItems) { Item* item = it.first; if (tradeItem == item) { player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded."); return; } Container* container = item->getContainer(); if (container && container->isHoldingItem(tradeItem)) { player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded."); return; } } } Container* tradeContainer = tradeItem->getContainer(); if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) { player->sendTextMessage(MESSAGE_INFO_DESCR, "You can not trade more than 100 items."); return; } if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) { return; } internalStartTrade(player, tradePartner, tradeItem); } bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem) { if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) { player->sendCancelMessage(RETURNVALUE_YOUAREALREADYTRADING); return false; } else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) { player->sendCancelMessage(RETURNVALUE_THISPLAYERISALREADYTRADING); return false; } player->tradePartner = tradePartner; player->tradeItem = tradeItem; player->tradeState = TRADE_INITIATED; tradeItem->incrementReferenceCounter(); tradeItems[tradeItem] = player->getID(); player->sendTradeItemRequest(player->getName(), tradeItem, true); if (tradePartner->tradeState == TRADE_NONE) { std::ostringstream ss; ss << player->getName() << " wants to trade with you."; tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str()); tradePartner->tradeState = TRADE_ACKNOWLEDGE; tradePartner->tradePartner = player; } else { Item* counterOfferItem = tradePartner->tradeItem; player->sendTradeItemRequest(tradePartner->getName(), counterOfferItem, false); tradePartner->sendTradeItemRequest(player->getName(), tradeItem, false); } return true; } void Game::playerAcceptTrade(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) { return; } Player* tradePartner = player->tradePartner; if (!tradePartner) { return; } if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) { 
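// The trade partner must still be reachable (throw range and line of sight) at the moment the trade is accepted.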
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE); return; } player->setTradeState(TRADE_ACCEPT); if (tradePartner->getTradeState() == TRADE_ACCEPT) { Item* playerTradeItem = player->tradeItem; Item* partnerTradeItem = tradePartner->tradeItem; if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, playerTradeItem, partnerTradeItem)) { internalCloseTrade(player); return; } player->setTradeState(TRADE_TRANSFER); tradePartner->setTradeState(TRADE_TRANSFER); auto it = tradeItems.find(playerTradeItem); if (it != tradeItems.end()) { ReleaseItem(it->first); tradeItems.erase(it); } it = tradeItems.find(partnerTradeItem); if (it != tradeItems.end()) { ReleaseItem(it->first); tradeItems.erase(it); } bool isSuccess = false; ReturnValue tradePartnerRet = RETURNVALUE_NOERROR; ReturnValue playerRet = RETURNVALUE_NOERROR; // if player is trying to trade its own backpack if (tradePartner->getInventoryItem(CONST_SLOT_BACKPACK) == partnerTradeItem) { tradePartnerRet = (tradePartner->getInventoryItem(getSlotType(Item::items[playerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR); } if (player->getInventoryItem(CONST_SLOT_BACKPACK) == playerTradeItem) { playerRet = (player->getInventoryItem(getSlotType(Item::items[partnerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR); } if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) { tradePartnerRet = internalAddItem(tradePartner, playerTradeItem, INDEX_WHEREEVER, 0, true); playerRet = internalAddItem(player, partnerTradeItem, INDEX_WHEREEVER, 0, true); if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) { playerRet = internalRemoveItem(playerTradeItem, playerTradeItem->getItemCount(), true); tradePartnerRet = internalRemoveItem(partnerTradeItem, partnerTradeItem->getItemCount(), true); if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) { tradePartnerRet = internalMoveItem(playerTradeItem->getParent(), tradePartner, INDEX_WHEREEVER, playerTradeItem, playerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK, nullptr, partnerTradeItem); if (tradePartnerRet == RETURNVALUE_NOERROR) { internalMoveItem(partnerTradeItem->getParent(), player, INDEX_WHEREEVER, partnerTradeItem, partnerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK); playerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, tradePartner); partnerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, player); isSuccess = true; } } } } if (!isSuccess) { std::string errorDescription; if (tradePartner->tradeItem) { errorDescription = getTradeErrorDescription(tradePartnerRet, playerTradeItem); tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription); tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner); } if (player->tradeItem) { errorDescription = getTradeErrorDescription(playerRet, partnerTradeItem); player->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription); player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player); } } g_events->eventPlayerOnTradeCompleted(player, tradePartner, playerTradeItem, partnerTradeItem, isSuccess); player->setTradeState(TRADE_NONE); player->tradeItem = nullptr; player->tradePartner = nullptr; player->sendTradeClose(); tradePartner->setTradeState(TRADE_NONE); tradePartner->tradeItem = nullptr; tradePartner->tradePartner = nullptr; tradePartner->sendTradeClose(); } } std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item) { if (item) { if (ret == RETURNVALUE_NOTENOUGHCAPACITY) { 
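// For capacity failures the message also includes the item's weight description, so the player can see how much carrying capacity is missing.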
std::ostringstream ss; ss << "You do not have enough capacity to carry"; if (item->isStackable() && item->getItemCount() > 1) { ss << " these objects."; } else { ss << " this object."; } ss << "\n " << item->getWeightDescription(); return ss.str(); } else if (ret == RETURNVALUE_NOTENOUGHROOM || ret == RETURNVALUE_CONTAINERNOTENOUGHROOM) { std::ostringstream ss; ss << "You do not have enough room to carry"; if (item->isStackable() && item->getItemCount() > 1) { ss << " these objects."; } else { ss << " this object."; } return ss.str(); } } return "Trade could not be completed."; } void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, uint8_t index) { Player* player = getPlayerByID(playerId); if (!player) { return; } Player* tradePartner = player->tradePartner; if (!tradePartner) { return; } Item* tradeItem; if (lookAtCounterOffer) { tradeItem = tradePartner->getTradeItem(); } else { tradeItem = player->getTradeItem(); } if (!tradeItem) { return; } const Position& playerPosition = player->getPosition(); const Position& tradeItemPosition = tradeItem->getPosition(); int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition), Position::getDistanceY(playerPosition, tradeItemPosition)); if (index == 0) { g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance); return; } Container* tradeContainer = tradeItem->getContainer(); if (!tradeContainer) { return; } std::vector<const Container*> containers {tradeContainer}; size_t i = 0; while (i < containers.size()) { const Container* container = containers[i++]; for (Item* item : container->getItemList()) { Container* tmpContainer = item->getContainer(); if (tmpContainer) { containers.push_back(tmpContainer); } if (--index == 0) { g_events->eventPlayerOnLookInTrade(player, tradePartner, item, lookDistance); return; } } } } void Game::playerCloseTrade(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } internalCloseTrade(player); } void Game::internalCloseTrade(Player* player) { Player* tradePartner = player->tradePartner; if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) { return; } if (player->getTradeItem()) { auto it = tradeItems.find(player->getTradeItem()); if (it != tradeItems.end()) { ReleaseItem(it->first); tradeItems.erase(it); } player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player); player->tradeItem = nullptr; } player->setTradeState(TRADE_NONE); player->tradePartner = nullptr; player->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled."); player->sendTradeClose(); if (tradePartner) { if (tradePartner->getTradeItem()) { auto it = tradeItems.find(tradePartner->getTradeItem()); if (it != tradeItems.end()) { ReleaseItem(it->first); tradeItems.erase(it); } tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner); tradePartner->tradeItem = nullptr; } tradePartner->setTradeState(TRADE_NONE); tradePartner->tradePartner = nullptr; tradePartner->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled."); tradePartner->sendTradeClose(); } } void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreCap/* = false*/, bool inBackpacks/* = false*/) { if (amount == 0 || amount > 100) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } int32_t onBuy, onSell; Npc* merchant = player->getShopOwner(onBuy, onSell); if (!merchant) { return; } const ItemType& it = 
Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } uint8_t subType; if (it.isSplash() || it.isFluidContainer()) { subType = clientFluidToServer(count); } else { subType = count; } if (!player->hasShopItemForSale(it.id, subType)) { return; } merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks); } void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped) { if (amount == 0 || amount > 100) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } int32_t onBuy, onSell; Npc* merchant = player->getShopOwner(onBuy, onSell); if (!merchant) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } uint8_t subType; if (it.isSplash() || it.isFluidContainer()) { subType = clientFluidToServer(count); } else { subType = count; } merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped); } void Game::playerCloseShop(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->closeShopWindow(); } void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count) { Player* player = getPlayerByID(playerId); if (!player) { return; } int32_t onBuy, onSell; Npc* merchant = player->getShopOwner(onBuy, onSell); if (!merchant) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } int32_t subType; if (it.isFluidContainer() || it.isSplash()) { subType = clientFluidToServer(count); } else { subType = count; } if (!player->hasShopItemForSale(it.id, subType)) { return; } const std::string& description = Item::getDescription(it, 1, nullptr, subType); g_events->eventPlayerOnLookInShop(player, &it, subType, description); } void Game::playerLookAt(uint32_t playerId, const Position& pos, uint8_t stackPos) { Player* player = getPlayerByID(playerId); if (!player) { return; } Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_LOOK); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Position thingPos = thing->getPosition(); if (!player->canSee(thingPos)) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Position playerPos = player->getPosition(); int32_t lookDistance; if (thing != player) { lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos)); if (playerPos.z != thingPos.z) { lookDistance += 15; } } else { lookDistance = -1; } g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance); } void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Creature* creature = getCreatureByID(creatureId); if (!creature) { return; } if (!player->canSeeCreature(creature)) { return; } const Position& creaturePos = creature->getPosition(); if (!player->canSee(creaturePos)) { return; } int32_t lookDistance; if (creature != player) { const Position& playerPos = player->getPosition(); lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos)); if (playerPos.z != creaturePos.z) { lookDistance += 15; } } else { lookDistance = -1; } g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance); } void Game::playerCancelAttackAndFollow(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } 
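// A creature id of 0 clears both the attack target and the follow target before the current walk is stopped.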
playerSetAttackedCreature(playerId, 0); playerFollowCreature(playerId, 0); player->stopWalk(); } void Game::playerSetAttackedCreature(uint32_t playerId, uint32_t creatureId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (player->getAttackedCreature() && creatureId == 0) { player->setAttackedCreature(nullptr); player->sendCancelTarget(); return; } Creature* attackCreature = getCreatureByID(creatureId); if (!attackCreature) { player->setAttackedCreature(nullptr); player->sendCancelTarget(); return; } ReturnValue ret = Combat::canTargetCreature(player, attackCreature); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); player->sendCancelTarget(); player->setAttackedCreature(nullptr); return; } player->setAttackedCreature(attackCreature); g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID()))); } void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->setAttackedCreature(nullptr); g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID()))); player->setFollowCreature(getCreatureByID(creatureId)); } void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, bool chaseMode, bool secureMode) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->setFightMode(fightMode); player->setChaseMode(chaseMode); player->setSecureMode(secureMode); } void Game::playerRequestAddVip(uint32_t playerId, const std::string& name) { if (name.length() > 20) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } Player* vipPlayer = getPlayerByName(name); if (!vipPlayer) { uint32_t guid; bool specialVip; std::string formattedName = name; if (!IOLoginData::getGuidByNameEx(guid, specialVip, formattedName)) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name does not exist."); return; } if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player."); return; } player->addVIP(guid, formattedName, VIPSTATUS_OFFLINE); } else { if (vipPlayer->hasFlag(PlayerFlag_SpecialVIP) && !player->hasFlag(PlayerFlag_SpecialVIP)) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player."); return; } if (!vipPlayer->isInGhostMode() || player->isAccessPlayer()) { player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_ONLINE); } else { player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_OFFLINE); } } } void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->removeVIP(guid); } void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->editVIP(guid, description, icon, notify); } void Game::playerTurn(uint32_t playerId, Direction dir) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!g_events->eventPlayerOnTurn(player, dir)) { return; } player->resetIdleTime(); internalCreatureTurn(player, dir); } void Game::playerRequestOutfit(uint32_t playerId) { if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendOutfitWindow(); } void Game::playerToggleMount(uint32_t playerId, bool mount) { Player* 
player = getPlayerByID(playerId); if (!player) { return; } player->toggleMount(mount); } void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit) { if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(player->getSex(), outfit.lookType); if (!playerOutfit) { outfit.lookMount = 0; } if (outfit.lookMount != 0) { Mount* mount = mounts.getMountByClientID(outfit.lookMount); if (!mount) { return; } if (!player->hasMount(mount)) { return; } if (player->isMounted()) { Mount* prevMount = mounts.getMountByID(player->getCurrentMount()); if (prevMount) { changeSpeed(player, mount->speed - prevMount->speed); } player->setCurrentMount(mount->id); } else { player->setCurrentMount(mount->id); outfit.lookMount = 0; } } else if (player->isMounted()) { player->dismount(); } if (player->canWear(outfit.lookType, outfit.lookAddons)) { player->defaultOutfit = outfit; if (player->hasCondition(CONDITION_OUTFIT)) { return; } internalCreatureChangeOutfit(player, outfit); } } void Game::playerShowQuestLog(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendQuestLog(); } void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Quest* quest = quests.getQuestByID(questId); if (!quest) { return; } player->sendQuestLine(quest); } void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type, const std::string& receiver, const std::string& text) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->resetIdleTime(); if (playerSaySpell(player, type, text)) { return; } uint32_t muteTime = player->isMuted(); if (muteTime > 0) { std::ostringstream ss; ss << "You are still muted for " << muteTime << " seconds."; player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str()); return; } if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) { return; } if (type != TALKTYPE_PRIVATE_PN) { player->removeMessageBuffer(); } switch (type) { case TALKTYPE_SAY: internalCreatureSay(player, TALKTYPE_SAY, text, false); break; case TALKTYPE_WHISPER: playerWhisper(player, text); break; case TALKTYPE_YELL: playerYell(player, text); break; case TALKTYPE_PRIVATE_TO: case TALKTYPE_PRIVATE_RED_TO: playerSpeakTo(player, type, receiver, text); break; case TALKTYPE_CHANNEL_O: case TALKTYPE_CHANNEL_Y: case TALKTYPE_CHANNEL_R1: g_chat->talkToChannel(*player, type, text, channelId); break; case TALKTYPE_PRIVATE_PN: playerSpeakToNpc(player, text); break; case TALKTYPE_BROADCAST: playerBroadcastMessage(player, text); break; default: break; } } bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text) { std::string words = text; TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words); if (result == TALKACTION_BREAK) { return true; } result = g_spells->playerSaySpell(player, words); if (result == TALKACTION_BREAK) { if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) { return internalCreatureSay(player, TALKTYPE_SAY, words, false); } else { return internalCreatureSay(player, TALKTYPE_MONSTER_SAY, words, false); } } else if (result == TALKACTION_FAILED) { return true; } return false; } void Game::playerWhisper(Player* player, const std::string& text) { SpectatorVec spectators; map.getSpectators(spectators, player->getPosition(), false, false, Map::maxClientViewportX, 
Map::maxClientViewportX, Map::maxClientViewportY, Map::maxClientViewportY); //send to client for (Creature* spectator : spectators) { if (Player* spectatorPlayer = spectator->getPlayer()) { if (!Position::areInRange<1, 1>(player->getPosition(), spectatorPlayer->getPosition())) { spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, "pspsps"); } else { spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, text); } } } //event method for (Creature* spectator : spectators) { spectator->onCreatureSay(player, TALKTYPE_WHISPER, text); } } bool Game::playerYell(Player* player, const std::string& text) { if (player->hasCondition(CONDITION_YELLTICKS)) { player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED); return false; } uint32_t minimumLevel = g_config.getNumber(ConfigManager::YELL_MINIMUM_LEVEL); if (player->getLevel() < minimumLevel) { std::ostringstream ss; ss << "You may not yell unless you have reached level " << minimumLevel; if (g_config.getBoolean(ConfigManager::YELL_ALLOW_PREMIUM)) { if (player->isPremium()) { internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false); return true; } else { ss << " or have a premium account"; } } ss << "."; player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str()); return false; } if (player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER) { Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0); player->addCondition(condition); } internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false); return true; } bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver, const std::string& text) { Player* toPlayer = getPlayerByName(receiver); if (!toPlayer) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online."); return false; } if (type == TALKTYPE_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) { type = TALKTYPE_PRIVATE_RED_FROM; } else { type = TALKTYPE_PRIVATE_FROM; } toPlayer->sendPrivateMessage(player, type, text); toPlayer->onCreatureSay(player, type, text); if (toPlayer->isInGhostMode() && !player->isAccessPlayer()) { player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online."); } else { std::ostringstream ss; ss << "Message sent to " << toPlayer->getName() << '.'; player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str()); } return true; } void Game::playerSpeakToNpc(Player* player, const std::string& text) { SpectatorVec spectators; map.getSpectators(spectators, player->getPosition()); for (Creature* spectator : spectators) { if (spectator->getNpc()) { spectator->onCreatureSay(player, TALKTYPE_PRIVATE_PN, text); } } } //-- bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/, int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const { return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, rangex, rangey); } bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool floorCheck) const { return map.isSightClear(fromPos, toPos, floorCheck); } bool Game::internalCreatureTurn(Creature* creature, Direction dir) { if (creature->getDirection() == dir) { return false; } creature->setDirection(dir); //send to client SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureTurn(creature); } 
return true; } bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text, bool ghostMode, SpectatorVec* spectatorsPtr/* = nullptr*/, const Position* pos/* = nullptr*/) { if (text.empty()) { return false; } if (!pos) { pos = &creature->getPosition(); } SpectatorVec spectators; if (!spectatorsPtr || spectatorsPtr->empty()) { // This somewhat complex construct ensures that the cached SpectatorVec // is used if available and if it can be used, else a local vector is // used (hopefully the compiler will optimize away the construction of // the temporary when it's not used). if (type != TALKTYPE_YELL && type != TALKTYPE_MONSTER_YELL) { map.getSpectators(spectators, *pos, false, false, Map::maxClientViewportX, Map::maxClientViewportX, Map::maxClientViewportY, Map::maxClientViewportY); } else { map.getSpectators(spectators, *pos, true, false, 18, 18, 14, 14); } } else { spectators = (*spectatorsPtr); } //send to client for (Creature* spectator : spectators) { if (Player* tmpPlayer = spectator->getPlayer()) { if (!ghostMode || tmpPlayer->canSeeCreature(creature)) { tmpPlayer->sendCreatureSay(creature, type, text, pos); } } } //event method for (Creature* spectator : spectators) { spectator->onCreatureSay(creature, type, text); if (creature != spectator) { g_events->eventCreatureOnHear(spectator, creature, text, type); } } return true; } void Game::checkCreatureWalk(uint32_t creatureId) { Creature* creature = getCreatureByID(creatureId); if (creature && creature->getHealth() > 0) { creature->onWalk(); cleanup(); } } void Game::updateCreatureWalk(uint32_t creatureId) { Creature* creature = getCreatureByID(creatureId); if (creature && creature->getHealth() > 0) { creature->goToFollowCreature(); } } void Game::checkCreatureAttack(uint32_t creatureId) { Creature* creature = getCreatureByID(creatureId); if (creature && creature->getHealth() > 0) { creature->onAttacking(0); } } void Game::addCreatureCheck(Creature* creature) { creature->creatureCheck = true; if (creature->inCheckCreaturesVector) { // already in a vector return; } creature->inCheckCreaturesVector = true; checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature); creature->incrementReferenceCounter(); } void Game::removeCreatureCheck(Creature* creature) { if (creature->inCheckCreaturesVector) { creature->creatureCheck = false; } } void Game::checkCreatures(size_t index) { g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT))); auto& checkCreatureList = checkCreatureLists[index]; auto it = checkCreatureList.begin(), end = checkCreatureList.end(); while (it != end) { Creature* creature = *it; if (creature->creatureCheck) { if (creature->getHealth() > 0) { creature->onThink(EVENT_CREATURE_THINK_INTERVAL); creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL); creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL); } ++it; } else { creature->inCheckCreaturesVector = false; it = checkCreatureList.erase(it); ReleaseCreature(creature); } } cleanup(); } void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta) { int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed(); varSpeed += varSpeedDelta; creature->setSpeed(varSpeed); //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), false, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed()); } } void 
Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit) { if (!g_events->eventCreatureOnChangeOutfit(creature, outfit)) { return; } creature->setCurrentOutfit(outfit); if (creature->isInvisible()) { return; } //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit); } } void Game::internalCreatureChangeVisible(Creature* creature, bool visible) { //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureChangeVisible(creature, visible); } } void Game::changeLight(const Creature* creature) { //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureLight(creature); } } bool Game::combatBlockHit(CombatDamage& damage, Creature* attacker, Creature* target, bool checkDefense, bool checkArmor, bool field, bool ignoreResistances /*= false */) { if (damage.primary.type == COMBAT_NONE && damage.secondary.type == COMBAT_NONE) { return true; } if (target->getPlayer() && target->isInGhostMode()) { return true; } if (damage.primary.value > 0) { return false; } static const auto sendBlockEffect = [this](BlockType_t blockType, CombatType_t combatType, const Position& targetPos) { if (blockType == BLOCK_DEFENSE) { addMagicEffect(targetPos, CONST_ME_POFF); } else if (blockType == BLOCK_ARMOR) { addMagicEffect(targetPos, CONST_ME_BLOCKHIT); } else if (blockType == BLOCK_IMMUNITY) { uint8_t hitEffect = 0; switch (combatType) { case COMBAT_UNDEFINEDDAMAGE: { return; } case COMBAT_ENERGYDAMAGE: case COMBAT_FIREDAMAGE: case COMBAT_PHYSICALDAMAGE: case COMBAT_ICEDAMAGE: case COMBAT_DEATHDAMAGE: { hitEffect = CONST_ME_BLOCKHIT; break; } case COMBAT_EARTHDAMAGE: { hitEffect = CONST_ME_GREEN_RINGS; break; } case COMBAT_HOLYDAMAGE: { hitEffect = CONST_ME_HOLYDAMAGE; break; } default: { hitEffect = CONST_ME_POFF; break; } } addMagicEffect(targetPos, hitEffect); } }; BlockType_t primaryBlockType, secondaryBlockType; if (damage.primary.type != COMBAT_NONE) { damage.primary.value = -damage.primary.value; primaryBlockType = target->blockHit(attacker, damage.primary.type, damage.primary.value, checkDefense, checkArmor, field, ignoreResistances); damage.primary.value = -damage.primary.value; sendBlockEffect(primaryBlockType, damage.primary.type, target->getPosition()); } else { primaryBlockType = BLOCK_NONE; } if (damage.secondary.type != COMBAT_NONE) { damage.secondary.value = -damage.secondary.value; secondaryBlockType = target->blockHit(attacker, damage.secondary.type, damage.secondary.value, false, false, field, ignoreResistances); damage.secondary.value = -damage.secondary.value; sendBlockEffect(secondaryBlockType, damage.secondary.type, target->getPosition()); } else { secondaryBlockType = BLOCK_NONE; } damage.blockType = primaryBlockType; return (primaryBlockType != BLOCK_NONE) && (secondaryBlockType != BLOCK_NONE); } void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect) { switch (combatType) { case COMBAT_PHYSICALDAMAGE: { Item* splash = nullptr; switch (target->getRace()) { case RACE_VENOM: color = TEXTCOLOR_LIGHTGREEN; effect = CONST_ME_HITBYPOISON; splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_SLIME); break; case 
RACE_BLOOD: color = TEXTCOLOR_RED; effect = CONST_ME_DRAWBLOOD; if (const Tile* tile = target->getTile()) { if (!tile->hasFlag(TILESTATE_PROTECTIONZONE)) { splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD); } } break; case RACE_UNDEAD: color = TEXTCOLOR_LIGHTGREY; effect = CONST_ME_HITAREA; break; case RACE_FIRE: color = TEXTCOLOR_ORANGE; effect = CONST_ME_DRAWBLOOD; break; case RACE_ENERGY: color = TEXTCOLOR_ELECTRICPURPLE; effect = CONST_ME_ENERGYHIT; break; default: color = TEXTCOLOR_NONE; effect = CONST_ME_NONE; break; } if (splash) { internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT); startDecay(splash); } break; } case COMBAT_ENERGYDAMAGE: { color = TEXTCOLOR_ELECTRICPURPLE; effect = CONST_ME_ENERGYHIT; break; } case COMBAT_EARTHDAMAGE: { color = TEXTCOLOR_LIGHTGREEN; effect = CONST_ME_GREEN_RINGS; break; } case COMBAT_DROWNDAMAGE: { color = TEXTCOLOR_LIGHTBLUE; effect = CONST_ME_LOSEENERGY; break; } case COMBAT_FIREDAMAGE: { color = TEXTCOLOR_ORANGE; effect = CONST_ME_HITBYFIRE; break; } case COMBAT_ICEDAMAGE: { color = TEXTCOLOR_SKYBLUE; effect = CONST_ME_ICEATTACK; break; } case COMBAT_HOLYDAMAGE: { color = TEXTCOLOR_YELLOW; effect = CONST_ME_HOLYDAMAGE; break; } case COMBAT_DEATHDAMAGE: { color = TEXTCOLOR_DARKRED; effect = CONST_ME_SMALLCLOUDS; break; } case COMBAT_LIFEDRAIN: { color = TEXTCOLOR_RED; effect = CONST_ME_MAGIC_RED; break; } default: { color = TEXTCOLOR_NONE; effect = CONST_ME_NONE; break; } } } bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage) { const Position& targetPos = target->getPosition(); if (damage.primary.value > 0) { if (target->getHealth() <= 0) { return false; } Player* attackerPlayer; if (attacker) { attackerPlayer = attacker->getPlayer(); } else { attackerPlayer = nullptr; } Player* targetPlayer = target->getPlayer(); if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) { return false; } if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeHealthChange(target, attacker, damage); } damage.origin = ORIGIN_NONE; return combatChangeHealth(attacker, target, damage); } } int32_t realHealthChange = target->getHealth(); target->gainHealth(attacker, damage.primary.value); realHealthChange = target->getHealth() - realHealthChange; if (realHealthChange > 0 && !target->isInGhostMode()) { std::stringstream ss; ss << realHealthChange << (realHealthChange != 1 ? " hitpoints." 
: " hitpoint."); std::string damageString = ss.str(); std::string spectatorMessage; TextMessage message; message.position = targetPos; message.primary.value = realHealthChange; message.primary.color = TEXTCOLOR_PASTELRED; SpectatorVec spectators; map.getSpectators(spectators, targetPos, false, true); for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { ss.str({}); ss << "You heal " << target->getNameDescription() << " for " << damageString; message.type = MESSAGE_HEALED; message.text = ss.str(); } else if (tmpPlayer == targetPlayer) { ss.str({}); if (!attacker) { ss << "You were healed"; } else if (targetPlayer == attackerPlayer) { ss << "You healed yourself"; } else { ss << "You were healed by " << attacker->getNameDescription(); } ss << " for " << damageString; message.type = MESSAGE_HEALED; message.text = ss.str(); } else { if (spectatorMessage.empty()) { ss.str({}); if (!attacker) { ss << ucfirst(target->getNameDescription()) << " was healed"; } else { ss << ucfirst(attacker->getNameDescription()) << " healed "; if (attacker == target) { ss << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself" : "himself") : "itself"); } else { ss << target->getNameDescription(); } } ss << " for " << damageString; spectatorMessage = ss.str(); } message.type = MESSAGE_HEALED_OTHERS; message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } } } else { if (!target->isAttackable()) { if (!target->isInGhostMode()) { addMagicEffect(targetPos, CONST_ME_POFF); } return true; } Player* attackerPlayer; if (attacker) { attackerPlayer = attacker->getPlayer(); } else { attackerPlayer = nullptr; } Player* targetPlayer = target->getPlayer(); if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) { return false; } damage.primary.value = std::abs(damage.primary.value); damage.secondary.value = std::abs(damage.secondary.value); int32_t healthChange = damage.primary.value + damage.secondary.value; if (healthChange == 0) { return true; } TextMessage message; message.position = targetPos; SpectatorVec spectators; if (targetPlayer && target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) { int32_t manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange); if (manaDamage != 0) { if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeManaChange(target, attacker, damage); } healthChange = damage.primary.value + damage.secondary.value; if (healthChange == 0) { return true; } manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange); } } targetPlayer->drainMana(attacker, manaDamage); map.getSpectators(spectators, targetPos, true, true); addMagicEffect(spectators, targetPos, CONST_ME_LOSEENERGY); std::stringstream ss; std::string damageString = std::to_string(manaDamage); std::string spectatorMessage; message.primary.value = manaDamage; message.primary.color = TEXTCOLOR_BLUE; for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer->getPosition().z != targetPos.z) { continue; } if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { ss.str({}); ss << ucfirst(target->getNameDescription()) << " loses " << damageString + " mana due to your attack."; 
message.type = MESSAGE_DAMAGE_DEALT; message.text = ss.str(); } else if (tmpPlayer == targetPlayer) { ss.str({}); ss << "You lose " << damageString << " mana"; if (!attacker) { ss << '.'; } else if (targetPlayer == attackerPlayer) { ss << " due to your own attack."; } else { ss << " due to an attack by " << attacker->getNameDescription() << '.'; } message.type = MESSAGE_DAMAGE_RECEIVED; message.text = ss.str(); } else { if (spectatorMessage.empty()) { ss.str({}); ss << ucfirst(target->getNameDescription()) << " loses " << damageString + " mana"; if (attacker) { ss << " due to "; if (attacker == target) { ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack"); } else { ss << "an attack by " << attacker->getNameDescription(); } } ss << '.'; spectatorMessage = ss.str(); } message.type = MESSAGE_DAMAGE_OTHERS; message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } damage.primary.value -= manaDamage; if (damage.primary.value < 0) { damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value); damage.primary.value = 0; } } } int32_t realDamage = damage.primary.value + damage.secondary.value; if (realDamage == 0) { return true; } if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeHealthChange(target, attacker, damage); } damage.origin = ORIGIN_NONE; return combatChangeHealth(attacker, target, damage); } } int32_t targetHealth = target->getHealth(); if (damage.primary.value >= targetHealth) { damage.primary.value = targetHealth; damage.secondary.value = 0; } else if (damage.secondary.value) { damage.secondary.value = std::min<int32_t>(damage.secondary.value, targetHealth - damage.primary.value); } realDamage = damage.primary.value + damage.secondary.value; if (realDamage == 0) { return true; } if (spectators.empty()) { map.getSpectators(spectators, targetPos, true, true); } message.primary.value = damage.primary.value; message.secondary.value = damage.secondary.value; uint8_t hitEffect; if (message.primary.value) { combatGetTypeInfo(damage.primary.type, target, message.primary.color, hitEffect); if (hitEffect != CONST_ME_NONE) { addMagicEffect(spectators, targetPos, hitEffect); } } if (message.secondary.value) { combatGetTypeInfo(damage.secondary.type, target, message.secondary.color, hitEffect); if (hitEffect != CONST_ME_NONE) { addMagicEffect(spectators, targetPos, hitEffect); } } if (message.primary.color != TEXTCOLOR_NONE || message.secondary.color != TEXTCOLOR_NONE) { std::stringstream ss; ss << realDamage << (realDamage != 1 ? 
" hitpoints" : " hitpoint"); std::string damageString = ss.str(); std::string spectatorMessage; for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer->getPosition().z != targetPos.z) { continue; } if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { ss.str({}); ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " due to your attack."; message.type = MESSAGE_DAMAGE_DEALT; message.text = ss.str(); } else if (tmpPlayer == targetPlayer) { ss.str({}); ss << "You lose " << damageString; if (!attacker) { ss << '.'; } else if (targetPlayer == attackerPlayer) { ss << " due to your own attack."; } else { ss << " due to an attack by " << attacker->getNameDescription() << '.'; } message.type = MESSAGE_DAMAGE_RECEIVED; message.text = ss.str(); } else { message.type = MESSAGE_DAMAGE_OTHERS; if (spectatorMessage.empty()) { ss.str({}); ss << ucfirst(target->getNameDescription()) << " loses " << damageString; if (attacker) { ss << " due to "; if (attacker == target) { if (targetPlayer) { ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack"); } else { ss << "its own attack"; } } else { ss << "an attack by " << attacker->getNameDescription(); } } ss << '.'; spectatorMessage = ss.str(); } message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } } if (realDamage >= targetHealth) { for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) { if (!creatureEvent->executeOnPrepareDeath(target, attacker)) { return false; } } } target->drainHealth(attacker, realDamage); addCreatureHealth(spectators, target); } return true; } bool Game::combatChangeMana(Creature* attacker, Creature* target, CombatDamage& damage) { Player* targetPlayer = target->getPlayer(); if (!targetPlayer) { return true; } int32_t manaChange = damage.primary.value + damage.secondary.value; if (manaChange > 0) { if (attacker) { const Player* attackerPlayer = attacker->getPlayer(); if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(target) == SKULL_NONE) { return false; } } if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeManaChange(target, attacker, damage); } damage.origin = ORIGIN_NONE; return combatChangeMana(attacker, target, damage); } } int32_t realManaChange = targetPlayer->getMana(); targetPlayer->changeMana(manaChange); realManaChange = targetPlayer->getMana() - realManaChange; if (realManaChange > 0 && !targetPlayer->isInGhostMode()) { TextMessage message(MESSAGE_HEALED, "You gained " + std::to_string(realManaChange) + " mana."); message.position = target->getPosition(); message.primary.value = realManaChange; message.primary.color = TEXTCOLOR_MAYABLUE; targetPlayer->sendTextMessage(message); } } else { const Position& targetPos = target->getPosition(); if (!target->isAttackable()) { if (!target->isInGhostMode()) { addMagicEffect(targetPos, CONST_ME_POFF); } return false; } Player* attackerPlayer; if (attacker) { attackerPlayer = attacker->getPlayer(); } else { attackerPlayer = nullptr; } if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) { return false; } int32_t manaLoss = std::min<int32_t>(targetPlayer->getMana(), -manaChange); BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, 
	                                          manaLoss);
	if (blockType != BLOCK_NONE) {
		addMagicEffect(targetPos, CONST_ME_POFF);
		return false;
	}

	if (manaLoss <= 0) {
		return true;
	}

	// Let CREATURE_EVENT_MANACHANGE scripts adjust the damage once, then re-run with the origin cleared.
	if (damage.origin != ORIGIN_NONE) {
		const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
		if (!events.empty()) {
			for (CreatureEvent* creatureEvent : events) {
				creatureEvent->executeManaChange(target, attacker, damage);
			}
			damage.origin = ORIGIN_NONE;
			return combatChangeMana(attacker, target, damage);
		}
	}

	targetPlayer->drainMana(attacker, manaLoss);

	std::stringstream ss;
	std::string damageString = std::to_string(manaLoss);
	std::string spectatorMessage;

	TextMessage message;
	message.position = targetPos;
	message.primary.value = manaLoss;
	message.primary.color = TEXTCOLOR_BLUE;

	// Report the mana loss to every watching player; the wording depends on whether the viewer is the attacker, the target, or a bystander.
	SpectatorVec spectators;
	map.getSpectators(spectators, targetPos, false, true);
	for (Creature* spectator : spectators) {
		Player* tmpPlayer = spectator->getPlayer();
		if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
			ss.str({});
			ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana due to your attack.";
			message.type = MESSAGE_DAMAGE_DEALT;
			message.text = ss.str();
		} else if (tmpPlayer == targetPlayer) {
			ss.str({});
			ss << "You lose " << damageString << " mana";
			if (!attacker) {
				ss << '.';
			} else if (targetPlayer == attackerPlayer) {
				ss << " due to your own attack.";
			} else {
				ss << " due to an attack by " << attacker->getNameDescription() << '.';
			}
			message.type = MESSAGE_DAMAGE_RECEIVED;
			message.text = ss.str();
		} else {
			if (spectatorMessage.empty()) {
				ss.str({});
				ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana";
				if (attacker) {
					ss << " due to ";
					if (attacker == target) {
						ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
					} else {
						ss << "an attack by " << attacker->getNameDescription();
					}
				}
				ss << '.';
				spectatorMessage = ss.str();
			}
			message.type = MESSAGE_DAMAGE_OTHERS;
			message.text = spectatorMessage;
		}
		tmpPlayer->sendTextMessage(message);
	}
	}

	return true;
}

void Game::addCreatureHealth(const Creature* target)
{
	SpectatorVec spectators;
	map.getSpectators(spectators, target->getPosition(), true, true);
	addCreatureHealth(spectators, target);
}

void Game::addCreatureHealth(const SpectatorVec& spectators, const Creature* target)
{
	for (Creature* spectator : spectators) {
		if (Player* tmpPlayer = spectator->getPlayer()) {
			tmpPlayer->sendCreatureHealth(target);
		}
	}
}

void Game::addMagicEffect(const Position& pos, uint8_t effect)
{
	SpectatorVec spectators;
	map.getSpectators(spectators, pos, true, true);
	addMagicEffect(spectators, pos, effect);
}

void Game::addMagicEffect(const SpectatorVec& spectators, const Position& pos, uint8_t effect)
{
	for (Creature* spectator : spectators) {
		if (Player* tmpPlayer = spectator->getPlayer()) {
			tmpPlayer->sendMagicEffect(pos, effect);
		}
	}
}

void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect)
{
	SpectatorVec spectators, toPosSpectators;
	map.getSpectators(spectators, fromPos, false, true);
	map.getSpectators(toPosSpectators, toPos, false, true);
	spectators.addSpectators(toPosSpectators);
	addDistanceEffect(spectators, fromPos, toPos, effect);
}

void Game::addDistanceEffect(const SpectatorVec& spectators, const Position& fromPos, const Position& toPos, uint8_t effect)
{
	for (Creature* spectator : spectators) {
		if (Player* tmpPlayer = spectator->getPlayer()) {
			tmpPlayer->sendDistanceShoot(fromPos, toPos, effect);
		}
	}
}

void Game::startDecay(Item* item)
{
	if (!item || !item->canDecay())
{ return; } ItemDecayState_t decayState = item->getDecaying(); if (decayState == DECAYING_TRUE) { return; } if (item->getDuration() > 0) { item->incrementReferenceCounter(); item->setDecaying(DECAYING_TRUE); toDecayItems.push_front(item); } else { internalDecayItem(item); } } void Game::internalDecayItem(Item* item) { const ItemType& it = Item::items[item->getID()]; if (it.decayTo != 0) { Item* newItem = transformItem(item, item->getDecayTo()); startDecay(newItem); } else { ReturnValue ret = internalRemoveItem(item); if (ret != RETURNVALUE_NOERROR) { std::cout << "[Debug - Game::internalDecayItem] internalDecayItem failed, error code: " << static_cast<uint32_t>(ret) << ", item id: " << item->getID() << std::endl; } } } void Game::checkDecay() { g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this))); size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS; auto it = decayItems[bucket].begin(), end = decayItems[bucket].end(); while (it != end) { Item* item = *it; if (!item->canDecay()) { item->setDecaying(DECAYING_FALSE); ReleaseItem(item); it = decayItems[bucket].erase(it); continue; } int32_t duration = item->getDuration(); int32_t decreaseTime = std::min<int32_t>(EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS, duration); duration -= decreaseTime; item->decreaseDuration(decreaseTime); if (duration <= 0) { it = decayItems[bucket].erase(it); internalDecayItem(item); ReleaseItem(item); } else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) { it = decayItems[bucket].erase(it); size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS; if (newBucket == bucket) { internalDecayItem(item); ReleaseItem(item); } else { decayItems[newBucket].push_back(item); } } else { ++it; } } lastBucket = bucket; cleanup(); } void Game::checkLight() { g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this))); updateWorldLightLevel(); LightInfo lightInfo = getWorldLightInfo(); for (const auto& it : players) { it.second->sendWorldLight(lightInfo); } } void Game::updateWorldLightLevel() { if (getWorldTime() >= GAME_SUNRISE && getWorldTime() <= GAME_DAYTIME) { lightLevel = ((GAME_DAYTIME - GAME_SUNRISE) - (GAME_DAYTIME - getWorldTime())) * float(LIGHT_CHANGE_SUNRISE) + LIGHT_NIGHT; } else if (getWorldTime() >= GAME_SUNSET && getWorldTime() <= GAME_NIGHTTIME) { lightLevel = LIGHT_DAY - ((getWorldTime() - GAME_SUNSET) * float(LIGHT_CHANGE_SUNSET)); } else if (getWorldTime() >= GAME_NIGHTTIME || getWorldTime() < GAME_SUNRISE) { lightLevel = LIGHT_NIGHT; } else { lightLevel = LIGHT_DAY; } } void Game::updateWorldTime() { g_scheduler.addEvent(createSchedulerTask(EVENT_WORLDTIMEINTERVAL, std::bind(&Game::updateWorldTime, this))); time_t osTime = time(nullptr); tm* timeInfo = localtime(&osTime); worldTime = (timeInfo->tm_sec + (timeInfo->tm_min * 60)) / 2.5f; } void Game::shutdown() { std::cout << "Shutting down..." << std::flush; g_scheduler.shutdown(); g_databaseTasks.shutdown(); g_dispatcher.shutdown(); map.spawns.clear(); raids.clear(); cleanup(); if (serviceManager) { serviceManager->stop(); } ConnectionManager::getInstance().closeAll(); std::cout << " done!" 
<< std::endl; } void Game::cleanup() { //free memory for (auto creature : ToReleaseCreatures) { creature->decrementReferenceCounter(); } ToReleaseCreatures.clear(); for (auto item : ToReleaseItems) { item->decrementReferenceCounter(); } ToReleaseItems.clear(); for (Item* item : toDecayItems) { const uint32_t dur = item->getDuration(); if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) { decayItems[lastBucket].push_back(item); } else { decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item); } } toDecayItems.clear(); } void Game::ReleaseCreature(Creature* creature) { ToReleaseCreatures.push_back(creature); } void Game::ReleaseItem(Item* item) { ToReleaseItems.push_back(item); } void Game::broadcastMessage(const std::string& text, MessageClasses type) const { std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl; for (const auto& it : players) { it.second->sendTextMessage(type, text); } } void Game::updateCreatureWalkthrough(const Creature* creature) { //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature)); } } void Game::updateCreatureSkull(const Creature* creature) { if (getWorldType() != WORLD_TYPE_PVP) { return; } SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureSkull(creature); } } void Game::updatePlayerShield(Player* player) { SpectatorVec spectators; map.getSpectators(spectators, player->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureShield(player); } } void Game::updatePlayerHelpers(const Player& player) { uint32_t creatureId = player.getID(); uint16_t helpers = player.getHelpers(); SpectatorVec spectators; map.getSpectators(spectators, player.getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureHelpers(creatureId, helpers); } } void Game::updateCreatureType(Creature* creature) { const Player* masterPlayer = nullptr; uint32_t creatureId = creature->getID(); CreatureType_t creatureType = creature->getType(); if (creatureType == CREATURETYPE_MONSTER) { const Creature* master = creature->getMaster(); if (master) { masterPlayer = master->getPlayer(); if (masterPlayer) { creatureType = CREATURETYPE_SUMMON_OTHERS; } } } //send to clients SpectatorVec spectators; map.getSpectators(spectators, creature->getPosition(), true, true); if (creatureType == CREATURETYPE_SUMMON_OTHERS) { for (Creature* spectator : spectators) { Player* player = spectator->getPlayer(); if (masterPlayer == player) { player->sendCreatureType(creatureId, CREATURETYPE_SUMMON_OWN); } else { player->sendCreatureType(creatureId, creatureType); } } } else { for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureType(creatureId, creatureType); } } } void Game::loadMotdNum() { Database& db = Database::getInstance(); DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'"); if (result) { motdNum = result->getNumber<uint32_t>("value"); } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')"); } result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'"); if (result) { motdHash = 
result->getString("value"); if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) { ++motdNum; } } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')"); } } void Game::saveMotdNum() const { Database& db = Database::getInstance(); std::ostringstream query; query << "UPDATE `server_config` SET `value` = '" << motdNum << "' WHERE `config` = 'motd_num'"; db.executeQuery(query.str()); query.str(std::string()); query << "UPDATE `server_config` SET `value` = '" << transformToSHA1(g_config.getString(ConfigManager::MOTD)) << "' WHERE `config` = 'motd_hash'"; db.executeQuery(query.str()); } void Game::checkPlayersRecord() { const size_t playersOnline = getPlayersOnline(); if (playersOnline > playersRecord) { uint32_t previousRecord = playersRecord; playersRecord = playersOnline; for (auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) { it.second.executeRecord(playersRecord, previousRecord); } updatePlayersRecord(); } } void Game::updatePlayersRecord() const { Database& db = Database::getInstance(); std::ostringstream query; query << "UPDATE `server_config` SET `value` = '" << playersRecord << "' WHERE `config` = 'players_record'"; db.executeQuery(query.str()); } void Game::loadPlayersRecord() { Database& db = Database::getInstance(); DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'"); if (result) { playersRecord = result->getNumber<uint32_t>("value"); } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')"); } } void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId) { if (playerId == invitedId) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } Player* invitedPlayer = getPlayerByID(invitedId); if (!invitedPlayer || invitedPlayer->isInviting(player)) { return; } if (invitedPlayer->getParty()) { std::ostringstream ss; ss << invitedPlayer->getName() << " is already in a party."; player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str()); return; } Party* party = player->getParty(); if (!party) { party = new Party(player); } else if (party->getLeader() != player) { return; } party->invitePlayer(*invitedPlayer); } void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Player* leader = getPlayerByID(leaderId); if (!leader || !leader->isInviting(player)) { return; } Party* party = leader->getParty(); if (!party || party->getLeader() != leader) { return; } if (player->getParty()) { player->sendTextMessage(MESSAGE_INFO_DESCR, "You are already in a party."); return; } party->joinParty(*player); } void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || party->getLeader() != player) { return; } Player* invitedPlayer = getPlayerByID(invitedId); if (!invitedPlayer || !player->isInviting(invitedPlayer)) { return; } party->revokeInvitation(*invitedPlayer); } void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || party->getLeader() != player) { return; } Player* newLeader = getPlayerByID(newLeaderId); if (!newLeader || !player->isPartner(newLeader)) { return; } party->passPartyLeadership(newLeader); } void Game::playerLeaveParty(uint32_t 
playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || player->hasCondition(CONDITION_INFIGHT)) { return; } party->leaveParty(player); } void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || (player->hasCondition(CONDITION_INFIGHT) && player->getZone() != ZONE_PROTECTION)) { return; } party->setSharedExperience(player, sharedExpActive); } void Game::sendGuildMotd(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Guild* guild = player->getGuild(); if (guild) { player->sendChannelMessage("Message of the Day", guild->getMotd(), TALKTYPE_CHANNEL_R1, CHANNEL_GUILD); } } void Game::kickPlayer(uint32_t playerId, bool displayEffect) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->kickPlayer(displayEffect); } void Game::playerReportRuleViolation(uint32_t playerId, const std::string& targetName, uint8_t reportType, uint8_t reportReason, const std::string& comment, const std::string& translation) { Player* player = getPlayerByID(playerId); if (!player) { return; } g_events->eventPlayerOnReportRuleViolation(player, targetName, reportType, reportReason, comment, translation); } void Game::playerReportBug(uint32_t playerId, const std::string& message, const Position& position, uint8_t category) { Player* player = getPlayerByID(playerId); if (!player) { return; } g_events->eventPlayerOnReportBug(player, message, position, category); } void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment) { Player* player = getPlayerByID(playerId); if (!player) { return; } // TODO: move debug assertions to database FILE* file = fopen("client_assertions.txt", "a"); if (file) { fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str()); fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str()); fclose(file); } } void Game::playerLeaveMarket(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->setInMarket(false); } void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } if (it.wareId == 0) { return; } const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id); const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id); player->sendMarketBrowseItem(it.id, buyOffers, sellOffers); player->sendMarketDetail(it.id); } void Game::playerBrowseMarketOwnOffers(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID()); const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID()); player->sendMarketBrowseOwnOffers(buyOffers, sellOffers); } void Game::playerBrowseMarketOwnHistory(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } 
const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID()); const HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID()); player->sendMarketBrowseOwnHistory(buyOffers, sellOffers); } void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous) { if (amount == 0 || amount > 64000) { return; } if (price == 0 || price > 999999999) { return; } if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) { player->sendMarketLeave(); return; } const ItemType& itt = Item::items.getItemIdByClientId(spriteId); if (itt.id == 0 || itt.wareId == 0) { return; } const ItemType& it = Item::items.getItemIdByClientId(itt.wareId); if (it.id == 0 || it.wareId == 0) { return; } if (!it.stackable && amount > 2000) { return; } const uint32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER); if (maxOfferCount != 0 && IOMarket::getPlayerOfferCount(player->getGUID()) >= maxOfferCount) { return; } uint64_t fee = (price / 100.) * amount; if (fee < 20) { fee = 20; } else if (fee > 1000) { fee = 1000; } if (type == MARKETACTION_SELL) { if (fee > player->bankBalance) { return; } DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false); if (!depotChest) { return; } std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox()); if (itemList.empty()) { return; } if (it.stackable) { uint16_t tmpAmount = amount; for (Item* item : itemList) { uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount()); tmpAmount -= removeCount; internalRemoveItem(item, removeCount); if (tmpAmount == 0) { break; } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } player->bankBalance -= fee; } else { uint64_t totalPrice = static_cast<uint64_t>(price) * amount; totalPrice += fee; if (totalPrice > player->bankBalance) { return; } player->bankBalance -= totalPrice; } IOMarket::createOffer(player->getGUID(), static_cast<MarketAction_t>(type), it.id, amount, price, anonymous); player->sendMarketEnter(player->getLastDepotId()); const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id); const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id); player->sendMarketBrowseItem(it.id, buyOffers, sellOffers); } void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter); if (offer.id == 0 || offer.playerId != player->getGUID()) { return; } if (offer.type == MARKETACTION_BUY) { player->bankBalance += static_cast<uint64_t>(offer.price) * offer.amount; player->sendMarketEnter(player->getLastDepotId()); } else { const ItemType& it = Item::items[offer.itemId]; if (it.id == 0) { return; } if (it.stackable) { uint16_t tmpAmount = offer.amount; while (tmpAmount > 0) { int32_t stackCount = std::min<int32_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, stackCount); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= 
stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < offer.amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } } IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED); offer.amount = 0; offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION); player->sendMarketCancelOffer(offer); player->sendMarketEnter(player->getLastDepotId()); } void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount) { if (amount == 0 || amount > 64000) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter); if (offer.id == 0) { return; } if (amount > offer.amount) { return; } const ItemType& it = Item::items[offer.itemId]; if (it.id == 0) { return; } uint64_t totalPrice = static_cast<uint64_t>(offer.price) * amount; if (offer.type == MARKETACTION_BUY) { DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false); if (!depotChest) { return; } std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox()); if (itemList.empty()) { return; } Player* buyerPlayer = getPlayerByGUID(offer.playerId); if (!buyerPlayer) { buyerPlayer = new Player(nullptr); if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) { delete buyerPlayer; return; } } if (it.stackable) { uint16_t tmpAmount = amount; for (Item* item : itemList) { uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount()); tmpAmount -= removeCount; internalRemoveItem(item, removeCount); if (tmpAmount == 0) { break; } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } player->bankBalance += totalPrice; if (it.stackable) { uint16_t tmpAmount = amount; while (tmpAmount > 0) { uint16_t stackCount = std::min<uint16_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, stackCount); if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } if (buyerPlayer->isOffline()) { IOLoginData::savePlayer(buyerPlayer); delete buyerPlayer; } else { buyerPlayer->onReceiveMail(); } } else { if (totalPrice > player->bankBalance) { return; } player->bankBalance -= totalPrice; if (it.stackable) { uint16_t tmpAmount = amount; while (tmpAmount > 0) { uint16_t stackCount = std::min<uint16_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, stackCount); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } Player* sellerPlayer = 
getPlayerByGUID(offer.playerId); if (sellerPlayer) { sellerPlayer->bankBalance += totalPrice; } else { IOLoginData::increaseBankBalance(offer.playerId, totalPrice); } player->onReceiveMail(); } const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION); IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX); IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED); offer.amount -= amount; if (offer.amount == 0) { IOMarket::deleteOffer(offer.id); } else { IOMarket::acceptOffer(offer.id, amount); } player->sendMarketEnter(player->getLastDepotId()); offer.timestamp += marketOfferDuration; player->sendMarketAcceptOffer(offer); } void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer) { Player* player = getPlayerByID(playerId); if (!player) { return; } for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) { creatureEvent->executeExtendedOpcode(player, opcode, buffer); } } std::forward_list<Item*> Game::getMarketItemList(uint16_t wareId, uint16_t sufficientCount, DepotChest* depotChest, Inbox* inbox) { std::forward_list<Item*> itemList; uint16_t count = 0; std::list<Container*> containers { depotChest, inbox }; do { Container* container = containers.front(); containers.pop_front(); for (Item* item : container->getItemList()) { Container* c = item->getContainer(); if (c && !c->empty()) { containers.push_back(c); continue; } const ItemType& itemType = Item::items[item->getID()]; if (itemType.wareId != wareId) { continue; } if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) { continue; } if (!item->hasMarketAttributes()) { continue; } itemList.push_front(item); count += Item::countByType(item, -1); if (count >= sufficientCount) { return itemList; } } } while (!containers.empty()); return std::forward_list<Item*>(); } void Game::forceAddCondition(uint32_t creatureId, Condition* condition) { Creature* creature = getCreatureByID(creatureId); if (!creature) { delete condition; return; } creature->addCondition(condition, true); } void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type) { Creature* creature = getCreatureByID(creatureId); if (!creature) { return; } creature->removeCondition(type, true); } void Game::sendOfflineTrainingDialog(Player* player) { if (!player) { return; } if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) { player->sendModalWindow(offlineTrainingWindow); } } void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->hasModalWindowOpen(modalWindowId)) { return; } player->onModalWindowHandled(modalWindowId); // offline training, hardcoded if (modalWindowId == std::numeric_limits<uint32_t>::max()) { if (button == offlineTrainingWindow.defaultEnterButton) { if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DISTANCE || choice == SKILL_MAGLEVEL) { BedItem* bedItem = player->getBedItem(); if (bedItem && bedItem->sleep(player)) { player->setOfflineTrainingSkill(choice); return; } } } else { player->sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted."); } player->setBedItem(nullptr); } else { 
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) { creatureEvent->executeModalWindow(player, modalWindowId, button, choice); } } } void Game::addPlayer(Player* player) { const std::string& lowercase_name = asLowerCaseString(player->getName()); mappedPlayerNames[lowercase_name] = player; mappedPlayerGuids[player->getGUID()] = player; wildcardTree.insert(lowercase_name); players[player->getID()] = player; } void Game::removePlayer(Player* player) { const std::string& lowercase_name = asLowerCaseString(player->getName()); mappedPlayerNames.erase(lowercase_name); mappedPlayerGuids.erase(player->getGUID()); wildcardTree.remove(lowercase_name); players.erase(player->getID()); } void Game::addNpc(Npc* npc) { npcs[npc->getID()] = npc; } void Game::removeNpc(Npc* npc) { npcs.erase(npc->getID()); } void Game::addMonster(Monster* monster) { monsters[monster->getID()] = monster; } void Game::removeMonster(Monster* monster) { monsters.erase(monster->getID()); } Guild* Game::getGuild(uint32_t id) const { auto it = guilds.find(id); if (it == guilds.end()) { return nullptr; } return it->second; } void Game::addGuild(Guild* guild) { guilds[guild->getId()] = guild; } void Game::removeGuild(uint32_t guildId) { guilds.erase(guildId); } void Game::decreaseBrowseFieldRef(const Position& pos) { Tile* tile = map.getTile(pos.x, pos.y, pos.z); if (!tile) { return; } auto it = browseFields.find(tile); if (it != browseFields.end()) { it->second->decrementReferenceCounter(); } } void Game::internalRemoveItems(std::vector<Item*> itemList, uint32_t amount, bool stackable) { if (stackable) { for (Item* item : itemList) { if (item->getItemCount() > amount) { internalRemoveItem(item, amount); break; } else { amount -= item->getItemCount(); internalRemoveItem(item); } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } } BedItem* Game::getBedBySleeper(uint32_t guid) const { auto it = bedSleepersMap.find(guid); if (it == bedSleepersMap.end()) { return nullptr; } return it->second; } void Game::setBedSleeper(BedItem* bed, uint32_t guid) { bedSleepersMap[guid] = bed; } void Game::removeBedSleeper(uint32_t guid) { auto it = bedSleepersMap.find(guid); if (it != bedSleepersMap.end()) { bedSleepersMap.erase(it); } } Item* Game::getUniqueItem(uint16_t uniqueId) { auto it = uniqueItems.find(uniqueId); if (it == uniqueItems.end()) { return nullptr; } return it->second; } bool Game::addUniqueItem(uint16_t uniqueId, Item* item) { auto result = uniqueItems.emplace(uniqueId, item); if (!result.second) { std::cout << "Duplicate unique id: " << uniqueId << std::endl; } return result.second; } void Game::removeUniqueItem(uint16_t uniqueId) { auto it = uniqueItems.find(uniqueId); if (it != uniqueItems.end()) { uniqueItems.erase(it); } } bool Game::reload(ReloadTypes_t reloadType) { switch (reloadType) { case RELOAD_TYPE_ACTIONS: return g_actions->reload(); case RELOAD_TYPE_CHAT: return g_chat->load(); case RELOAD_TYPE_CONFIG: return g_config.reload(); case RELOAD_TYPE_CREATURESCRIPTS: { g_creatureEvents->reload(); g_creatureEvents->removeInvalidEvents(); return true; } case RELOAD_TYPE_EVENTS: return g_events->load(); case RELOAD_TYPE_GLOBALEVENTS: return g_globalEvents->reload(); case RELOAD_TYPE_ITEMS: return Item::items.reload(); case RELOAD_TYPE_MONSTERS: return g_monsters.reload(); case RELOAD_TYPE_MOUNTS: return mounts.reload(); case RELOAD_TYPE_MOVEMENTS: return g_moveEvents->reload(); case RELOAD_TYPE_NPCS: { Npcs::reload(); return true; } case RELOAD_TYPE_QUESTS: return 
quests.reload(); case RELOAD_TYPE_RAIDS: return raids.reload() && raids.startup(); case RELOAD_TYPE_SPELLS: { if (!g_spells->reload()) { std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl; std::terminate(); } else if (!g_monsters.reload()) { std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl; std::terminate(); } return true; } case RELOAD_TYPE_TALKACTIONS: return g_talkActions->reload(); case RELOAD_TYPE_WEAPONS: { bool results = g_weapons->reload(); g_weapons->loadDefaults(); return results; } case RELOAD_TYPE_SCRIPTS: { // commented out stuff is TODO, once we approach further in revscriptsys g_actions->clear(true); g_creatureEvents->clear(true); g_moveEvents->clear(true); g_talkActions->clear(true); g_globalEvents->clear(true); g_weapons->clear(true); g_weapons->loadDefaults(); g_spells->clear(true); g_scripts->loadScripts("scripts", false, true); g_creatureEvents->removeInvalidEvents(); /* Npcs::reload(); raids.reload() && raids.startup(); Item::items.reload(); quests.reload(); mounts.reload(); g_config.reload(); g_events->load(); g_chat->load(); */ return true; } default: { if (!g_spells->reload()) { std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl; std::terminate(); } else if (!g_monsters.reload()) { std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl; std::terminate(); } g_actions->reload(); g_config.reload(); g_creatureEvents->reload(); g_monsters.reload(); g_moveEvents->reload(); Npcs::reload(); raids.reload() && raids.startup(); g_talkActions->reload(); Item::items.reload(); g_weapons->reload(); g_weapons->clear(true); g_weapons->loadDefaults(); quests.reload(); mounts.reload(); g_globalEvents->reload(); g_events->load(); g_chat->load(); g_actions->clear(true); g_creatureEvents->clear(true); g_moveEvents->clear(true); g_talkActions->clear(true); g_globalEvents->clear(true); g_spells->clear(true); g_scripts->loadScripts("scripts", false, true); g_creatureEvents->removeInvalidEvents(); return true; } } return true; }
1
18,272
Undo this modification
otland-forgottenserver
cpp
@@ -170,7 +170,7 @@ namespace OpenTelemetry.Exporter.Zipkin.Tests var traceId = useShortTraceIds ? TraceId.Substring(TraceId.Length - 16, 16) : TraceId; Assert.Equal( - $@"[{{""traceId"":""{traceId}"",""name"":""Name"",""parentId"":""{ZipkinActivityConversionExtensions.EncodeSpanId(activity.ParentSpanId)}"",""id"":""{ZipkinActivityConversionExtensions.EncodeSpanId(context.SpanId)}"",""kind"":""CLIENT"",""timestamp"":{timestamp},""duration"":60000000,""localEndpoint"":{{""serviceName"":""OpenTelemetry Exporter""{ipInformation}}},""annotations"":[{{""timestamp"":{eventTimestamp},""value"":""Event1""}},{{""timestamp"":{eventTimestamp},""value"":""Event2""}}],""tags"":{{""stringKey"":""value"",""longKey"":""1"",""longKey2"":""1"",""doubleKey"":""1"",""doubleKey2"":""1"",""boolKey"":""True"",""library.name"":""CreateTestActivity""}}}}]", + $@"[{{""traceId"":""{traceId}"",""name"":""Name"",""parentId"":""{ZipkinActivityConversionExtensions.EncodeSpanId(activity.ParentSpanId)}"",""id"":""{ZipkinActivityConversionExtensions.EncodeSpanId(context.SpanId)}"",""kind"":""CLIENT"",""timestamp"":{timestamp},""duration"":60000000,""localEndpoint"":{{""serviceName"":""OpenTelemetry Exporter""{ipInformation}}},""annotations"":[{{""timestamp"":{eventTimestamp},""value"":""Event1""}},{{""timestamp"":{eventTimestamp},""value"":""Event2""}}],""tags"":{{""stringKey"":""value"",""longKey"":1,""longKey2"":1,""doubleKey"":1.0,""doubleKey2"":1.0,""boolKey"":true,""library.name"":""CreateTestActivity""}}}}]", Responses[requestId]); }
1
// <copyright file="ZipkinExporterTests.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using System.Net; using System.Text; using System.Threading; using OpenTelemetry.Exporter.Zipkin.Implementation; using OpenTelemetry.Resources; using OpenTelemetry.Tests; using OpenTelemetry.Trace; using Xunit; namespace OpenTelemetry.Exporter.Zipkin.Tests { public class ZipkinExporterTests : IDisposable { private const string TraceId = "e8ea7e9ac72de94e91fabc613f9686b2"; private static readonly ConcurrentDictionary<Guid, string> Responses = new ConcurrentDictionary<Guid, string>(); private readonly IDisposable testServer; private readonly string testServerHost; private readonly int testServerPort; static ZipkinExporterTests() { Activity.DefaultIdFormat = ActivityIdFormat.W3C; Activity.ForceDefaultIdFormat = true; var listener = new ActivityListener { ShouldListenTo = _ => true, GetRequestedDataUsingParentId = (ref ActivityCreationOptions<string> options) => ActivityDataRequest.AllData, GetRequestedDataUsingContext = (ref ActivityCreationOptions<ActivityContext> options) => ActivityDataRequest.AllData, }; ActivitySource.AddActivityListener(listener); } public ZipkinExporterTests() { this.testServer = TestHttpServer.RunServer( ctx => ProcessServerRequest(ctx), out this.testServerHost, out this.testServerPort); static void ProcessServerRequest(HttpListenerContext context) { context.Response.StatusCode = 200; using StreamReader readStream = new StreamReader(context.Request.InputStream); string requestContent = readStream.ReadToEnd(); Responses.TryAdd( Guid.Parse(context.Request.QueryString["requestId"]), requestContent); context.Response.OutputStream.Close(); } } public void Dispose() { this.testServer.Dispose(); } [Fact] public void ZipkinExporter_BadArgs() { TracerProviderBuilder builder = null; Assert.Throws<ArgumentNullException>(() => builder.AddZipkinExporter()); } [Fact] public void ZipkinExporter_SuppresssesInstrumentation() { const string ActivitySourceName = "zipkin.test"; Guid requestId = Guid.NewGuid(); TestActivityProcessor testActivityProcessor = new TestActivityProcessor(); int endCalledCount = 0; testActivityProcessor.EndAction = (a) => { endCalledCount++; }; var exporterOptions = new ZipkinExporterOptions(); exporterOptions.ServiceName = "test-zipkin"; exporterOptions.Endpoint = new Uri($"http://{this.testServerHost}:{this.testServerPort}/api/v2/spans?requestId={requestId}"); var zipkinExporter = new ZipkinExporter(exporterOptions); var exportActivityProcessor = new BatchExportActivityProcessor(zipkinExporter); var openTelemetrySdk = Sdk.CreateTracerProviderBuilder() .AddSource(ActivitySourceName) .AddProcessor(testActivityProcessor) .AddProcessor(exportActivityProcessor) .AddHttpClientInstrumentation() .Build(); var source = new 
ActivitySource(ActivitySourceName); var activity = source.StartActivity("Test Zipkin Activity"); activity?.Stop(); // We call ForceFlush on the exporter twice, so that in the event // of a regression, this should give any operations performed in // the Zipkin exporter itself enough time to be instrumented and // loop back through the exporter. exportActivityProcessor.ForceFlush(); exportActivityProcessor.ForceFlush(); Assert.Equal(1, endCalledCount); } [Theory] [InlineData(true)] [InlineData(false)] public void ZipkinExporterIntegrationTest(bool useShortTraceIds) { Guid requestId = Guid.NewGuid(); ZipkinExporter exporter = new ZipkinExporter( new ZipkinExporterOptions { Endpoint = new Uri($"http://{this.testServerHost}:{this.testServerPort}/api/v2/spans?requestId={requestId}"), UseShortTraceIds = useShortTraceIds, }); var activity = CreateTestActivity(); var processor = new SimpleExportActivityProcessor(exporter); processor.OnEnd(activity); var context = activity.Context; var timestamp = activity.StartTimeUtc.ToEpochMicroseconds(); var eventTimestamp = activity.Events.First().Timestamp.ToEpochMicroseconds(); StringBuilder ipInformation = new StringBuilder(); if (!string.IsNullOrEmpty(exporter.LocalEndpoint.Ipv4)) { ipInformation.Append($@",""ipv4"":""{exporter.LocalEndpoint.Ipv4}"""); } if (!string.IsNullOrEmpty(exporter.LocalEndpoint.Ipv6)) { ipInformation.Append($@",""ipv6"":""{exporter.LocalEndpoint.Ipv6}"""); } var traceId = useShortTraceIds ? TraceId.Substring(TraceId.Length - 16, 16) : TraceId; Assert.Equal( $@"[{{""traceId"":""{traceId}"",""name"":""Name"",""parentId"":""{ZipkinActivityConversionExtensions.EncodeSpanId(activity.ParentSpanId)}"",""id"":""{ZipkinActivityConversionExtensions.EncodeSpanId(context.SpanId)}"",""kind"":""CLIENT"",""timestamp"":{timestamp},""duration"":60000000,""localEndpoint"":{{""serviceName"":""OpenTelemetry Exporter""{ipInformation}}},""annotations"":[{{""timestamp"":{eventTimestamp},""value"":""Event1""}},{{""timestamp"":{eventTimestamp},""value"":""Event2""}}],""tags"":{{""stringKey"":""value"",""longKey"":""1"",""longKey2"":""1"",""doubleKey"":""1"",""doubleKey2"":""1"",""boolKey"":""True"",""library.name"":""CreateTestActivity""}}}}]", Responses[requestId]); } internal static Activity CreateTestActivity( bool setAttributes = true, Dictionary<string, object> additionalAttributes = null, bool addEvents = true, bool addLinks = true, Resource resource = null, ActivityKind kind = ActivityKind.Client) { var startTimestamp = DateTime.UtcNow; var endTimestamp = startTimestamp.AddSeconds(60); var eventTimestamp = DateTime.UtcNow; var traceId = ActivityTraceId.CreateFromString("e8ea7e9ac72de94e91fabc613f9686b2".AsSpan()); var parentSpanId = ActivitySpanId.CreateFromBytes(new byte[] { 12, 23, 34, 45, 56, 67, 78, 89 }); var attributes = new Dictionary<string, object> { { "stringKey", "value" }, { "longKey", 1L }, { "longKey2", 1 }, { "doubleKey", 1D }, { "doubleKey2", 1F }, { "boolKey", true }, }; if (additionalAttributes != null) { foreach (var attribute in additionalAttributes) { attributes.Add(attribute.Key, attribute.Value); } } var events = new List<ActivityEvent> { new ActivityEvent( "Event1", eventTimestamp, new ActivityTagsCollection(new Dictionary<string, object> { { "key", "value" }, })), new ActivityEvent( "Event2", eventTimestamp, new ActivityTagsCollection(new Dictionary<string, object> { { "key", "value" }, })), }; var linkedSpanId = ActivitySpanId.CreateFromString("888915b6286b9c41".AsSpan()); var activitySource = new 
ActivitySource(nameof(CreateTestActivity)); var tags = setAttributes ? attributes.Select(kvp => new KeyValuePair<string, object>(kvp.Key, kvp.Value.ToString())) : null; var links = addLinks ? new[] { new ActivityLink(new ActivityContext( traceId, linkedSpanId, ActivityTraceFlags.Recorded)), } : null; var activity = activitySource.StartActivity( "Name", kind, parentContext: new ActivityContext(traceId, parentSpanId, ActivityTraceFlags.Recorded), tags, links, startTime: startTimestamp); if (addEvents) { foreach (var evnt in events) { activity.AddEvent(evnt); } } activity.SetEndTime(endTimestamp); activity.Stop(); return activity; } } }
1
16,656
Please check my thoughts here... I changed the test not to `ToString()` attribute values. This was important for testing cases where `net.peer.port` was either an int or a string, but I was unsure whether Zipkin supported non-string attributes.
open-telemetry-opentelemetry-dotnet
.cs
@@ -335,6 +335,8 @@ public class Camera extends Plugin {
       returnBase64(call, exif, bitmapOutputStream);
     } else if (settings.getResultType() == CameraResultType.URI) {
       returnFileURI(call, exif, bitmap, u, bitmapOutputStream);
+    } else if (settings.getResultType() == CameraResultType.BASE64NOMETADATA) {
+      returnBase64NoMetadata(call, exif, bitmapOutputStream);
     } else {
       call.reject(INVALID_RESULT_TYPE_ERROR);
     }
1
package com.getcapacitor.plugin; import android.Manifest; import android.content.Intent; import android.content.pm.PackageManager; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.net.Uri; import android.os.Bundle; import android.os.Environment; import android.provider.MediaStore; import android.support.v4.content.FileProvider; import android.util.Base64; import android.util.Log; import com.getcapacitor.Dialogs; import com.getcapacitor.FileUtils; import com.getcapacitor.JSObject; import com.getcapacitor.NativePlugin; import com.getcapacitor.Plugin; import com.getcapacitor.PluginCall; import com.getcapacitor.PluginMethod; import com.getcapacitor.PluginRequestCodes; import com.getcapacitor.plugin.camera.CameraResultType; import com.getcapacitor.plugin.camera.CameraSettings; import com.getcapacitor.plugin.camera.CameraSource; import com.getcapacitor.plugin.camera.CameraUtils; import com.getcapacitor.plugin.camera.ExifWrapper; import com.getcapacitor.plugin.camera.ImageUtils; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.text.SimpleDateFormat; import java.util.Date; /** * The Camera plugin makes it easy to take a photo or have the user select a photo * from their albums. * * On Android, this plugin sends an intent that opens the stock Camera app. * * Adapted from https://developer.android.com/training/camera/photobasics.html */ @NativePlugin( requestCodes={Camera.REQUEST_IMAGE_CAPTURE, Camera.REQUEST_IMAGE_PICK, Camera.REQUEST_IMAGE_EDIT} ) public class Camera extends Plugin { // Request codes static final int REQUEST_IMAGE_CAPTURE = PluginRequestCodes.CAMERA_IMAGE_CAPTURE; static final int REQUEST_IMAGE_PICK = PluginRequestCodes.CAMERA_IMAGE_PICK; static final int REQUEST_IMAGE_EDIT = PluginRequestCodes.CAMERA_IMAGE_EDIT; // Message constants private static final String INVALID_RESULT_TYPE_ERROR = "Invalid resultType option"; private static final String PERMISSION_DENIED_ERROR = "Unable to access camera, user denied permission request"; private static final String NO_CAMERA_ERROR = "Device doesn't have a camera available"; private static final String NO_CAMERA_ACTIVITY_ERROR = "Unable to resolve camera activity"; private static final String IMAGE_FILE_SAVE_ERROR = "Unable to create photo on disk"; private static final String IMAGE_PROCESS_NO_FILE_ERROR = "Unable to process image, file not found on disk"; private static final String UNABLE_TO_PROCESS_IMAGE = "Unable to process image"; private static final String IMAGE_EDIT_ERROR = "Unable to edit image"; private String imageFileSavePath; private Uri imageFileUri; private boolean isEdited = false; private CameraSettings settings = new CameraSettings(); @PluginMethod() public void getPhoto(PluginCall call) { isEdited = false; saveCall(call); settings = getSettings(call); doShow(call); } private void doShow(PluginCall call) { switch (settings.getSource()) { case PROMPT: showPrompt(call); break; case CAMERA: showCamera(call); break; case PHOTOS: showPhotos(call); break; default: showPrompt(call); break; } } private void showPrompt(final PluginCall call) { if (checkPermissions (call)) { // We have all necessary permissions, open the camera JSObject fromPhotos = new JSObject(); fromPhotos.put("title", "From Photos"); JSObject takePicture = new JSObject(); takePicture.put("title", "Take Picture"); Object[] options = new Object[] 
{ fromPhotos, takePicture }; Dialogs.actions(getActivity(), options, new Dialogs.OnSelectListener() { @Override public void onSelect(int index) { if (index == 0) { openPhotos(call); } else if (index == 1) { openCamera(call); } } }); } } private void showCamera(final PluginCall call) { if (checkPermissions (call)) { if (!getActivity().getPackageManager().hasSystemFeature(PackageManager.FEATURE_CAMERA)) { call.error(NO_CAMERA_ERROR); return; } openCamera(call); } } private void showPhotos(final PluginCall call) { openPhotos(call); } private boolean checkPermissions(PluginCall call) { // If we want to save to the gallery, we need two permissions if(settings.isSaveToGallery() && !(hasPermission(Manifest.permission.CAMERA) && hasPermission(Manifest.permission.WRITE_EXTERNAL_STORAGE))) { pluginRequestPermissions(new String[] { Manifest.permission.CAMERA, Manifest.permission.WRITE_EXTERNAL_STORAGE, Manifest.permission.READ_EXTERNAL_STORAGE }, REQUEST_IMAGE_CAPTURE); return false; } // If we don't need to save to the gallery, we can just ask for camera permissions else if(!hasPermission(Manifest.permission.CAMERA)) { pluginRequestPermission(Manifest.permission.CAMERA, REQUEST_IMAGE_CAPTURE); return false; } return true; } private CameraSettings getSettings(PluginCall call) { CameraSettings settings = new CameraSettings(); settings.setResultType(getResultType(call.getString("resultType"))); settings.setSaveToGallery(call.getBoolean("saveToGallery", CameraSettings.DEFAULT_SAVE_IMAGE_TO_GALLERY)); settings.setAllowEditing(call.getBoolean("allowEditing", false)); settings.setQuality(call.getInt("quality", CameraSettings.DEFAULT_QUALITY)); settings.setWidth(call.getInt("width", 0)); settings.setHeight(call.getInt("height", 0)); settings.setShouldResize(settings.getWidth() > 0 || settings.getHeight() > 0); settings.setShouldCorrectOrientation(call.getBoolean("correctOrientation", CameraSettings.DEFAULT_CORRECT_ORIENTATION)); try { settings.setSource(CameraSource.valueOf(call.getString("source", CameraSource.PROMPT.getSource()))); } catch (IllegalArgumentException ex) { settings.setSource(CameraSource.PROMPT); } return settings; } private CameraResultType getResultType(String resultType) { if (resultType == null) { return null; } try { return CameraResultType.valueOf(resultType.toUpperCase()); } catch (IllegalArgumentException ex) { Log.d(getLogTag(), "Invalid result type \"" + resultType + "\", defaulting to base64"); return CameraResultType.BASE64; } } public void openCamera(final PluginCall call) { boolean saveToGallery = call.getBoolean("saveToGallery", CameraSettings.DEFAULT_SAVE_IMAGE_TO_GALLERY); Intent takePictureIntent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE); if (takePictureIntent.resolveActivity(getActivity().getPackageManager()) != null) { // If we will be saving the photo, send the target file along try { String appId = getAppId(); File photoFile = CameraUtils.createImageFile(getActivity(), saveToGallery); imageFileSavePath = photoFile.getAbsolutePath(); // TODO: Verify provider config exists imageFileUri = FileProvider.getUriForFile(getActivity(), appId + ".fileprovider", photoFile); takePictureIntent.putExtra(MediaStore.EXTRA_OUTPUT, imageFileUri); } catch (Exception ex) { call.error(IMAGE_FILE_SAVE_ERROR, ex); return; } startActivityForResult(call, takePictureIntent, REQUEST_IMAGE_CAPTURE); } else { call.error(NO_CAMERA_ACTIVITY_ERROR); } } public void openPhotos(final PluginCall call) { Intent intent = new Intent(Intent.ACTION_PICK); intent.setType("image/*"); 
startActivityForResult(call, intent, REQUEST_IMAGE_PICK); } public void processCameraImage(PluginCall call, Intent data) { boolean saveToGallery = call.getBoolean("saveToGallery", CameraSettings.DEFAULT_SAVE_IMAGE_TO_GALLERY); CameraResultType resultType = getResultType(call.getString("resultType")); if(imageFileSavePath == null) { call.error(IMAGE_PROCESS_NO_FILE_ERROR); return; } if (saveToGallery) { Intent mediaScanIntent = new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE); File f = new File(imageFileSavePath); Uri contentUri = Uri.fromFile(f); mediaScanIntent.setData(contentUri); getActivity().sendBroadcast(mediaScanIntent); } // Load the image as a Bitmap File f = new File(imageFileSavePath); BitmapFactory.Options bmOptions = new BitmapFactory.Options(); Uri contentUri = Uri.fromFile(f); Bitmap bitmap = BitmapFactory.decodeFile(imageFileSavePath, bmOptions); if (bitmap == null) { call.error("User cancelled photos app"); return; } returnResult(call, bitmap, contentUri); } public void processPickedImage(PluginCall call, Intent data) { if (data == null) { call.error("No image picked"); return; } Uri u = data.getData(); InputStream imageStream = null; try { imageStream = getActivity().getContentResolver().openInputStream(u); Bitmap bitmap = BitmapFactory.decodeStream(imageStream); if (bitmap == null) { call.reject("Unable to process bitmap"); return; } returnResult(call, bitmap, u); } catch (OutOfMemoryError err) { call.error("Out of memory"); } catch (FileNotFoundException ex) { call.error("No such image found", ex); } finally { if (imageStream != null) { try { imageStream.close(); } catch (IOException e) { Log.e(getLogTag(), UNABLE_TO_PROCESS_IMAGE, e); } } } } /** * Save the modified image we've created to a temporary location, so we can * return a URI to it later * @param bitmap * @param contentUri * @param is * @return * @throws IOException */ private Uri saveTemporaryImage(Bitmap bitmap, Uri contentUri, InputStream is) throws IOException { String filename = contentUri.getLastPathSegment(); if (!filename.contains(".jpg") && !filename.contains(".jpeg")) { filename += "." + (new java.util.Date()).getTime() + ".jpeg"; } File cacheDir = getActivity().getCacheDir(); File outFile = new File(cacheDir, filename); FileOutputStream fos = new FileOutputStream(outFile); byte[] buffer = new byte[1024]; int len; while ((len = is.read(buffer)) != -1) { fos.write(buffer, 0, len); } fos.close(); return Uri.fromFile(outFile); } /** * After processing the image, return the final result back to the caller. 
* @param call * @param bitmap * @param u */ private void returnResult(PluginCall call, Bitmap bitmap, Uri u) { try { bitmap = prepareBitmap(bitmap, u); } catch (IOException e) { call.reject(UNABLE_TO_PROCESS_IMAGE); return; } ExifWrapper exif = ImageUtils.getExifData(getContext(), bitmap, u); // Compress the final image and prepare for output to client ByteArrayOutputStream bitmapOutputStream = new ByteArrayOutputStream(); bitmap.compress(Bitmap.CompressFormat.JPEG, settings.getQuality(), bitmapOutputStream); if (settings.isAllowEditing() && !isEdited) { editImage(call, u); return; } if (settings.getResultType() == CameraResultType.BASE64) { returnBase64(call, exif, bitmapOutputStream); } else if (settings.getResultType() == CameraResultType.URI) { returnFileURI(call, exif, bitmap, u, bitmapOutputStream); } else { call.reject(INVALID_RESULT_TYPE_ERROR); } // Result returned, clear stored paths imageFileSavePath = null; imageFileUri = null; } private void returnFileURI(PluginCall call, ExifWrapper exif, Bitmap bitmap, Uri u, ByteArrayOutputStream bitmapOutputStream) { ByteArrayInputStream bis = null; try { bis = new ByteArrayInputStream(bitmapOutputStream.toByteArray()); Uri newUri = saveTemporaryImage(bitmap, u, bis); JSObject ret = new JSObject(); ret.put("exif", exif.toJson()); ret.put("path", newUri.toString()); ret.put("webPath", FileUtils.getPortablePath(getContext(), bridge.getLocalUrl(), newUri)); call.resolve(ret); } catch (IOException ex) { call.reject(UNABLE_TO_PROCESS_IMAGE, ex); } finally { if (bis != null) { try { bis.close(); } catch (IOException e) { Log.e(getLogTag(), UNABLE_TO_PROCESS_IMAGE, e); } } } } /** * Apply our standard processing of the bitmap, returning a new one and * recycling the old one in the process * @param bitmap * @param imageUri * @return */ private Bitmap prepareBitmap(Bitmap bitmap, Uri imageUri) throws IOException { if (settings.isShouldCorrectOrientation()) { final Bitmap newBitmap = ImageUtils.correctOrientation(getContext(), bitmap, imageUri); bitmap = replaceBitmap(bitmap, newBitmap); } if (settings.isShouldResize()) { final Bitmap newBitmap = ImageUtils.resize(bitmap, settings.getWidth(), settings.getHeight()); bitmap = replaceBitmap(bitmap, newBitmap); } return bitmap; } private Bitmap replaceBitmap(Bitmap bitmap, final Bitmap newBitmap) { if (bitmap != newBitmap) { bitmap.recycle(); } bitmap = newBitmap; return bitmap; } private void returnBase64(PluginCall call, ExifWrapper exif, ByteArrayOutputStream bitmapOutputStream) { byte[] byteArray = bitmapOutputStream.toByteArray(); String encoded = Base64.encodeToString(byteArray, Base64.DEFAULT); JSObject data = new JSObject(); data.put("base64Data", "data:image/jpeg;base64," + encoded); data.put("exif", exif.toJson()); call.resolve(data); } @Override protected void handleRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) { super.handleRequestPermissionsResult(requestCode, permissions, grantResults); Log.d(getLogTag(),"handling request perms result"); if (getSavedCall() == null) { Log.d(getLogTag(),"No stored plugin call for permissions request result"); return; } PluginCall savedCall = getSavedCall(); for (int i = 0; i < grantResults.length; i++) { int result = grantResults[i]; String perm = permissions[i]; if(result == PackageManager.PERMISSION_DENIED) { Log.d(getLogTag(), "User denied camera permission: " + perm); savedCall.error(PERMISSION_DENIED_ERROR); return; } } if (requestCode == REQUEST_IMAGE_CAPTURE) { doShow(savedCall); } } @Override protected void 
handleOnActivityResult(int requestCode, int resultCode, Intent data) { super.handleOnActivityResult(requestCode, resultCode, data); PluginCall savedCall = getSavedCall(); if (savedCall == null) { return; } if (requestCode == REQUEST_IMAGE_CAPTURE) { processCameraImage(savedCall, data); } else if (requestCode == REQUEST_IMAGE_PICK) { processPickedImage(savedCall, data); } else if (requestCode == REQUEST_IMAGE_EDIT) { isEdited = true; processPickedImage(savedCall, data); } } private void editImage(PluginCall call, Uri uri) { try { Uri origPhotoUri = uri; if (imageFileUri != null) { origPhotoUri = imageFileUri; } Intent editIntent = new Intent(Intent.ACTION_EDIT); editIntent.setDataAndType(origPhotoUri, "image/*"); File editedFile = CameraUtils.createImageFile(getActivity(), false); Uri editedUri = Uri.fromFile(editedFile); editIntent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION); editIntent.addFlags(Intent.FLAG_GRANT_WRITE_URI_PERMISSION); editIntent.putExtra(MediaStore.EXTRA_OUTPUT, editedUri); startActivityForResult(call, editIntent, REQUEST_IMAGE_EDIT); } catch (Exception ex) { call.error(IMAGE_EDIT_ERROR, ex); } } @Override protected Bundle saveInstanceState() { Bundle bundle = super.saveInstanceState(); bundle.putString("cameraImageFileSavePath", imageFileSavePath); return bundle; } @Override protected void restoreState(Bundle state) { String storedImageFileSavePath = state.getString("cameraImageFileSavePath"); if (storedImageFileSavePath != null) { imageFileSavePath = storedImageFileSavePath; } } }
1
7,738
Looks like you made changes to the `CameraResultType` class but didn't commit them. You also have to make the corresponding changes to the types in @capacitor/core.
ionic-team-capacitor
js
@@ -83,7 +83,7 @@ func buildImportParams() *ovfimportparams.OVFImportParams { ShieldedIntegrityMonitoring: *shieldedIntegrityMonitoring, ShieldedSecureBoot: *shieldedSecureBoot, ShieldedVtpm: *shieldedVtpm, Tags: *tags, Zone: *zoneFlag, BootDiskKmskey: *bootDiskKmskey, BootDiskKmsKeyring: *bootDiskKmsKeyring, BootDiskKmsLocation: *bootDiskKmsLocation, - BootDiskKmsProject: *bootDiskKmsProject, Timeout: *timeout, Project: *project, + BootDiskKmsProject: *bootDiskKmsProject, Timeout: *timeout, Project: project, ScratchBucketGcsPath: *scratchBucketGcsPath, Oauth: *oauth, Ce: *ce, GcsLogsDisabled: *gcsLogsDisabled, CloudLogsDisabled: *cloudLogsDisabled, StdoutLogsDisabled: *stdoutLogsDisabled, NodeAffinityLabelsFlag: nodeAffinityLabelsFlag,
1
// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // GCE OVF import tool package main import ( "flag" "fmt" "os" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/flags" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/logging/service" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_import_params" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_importer" "github.com/GoogleCloudPlatform/compute-image-tools/daisy" ) var ( instanceNames = flag.String(ovfimportparams.InstanceNameFlagKey, "", "VM Instance names to be created, separated by commas.") clientID = flag.String(ovfimportparams.ClientIDFlagKey, "", "Identifies the client of the importer, e.g. `gcloud` or `pantheon`") ovfOvaGcsPath = flag.String(ovfimportparams.OvfGcsPathFlagKey, "", " Google Cloud Storage URI of the OVF or OVA file to import. For example: gs://my-bucket/my-vm.ovf.") noGuestEnvironment = flag.Bool("no-guest-environment", false, "Google Guest Environment will not be installed on the image.") canIPForward = flag.Bool("can-ip-forward", false, "If provided, allows the instances to send and receive packets with non-matching destination or source IP addresses.") deletionProtection = flag.Bool("deletion-protection", false, "Enables deletion protection for the instance.") description = flag.String("description", "", "Specifies a textual description of the instances.") labels = flag.String("labels", "", "List of label KEY=VALUE pairs to add. Keys must start with a lowercase character and contain only hyphens (-), underscores (_), lowercase characters, and numbers. Values must contain only hyphens (-), underscores (_), lowercase characters, and numbers.") machineType = flag.String("machine-type", "", "Specifies the machine type used for the instances. To get a list of available machine types, run 'gcloud compute machine-types list'. If unspecified, the default type is n1-standard-1.") network = flag.String("network", "", "Name of the network in your project to use for the image import. The network must have access to Google Cloud Storage. If not specified, the network named default is used. If -subnet is also specified subnet must be a subnetwork of network specified by -network.") networkTier = flag.String("network-tier", "", "Specifies the network tier that will be used to configure the instance. NETWORK_TIER must be one of: PREMIUM, STANDARD. The default value is PREMIUM.") subnet = flag.String("subnet", "", "Name of the subnetwork in your project to use for the image import. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. Zone should be specified if this field is specified.") privateNetworkIP = flag.String("private-network-ip", "", "Specifies the RFC1918 IP to assign to the instance. 
The IP should be in the subnet or legacy network IP range.") noExternalIP = flag.Bool("no-external-ip", false, "Specifies that VPC into which instances is being imported doesn't allow external IPs.") noRestartOnFailure = flag.Bool("no-restart-on-failure", false, "the instance will not be restarted if it’s terminated by Compute Engine. This does not affect terminations performed by the user.") osID = flag.String("os", "", "Specifies the OS of the image being imported. OS must be one of: centos-6, centos-7, debian-8, debian-9, rhel-6, rhel-6-byol, rhel-7, rhel-7-byol, ubuntu-1404, ubuntu-1604, windows-10-byol, windows-2008r2, windows-2008r2-byol, windows-2012, windows-2012-byol, windows-2012r2, windows-2012r2-byol, windows-2016, windows-2016-byol, windows-7-byol, windows-2019, windows-2019-byol, windows-8-1-x64-byol.") shieldedIntegrityMonitoring = flag.Bool("shielded-integrity-monitoring", false, "Enables monitoring and attestation of the boot integrity of the instance. The attestation is performed against the integrity policy baseline. This baseline is initially derived from the implicitly trusted boot image when the instance is created. This baseline can be updated by using --shielded-vm-learn-integrity-policy.") shieldedSecureBoot = flag.Bool("shielded-secure-boot", false, "The instance will boot with secure boot enabled.") shieldedVtpm = flag.Bool("shielded-vtpm", false, "The instance will boot with the TPM (Trusted Platform Module) enabled. A TPM is a hardware module that can be used for different security operations such as remote attestation, encryption and sealing of keys.") tags = flag.String("tags", "", "Specifies a list of tags to apply to the instance. These tags allow network firewall rules and routes to be applied to specified VM instances. See `gcloud compute firewall-rules create` for more details.") zoneFlag = flag.String("zone", "", "Zone of the image to import. The zone in which to do the work of importing the image. Overrides the default compute/zone property value for this command invocation") bootDiskKmskey = flag.String("boot-disk-kms-key", "", "The Cloud KMS (Key Management Service) cryptokey that will be used to protect the disk. The arguments in this group can be used to specify the attributes of this resource. ID of the key or fully qualified identifier for the key. This flag must be specified if any of the other arguments in this group are specified.") bootDiskKmsKeyring = flag.String("boot-disk-kms-keyring", "", "The KMS keyring of the key.") bootDiskKmsLocation = flag.String("boot-disk-kms-location", "", "The Cloud location for the key.") bootDiskKmsProject = flag.String("boot-disk-kms-project", "", "The Cloud project for the key.") timeout = flag.String("timeout", "", "Maximum time a build can last before it is failed as TIMEOUT. For example, specifying 2h will fail the process after 2 hours. 
See `gcloud topic datetimes` for information on duration formats") project = flag.String("project", "", "project to run in, overrides what is set in workflow") scratchBucketGcsPath = flag.String("scratch-bucket-gcs-path", "", "GCS scratch bucket to use, overrides what is set in workflow") oauth = flag.String("oauth", "", "path to oauth json file, overrides what is set in workflow") ce = flag.String("compute-endpoint-override", "", "API endpoint to override default") gcsLogsDisabled = flag.Bool("disable-gcs-logging", false, "do not stream logs to GCS") cloudLogsDisabled = flag.Bool("disable-cloud-logging", false, "do not stream logs to Cloud Logging") stdoutLogsDisabled = flag.Bool("disable-stdout-logging", false, "do not display individual workflow logs on stdout") releaseTrack = flag.String("release-track", ovfimporter.GA, fmt.Sprintf("Release track of OVF import. One of: %s, %s or %s. Impacts which compute API release track is used by the import tool.", ovfimporter.Alpha, ovfimporter.Beta, ovfimporter.GA)) nodeAffinityLabelsFlag flags.StringArrayFlag currentExecutablePath string ) func init() { currentExecutablePath = string(os.Args[0]) flag.Var(&nodeAffinityLabelsFlag, "node-affinity-label", "Node affinity label used to determine sole tenant node to schedule this instance on. Label is of the format: <key>,<operator>,<value>,<value2>... where <operator> can be one of: IN, NOT. For example: workload,IN,prod,test is a label with key 'workload' and values 'prod' and 'test'. This flag can be specified multiple times for multiple labels.") } func buildImportParams() *ovfimportparams.OVFImportParams { flag.Parse() return &ovfimportparams.OVFImportParams{InstanceNames: *instanceNames, ClientID: *clientID, OvfOvaGcsPath: *ovfOvaGcsPath, NoGuestEnvironment: *noGuestEnvironment, CanIPForward: *canIPForward, DeletionProtection: *deletionProtection, Description: *description, Labels: *labels, MachineType: *machineType, Network: *network, NetworkTier: *networkTier, Subnet: *subnet, PrivateNetworkIP: *privateNetworkIP, NoExternalIP: *noExternalIP, NoRestartOnFailure: *noRestartOnFailure, OsID: *osID, ShieldedIntegrityMonitoring: *shieldedIntegrityMonitoring, ShieldedSecureBoot: *shieldedSecureBoot, ShieldedVtpm: *shieldedVtpm, Tags: *tags, Zone: *zoneFlag, BootDiskKmskey: *bootDiskKmskey, BootDiskKmsKeyring: *bootDiskKmsKeyring, BootDiskKmsLocation: *bootDiskKmsLocation, BootDiskKmsProject: *bootDiskKmsProject, Timeout: *timeout, Project: *project, ScratchBucketGcsPath: *scratchBucketGcsPath, Oauth: *oauth, Ce: *ce, GcsLogsDisabled: *gcsLogsDisabled, CloudLogsDisabled: *cloudLogsDisabled, StdoutLogsDisabled: *stdoutLogsDisabled, NodeAffinityLabelsFlag: nodeAffinityLabelsFlag, CurrentExecutablePath: currentExecutablePath, ReleaseTrack: *releaseTrack, } } func runImport() (*daisy.Workflow, error) { var ovfImporter *ovfimporter.OVFImporter var err error defer func() { if ovfImporter != nil { ovfImporter.CleanUp() } }() if ovfImporter, err = ovfimporter.NewOVFImporter(buildImportParams()); err != nil { return nil, err } return ovfImporter.Import() } func main() { flag.Parse() paramLog := service.InputParams{ InstanceImportParams: &service.InstanceImportParams{ CommonParams: &service.CommonParams{ ClientID: *clientID, Network: *network, Subnet: *subnet, Zone: *zoneFlag, Timeout: *timeout, Project: *project, ObfuscatedProject: service.Hash(*project), Labels: *labels, ScratchBucketGcsPath: *scratchBucketGcsPath, Oauth: *oauth, ComputeEndpointOverride: *ce, DisableGcsLogging: *gcsLogsDisabled, 
DisableCloudLogging: *cloudLogsDisabled, DisableStdoutLogging: *stdoutLogsDisabled, }, InstanceName: *instanceNames, OvfGcsPath: *ovfOvaGcsPath, CanIPForward: *canIPForward, DeletionProtection: *deletionProtection, MachineType: *machineType, NetworkInterface: *network, NetworkTier: *networkTier, PrivateNetworkIP: *privateNetworkIP, NoExternalIP: *noExternalIP, NoRestartOnFailure: *noRestartOnFailure, OS: *osID, ShieldedIntegrityMonitoring: *shieldedIntegrityMonitoring, ShieldedSecureBoot: *shieldedSecureBoot, ShieldedVtpm: *shieldedVtpm, Tags: *tags, HasBootDiskKmsKey: *bootDiskKmskey != "", HasBootDiskKmsKeyring: *bootDiskKmsKeyring != "", HasBootDiskKmsLocation: *bootDiskKmsLocation != "", HasBootDiskKmsProject: *bootDiskKmsProject != "", NoGuestEnvironment: *noGuestEnvironment, NodeAffinityLabel: nodeAffinityLabelsFlag.String(), }, } if err := service.RunWithServerLogging(service.InstanceImportAction, paramLog, project, runImport); err != nil { os.Exit(1) } }
1
9,607
Can you explain the history of the bug a bit, and why this fixes it?
GoogleCloudPlatform-compute-image-tools
go
@@ -0,0 +1,11 @@
+module MentorHelper
+  def mentor_image(mentor)
+    image_tag gravatar_url(mentor.email, size: '300')
+  end
+
+  def mentor_contact_link(mentor)
+    mail_to mentor.email,
+      I18n.t('dashboard.show.contact_your_mentor',
+        mentor_name: mentor.first_name)
+  end
+end
1
1
8,639
What do you think about `mentor_mail_to` or `mentor_mail_to_link` in order to match Rails' `mail_to` method, which is what this calls? I'm not sure about this suggestion...
thoughtbot-upcase
rb
@@ -80,7 +80,7 @@ namespace OpenTelemetry.Exporter { string valueDisplay = string.Empty; StringBuilder tagsBuilder = new StringBuilder(); - for (int i = 0; i < metricPoint.Keys.Length; i++) + for (int i = 0; i < ((metricPoint.Keys == null) ? 0 : metricPoint.Keys.Length); i++) { tagsBuilder.Append(metricPoint.Keys[i]); tagsBuilder.Append(":");
1
// <copyright file="ConsoleMetricExporter.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Globalization; using System.Linq; using System.Text; using OpenTelemetry.Metrics; using OpenTelemetry.Resources; namespace OpenTelemetry.Exporter { public class ConsoleMetricExporter : ConsoleExporter<Metric> { private Resource resource; public ConsoleMetricExporter(ConsoleExporterOptions options) : base(options) { } public override ExportResult Export(in Batch<Metric> batch) { if (this.resource == null) { this.resource = this.ParentProvider.GetResource(); if (this.resource != Resource.Empty) { foreach (var resourceAttribute in this.resource.Attributes) { if (resourceAttribute.Key.Equals("service.name")) { Console.WriteLine("Service.Name" + resourceAttribute.Value); } } } } foreach (var metric in batch) { var msg = new StringBuilder($"\nExport "); msg.Append(metric.Name); if (!string.IsNullOrEmpty(metric.Description)) { msg.Append(' '); msg.Append(metric.Description); } if (!string.IsNullOrEmpty(metric.Unit)) { msg.Append($", Unit: {metric.Unit}"); } if (!string.IsNullOrEmpty(metric.Meter.Name)) { msg.Append($", Meter: {metric.Meter.Name}"); if (!string.IsNullOrEmpty(metric.Meter.Version)) { msg.Append($"/{metric.Meter.Version}"); } } Console.WriteLine(msg.ToString()); foreach (ref var metricPoint in metric.GetMetricPoints()) { string valueDisplay = string.Empty; StringBuilder tagsBuilder = new StringBuilder(); for (int i = 0; i < metricPoint.Keys.Length; i++) { tagsBuilder.Append(metricPoint.Keys[i]); tagsBuilder.Append(":"); tagsBuilder.Append(metricPoint.Values[i]); } var tags = tagsBuilder.ToString(); var metricType = metric.MetricType; if (metricType.IsHistogram()) { var bucketsBuilder = new StringBuilder(); bucketsBuilder.Append($"Sum: {metricPoint.DoubleValue} Count: {metricPoint.LongValue} \n"); for (int i = 0; i < metricPoint.ExplicitBounds.Length + 1; i++) { if (i == 0) { bucketsBuilder.Append("(-Infinity,"); bucketsBuilder.Append(metricPoint.ExplicitBounds[i]); bucketsBuilder.Append("]"); bucketsBuilder.Append(":"); bucketsBuilder.Append(metricPoint.BucketCounts[i]); } else if (i == metricPoint.ExplicitBounds.Length) { bucketsBuilder.Append("("); bucketsBuilder.Append(metricPoint.ExplicitBounds[i - 1]); bucketsBuilder.Append(","); bucketsBuilder.Append("+Infinity]"); bucketsBuilder.Append(":"); bucketsBuilder.Append(metricPoint.BucketCounts[i]); } else { bucketsBuilder.Append("("); bucketsBuilder.Append(metricPoint.ExplicitBounds[i - 1]); bucketsBuilder.Append(","); bucketsBuilder.Append(metricPoint.ExplicitBounds[i]); bucketsBuilder.Append("]"); bucketsBuilder.Append(":"); bucketsBuilder.Append(metricPoint.BucketCounts[i]); } bucketsBuilder.AppendLine(); } valueDisplay = bucketsBuilder.ToString(); } else if (metricType.IsDouble()) { valueDisplay = metricPoint.DoubleValue.ToString(CultureInfo.InvariantCulture); } else if (metricType.IsLong()) { valueDisplay = 
metricPoint.LongValue.ToString(CultureInfo.InvariantCulture); } msg = new StringBuilder(); msg.Append(metricPoint.StartTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ", CultureInfo.InvariantCulture)); msg.Append(", "); msg.Append(metricPoint.EndTime.ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ", CultureInfo.InvariantCulture)); msg.Append("] "); msg.Append(string.Join(";", tags)); msg.Append(' '); msg.Append(metric.MetricType); msg.AppendLine(); msg.Append($"Value: {valueDisplay}"); Console.WriteLine(msg); } } return ExportResult.Success; } } }
1
21,381
Looks like this will make the check in every loop. Consider extracting the null check.
open-telemetry-opentelemetry-dotnet
.cs
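The reviewer's point on the open-telemetry row above is that the patched loop re-evaluates the `metricPoint.Keys == null` guard on every iteration; extracting it lets the guard run once before the loop. The exporter itself is C#, so the following is only a minimal Go sketch of the same shape — the `formatTags` helper and its tag format are illustrative, not part of the OpenTelemetry code.

```go
package main

import (
	"fmt"
	"strings"
)

// formatTags joins parallel key/value slices much like the exporter loop in
// the row above. The nil guard runs once, before the loop, instead of being
// re-evaluated in the loop condition on every iteration.
func formatTags(keys, values []string) string {
	if keys == nil {
		return ""
	}
	var b strings.Builder
	for i := range keys {
		b.WriteString(keys[i])
		b.WriteString(":")
		b.WriteString(values[i])
		b.WriteString(";")
	}
	return b.String()
}

func main() {
	fmt.Println(formatTags([]string{"host", "region"}, []string{"a1", "eu"}))
	fmt.Printf("%q\n", formatTags(nil, nil)) // ""
}
```

The behaviour is unchanged — a nil key slice still yields an empty tag string — but the invariant check no longer sits inside the loop condition.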
@@ -11,6 +11,14 @@ module RSpec::Core expect(Module.private_methods & seg_methods).to eq([]) end + module SharedExampleGroup + describe Registry do + it 'can safely be reset' do + expect { Registry.clear }.to_not raise_error + end + end + end + %w[share_examples_for shared_examples_for shared_examples shared_context].each do |shared_method_name| describe shared_method_name do it "is exposed to the global namespace" do
1
require 'spec_helper' module RSpec::Core describe SharedExampleGroup do ExampleModule = Module.new ExampleClass = Class.new it 'does not add a bunch of private methods to Module' do seg_methods = RSpec::Core::SharedExampleGroup.private_instance_methods expect(Module.private_methods & seg_methods).to eq([]) end %w[share_examples_for shared_examples_for shared_examples shared_context].each do |shared_method_name| describe shared_method_name do it "is exposed to the global namespace" do expect(Kernel).to respond_to(shared_method_name) end it "displays a warning when adding a second shared example group with the same name" do group = ExampleGroup.describe('example group') group.send(shared_method_name, 'some shared group') {} original_declaration = [__FILE__, __LINE__ - 1].join(':') warning = nil Kernel.stub(:warn) { |msg| warning = msg } group.send(shared_method_name, 'some shared group') {} second_declaration = [__FILE__, __LINE__ - 1].join(':') expect(warning).to include('some shared group', original_declaration, second_declaration) end ["name", :name, ExampleModule, ExampleClass].each do |object| type = object.class.name.downcase context "given a #{type}" do it "captures the given #{type} and block in the collection of shared example groups" do implementation = lambda {} send(shared_method_name, object, &implementation) expect(SharedExampleGroup::Registry.shared_example_groups[self][object]).to eq implementation end end end context "given a hash" do it "delegates extend on configuration" do implementation = Proc.new { def bar; 'bar'; end } send(shared_method_name, :foo => :bar, &implementation) a = RSpec.configuration.include_or_extend_modules.first expect(a[0]).to eq(:extend) expect(Class.new.extend(a[1]).new.bar).to eq('bar') expect(a[2]).to eq(:foo => :bar) end end context "given a string and a hash" do it "captures the given string and block in the World's collection of shared example groups" do implementation = lambda {} send(shared_method_name, "name", :foo => :bar, &implementation) expect(SharedExampleGroup::Registry.shared_example_groups[self]["name"]).to eq implementation end it "delegates extend on configuration" do implementation = Proc.new { def bar; 'bar'; end } send(shared_method_name, "name", :foo => :bar, &implementation) a = RSpec.configuration.include_or_extend_modules.first expect(a[0]).to eq(:extend) expect(Class.new.extend(a[1]).new.bar).to eq('bar') expect(a[2]).to eq(:foo => :bar) end end end end end end
1
9,551
Maybe `it "can safely be reset when there are not yet any shared example groups"`? That's the edge case that wasn't working, right?
rspec-rspec-core
rb
@@ -19,7 +19,8 @@ import ( "fmt" "net/http" - "github.com/aws/amazon-ecs-agent/agent/asm/factory" + factory "github.com/aws/amazon-ecs-agent/agent/asm/factory" + ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory" "github.com/aws/amazon-ecs-agent/agent/config" "github.com/aws/amazon-ecs-agent/agent/credentials" "github.com/aws/amazon-ecs-agent/agent/ec2"
1
// +build linux // Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package app import ( "fmt" "net/http" "github.com/aws/amazon-ecs-agent/agent/asm/factory" "github.com/aws/amazon-ecs-agent/agent/config" "github.com/aws/amazon-ecs-agent/agent/credentials" "github.com/aws/amazon-ecs-agent/agent/ec2" "github.com/aws/amazon-ecs-agent/agent/ecscni" "github.com/aws/amazon-ecs-agent/agent/engine" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" "github.com/aws/amazon-ecs-agent/agent/eni/pause" "github.com/aws/amazon-ecs-agent/agent/eni/udevwrapper" "github.com/aws/amazon-ecs-agent/agent/eni/watcher" "github.com/aws/amazon-ecs-agent/agent/statechange" "github.com/aws/amazon-ecs-agent/agent/taskresource" cgroup "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control" "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper" "github.com/cihub/seelog" "github.com/pkg/errors" ) // initPID defines the process identifier for the init process const initPID = 1 // awsVPCCNIPlugins is a list of CNI plugins required by the ECS Agent // to configure the ENI for a task var awsVPCCNIPlugins = []string{ecscni.ECSENIPluginName, ecscni.ECSBridgePluginName, ecscni.ECSIPAMPluginName, } // startWindowsService is not supported on Linux func (agent *ecsAgent) startWindowsService() int { seelog.Error("Windows Services are not supported on Linux") return 1 } // initializeTaskENIDependencies initializes all of the dependencies required by // the Agent to support the 'awsvpc' networking mode. A non nil error is returned // if an error is encountered during this process. An additional boolean flag to // indicate if this error is considered terminal is also returned func (agent *ecsAgent) initializeTaskENIDependencies(state dockerstate.TaskEngineState, taskEngine engine.TaskEngine) (error, bool) { // Check if the Agent process's pid == 1, which means it's running without an init system if agent.os.Getpid() == initPID { // This is a terminal error. Bad things happen with invoking the // the ENI plugin when there's no init process in the pid namesapce. // Specifically, the DHClient processes that are started as children // of the Agent will not be reaped leading to the ENI device // disappearing until the Agent is killed. 
return errors.New("agent is not started with an init system"), true } // Set VPC and Subnet IDs for the instance if err, ok := agent.setVPCSubnet(); err != nil { return err, ok } // Validate that the CNI plugins exist in the expected path and that // they possess the right capabilities if err := agent.verifyCNIPluginsCapabilities(); err != nil { // An error here is terminal as it means that the plugins // do not support the ENI capability return err, true } if agent.cfg.ShouldLoadPauseContainerTarball() { // Load the pause container's image from the 'disk' if _, err := agent.pauseLoader.LoadImage(agent.ctx, agent.cfg, agent.dockerClient); err != nil { if pause.IsNoSuchFileError(err) || pause.UnsupportedPlatform(err) { // If the pause container's image tarball doesn't exist or if the // invocation is done for an unsupported platform, we cannot recover. // Return the error as terminal for these cases return err, true } return err, false } } if err := agent.startUdevWatcher(state, taskEngine.StateChangeEvents()); err != nil { // If udev watcher was not initialized in this run because of the udev socket // file not being available etc, the Agent might be able to retry and succeed // on the next run. Hence, returning a false here for terminal bool return err, false } return nil, false } // setVPCSubnet sets the vpc and subnet ids for the agent by querying the // instance metadata service func (agent *ecsAgent) setVPCSubnet() (error, bool) { mac, err := agent.ec2MetadataClient.PrimaryENIMAC() if err != nil { return fmt.Errorf("unable to get mac address of instance's primary ENI from instance metadata: %v", err), false } vpcID, err := agent.ec2MetadataClient.VPCID(mac) if err != nil { if isInstanceLaunchedInVPC(err) { return fmt.Errorf("unable to get vpc id from instance metadata: %v", err), true } return instanceNotLaunchedInVPCError, false } subnetID, err := agent.ec2MetadataClient.SubnetID(mac) if err != nil { return fmt.Errorf("unable to get subnet id from instance metadata: %v", err), false } agent.vpc = vpcID agent.subnet = subnetID agent.mac = mac return nil, false } // isInstanceLaunchedInVPC returns false when the http status code is set to // 'not found' (404) when querying the vpc id from instance metadata func isInstanceLaunchedInVPC(err error) bool { if metadataErr, ok := err.(*ec2.MetadataError); ok && metadataErr.GetStatusCode() == http.StatusNotFound { return false } return true } // verifyCNIPluginsCapabilities returns an error if there's an error querying // capabilities or if the required capability is absent from the capabilities // of the following plugins: // a. ecs-eni // b. ecs-bridge // c. 
ecs-ipam func (agent *ecsAgent) verifyCNIPluginsCapabilities() error { // Check if we can get capabilities from each plugin for _, plugin := range awsVPCCNIPlugins { capabilities, err := agent.cniClient.Capabilities(plugin) if err != nil { return err } if !contains(capabilities, ecscni.CapabilityAWSVPCNetworkingMode) { return errors.Errorf("plugin '%s' doesn't support the capability: %s", plugin, ecscni.CapabilityAWSVPCNetworkingMode) } } return nil } // startUdevWatcher starts the udev monitor and the watcher for receiving // notifications from the monitor func (agent *ecsAgent) startUdevWatcher(state dockerstate.TaskEngineState, stateChangeEvents chan<- statechange.Event) error { seelog.Debug("Setting up ENI Watcher") udevMonitor, err := udevwrapper.New() if err != nil { return errors.Wrapf(err, "unable to create udev monitor") } // Create Watcher eniWatcher := watcher.New(agent.ctx, agent.mac, udevMonitor, state, stateChangeEvents) if err := eniWatcher.Init(); err != nil { return errors.Wrapf(err, "unable to initialize eni watcher") } go eniWatcher.Start() return nil } func contains(capabilities []string, capability string) bool { for _, cap := range capabilities { if cap == capability { return true } } return false } // initializeResourceFields exists mainly for testing doStart() to use mock Control // object func (agent *ecsAgent) initializeResourceFields(credentialsManager credentials.Manager) { agent.resourceFields = &taskresource.ResourceFields{ Control: cgroup.New(), ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{ IOUtil: ioutilwrapper.NewIOUtil(), ASMClientCreator: factory.NewClientCreator(), CredentialsManager: credentialsManager, }, Ctx: agent.ctx, DockerClient: agent.dockerClient, } } func (agent *ecsAgent) cgroupInit() error { err := agent.resourceFields.Control.Init() // When task CPU and memory limits are enabled, all tasks are placed // under the '/ecs' cgroup root. if err == nil { return nil } if agent.cfg.TaskCPUMemLimit == config.ExplicitlyEnabled { return errors.Wrapf(err, "unable to setup '/ecs' cgroup") } seelog.Warnf("Disabling TaskCPUMemLimit because agent is unabled to setup '/ecs' cgroup: %v", err) agent.cfg.TaskCPUMemLimit = config.ExplicitlyDisabled return nil }
1
21,013
naming: please use `asmfactory`
aws-amazon-ecs-agent
go
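The `asmfactory` naming request on the row above is about giving each imported factory package an explicit, unambiguous alias so the two cannot be confused at call sites. Since the agent's asm/factory and ssm/factory packages aren't available here, this sketch shows the same aliasing style with two standard-library packages that happen to share a name; the aliases are the point, not the packages.

```go
package main

import (
	cryptorand "crypto/rand"
	"fmt"
	mathrand "math/rand"
)

func main() {
	// With package-specific aliases, every call site states which "rand" it
	// means; the reviewer is asking for the same clarity between the agent's
	// asm/factory and ssm/factory packages.
	buf := make([]byte, 4)
	if _, err := cryptorand.Read(buf); err != nil {
		panic(err)
	}
	fmt.Println(buf, mathrand.Intn(10))
}
```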
@@ -54,9 +54,12 @@ func (c *Client) buildCommand(target *core.BuildTarget, inputRoot *pb.Directory, } // We can't predict what variables like this should be so we sneakily bung something on // the front of the command. It'd be nicer if there were a better way though... - const commandPrefix = "export TMP_DIR=\"`pwd`\" && " + var commandPrefix = "export TMP_DIR=\"`pwd`\" && " // TODO(peterebden): Remove this nonsense once API v2.1 is released. files, dirs := outputs(target) + if len(target.Outputs()) == 1 { // $OUT is relative when running remotely; make it absolute + commandPrefix += "export OUT=\"$TMP_DIR/$OUT\" && " + } cmd, err := core.ReplaceSequences(c.state, target, c.getCommand(target)) return &pb.Command{ Platform: c.platform,
1
package remote import ( "encoding/hex" "fmt" "io/ioutil" "os" "path" "runtime" "sort" "strings" pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" "github.com/golang/protobuf/ptypes" "github.com/thought-machine/please/src/core" "github.com/thought-machine/please/src/fs" ) // uploadAction uploads a build action for a target and returns its digest. func (c *Client) uploadAction(target *core.BuildTarget, uploadInputRoot, isTest bool) (*pb.Command, *pb.Digest, error) { var command *pb.Command var digest *pb.Digest err := c.uploadBlobs(func(ch chan<- *blob) error { defer close(ch) inputRoot, err := c.buildInputRoot(target, uploadInputRoot, isTest) if err != nil { return err } inputRootDigest, inputRootMsg := c.digestMessageContents(inputRoot) ch <- &blob{Data: inputRootMsg, Digest: inputRootDigest} command, err = c.buildCommand(target, inputRoot, isTest) if err != nil { return err } commandDigest, commandMsg := c.digestMessageContents(command) ch <- &blob{Data: commandMsg, Digest: commandDigest} actionDigest, actionMsg := c.digestMessageContents(&pb.Action{ CommandDigest: commandDigest, InputRootDigest: inputRootDigest, Timeout: ptypes.DurationProto(timeout(target, isTest)), }) digest = actionDigest ch <- &blob{Data: actionMsg, Digest: actionDigest} return nil }) return command, digest, err } // buildCommand builds the command for a single target. func (c *Client) buildCommand(target *core.BuildTarget, inputRoot *pb.Directory, isTest bool) (*pb.Command, error) { if isTest { return c.buildTestCommand(target) } // We can't predict what variables like this should be so we sneakily bung something on // the front of the command. It'd be nicer if there were a better way though... const commandPrefix = "export TMP_DIR=\"`pwd`\" && " // TODO(peterebden): Remove this nonsense once API v2.1 is released. files, dirs := outputs(target) cmd, err := core.ReplaceSequences(c.state, target, c.getCommand(target)) return &pb.Command{ Platform: c.platform, // We have to run everything through bash since our commands are arbitrary. // Unfortunately we can't just say "bash", we need an absolute path which is // a bit weird since it assumes that our absolute path is the same as the // remote one (which is probably OK on the same OS, but not between say Linux and // FreeBSD where bash is not idiomatically in the same place). Arguments: []string{ c.bashPath, "--noprofile", "--norc", "-u", "-o", "pipefail", "-c", commandPrefix + cmd, }, EnvironmentVariables: buildEnv(c.stampedBuildEnvironment(target, inputRoot)), OutputFiles: files, OutputDirectories: dirs, OutputPaths: append(files, dirs...), }, err } // stampedBuildEnvironment returns a build environment, optionally with a stamp if the // target requires one. func (c *Client) stampedBuildEnvironment(target *core.BuildTarget, inputRoot *pb.Directory) []string { if !target.Stamp { return core.BuildEnvironment(c.state, target, ".") } // We generate the stamp ourselves from the input root. // TODO(peterebden): it should include the target properties too... stamp := c.sum(mustMarshal(inputRoot)) return core.StampedBuildEnvironment(c.state, target, stamp, ".") } // buildTestCommand builds a command for a target when testing. func (c *Client) buildTestCommand(target *core.BuildTarget) (*pb.Command, error) { // TODO(peterebden): Remove all this nonsense once API v2.1 is released. 
files := make([]string, 0, 2) dirs := []string{} if target.NeedCoverage(c.state) { files = append(files, core.CoverageFile) } if target.HasLabel(core.TestResultsDirLabel) { dirs = []string{core.TestResultsFile} } else { files = append(files, core.TestResultsFile) } const commandPrefix = "export TMP_DIR=\"`pwd`\" TEST_DIR=\"`pwd`\" && " cmd, err := core.ReplaceTestSequences(c.state, target, target.GetTestCommand(c.state)) return &pb.Command{ Platform: &pb.Platform{ Properties: []*pb.Platform_Property{ { Name: "OSFamily", Value: translateOS(target.Subrepo), }, }, }, Arguments: []string{ c.bashPath, "--noprofile", "--norc", "-u", "-o", "pipefail", "-c", commandPrefix + cmd, }, EnvironmentVariables: buildEnv(core.TestEnvironment(c.state, target, "")), OutputFiles: files, OutputDirectories: dirs, OutputPaths: append(files, dirs...), }, err } // getCommand returns the appropriate command to use for a target. func (c *Client) getCommand(target *core.BuildTarget) string { if target.IsRemoteFile { // TODO(peterebden): we should handle this using the Remote Fetch API once that's available. urls := make([]string, len(target.Sources)) for i, s := range target.Sources { urls[i] = "curl -fsSLo $OUT " + s.String() } cmd := strings.Join(urls, " || ") if target.IsBinary { return "(" + cmd + ") && chmod +x $OUT" } return cmd } cmd := target.GetCommand(c.state) if cmd == "" { cmd = "true" } if target.IsBinary && len(target.Outputs()) > 0 { return "( " + cmd + " ) && chmod +x $OUTS" } return cmd } // digestDir calculates the digest for a directory. // It returns Directory protos for the directory and all its (recursive) children. func (c *Client) digestDir(dir string, children []*pb.Directory) (*pb.Directory, []*pb.Directory, error) { entries, err := ioutil.ReadDir(dir) if err != nil { return nil, nil, err } d := &pb.Directory{} err = c.uploadBlobs(func(ch chan<- *blob) error { defer close(ch) for _, entry := range entries { name := entry.Name() fullname := path.Join(dir, name) if mode := entry.Mode(); mode&os.ModeDir != 0 { dir, descendants, err := c.digestDir(fullname, children) if err != nil { return err } digest, contents := c.digestMessageContents(dir) ch <- &blob{ Digest: digest, Data: contents, } d.Directories = append(d.Directories, &pb.DirectoryNode{ Name: name, Digest: digest, }) children = append(children, descendants...) continue } else if mode&os.ModeSymlink != 0 { target, err := os.Readlink(fullname) if err != nil { return err } d.Symlinks = append(d.Symlinks, &pb.SymlinkNode{ Name: name, Target: target, }) continue } h, err := c.state.PathHasher.Hash(fullname, false, true) if err != nil { return err } digest := &pb.Digest{ Hash: hex.EncodeToString(h), SizeBytes: entry.Size(), } d.Files = append(d.Files, &pb.FileNode{ Name: name, Digest: digest, IsExecutable: (entry.Mode() & 0111) != 0, }) ch <- &blob{ File: fullname, Digest: digest, } } return nil }) return d, children, err } // buildInputRoot constructs the directory that is the input root and optionally uploads it. func (c *Client) buildInputRoot(target *core.BuildTarget, upload, isTest bool) (root *pb.Directory, err error) { c.uploadBlobs(func(ch chan<- *blob) error { defer close(ch) if upload { root, err = c.uploadInputs(ch, target, isTest, false) } else { root, err = c.uploadInputs(nil, target, isTest, false) } return nil }) return } // uploadInputs finds and uploads a set of inputs from a target. 
func (c *Client) uploadInputs(ch chan<- *blob, target *core.BuildTarget, isTest, useTargetPackage bool) (*pb.Directory, error) { b := newDirBuilder(c) for input := range c.iterInputs(target, isTest) { if l := input.Label(); l != nil { if o := c.targetOutputs(*l); o == nil { if c.remoteExecution { // Classic "we shouldn't get here" stuff return nil, fmt.Errorf("Outputs not known for %s (should be built by now)", *l) } } else { pkgName := l.PackageName if useTargetPackage { pkgName = target.Label.PackageName } d := b.Dir(pkgName) d.Files = append(d.Files, o.Files...) d.Directories = append(d.Directories, o.Directories...) d.Symlinks = append(d.Symlinks, o.Symlinks...) continue } } if err := c.uploadInput(b, ch, input); err != nil { return nil, err } } if useTargetPackage { b.Root(ch) return b.Dir(target.Label.PackageName), nil } return b.Root(ch), nil } // uploadInput finds and uploads a single input. func (c *Client) uploadInput(b *dirBuilder, ch chan<- *blob, input core.BuildInput) error { fullPaths := input.FullPaths(c.state.Graph) for i, out := range input.Paths(c.state.Graph) { in := fullPaths[i] if err := fs.Walk(in, func(name string, isDir bool) error { if isDir { return nil // nothing to do } dest := path.Join(out, name[len(in):]) d := b.Dir(path.Dir(dest)) // Now handle the file itself info, err := os.Lstat(name) if err != nil { return err } if info.Mode()&os.ModeSymlink != 0 { link, err := os.Readlink(name) if err != nil { return err } d.Symlinks = append(d.Symlinks, &pb.SymlinkNode{ Name: path.Base(dest), Target: link, }) return nil } h, err := c.state.PathHasher.Hash(name, false, true) if err != nil { return err } digest := &pb.Digest{ Hash: hex.EncodeToString(h), SizeBytes: info.Size(), } d.Files = append(d.Files, &pb.FileNode{ Name: path.Base(dest), Digest: digest, IsExecutable: info.Mode()&0100 != 0, }) if ch != nil { ch <- &blob{ File: name, Digest: digest, } } return nil }); err != nil { return err } } return nil } // iterInputs yields all the input files needed for a target. func (c *Client) iterInputs(target *core.BuildTarget, isTest bool) <-chan core.BuildInput { if !isTest { return core.IterInputs(c.state.Graph, target, true) } ch := make(chan core.BuildInput) go func() { ch <- target.Label for _, datum := range target.Data { ch <- datum } close(ch) }() return ch } // buildMetadata converts an ActionResult into one of our BuildMetadata protos. // N.B. this always returns a non-nil metadata object for the first response. func (c *Client) buildMetadata(ar *pb.ActionResult, needStdout, needStderr bool) (*core.BuildMetadata, error) { metadata := &core.BuildMetadata{ Stdout: ar.StdoutRaw, Stderr: ar.StderrRaw, } if ar.ExecutionMetadata != nil { metadata.StartTime = toTime(ar.ExecutionMetadata.ExecutionStartTimestamp) metadata.EndTime = toTime(ar.ExecutionMetadata.ExecutionCompletedTimestamp) } if needStdout && len(metadata.Stdout) == 0 && ar.StdoutDigest != nil { b, err := c.readAllByteStream(ar.StdoutDigest) if err != nil { return metadata, err } metadata.Stdout = b } if needStderr && len(metadata.Stderr) == 0 && ar.StderrDigest != nil { b, err := c.readAllByteStream(ar.StderrDigest) if err != nil { return metadata, err } metadata.Stderr = b } return metadata, nil } // digestForFilename returns the digest for an output of the given name. 
func (c *Client) digestForFilename(ar *pb.ActionResult, name string) *pb.Digest { for _, file := range ar.OutputFiles { if file.Path == name { return file.Digest } } return nil } // downloadDirectory downloads & writes out a single Directory proto. func (c *Client) downloadDirectory(root string, dir *pb.Directory) error { if err := os.MkdirAll(root, core.DirPermissions); err != nil { return err } for _, file := range dir.Files { if err := c.retrieveByteStream(&blob{ Digest: file.Digest, File: path.Join(root, file.Name), Mode: 0644 | extraFilePerms(file), }); err != nil { return wrap(err, "Downloading %s", path.Join(root, file.Name)) } } for _, dir := range dir.Directories { d := &pb.Directory{} name := path.Join(root, dir.Name) if err := c.readByteStreamToProto(dir.Digest, d); err != nil { return wrap(err, "Downloading directory metadata for %s", name) } else if err := c.downloadDirectory(name, d); err != nil { return wrap(err, "Downloading directory %s", name) } } for _, sym := range dir.Symlinks { if err := os.Symlink(sym.Target, path.Join(root, sym.Name)); err != nil { return err } } return nil } // verifyActionResult verifies that all the requested outputs actually exist in a returned // ActionResult. Servers do not necessarily verify this but we need to make sure they are // complete for future requests. func (c *Client) verifyActionResult(target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest, ar *pb.ActionResult) error { outs := make(map[string]bool, len(ar.OutputFiles)+len(ar.OutputDirectories)+len(ar.OutputFileSymlinks)+len(ar.OutputDirectorySymlinks)) for _, f := range ar.OutputFiles { outs[f.Path] = true } for _, f := range ar.OutputDirectories { outs[f.Path] = true } for _, f := range ar.OutputFileSymlinks { outs[f.Path] = true } for _, f := range ar.OutputDirectorySymlinks { outs[f.Path] = true } for _, out := range command.OutputFiles { if !outs[out] { return fmt.Errorf("Remote build action for %s failed to produce output %s%s", target, out, c.actionURL(actionDigest, true)) } } for _, out := range command.OutputDirectories { if !outs[out] { return fmt.Errorf("Remote build action for %s failed to produce output %s%s", target, out, c.actionURL(actionDigest, true)) } } return nil } // translateOS converts the OS name of a subrepo into a Bazel-style OS name. func translateOS(subrepo *core.Subrepo) string { if subrepo == nil { return reallyTranslateOS(runtime.GOOS) } return reallyTranslateOS(subrepo.Arch.OS) } func reallyTranslateOS(os string) string { switch os { case "darwin": return "macos" default: return os } } // buildEnv translates the set of environment variables for this target to a proto. func buildEnv(env []string) []*pb.Command_EnvironmentVariable { sort.Strings(env) // Proto says it must be sorted (not just consistently ordered :( ) vars := make([]*pb.Command_EnvironmentVariable, len(env)) for i, e := range env { idx := strings.IndexByte(e, '=') vars[i] = &pb.Command_EnvironmentVariable{ Name: e[:idx], Value: e[idx+1:], } } return vars }
1
8,927
FYI you could use a raw string for this which removes the need for escaping the inner quotes.
thought-machine-please
go
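On the row above, the reviewer notes that a Go raw string literal would avoid escaping the inner double quotes in `commandPrefix`. One caveat worth hedging: raw strings are delimited by backticks and cannot contain one, so the backquoted pwd in the original command would have to be written as $(pwd) for the suggestion to apply as-is. A small self-contained sketch:

```go
package main

import "fmt"

func main() {
	// Interpreted string literal: the inner double quotes must be escaped.
	escaped := "export TMP_DIR=\"$(pwd)\" && "

	// Raw string literal: no escaping needed. Raw strings are delimited by
	// backticks, so the shell command substitution is written as $(...) here;
	// a literal backquoted pwd could not appear inside a raw string.
	raw := `export TMP_DIR="$(pwd)" && `

	fmt.Println(escaped == raw) // true
}
```

Both literals produce the same string; the raw form simply trades quote escaping for the restriction on backticks.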
@@ -984,3 +984,19 @@ class TestGetSetClipboard: def test_supports_selection(self, clipboard_mock, selection): clipboard_mock.supportsSelection.return_value = selection assert utils.supports_selection() == selection + + [email protected]('keystr, expected', [ + ('<Control-x>', True), + ('<Meta-x>', True), + ('<Ctrl-Alt-y>', True), + ('x', False), + ('X', False), + ('<Escape>', True), + ('foobar', False), + ('foo>', False), + ('<foo', False), + ('<<', False), +]) +def test_is_special_key(keystr, expected): + assert utils.is_special_key(keystr) == expected
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Tests for qutebrowser.utils.utils.""" import sys import enum import datetime import os.path import io import logging import functools import collections from PyQt5.QtCore import Qt from PyQt5.QtGui import QColor, QClipboard import pytest import qutebrowser import qutebrowser.utils # for test_qualname from qutebrowser.utils import utils, qtutils ELLIPSIS = '\u2026' class Color(QColor): """A QColor with a nicer repr().""" def __repr__(self): return utils.get_repr(self, constructor=True, red=self.red(), green=self.green(), blue=self.blue(), alpha=self.alpha()) class TestCompactText: """Test compact_text.""" @pytest.mark.parametrize('text, expected', [ ('foo\nbar', 'foobar'), (' foo \n bar ', 'foobar'), ('\nfoo\n', 'foo'), ], ids=repr) def test_compact_text(self, text, expected): """Test folding of newlines.""" assert utils.compact_text(text) == expected @pytest.mark.parametrize('elidelength, text, expected', [ (None, 'x' * 100, 'x' * 100), (6, 'foobar', 'foobar'), (5, 'foobar', 'foob' + ELLIPSIS), (5, 'foo\nbar', 'foob' + ELLIPSIS), (7, 'foo\nbar', 'foobar'), ], ids=lambda val: repr(val)[:20]) def test_eliding(self, elidelength, text, expected): """Test eliding.""" assert utils.compact_text(text, elidelength) == expected class TestEliding: """Test elide.""" def test_too_small(self): """Test eliding to 0 chars which should fail.""" with pytest.raises(ValueError): utils.elide('foo', 0) @pytest.mark.parametrize('text, length, expected', [ ('foo', 1, ELLIPSIS), ('foo', 3, 'foo'), ('foobar', 3, 'fo' + ELLIPSIS), ]) def test_elided(self, text, length, expected): assert utils.elide(text, length) == expected @pytest.fixture(params=[True, False]) def freezer(request, monkeypatch): if request.param and not getattr(sys, 'frozen', False): monkeypatch.setattr(sys, 'frozen', True, raising=False) monkeypatch.setattr('sys.executable', qutebrowser.__file__) elif not request.param and getattr(sys, 'frozen', False): # Want to test unfrozen tests, but we are frozen pytest.skip("Can't run with sys.frozen = True!") @pytest.mark.usefixtures('freezer') class TestReadFile: """Test read_file.""" def test_readfile(self): """Read a test file.""" content = utils.read_file(os.path.join('utils', 'testfile')) assert content.splitlines()[0] == "Hello World!" def test_readfile_binary(self): """Read a test file in binary mode.""" content = utils.read_file(os.path.join('utils', 'testfile'), binary=True) assert content.splitlines()[0] == b"Hello World!" @pytest.mark.usefixtures('freezer') def test_resource_filename(): """Read a test file.""" filename = utils.resource_filename(os.path.join('utils', 'testfile')) with open(filename, 'r', encoding='utf-8') as f: assert f.read().splitlines()[0] == "Hello World!" 
class Patcher: """Helper for TestActuteWarning. Attributes: monkeypatch: The pytest monkeypatch fixture. """ def __init__(self, monkeypatch): self.monkeypatch = monkeypatch def patch_platform(self, platform='linux'): """Patch sys.platform.""" self.monkeypatch.setattr('sys.platform', platform) def patch_exists(self, exists=True): """Patch os.path.exists.""" self.monkeypatch.setattr('qutebrowser.utils.utils.os.path.exists', lambda path: exists) def patch_version(self, version='5.2.0'): """Patch Qt version.""" self.monkeypatch.setattr('qutebrowser.utils.utils.qtutils.qVersion', lambda: version) def patch_file(self, data): """Patch open() to return the given data.""" fake_file = io.StringIO(data) self.monkeypatch.setattr(utils, 'open', lambda filename, mode, encoding: fake_file, raising=False) def patch_all(self, data): """Patch everything so the issue would exist.""" self.patch_platform() self.patch_exists() self.patch_version() self.patch_file(data) class TestActuteWarning: """Test actute_warning.""" @pytest.fixture def patcher(self, monkeypatch): """Fixture providing a Patcher helper.""" return Patcher(monkeypatch) def test_non_linux(self, patcher, capsys): """Test with a non-Linux OS.""" patcher.patch_platform('toaster') utils.actute_warning() out, err = capsys.readouterr() assert not out assert not err def test_no_compose(self, patcher, capsys): """Test with no compose file.""" patcher.patch_platform() patcher.patch_exists(False) utils.actute_warning() out, err = capsys.readouterr() assert not out assert not err def test_newer_qt(self, patcher, capsys): """Test with compose file but newer Qt version.""" patcher.patch_platform() patcher.patch_exists() patcher.patch_version('5.4') utils.actute_warning() out, err = capsys.readouterr() assert not out assert not err def test_no_match(self, patcher, capsys): """Test with compose file and affected Qt but no match.""" patcher.patch_all('foobar') utils.actute_warning() out, err = capsys.readouterr() assert not out assert not err def test_empty(self, patcher, capsys): """Test with empty compose file.""" patcher.patch_all(None) utils.actute_warning() out, err = capsys.readouterr() assert not out assert not err def test_match(self, patcher, capsys): """Test with compose file and affected Qt and a match.""" patcher.patch_all('foobar\n<dead_actute>\nbaz') utils.actute_warning() out, err = capsys.readouterr() assert out.startswith('Note: If you got a') assert not err def test_match_stdout_none(self, monkeypatch, patcher, capsys): """Test with a match and stdout being None.""" patcher.patch_all('foobar\n<dead_actute>\nbaz') monkeypatch.setattr('sys.stdout', None) utils.actute_warning() def test_unreadable(self, mocker, patcher, capsys, caplog): """Test with an unreadable compose file.""" patcher.patch_platform() patcher.patch_exists() patcher.patch_version() mocker.patch('qutebrowser.utils.utils.open', side_effect=OSError, create=True) with caplog.at_level(logging.ERROR, 'init'): utils.actute_warning() assert len(caplog.records) == 1 assert caplog.records[0].message == 'Failed to read Compose file' out, _err = capsys.readouterr() assert not out class TestInterpolateColor: """Tests for interpolate_color. Attributes: white: The Color white as a valid Color for tests. white: The Color black as a valid Color for tests. 
""" Colors = collections.namedtuple('Colors', ['white', 'black']) @pytest.fixture def colors(self): """Example colors to be used.""" return self.Colors(Color('white'), Color('black')) def test_invalid_start(self, colors): """Test an invalid start color.""" with pytest.raises(qtutils.QtValueError): utils.interpolate_color(Color(), colors.white, 0) def test_invalid_end(self, colors): """Test an invalid end color.""" with pytest.raises(qtutils.QtValueError): utils.interpolate_color(colors.white, Color(), 0) def test_invalid_percentage(self, colors): """Test an invalid percentage.""" with pytest.raises(ValueError): utils.interpolate_color(colors.white, colors.white, -1) with pytest.raises(ValueError): utils.interpolate_color(colors.white, colors.white, 101) def test_invalid_colorspace(self, colors): """Test an invalid colorspace.""" with pytest.raises(ValueError): utils.interpolate_color(colors.white, colors.black, 10, QColor.Cmyk) def test_valid_percentages_rgb(self, colors): """Test 0% and 100% in the RGB colorspace.""" white = utils.interpolate_color(colors.white, colors.black, 0, QColor.Rgb) black = utils.interpolate_color(colors.white, colors.black, 100, QColor.Rgb) assert Color(white) == colors.white assert Color(black) == colors.black def test_valid_percentages_hsv(self, colors): """Test 0% and 100% in the HSV colorspace.""" white = utils.interpolate_color(colors.white, colors.black, 0, QColor.Hsv) black = utils.interpolate_color(colors.white, colors.black, 100, QColor.Hsv) assert Color(white) == colors.white assert Color(black) == colors.black def test_valid_percentages_hsl(self, colors): """Test 0% and 100% in the HSL colorspace.""" white = utils.interpolate_color(colors.white, colors.black, 0, QColor.Hsl) black = utils.interpolate_color(colors.white, colors.black, 100, QColor.Hsl) assert Color(white) == colors.white assert Color(black) == colors.black def test_interpolation_rgb(self): """Test an interpolation in the RGB colorspace.""" color = utils.interpolate_color(Color(0, 40, 100), Color(0, 20, 200), 50, QColor.Rgb) assert Color(color) == Color(0, 30, 150) def test_interpolation_hsv(self): """Test an interpolation in the HSV colorspace.""" start = Color() stop = Color() start.setHsv(0, 40, 100) stop.setHsv(0, 20, 200) color = utils.interpolate_color(start, stop, 50, QColor.Hsv) expected = Color() expected.setHsv(0, 30, 150) assert Color(color) == expected def test_interpolation_hsl(self): """Test an interpolation in the HSL colorspace.""" start = Color() stop = Color() start.setHsl(0, 40, 100) stop.setHsl(0, 20, 200) color = utils.interpolate_color(start, stop, 50, QColor.Hsl) expected = Color() expected.setHsl(0, 30, 150) assert Color(color) == expected @pytest.mark.parametrize('percentage, expected', [ (0, (0, 0, 0)), (99, (0, 0, 0)), (100, (255, 255, 255)), ]) def test_interpolation_none(self, percentage, expected): """Test an interpolation with a gradient turned off.""" color = utils.interpolate_color(Color(0, 0, 0), Color(255, 255, 255), percentage, None) assert isinstance(color, QColor) assert Color(color) == Color(*expected) @pytest.mark.parametrize('seconds, out', [ (-1, '-0:01'), (0, '0:00'), (59, '0:59'), (60, '1:00'), (60.4, '1:00'), (61, '1:01'), (-61, '-1:01'), (3599, '59:59'), (3600, '1:00:00'), (3601, '1:00:01'), (36000, '10:00:00'), ]) def test_format_seconds(seconds, out): assert utils.format_seconds(seconds) == out @pytest.mark.parametrize('td, out', [ (datetime.timedelta(seconds=-1), '-1s'), (datetime.timedelta(seconds=0), '0s'), (datetime.timedelta(seconds=59), 
'59s'), (datetime.timedelta(seconds=120), '2m'), (datetime.timedelta(seconds=60.4), '1m'), (datetime.timedelta(seconds=63), '1m 3s'), (datetime.timedelta(seconds=-64), '-1m 4s'), (datetime.timedelta(seconds=3599), '59m 59s'), (datetime.timedelta(seconds=3600), '1h'), (datetime.timedelta(seconds=3605), '1h 5s'), (datetime.timedelta(seconds=3723), '1h 2m 3s'), (datetime.timedelta(seconds=3780), '1h 3m'), (datetime.timedelta(seconds=36000), '10h'), ]) def test_format_timedelta(td, out): assert utils.format_timedelta(td) == out class TestFormatSize: """Tests for format_size. Class attributes: TESTS: A list of (input, output) tuples. """ TESTS = [ (-1024, '-1.00k'), (-1, '-1.00'), (0, '0.00'), (1023, '1023.00'), (1024, '1.00k'), (1034.24, '1.01k'), (1024 * 1024 * 2, '2.00M'), (1024 ** 10, '1024.00Y'), (None, '?.??'), ] KILO_TESTS = [(999, '999.00'), (1000, '1.00k'), (1010, '1.01k')] @pytest.mark.parametrize('size, out', TESTS) def test_format_size(self, size, out): """Test format_size with several tests.""" assert utils.format_size(size) == out @pytest.mark.parametrize('size, out', TESTS) def test_suffix(self, size, out): """Test the suffix option.""" assert utils.format_size(size, suffix='B') == out + 'B' @pytest.mark.parametrize('size, out', KILO_TESTS) def test_base(self, size, out): """Test with an alternative base.""" assert utils.format_size(size, base=1000) == out class TestKeyToString: """Test key_to_string.""" @pytest.mark.parametrize('key, expected', [ (Qt.Key_Blue, 'Blue'), (Qt.Key_Backtab, 'Tab'), (Qt.Key_Escape, 'Escape'), (Qt.Key_A, 'A'), (Qt.Key_degree, '°'), (Qt.Key_Meta, 'Meta'), ]) def test_normal(self, key, expected): """Test a special key where QKeyEvent::toString works incorrectly.""" assert utils.key_to_string(key) == expected def test_missing(self, monkeypatch): """Test with a missing key.""" monkeypatch.delattr('qutebrowser.utils.utils.Qt.Key_Blue') # We don't want to test the key which is actually missing - we only # want to know if the mapping still behaves properly. 
assert utils.key_to_string(Qt.Key_A) == 'A' def test_all(self): """Make sure there's some sensible output for all keys.""" for name, value in sorted(vars(Qt).items()): if not isinstance(value, Qt.Key): continue print(name) string = utils.key_to_string(value) assert string string.encode('utf-8') # make sure it's encodable class TestKeyEventToString: """Test keyevent_to_string.""" def test_only_control(self, fake_keyevent_factory): """Test keyeevent when only control is pressed.""" evt = fake_keyevent_factory(key=Qt.Key_Control, modifiers=Qt.ControlModifier) assert utils.keyevent_to_string(evt) is None def test_only_hyper_l(self, fake_keyevent_factory): """Test keyeevent when only Hyper_L is pressed.""" evt = fake_keyevent_factory(key=Qt.Key_Hyper_L, modifiers=Qt.MetaModifier) assert utils.keyevent_to_string(evt) is None def test_only_key(self, fake_keyevent_factory): """Test with a simple key pressed.""" evt = fake_keyevent_factory(key=Qt.Key_A) assert utils.keyevent_to_string(evt) == 'A' def test_key_and_modifier(self, fake_keyevent_factory): """Test with key and modifier pressed.""" evt = fake_keyevent_factory(key=Qt.Key_A, modifiers=Qt.ControlModifier) expected = 'Meta+A' if sys.platform == 'darwin' else 'Ctrl+A' assert utils.keyevent_to_string(evt) == expected def test_key_and_modifiers(self, fake_keyevent_factory): """Test with key and multiple modifiers pressed.""" evt = fake_keyevent_factory( key=Qt.Key_A, modifiers=(Qt.ControlModifier | Qt.AltModifier | Qt.MetaModifier | Qt.ShiftModifier)) assert utils.keyevent_to_string(evt) == 'Ctrl+Alt+Meta+Shift+A' def test_mac(self, monkeypatch, fake_keyevent_factory): """Test with a simulated mac.""" monkeypatch.setattr('sys.platform', 'darwin') evt = fake_keyevent_factory(key=Qt.Key_A, modifiers=Qt.ControlModifier) assert utils.keyevent_to_string(evt) == 'Meta+A' @pytest.mark.parametrize('keystr, expected', [ ('<Control-x>', utils.KeyInfo(Qt.Key_X, Qt.ControlModifier, '')), ('<Meta-x>', utils.KeyInfo(Qt.Key_X, Qt.MetaModifier, '')), ('<Ctrl-Alt-y>', utils.KeyInfo(Qt.Key_Y, Qt.ControlModifier | Qt.AltModifier, '')), ('x', utils.KeyInfo(Qt.Key_X, Qt.NoModifier, 'x')), ('X', utils.KeyInfo(Qt.Key_X, Qt.ShiftModifier, 'X')), ('<Escape>', utils.KeyInfo(Qt.Key_Escape, Qt.NoModifier, '')), ('foobar', utils.KeyParseError), ('x, y', utils.KeyParseError), ('xyz', utils.KeyParseError), ('Escape', utils.KeyParseError), ('<Ctrl-x>, <Ctrl-y>', utils.KeyParseError), ]) def test_parse_single_key(keystr, expected): if expected is utils.KeyParseError: with pytest.raises(utils.KeyParseError): utils._parse_single_key(keystr) else: assert utils._parse_single_key(keystr) == expected @pytest.mark.parametrize('keystr, expected', [ ('<Control-x>', [utils.KeyInfo(Qt.Key_X, Qt.ControlModifier, '')]), ('x', [utils.KeyInfo(Qt.Key_X, Qt.NoModifier, 'x')]), ('xy', [utils.KeyInfo(Qt.Key_X, Qt.NoModifier, 'x'), utils.KeyInfo(Qt.Key_Y, Qt.NoModifier, 'y')]), ('<Control-x><Meta-x>', utils.KeyParseError), ]) def test_parse_keystring(keystr, expected): if expected is utils.KeyParseError: with pytest.raises(utils.KeyParseError): utils.parse_keystring(keystr) else: assert utils.parse_keystring(keystr) == expected @pytest.mark.parametrize('orig, repl', [ ('Control+x', 'ctrl+x'), ('Windows+x', 'meta+x'), ('Mod1+x', 'alt+x'), ('Mod4+x', 'meta+x'), ('Control--', 'ctrl+-'), ('Windows++', 'meta++'), ('ctrl-x', 'ctrl+x'), ('control+x', 'ctrl+x') ]) def test_normalize_keystr(orig, repl): assert utils.normalize_keystr(orig) == repl class TestFakeIOStream: """Test FakeIOStream.""" def 
_write_func(self, text): return text def test_flush(self): """Smoke-test to see if flushing works.""" s = utils.FakeIOStream(self._write_func) s.flush() def test_isatty(self): """Make sure isatty() is always false.""" s = utils.FakeIOStream(self._write_func) assert not s.isatty() def test_write(self): """Make sure writing works.""" s = utils.FakeIOStream(self._write_func) assert s.write('echo') == 'echo' class TestFakeIO: """Test FakeIO.""" @pytest.yield_fixture(autouse=True) def restore_streams(self): """Restore sys.stderr/sys.stdout after tests.""" old_stdout = sys.stdout old_stderr = sys.stderr yield sys.stdout = old_stdout sys.stderr = old_stderr def test_normal(self, capsys): """Test without changing sys.stderr/sys.stdout.""" data = io.StringIO() with utils.fake_io(data.write): sys.stdout.write('hello\n') sys.stderr.write('world\n') out, err = capsys.readouterr() assert not out assert not err assert data.getvalue() == 'hello\nworld\n' sys.stdout.write('back to\n') sys.stderr.write('normal\n') out, err = capsys.readouterr() assert out == 'back to\n' assert err == 'normal\n' def test_stdout_replaced(self, capsys): """Test with replaced stdout.""" data = io.StringIO() new_stdout = io.StringIO() with utils.fake_io(data.write): sys.stdout.write('hello\n') sys.stderr.write('world\n') sys.stdout = new_stdout out, err = capsys.readouterr() assert not out assert not err assert data.getvalue() == 'hello\nworld\n' sys.stdout.write('still new\n') sys.stderr.write('normal\n') out, err = capsys.readouterr() assert not out assert err == 'normal\n' assert new_stdout.getvalue() == 'still new\n' def test_stderr_replaced(self, capsys): """Test with replaced stderr.""" data = io.StringIO() new_stderr = io.StringIO() with utils.fake_io(data.write): sys.stdout.write('hello\n') sys.stderr.write('world\n') sys.stderr = new_stderr out, err = capsys.readouterr() assert not out assert not err assert data.getvalue() == 'hello\nworld\n' sys.stdout.write('normal\n') sys.stderr.write('still new\n') out, err = capsys.readouterr() assert out == 'normal\n' assert not err assert new_stderr.getvalue() == 'still new\n' class GotException(Exception): """Exception used for TestDisabledExcepthook.""" pass def excepthook(_exc, _val, _tb): return def excepthook_2(_exc, _val, _tb): return class TestDisabledExcepthook: """Test disabled_excepthook. This doesn't test much as some things are untestable without triggering the excepthook (which is hard to test). 
""" @pytest.yield_fixture(autouse=True) def restore_excepthook(self): """Restore sys.excepthook and sys.__excepthook__ after tests.""" old_excepthook = sys.excepthook old_dunder_excepthook = sys.__excepthook__ yield sys.excepthook = old_excepthook sys.__excepthook__ = old_dunder_excepthook def test_normal(self): """Test without changing sys.excepthook.""" sys.excepthook = excepthook assert sys.excepthook is excepthook with utils.disabled_excepthook(): assert sys.excepthook is not excepthook assert sys.excepthook is excepthook def test_changed(self): """Test with changed sys.excepthook.""" sys.excepthook = excepthook with utils.disabled_excepthook(): assert sys.excepthook is not excepthook sys.excepthook = excepthook_2 assert sys.excepthook is excepthook_2 class TestPreventExceptions: """Test prevent_exceptions.""" @utils.prevent_exceptions(42) def func_raising(self): raise Exception def test_raising(self, caplog): """Test with a raising function.""" with caplog.at_level(logging.ERROR, 'misc'): ret = self.func_raising() assert ret == 42 assert len(caplog.records) == 1 expected = 'Error in test_utils.TestPreventExceptions.func_raising' actual = caplog.records[0].message assert actual == expected @utils.prevent_exceptions(42) def func_not_raising(self): return 23 def test_not_raising(self, caplog): """Test with a non-raising function.""" with caplog.at_level(logging.ERROR, 'misc'): ret = self.func_not_raising() assert ret == 23 assert not caplog.records @utils.prevent_exceptions(42, True) def func_predicate_true(self): raise Exception def test_predicate_true(self, caplog): """Test with a True predicate.""" with caplog.at_level(logging.ERROR, 'misc'): ret = self.func_predicate_true() assert ret == 42 assert len(caplog.records) == 1 @utils.prevent_exceptions(42, False) def func_predicate_false(self): raise Exception def test_predicate_false(self, caplog): """Test with a False predicate.""" with caplog.at_level(logging.ERROR, 'misc'): with pytest.raises(Exception): self.func_predicate_false() assert not caplog.records class Obj: """Test object for test_get_repr().""" pass @pytest.mark.parametrize('constructor, attrs, expected', [ (False, {}, '<test_utils.Obj>'), (False, {'foo': None}, '<test_utils.Obj foo=None>'), (False, {'foo': "b'ar", 'baz': 2}, '<test_utils.Obj baz=2 foo="b\'ar">'), (True, {}, 'test_utils.Obj()'), (True, {'foo': None}, 'test_utils.Obj(foo=None)'), (True, {'foo': "te'st", 'bar': 2}, 'test_utils.Obj(bar=2, foo="te\'st")'), ]) def test_get_repr(constructor, attrs, expected): """Test get_repr().""" assert utils.get_repr(Obj(), constructor, **attrs) == expected class QualnameObj(): """Test object for test_qualname.""" def func(self): """Test method for test_qualname.""" pass def qualname_func(_blah): """Test function for test_qualname.""" pass QUALNAME_OBJ = QualnameObj() @pytest.mark.parametrize('obj, expected', [ (QUALNAME_OBJ, repr(QUALNAME_OBJ)), # instance - unknown (QualnameObj, 'test_utils.QualnameObj'), # class (QualnameObj.func, 'test_utils.QualnameObj.func'), # unbound method (QualnameObj().func, 'test_utils.QualnameObj.func'), # bound method (qualname_func, 'test_utils.qualname_func'), # function (functools.partial(qualname_func, True), 'test_utils.qualname_func'), (qutebrowser, 'qutebrowser'), # module (qutebrowser.utils, 'qutebrowser.utils'), # submodule (utils, 'qutebrowser.utils.utils'), # submodule (from-import) ], ids=['instance', 'class', 'unbound-method', 'bound-method', 'function', 'partial', 'module', 'submodule', 'from-import']) def test_qualname(obj, 
expected): assert utils.qualname(obj) == expected class TestIsEnum: """Test is_enum.""" def test_enum(self): """Test is_enum with an enum.""" e = enum.Enum('Foo', 'bar, baz') assert utils.is_enum(e) def test_class(self): """Test is_enum with a non-enum class.""" class Test: """Test class for is_enum.""" pass assert not utils.is_enum(Test) def test_object(self): """Test is_enum with a non-enum object.""" assert not utils.is_enum(23) class TestRaises: """Test raises.""" def do_raise(self): """Helper function which raises an exception.""" raise Exception def do_nothing(self): """Helper function which does nothing.""" pass def test_raises_single_exc_true(self): """Test raises with a single exception which gets raised.""" assert utils.raises(ValueError, int, 'a') def test_raises_single_exc_false(self): """Test raises with a single exception which does not get raised.""" assert not utils.raises(ValueError, int, '1') def test_raises_multiple_exc_true(self): """Test raises with multiple exceptions which get raised.""" assert utils.raises((ValueError, TypeError), int, 'a') assert utils.raises((ValueError, TypeError), int, None) def test_raises_multiple_exc_false(self): """Test raises with multiple exceptions which do not get raised.""" assert not utils.raises((ValueError, TypeError), int, '1') def test_no_args_true(self): """Test with no args and an exception which gets raised.""" assert utils.raises(Exception, self.do_raise) def test_no_args_false(self): """Test with no args and an exception which does not get raised.""" assert not utils.raises(Exception, self.do_nothing) def test_unrelated_exception(self): """Test with an unrelated exception.""" with pytest.raises(Exception): utils.raises(ValueError, self.do_raise) @pytest.mark.parametrize('inp, enc, expected', [ ('hello world', 'ascii', 'hello world'), ('hellö wörld', 'utf-8', 'hellö wörld'), ('hellö wörld', 'ascii', 'hell? w?rld'), ]) def test_force_encoding(inp, enc, expected): assert utils.force_encoding(inp, enc) == expected @pytest.mark.parametrize('inp, expected', [ ('normal.txt', 'normal.txt'), ('user/repo issues.mht', 'user_repo issues.mht'), ('<Test\\File> - "*?:|', '_Test_File_ - _____'), ]) def test_sanitize_filename(inp, expected): assert utils.sanitize_filename(inp) == expected def test_sanitize_filename_empty_replacement(): name = '/<Bad File>/' assert utils.sanitize_filename(name, replacement=None) == 'Bad File' class TestNewestSlice: """Test newest_slice.""" def test_count_minus_two(self): """Test with a count of -2.""" with pytest.raises(ValueError): utils.newest_slice([], -2) @pytest.mark.parametrize('items, count, expected', [ # Count of -1 (all elements). (range(20), -1, range(20)), # Count of 0 (no elements). (range(20), 0, []), # Count which is much smaller than the iterable. (range(20), 5, [15, 16, 17, 18, 19]), # Count which is exactly one smaller.""" (range(5), 4, [1, 2, 3, 4]), # Count which is just as large as the iterable.""" (range(5), 5, range(5)), # Count which is one bigger than the iterable. (range(5), 6, range(5)), # Count which is much bigger than the iterable. 
(range(5), 50, range(5)), ]) def test_good(self, items, count, expected): """Test slices which shouldn't raise an exception.""" sliced = utils.newest_slice(items, count) assert list(sliced) == list(expected) class TestGetSetClipboard: @pytest.fixture(autouse=True) def clipboard_mock(self, mocker): m = mocker.patch('qutebrowser.utils.utils.QApplication.clipboard', autospec=True) clipboard = m() clipboard.text.return_value = 'mocked clipboard text' return clipboard def test_set(self, clipboard_mock, caplog): utils.set_clipboard('Hello World') clipboard_mock.setText.assert_called_with('Hello World', mode=QClipboard.Clipboard) assert not caplog.records def test_set_unsupported_selection(self, clipboard_mock): clipboard_mock.supportsSelection.return_value = False with pytest.raises(utils.SelectionUnsupportedError): utils.set_clipboard('foo', selection=True) @pytest.mark.parametrize('selection, what, text, expected', [ (True, 'primary selection', 'fake text', 'fake text'), (False, 'clipboard', 'fake text', 'fake text'), (False, 'clipboard', 'füb', r'f\u00fcb'), ]) def test_set_logging(self, clipboard_mock, caplog, selection, what, text, expected): utils.log_clipboard = True utils.set_clipboard(text, selection=selection) assert not clipboard_mock.setText.called expected = 'Setting fake {}: "{}"'.format(what, expected) assert caplog.records[0].message == expected def test_get(self): assert utils.get_clipboard() == 'mocked clipboard text' def test_get_unsupported_selection(self, clipboard_mock): clipboard_mock.supportsSelection.return_value = False with pytest.raises(utils.SelectionUnsupportedError): utils.get_clipboard(selection=True) @pytest.mark.parametrize('selection', [True, False]) def test_get_fake_clipboard(self, selection): utils.fake_clipboard = 'fake clipboard text' utils.get_clipboard(selection=selection) assert utils.fake_clipboard is None @pytest.mark.parametrize('selection', [True, False]) def test_supports_selection(self, clipboard_mock, selection): clipboard_mock.supportsSelection.return_value = selection assert utils.supports_selection() == selection
1
14,790
2 blank lines here (between functions)
qutebrowser-qutebrowser
py
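The review message above ("2 blank lines here (between functions)") is the standard PEP 8 nit: top-level definitions in the test module must be separated by two blank lines. A minimal sketch of the convention, reusing names from the file above; exactly which definitions in the original diff were missing the blank lines is not visible in this record, so this is only an illustration:

def test_sanitize_filename_empty_replacement():
    name = '/<Bad File>/'
    assert utils.sanitize_filename(name, replacement=None) == 'Bad File'


# Two blank lines above this class: PEP 8 asks for two blank lines between
# top-level functions/classes, and a single blank line between methods.
class TestNewestSlice:

    """Test newest_slice."""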
@@ -23,8 +23,13 @@ import java.util.Set; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; class BaseRewriteFiles extends MergingSnapshotProducer<RewriteFiles> implements RewriteFiles { + BaseRewriteFiles(String tableName, TableOperations ops) { - super(tableName, ops); + this(tableName, ops, ops.current().spec()); + } + + BaseRewriteFiles(String tableName, TableOperations ops, PartitionSpec spec) { + super(tableName, ops, spec); // replace files must fail if any of the deleted paths is missing and cannot be deleted failMissingDeletePaths();
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.util.Set; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; class BaseRewriteFiles extends MergingSnapshotProducer<RewriteFiles> implements RewriteFiles { BaseRewriteFiles(String tableName, TableOperations ops) { super(tableName, ops); // replace files must fail if any of the deleted paths is missing and cannot be deleted failMissingDeletePaths(); } @Override protected RewriteFiles self() { return this; } @Override protected String operation() { return DataOperations.REPLACE; } @Override public RewriteFiles rewriteFiles(Set<DataFile> filesToDelete, Set<DataFile> filesToAdd) { Preconditions.checkArgument(filesToDelete != null && !filesToDelete.isEmpty(), "Files to delete cannot be null or empty"); Preconditions.checkArgument(filesToAdd != null && !filesToAdd.isEmpty(), "Files to add can not be null or empty"); for (DataFile toDelete : filesToDelete) { delete(toDelete); } for (DataFile toAdd : filesToAdd) { add(toAdd); } return this; } }
1
34,542
Nit: we try to avoid whitespace changes because they can easily cause unnecessary commit conflicts.
apache-iceberg
java
@@ -1279,10 +1279,13 @@ class Codebase } foreach ($reference_map as $start_pos => [$end_pos, $possible_reference]) { - if ($offset < $start_pos || $possible_reference[0] !== '*') { + if ($offset < $start_pos) { continue; } - + // If the reference precedes a "::" then treat it as a class reference. + if ($offset - $end_pos === 2 && substr($file_contents, $end_pos, 2) === '::') { + return [$possible_reference, '::', $offset]; + } if ($offset - $end_pos === 0) { $recent_type = $possible_reference;
1
<?php namespace Psalm; use Psalm\Internal\Analyzer\StatementsAnalyzer; use function array_combine; use function array_merge; use function count; use function error_log; use function explode; use function in_array; use function krsort; use function ksort; use LanguageServerProtocol\Command; use LanguageServerProtocol\Position; use LanguageServerProtocol\Range; use const PHP_MAJOR_VERSION; use const PHP_MINOR_VERSION; use PhpParser; use function preg_match; use Psalm\Internal\Analyzer\ProjectAnalyzer; use Psalm\Internal\Analyzer\Statements\Block\ForeachAnalyzer; use Psalm\Internal\Type\Comparator\UnionTypeComparator; use Psalm\Internal\Codebase\InternalCallMapHandler; use Psalm\Internal\Provider\ClassLikeStorageProvider; use Psalm\Internal\Provider\FileProvider; use Psalm\Internal\Provider\FileReferenceProvider; use Psalm\Internal\Provider\FileStorageProvider; use Psalm\Internal\Provider\Providers; use Psalm\Internal\Provider\StatementsProvider; use Psalm\Progress\Progress; use Psalm\Progress\VoidProgress; use Psalm\Storage\ClassLikeStorage; use Psalm\Storage\FileStorage; use Psalm\Storage\FunctionLikeStorage; use function is_string; use function strlen; use function strpos; use function strrpos; use function strtolower; use function substr; use function substr_count; class Codebase { /** * @var Config */ public $config; /** * A map of fully-qualified use declarations to the files * that reference them (keyed by filename) * * @var array<lowercase-string, array<int, \Psalm\CodeLocation>> */ public $use_referencing_locations = []; /** * A map of file names to the classes that they contain explicit references to * used in collaboration with use_referencing_locations * * @var array<string, array<lowercase-string, bool>> */ public $use_referencing_files = []; /** * @var FileStorageProvider */ public $file_storage_provider; /** * @var ClassLikeStorageProvider */ public $classlike_storage_provider; /** * @var bool */ public $collect_references = false; /** * @var bool */ public $collect_locations = false; /** * @var null|'always'|'auto' */ public $find_unused_code = null; /** * @var FileProvider */ public $file_provider; /** * @var FileReferenceProvider */ public $file_reference_provider; /** * @var StatementsProvider */ public $statements_provider; /** * @var Progress */ private $progress; /** * @var array<string, Type\Union> */ private static $stubbed_constants = []; /** * Whether to register autoloaded information * * @var bool */ public $register_autoload_files = false; /** * Whether to log functions just at the file level or globally (for stubs) * * @var bool */ public $register_stub_files = false; /** * @var bool */ public $find_unused_variables = false; /** * @var Internal\Codebase\Scanner */ public $scanner; /** * @var Internal\Codebase\Analyzer */ public $analyzer; /** * @var Internal\Codebase\Functions */ public $functions; /** * @var Internal\Codebase\ClassLikes */ public $classlikes; /** * @var Internal\Codebase\Methods */ public $methods; /** * @var Internal\Codebase\Properties */ public $properties; /** * @var Internal\Codebase\Populator */ public $populator; /** * @var ?Internal\Codebase\TaintFlowGraph */ public $taint_flow_graph = null; /** * @var bool */ public $server_mode = false; /** * @var bool */ public $store_node_types = false; /** * Whether or not to infer types from usage. 
Computationally expensive, so turned off by default * * @var bool */ public $infer_types_from_usage = false; /** * @var bool */ public $alter_code = false; /** * @var bool */ public $diff_methods = false; /** * @var array<lowercase-string, string> */ public $methods_to_move = []; /** * @var array<lowercase-string, string> */ public $methods_to_rename = []; /** * @var array<string, string> */ public $properties_to_move = []; /** * @var array<string, string> */ public $properties_to_rename = []; /** * @var array<string, string> */ public $class_constants_to_move = []; /** * @var array<string, string> */ public $class_constants_to_rename = []; /** * @var array<lowercase-string, string> */ public $classes_to_move = []; /** * @var array<lowercase-string, string> */ public $call_transforms = []; /** * @var array<string, string> */ public $property_transforms = []; /** * @var array<string, string> */ public $class_constant_transforms = []; /** * @var array<lowercase-string, string> */ public $class_transforms = []; /** * @var bool */ public $allow_backwards_incompatible_changes = true; /** * @var int */ public $php_major_version = PHP_MAJOR_VERSION; /** * @var int */ public $php_minor_version = PHP_MINOR_VERSION; /** * @var bool */ public $track_unused_suppressions = false; public function __construct( Config $config, Providers $providers, ?Progress $progress = null ) { if ($progress === null) { $progress = new VoidProgress(); } $this->config = $config; $this->file_storage_provider = $providers->file_storage_provider; $this->classlike_storage_provider = $providers->classlike_storage_provider; $this->progress = $progress; $this->file_provider = $providers->file_provider; $this->file_reference_provider = $providers->file_reference_provider; $this->statements_provider = $providers->statements_provider; self::$stubbed_constants = []; $reflection = new Internal\Codebase\Reflection($providers->classlike_storage_provider, $this); $this->scanner = new Internal\Codebase\Scanner( $this, $config, $providers->file_storage_provider, $providers->file_provider, $reflection, $providers->file_reference_provider, $progress ); $this->loadAnalyzer(); $this->functions = new Internal\Codebase\Functions($providers->file_storage_provider, $reflection); $this->classlikes = new Internal\Codebase\ClassLikes( $this->config, $providers->classlike_storage_provider, $providers->file_reference_provider, $providers->statements_provider, $this->scanner ); $this->properties = new Internal\Codebase\Properties( $providers->classlike_storage_provider, $providers->file_reference_provider, $this->classlikes ); $this->methods = new Internal\Codebase\Methods( $providers->classlike_storage_provider, $providers->file_reference_provider, $this->classlikes ); $this->populator = new Internal\Codebase\Populator( $config, $providers->classlike_storage_provider, $providers->file_storage_provider, $this->classlikes, $providers->file_reference_provider, $progress ); $this->loadAnalyzer(); } private function loadAnalyzer(): void { $this->analyzer = new Internal\Codebase\Analyzer( $this->config, $this->file_provider, $this->file_storage_provider, $this->progress ); } /** * @param array<string> $candidate_files * */ public function reloadFiles(ProjectAnalyzer $project_analyzer, array $candidate_files): void { $this->loadAnalyzer(); $this->file_reference_provider->loadReferenceCache(false); Internal\Analyzer\FunctionLikeAnalyzer::clearCache(); if (!$this->statements_provider->parser_cache_provider) { $diff_files = $candidate_files; } else { 
$diff_files = []; $parser_cache_provider = $this->statements_provider->parser_cache_provider; foreach ($candidate_files as $candidate_file_path) { if ($parser_cache_provider->loadExistingFileContentsFromCache($candidate_file_path) !== $this->file_provider->getContents($candidate_file_path) ) { $diff_files[] = $candidate_file_path; } } } $referenced_files = $project_analyzer->getReferencedFilesFromDiff($diff_files, false); foreach ($diff_files as $diff_file_path) { $this->invalidateInformationForFile($diff_file_path); } foreach ($referenced_files as $referenced_file_path) { if (in_array($referenced_file_path, $diff_files, true)) { continue; } $file_storage = $this->file_storage_provider->get($referenced_file_path); foreach ($file_storage->classlikes_in_file as $fq_classlike_name) { $this->classlike_storage_provider->remove($fq_classlike_name); $this->classlikes->removeClassLike($fq_classlike_name); } $this->file_storage_provider->remove($referenced_file_path); $this->scanner->removeFile($referenced_file_path); } $referenced_files = array_combine($referenced_files, $referenced_files); $this->scanner->addFilesToDeepScan($referenced_files); $this->addFilesToAnalyze(array_combine($candidate_files, $candidate_files)); $this->scanner->scanFiles($this->classlikes); $this->file_reference_provider->updateReferenceCache($this, $referenced_files); $this->populator->populateCodebase(); } public function enterServerMode(): void { $this->server_mode = true; $this->store_node_types = true; } public function collectLocations(): void { $this->collect_locations = true; $this->classlikes->collect_locations = true; $this->methods->collect_locations = true; $this->properties->collect_locations = true; } /** * @param 'always'|'auto' $find_unused_code * */ public function reportUnusedCode(string $find_unused_code = 'auto'): void { $this->collect_references = true; $this->classlikes->collect_references = true; $this->find_unused_code = $find_unused_code; $this->find_unused_variables = true; } public function reportUnusedVariables(): void { $this->collect_references = true; $this->find_unused_variables = true; } /** * @param array<string, string> $files_to_analyze * */ public function addFilesToAnalyze(array $files_to_analyze): void { $this->scanner->addFilesToDeepScan($files_to_analyze); $this->analyzer->addFilesToAnalyze($files_to_analyze); } /** * Scans all files their related files * */ public function scanFiles(int $threads = 1): void { $has_changes = $this->scanner->scanFiles($this->classlikes, $threads); if ($has_changes) { $this->populator->populateCodebase(); } } public function getFileContents(string $file_path): string { return $this->file_provider->getContents($file_path); } /** * @return list<PhpParser\Node\Stmt> */ public function getStatementsForFile(string $file_path): array { return $this->statements_provider->getStatementsForFile( $file_path, $this->php_major_version . '.' . 
$this->php_minor_version, $this->progress ); } public function createClassLikeStorage(string $fq_classlike_name): ClassLikeStorage { return $this->classlike_storage_provider->create($fq_classlike_name); } public function cacheClassLikeStorage(ClassLikeStorage $classlike_storage, string $file_path): void { $file_contents = $this->file_provider->getContents($file_path); if ($this->classlike_storage_provider->cache) { $this->classlike_storage_provider->cache->writeToCache($classlike_storage, $file_path, $file_contents); } } public function exhumeClassLikeStorage(string $fq_classlike_name, string $file_path): void { $file_contents = $this->file_provider->getContents($file_path); $storage = $this->classlike_storage_provider->exhume( $fq_classlike_name, $file_path, $file_contents ); if ($storage->is_trait) { $this->classlikes->addFullyQualifiedTraitName($storage->name, $file_path); } elseif ($storage->is_interface) { $this->classlikes->addFullyQualifiedInterfaceName($storage->name, $file_path); } else { $this->classlikes->addFullyQualifiedClassName($storage->name, $file_path); } } public static function getPsalmTypeFromReflection(?\ReflectionType $type) : Type\Union { return \Psalm\Internal\Codebase\Reflection::getPsalmTypeFromReflectionType($type); } public function createFileStorageForPath(string $file_path): FileStorage { return $this->file_storage_provider->create($file_path); } /** * @return array<int, CodeLocation> */ public function findReferencesToSymbol(string $symbol): array { if (!$this->collect_locations) { throw new \UnexpectedValueException('Should not be checking references'); } if (strpos($symbol, '::$') !== false) { return $this->findReferencesToProperty($symbol); } if (strpos($symbol, '::') !== false) { return $this->findReferencesToMethod($symbol); } return $this->findReferencesToClassLike($symbol); } /** * @return array<int, CodeLocation> */ public function findReferencesToMethod(string $method_id): array { return $this->file_reference_provider->getClassMethodLocations(strtolower($method_id)); } /** * @return array<int, CodeLocation> */ public function findReferencesToProperty(string $property_id): array { [$fq_class_name, $property_name] = explode('::', $property_id); return $this->file_reference_provider->getClassPropertyLocations( strtolower($fq_class_name) . '::' . $property_name ); } /** * @return CodeLocation[] * * @psalm-return array<int, CodeLocation> */ public function findReferencesToClassLike(string $fq_class_name): array { $fq_class_name_lc = strtolower($fq_class_name); $locations = $this->file_reference_provider->getClassLocations($fq_class_name_lc); if (isset($this->use_referencing_locations[$fq_class_name_lc])) { $locations = array_merge($locations, $this->use_referencing_locations[$fq_class_name_lc]); } return $locations; } public function getClosureStorage(string $file_path, string $closure_id): Storage\FunctionStorage { $file_storage = $this->file_storage_provider->get($file_path); // closures can be returned here if (isset($file_storage->functions[$closure_id])) { return $file_storage->functions[$closure_id]; } throw new \UnexpectedValueException( 'Expecting ' . $closure_id . ' to have storage in ' . $file_path ); } public function addGlobalConstantType(string $const_id, Type\Union $type): void { self::$stubbed_constants[$const_id] = $type; } public function getStubbedConstantType(string $const_id): ?Type\Union { return isset(self::$stubbed_constants[$const_id]) ? 
self::$stubbed_constants[$const_id] : null; } /** * @return array<string, Type\Union> */ public function getAllStubbedConstants(): array { return self::$stubbed_constants; } public function fileExists(string $file_path): bool { return $this->file_provider->fileExists($file_path); } /** * Check whether a class/interface exists */ public function classOrInterfaceExists( string $fq_class_name, ?CodeLocation $code_location = null, ?string $calling_fq_class_name = null, ?string $calling_method_id = null ): bool { return $this->classlikes->classOrInterfaceExists( $fq_class_name, $code_location, $calling_fq_class_name, $calling_method_id ); } public function classExtendsOrImplements(string $fq_class_name, string $possible_parent): bool { return $this->classlikes->classExtends($fq_class_name, $possible_parent) || $this->classlikes->classImplements($fq_class_name, $possible_parent); } /** * Determine whether or not a given class exists */ public function classExists( string $fq_class_name, ?CodeLocation $code_location = null, ?string $calling_fq_class_name = null, ?string $calling_method_id = null ): bool { return $this->classlikes->classExists( $fq_class_name, $code_location, $calling_fq_class_name, $calling_method_id ); } /** * Determine whether or not a class extends a parent * * @throws \Psalm\Exception\UnpopulatedClasslikeException when called on unpopulated class * @throws \InvalidArgumentException when class does not exist */ public function classExtends(string $fq_class_name, string $possible_parent): bool { return $this->classlikes->classExtends($fq_class_name, $possible_parent, true); } /** * Check whether a class implements an interface */ public function classImplements(string $fq_class_name, string $interface): bool { return $this->classlikes->classImplements($fq_class_name, $interface); } public function interfaceExists( string $fq_interface_name, ?CodeLocation $code_location = null, ?string $calling_fq_class_name = null, ?string $calling_method_id = null ): bool { return $this->classlikes->interfaceExists( $fq_interface_name, $code_location, $calling_fq_class_name, $calling_method_id ); } public function interfaceExtends(string $interface_name, string $possible_parent): bool { return $this->classlikes->interfaceExtends($interface_name, $possible_parent); } /** * @return array<string, string> all interfaces extended by $interface_name */ public function getParentInterfaces(string $fq_interface_name): array { return $this->classlikes->getParentInterfaces( $this->classlikes->getUnAliasedName($fq_interface_name) ); } /** * Determine whether or not a class has the correct casing */ public function classHasCorrectCasing(string $fq_class_name): bool { return $this->classlikes->classHasCorrectCasing($fq_class_name); } public function interfaceHasCorrectCasing(string $fq_interface_name): bool { return $this->classlikes->interfaceHasCorrectCasing($fq_interface_name); } public function traitHasCorrectCase(string $fq_trait_name): bool { return $this->classlikes->traitHasCorrectCase($fq_trait_name); } /** * Given a function id, return the function like storage for * a method, closure, or function. 
* * @param non-empty-string $function_id * * @return Storage\FunctionStorage|Storage\MethodStorage */ public function getFunctionLikeStorage( StatementsAnalyzer $statements_analyzer, string $function_id ): FunctionLikeStorage { $doesMethodExist = \Psalm\Internal\MethodIdentifier::isValidMethodIdReference($function_id) && $this->methodExists($function_id); if ($doesMethodExist) { $method_id = \Psalm\Internal\MethodIdentifier::wrap($function_id); $declaring_method_id = $this->methods->getDeclaringMethodId($method_id); if (!$declaring_method_id) { throw new \UnexpectedValueException('Declaring method for ' . $method_id . ' cannot be found'); } return $this->methods->getStorage($declaring_method_id); } return $this->functions->getStorage($statements_analyzer, strtolower($function_id)); } /** * Whether or not a given method exists * * @param string|\Psalm\Internal\MethodIdentifier $method_id * @param string|\Psalm\Internal\MethodIdentifier|null $calling_method_id * @return bool */ public function methodExists( $method_id, ?CodeLocation $code_location = null, $calling_method_id = null, ?string $file_path = null ): bool { return $this->methods->methodExists( Internal\MethodIdentifier::wrap($method_id), is_string($calling_method_id) ? strtolower($calling_method_id) : strtolower((string) $calling_method_id), $code_location, null, $file_path ); } /** * @param string|\Psalm\Internal\MethodIdentifier $method_id * * @return array<int, \Psalm\Storage\FunctionLikeParameter> */ public function getMethodParams($method_id): array { return $this->methods->getMethodParams(Internal\MethodIdentifier::wrap($method_id)); } /** * @param string|\Psalm\Internal\MethodIdentifier $method_id * */ public function isVariadic($method_id): bool { return $this->methods->isVariadic(Internal\MethodIdentifier::wrap($method_id)); } /** * @param string|\Psalm\Internal\MethodIdentifier $method_id * @param list<PhpParser\Node\Arg> $call_args * */ public function getMethodReturnType($method_id, ?string &$self_class, array $call_args = []): ?Type\Union { return $this->methods->getMethodReturnType( Internal\MethodIdentifier::wrap($method_id), $self_class, null, $call_args ); } /** * @param string|\Psalm\Internal\MethodIdentifier $method_id * */ public function getMethodReturnsByRef($method_id): bool { return $this->methods->getMethodReturnsByRef(Internal\MethodIdentifier::wrap($method_id)); } /** * @param string|\Psalm\Internal\MethodIdentifier $method_id * @param CodeLocation|null $defined_location * */ public function getMethodReturnTypeLocation( $method_id, CodeLocation &$defined_location = null ): ?CodeLocation { return $this->methods->getMethodReturnTypeLocation( Internal\MethodIdentifier::wrap($method_id), $defined_location ); } /** * @param string|\Psalm\Internal\MethodIdentifier $method_id * */ public function getDeclaringMethodId($method_id): ?string { $new_method_id = $this->methods->getDeclaringMethodId(Internal\MethodIdentifier::wrap($method_id)); return $new_method_id ? (string) $new_method_id : null; } /** * Get the class this method appears in (vs is declared in, which could give a trait) * * @param string|\Psalm\Internal\MethodIdentifier $method_id * */ public function getAppearingMethodId($method_id): ?string { $new_method_id = $this->methods->getAppearingMethodId(Internal\MethodIdentifier::wrap($method_id)); return $new_method_id ? 
(string) $new_method_id : null; } /** * @param string|\Psalm\Internal\MethodIdentifier $method_id * * @return array<string, Internal\MethodIdentifier> */ public function getOverriddenMethodIds($method_id): array { return $this->methods->getOverriddenMethodIds(Internal\MethodIdentifier::wrap($method_id)); } /** * @param string|\Psalm\Internal\MethodIdentifier $method_id * */ public function getCasedMethodId($method_id): string { return $this->methods->getCasedMethodId(Internal\MethodIdentifier::wrap($method_id)); } public function invalidateInformationForFile(string $file_path): void { $this->scanner->removeFile($file_path); try { $file_storage = $this->file_storage_provider->get($file_path); } catch (\InvalidArgumentException $e) { return; } foreach ($file_storage->classlikes_in_file as $fq_classlike_name) { $this->classlike_storage_provider->remove($fq_classlike_name); $this->classlikes->removeClassLike($fq_classlike_name); } $this->file_storage_provider->remove($file_path); } public function getSymbolInformation(string $file_path, string $symbol): ?string { if (\is_numeric($symbol[0])) { return \preg_replace('/^[^:]*:/', '', $symbol); } try { if (strpos($symbol, '::')) { if (strpos($symbol, '()')) { $symbol = substr($symbol, 0, -2); /** @psalm-suppress ArgumentTypeCoercion */ $method_id = new \Psalm\Internal\MethodIdentifier(...explode('::', $symbol)); $declaring_method_id = $this->methods->getDeclaringMethodId($method_id); if (!$declaring_method_id) { return null; } $storage = $this->methods->getStorage($declaring_method_id); return '<?php ' . $storage->getSignature(true); } [, $symbol_name] = explode('::', $symbol); if (strpos($symbol, '$') !== false) { $storage = $this->properties->getStorage($symbol); return '<?php ' . $storage->getInfo() . ' ' . $symbol_name; } [$fq_classlike_name, $const_name] = explode('::', $symbol); $class_constants = $this->classlikes->getConstantsForClass( $fq_classlike_name, \ReflectionProperty::IS_PRIVATE ); if (!isset($class_constants[$const_name])) { return null; } return '<?php ' . $const_name; } if (strpos($symbol, '()')) { $function_id = strtolower(substr($symbol, 0, -2)); $file_storage = $this->file_storage_provider->get($file_path); if (isset($file_storage->functions[$function_id])) { $function_storage = $file_storage->functions[$function_id]; return '<?php ' . $function_storage->getSignature(true); } if (!$function_id) { return null; } $function = $this->functions->getStorage(null, $function_id); return '<?php ' . $function->getSignature(true); } $storage = $this->classlike_storage_provider->get($symbol); return '<?php ' . ($storage->abstract ? 'abstract ' : '') . 'class ' . 
$storage->name; } catch (\Exception $e) { error_log($e->getMessage()); return null; } } public function getSymbolLocation(string $file_path, string $symbol): ?CodeLocation { if (\is_numeric($symbol[0])) { $symbol = \preg_replace('/:.*/', '', $symbol); $symbol_parts = explode('-', $symbol); $file_contents = $this->getFileContents($file_path); return new CodeLocation\Raw( $file_contents, $file_path, $this->config->shortenFileName($file_path), (int) $symbol_parts[0], (int) $symbol_parts[1] ); } try { if (strpos($symbol, '::')) { if (strpos($symbol, '()')) { $symbol = substr($symbol, 0, -2); /** @psalm-suppress ArgumentTypeCoercion */ $method_id = new \Psalm\Internal\MethodIdentifier(...explode('::', $symbol)); $declaring_method_id = $this->methods->getDeclaringMethodId($method_id); if (!$declaring_method_id) { return null; } $storage = $this->methods->getStorage($declaring_method_id); return $storage->location; } if (strpos($symbol, '$') !== false) { $storage = $this->properties->getStorage($symbol); return $storage->location; } [$fq_classlike_name, $const_name] = explode('::', $symbol); $class_constants = $this->classlikes->getConstantsForClass( $fq_classlike_name, \ReflectionProperty::IS_PRIVATE ); if (!isset($class_constants[$const_name])) { return null; } return $class_constants[$const_name]->location; } if (strpos($symbol, '()')) { $file_storage = $this->file_storage_provider->get($file_path); $function_id = strtolower(substr($symbol, 0, -2)); if (isset($file_storage->functions[$function_id])) { return $file_storage->functions[$function_id]->location; } if (!$function_id) { return null; } $function = $this->functions->getStorage(null, $function_id); return $function->location; } $storage = $this->classlike_storage_provider->get($symbol); return $storage->location; } catch (\UnexpectedValueException $e) { error_log($e->getMessage()); return null; } catch (\InvalidArgumentException $e) { return null; } } /** * @return array{0: string, 1: Range}|null */ public function getReferenceAtPosition(string $file_path, Position $position): ?array { $is_open = $this->file_provider->isOpen($file_path); if (!$is_open) { throw new \Psalm\Exception\UnanalyzedFileException($file_path . ' is not open'); } $file_contents = $this->getFileContents($file_path); $offset = $position->toOffset($file_contents); [$reference_map, $type_map] = $this->analyzer->getMapsForFile($file_path); $reference = null; if (!$reference_map && !$type_map) { return null; } $reference_start_pos = null; $reference_end_pos = null; ksort($reference_map); foreach ($reference_map as $start_pos => [$end_pos, $possible_reference]) { if ($offset < $start_pos) { break; } if ($offset > $end_pos) { continue; } $reference_start_pos = $start_pos; $reference_end_pos = $end_pos; $reference = $possible_reference; } if ($reference === null || $reference_start_pos === null || $reference_end_pos === null) { return null; } $range = new Range( self::getPositionFromOffset($reference_start_pos, $file_contents), self::getPositionFromOffset($reference_end_pos, $file_contents) ); return [$reference, $range]; } /** * @return array{0: non-empty-string, 1: int, 2: Range}|null */ public function getFunctionArgumentAtPosition(string $file_path, Position $position): ?array { $is_open = $this->file_provider->isOpen($file_path); if (!$is_open) { throw new \Psalm\Exception\UnanalyzedFileException($file_path . 
' is not open'); } $file_contents = $this->getFileContents($file_path); $offset = $position->toOffset($file_contents); [, , $argument_map] = $this->analyzer->getMapsForFile($file_path); $reference = null; $argument_number = null; if (!$argument_map) { return null; } $start_pos = null; $end_pos = null; ksort($argument_map); foreach ($argument_map as $start_pos => [$end_pos, $possible_reference, $possible_argument_number]) { if ($offset < $start_pos) { break; } if ($offset > $end_pos) { continue; } $reference = $possible_reference; $argument_number = $possible_argument_number; } if ($reference === null || $start_pos === null || $end_pos === null || $argument_number === null) { return null; } $range = new Range( self::getPositionFromOffset($start_pos, $file_contents), self::getPositionFromOffset($end_pos, $file_contents) ); return [$reference, $argument_number, $range]; } /** * @param non-empty-string $function_symbol */ public function getSignatureInformation(string $function_symbol) : ?\LanguageServerProtocol\SignatureInformation { if (strpos($function_symbol, '::') !== false) { /** @psalm-suppress ArgumentTypeCoercion */ $method_id = new \Psalm\Internal\MethodIdentifier(...explode('::', $function_symbol)); $declaring_method_id = $this->methods->getDeclaringMethodId($method_id); if ($declaring_method_id === null) { return null; } $method_storage = $this->methods->getStorage($declaring_method_id); $params = $method_storage->params; } else { try { $function_storage = $this->functions->getStorage(null, strtolower($function_symbol)); $params = $function_storage->params; } catch (\Exception $exception) { if (InternalCallMapHandler::inCallMap($function_symbol)) { $callables = InternalCallMapHandler::getCallablesFromCallMap($function_symbol); if (!$callables || !$callables[0]->params) { return null; } $params = $callables[0]->params; } else { return null; } } } $signature_label = '('; $parameters = []; foreach ($params as $i => $param) { $parameter_label = ($param->type ?: 'mixed') . ' $' . $param->name; $parameters[] = new \LanguageServerProtocol\ParameterInformation([ strlen($signature_label), strlen($signature_label) + strlen($parameter_label), ]); $signature_label .= $parameter_label; if ($i < (count($params) - 1)) { $signature_label .= ', '; } } $signature_label .= ')'; return new \LanguageServerProtocol\SignatureInformation( $signature_label, $parameters ); } /** * @return array{0: string, 1: '->'|'::'|'symbol', 2: int}|null */ public function getCompletionDataAtPosition(string $file_path, Position $position): ?array { $is_open = $this->file_provider->isOpen($file_path); if (!$is_open) { throw new \Psalm\Exception\UnanalyzedFileException($file_path . ' is not open'); } $file_contents = $this->getFileContents($file_path); $offset = $position->toOffset($file_contents); [$reference_map, $type_map] = $this->analyzer->getMapsForFile($file_path); if (!$reference_map && !$type_map) { return null; } krsort($type_map); foreach ($type_map as $start_pos => [$end_pos_excluding_whitespace, $possible_type]) { if ($offset < $start_pos) { continue; } $num_whitespace_bytes = preg_match('/\G\s+/', $file_contents, $matches, 0, $end_pos_excluding_whitespace) ? 
strlen($matches[0]) : 0; $end_pos = $end_pos_excluding_whitespace + $num_whitespace_bytes; if ($offset - $end_pos === 2 || $offset - $end_pos === 3) { $candidate_gap = substr($file_contents, $end_pos, 2); if ($candidate_gap === '->' || $candidate_gap === '::') { $gap = $candidate_gap; $recent_type = $possible_type; if ($recent_type === 'mixed') { return null; } return [$recent_type, $gap, $offset]; } } } foreach ($reference_map as $start_pos => [$end_pos, $possible_reference]) { if ($offset < $start_pos || $possible_reference[0] !== '*') { continue; } if ($offset - $end_pos === 0) { $recent_type = $possible_reference; return [$recent_type, 'symbol', $offset]; } } return null; } /** * @return list<\LanguageServerProtocol\CompletionItem> */ public function getCompletionItemsForClassishThing(string $type_string, string $gap) : array { $instance_completion_items = []; $static_completion_items = []; $type = Type::parseString($type_string); foreach ($type->getAtomicTypes() as $atomic_type) { if ($atomic_type instanceof Type\Atomic\TNamedObject) { try { $class_storage = $this->classlike_storage_provider->get($atomic_type->value); foreach ($class_storage->appearing_method_ids as $declaring_method_id) { $method_storage = $this->methods->getStorage($declaring_method_id); $completion_item = new \LanguageServerProtocol\CompletionItem( $method_storage->cased_name, \LanguageServerProtocol\CompletionItemKind::METHOD, (string)$method_storage, null, (string)$method_storage->visibility, $method_storage->cased_name, $method_storage->cased_name . (count($method_storage->params) !== 0 ? '($0)' : '()'), null, null, new Command('Trigger parameter hints', 'editor.action.triggerParameterHints') ); $completion_item->insertTextFormat = \LanguageServerProtocol\InsertTextFormat::SNIPPET; if ($method_storage->is_static) { $static_completion_items[] = $completion_item; } else { $instance_completion_items[] = $completion_item; } } foreach ($class_storage->declaring_property_ids as $property_name => $declaring_class) { $property_storage = $this->properties->getStorage( $declaring_class . '::$' . $property_name ); $completion_item = new \LanguageServerProtocol\CompletionItem( '$' . $property_name, \LanguageServerProtocol\CompletionItemKind::PROPERTY, $property_storage->getInfo(), null, (string)$property_storage->visibility, $property_name, ($gap === '::' ? '$' : '') . $property_name ); if ($property_storage->is_static) { $static_completion_items[] = $completion_item; } else { $instance_completion_items[] = $completion_item; } } foreach ($class_storage->constants as $const_name => $_) { $static_completion_items[] = new \LanguageServerProtocol\CompletionItem( $const_name, \LanguageServerProtocol\CompletionItemKind::VARIABLE, 'const ' . 
$const_name, null, null, $const_name, $const_name ); } } catch (\Exception $e) { error_log($e->getMessage()); continue; } } } if ($gap === '->') { $completion_items = $instance_completion_items; } else { $completion_items = array_merge( $instance_completion_items, $static_completion_items ); } return $completion_items; } /** * @return list<\LanguageServerProtocol\CompletionItem> */ public function getCompletionItemsForPartialSymbol( string $type_string, int $offset, string $file_path ) : array { $matching_classlike_names = $this->classlikes->getMatchingClassLikeNames($type_string); $completion_items = []; $file_storage = $this->file_storage_provider->get($file_path); $aliases = null; foreach ($file_storage->classlikes_in_file as $fq_class_name => $_) { try { $class_storage = $this->classlike_storage_provider->get($fq_class_name); } catch (\Exception $e) { continue; } if (!$class_storage->stmt_location) { continue; } if ($offset > $class_storage->stmt_location->raw_file_start && $offset < $class_storage->stmt_location->raw_file_end ) { $aliases = $class_storage->aliases; break; } } if (!$aliases) { foreach ($file_storage->namespace_aliases as $namespace_start => $namespace_aliases) { if ($namespace_start < $offset) { $aliases = $namespace_aliases; break; } } if (!$aliases) { $aliases = $file_storage->aliases; } } foreach ($matching_classlike_names as $fq_class_name) { $extra_edits = []; $insertion_text = Type::getStringFromFQCLN( $fq_class_name, $aliases && $aliases->namespace ? $aliases->namespace : null, $aliases ? $aliases->uses_flipped : [], null ); if ($aliases && $aliases->namespace && $insertion_text === '\\' . $fq_class_name && $aliases->namespace_first_stmt_start ) { $file_contents = $this->getFileContents($file_path); $class_name = \preg_replace('/^.*\\\/', '', $fq_class_name); if ($aliases->uses_end) { $position = self::getPositionFromOffset($aliases->uses_end, $file_contents); $extra_edits[] = new \LanguageServerProtocol\TextEdit( new Range( $position, $position ), "\n" . 'use ' . $fq_class_name . ';' ); } else { $position = self::getPositionFromOffset($aliases->namespace_first_stmt_start, $file_contents); $extra_edits[] = new \LanguageServerProtocol\TextEdit( new Range( $position, $position ), 'use ' . $fq_class_name . ';' . "\n" . "\n" ); } $insertion_text = $class_name; } $completion_items[] = new \LanguageServerProtocol\CompletionItem( $fq_class_name, \LanguageServerProtocol\CompletionItemKind::CLASS_, null, null, null, $fq_class_name, $insertion_text, null, $extra_edits ); } return $completion_items; } private static function getPositionFromOffset(int $offset, string $file_contents) : Position { $file_contents = substr($file_contents, 0, $offset); $before_newline_count = strrpos($file_contents, "\n", $offset - strlen($file_contents)); return new Position( substr_count($file_contents, "\n"), $offset - (int)$before_newline_count - 1 ); } public function addTemporaryFileChanges(string $file_path, string $new_content): void { $this->file_provider->addTemporaryFileChanges($file_path, $new_content); } public function removeTemporaryFileChanges(string $file_path): void { $this->file_provider->removeTemporaryFileChanges($file_path); } /** * Checks if type is a subtype of other * * Given two types, checks if `$input_type` is a subtype of `$container_type`. 
* If you consider `Type\Union` as a set of types, this will tell you * if `$input_type` is fully contained in `$container_type`, * * $input_type ⊆ $container_type * * Useful for emitting issues like InvalidArgument, where argument at the call site * should be a subset of the function parameter type. */ public function isTypeContainedByType( Type\Union $input_type, Type\Union $container_type ): bool { return UnionTypeComparator::isContainedBy($this, $input_type, $container_type); } /** * Checks if type has any part that is a subtype of other * * Given two types, checks if *any part* of `$input_type` is a subtype of `$container_type`. * If you consider `Type\Union` as a set of types, this will tell you if intersection * of `$input_type` with `$container_type` is not empty. * * $input_type ∩ $container_type ≠ ∅ , e.g. they are not disjoint. * * Useful for emitting issues like PossiblyInvalidArgument, where argument at the call * site should be a subtype of the function parameter type, but it's has some types that are * not a subtype of the required type. */ public function canTypeBeContainedByType( Type\Union $input_type, Type\Union $container_type ): bool { return UnionTypeComparator::canBeContainedBy($this, $input_type, $container_type); } /** * Extracts key and value types from a traversable object (or iterable) * * Given an iterable type (*but not TArray*) returns a tuple of it's key/value types. * First element of the tuple holds key type, second has the value type. * * Example: * ```php * $codebase->getKeyValueParamsForTraversableObject(Type::parseString('iterable<int,string>')) * // returns [Union(TInt), Union(TString)] * ``` * * @return array{Type\Union,Type\Union} */ public function getKeyValueParamsForTraversableObject(Type\Atomic $type): array { $key_type = null; $value_type = null; ForeachAnalyzer::getKeyValueParamsForTraversableObject($type, $this, $key_type, $value_type); return [ $key_type ?? Type::getMixed(), $value_type ?? Type::getMixed(), ]; } /** * @param array<string, mixed> $phantom_classes * @psalm-suppress PossiblyUnusedMethod part of the public API */ public function queueClassLikeForScanning( string $fq_classlike_name, bool $analyze_too = false, bool $store_failure = true, array $phantom_classes = [] ): void { $this->scanner->queueClassLikeForScanning($fq_classlike_name, $analyze_too, $store_failure, $phantom_classes); } /** * @param array<string> $taints * * @psalm-suppress PossiblyUnusedMethod */ public function addTaintSource( Type\Union $expr_type, string $taint_id, array $taints = \Psalm\Type\TaintKindGroup::ALL_INPUT, ?CodeLocation $code_location = null ) : void { if (!$this->taint_flow_graph) { return; } $source = new \Psalm\Internal\DataFlow\TaintSource( $taint_id, $taint_id, $code_location, null, $taints ); $this->taint_flow_graph->addSource($source); $expr_type->parent_nodes = [ $source->id => $source, ]; } /** * @param array<string> $taints * * @psalm-suppress PossiblyUnusedMethod */ public function addTaintSink( string $taint_id, array $taints = \Psalm\Type\TaintKindGroup::ALL_INPUT, ?CodeLocation $code_location = null ) : void { if (!$this->taint_flow_graph) { return; } $sink = new \Psalm\Internal\DataFlow\TaintSink( $taint_id, $taint_id, $code_location, null, $taints ); $this->taint_flow_graph->addSink($sink); } }
1
9,880
As mentioned below, I'm not sure why this is only allowing refs that are tagged with `*` at the start!
vimeo-psalm
php
@@ -59,4 +59,19 @@ class BackgroundRepository implements Repository { public void setFilter(CombinedAppsFilter filter) { delegatedRepository.setFilter(filter); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + BackgroundRepository that = (BackgroundRepository) o; + + return delegatedRepository != null ? delegatedRepository.equals(that.delegatedRepository) : that.delegatedRepository == null; + } + + @Override + public int hashCode() { + return delegatedRepository != null ? delegatedRepository.hashCode() : 0; + } }
1
/* * Copyright (C) 2015-2017 PÂRIS Quentin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ package org.phoenicis.apps; import org.phoenicis.apps.dto.CategoryDTO; import org.phoenicis.apps.dto.ScriptDTO; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.function.Consumer; class BackgroundRepository implements Repository { private final Repository delegatedRepository; private final ExecutorService executorService; BackgroundRepository(Repository delegatedRepository, ExecutorService executorService) { this.delegatedRepository = delegatedRepository; this.executorService = executorService; } @Override public List<CategoryDTO> fetchInstallableApplications() { throw new UnsupportedOperationException("The background apps manager is asynchronous"); } @Override public void onDelete() { this.delegatedRepository.onDelete(); } @Override public void fetchInstallableApplications(Consumer<List<CategoryDTO>> callback, Consumer<Exception> errorCallback) { executorService.submit(() -> delegatedRepository.fetchInstallableApplications(callback, errorCallback)); } @Override public void getScript(List<String> path, Consumer<ScriptDTO> callback, Consumer<Exception> errorCallback) { executorService.submit(() -> delegatedRepository.getScript(path, callback, errorCallback)); } @Override public void setFilter(CombinedAppsFilter filter) { delegatedRepository.setFilter(filter); } }
1
9,394
Don't forget the {} braces, and please use EqualsBuilder and HashCodeBuilder where possible
PhoenicisOrg-phoenicis
java
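The review above asks for braces on the single-statement if blocks and for Commons Lang builders instead of hand-rolled null checks. A minimal sketch of how the proposed equals/hashCode pair could look; it assumes Apache Commons Lang 3 (org.apache.commons.lang3.builder) is available to the project, which this record does not confirm:

import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;

@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    BackgroundRepository that = (BackgroundRepository) o;
    // EqualsBuilder handles null fields, so no explicit null check is needed.
    return new EqualsBuilder()
            .append(delegatedRepository, that.delegatedRepository)
            .isEquals();
}

@Override
public int hashCode() {
    // HashCodeBuilder likewise tolerates a null delegatedRepository.
    return new HashCodeBuilder()
            .append(delegatedRepository)
            .toHashCode();
}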
@@ -56,15 +56,17 @@ func TestPinger_Provider_Consumer_Ping_Flow(t *testing.T) { // Create provider's UDP proxy listener to which pinger should hand off connection. // In real world this proxy represents started VPN service (WireGuard or OpenVPN). - proxyBuf := make([]byte, 1024) + ch := make(chan string) go func() { addr, _ := net.ResolveUDPAddr("udp4", fmt.Sprintf("127.0.0.1:%d", providerProxyPort)) conn, err := net.ListenUDP("udp4", addr) assert.NoError(t, err) for { - _, err := conn.Read(proxyBuf) + proxyBuf := make([]byte, 1024) + n, err := conn.Read(proxyBuf) assert.NoError(t, err) + ch <- string(proxyBuf[:n]) } }()
1
/* * Copyright (C) 2020 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package traversal import ( "fmt" "net" "testing" "time" "github.com/stretchr/testify/assert" ) func TestPinger_Start_Stop(t *testing.T) { pinger := newPinger(&PingConfig{ Interval: 1 * time.Millisecond, Timeout: 10 * time.Millisecond, }) go pinger.Start() // Make sure multiple stops doesn't crash. pinger.Stop() pinger.Stop() pinger.Stop() } func TestPinger_Provider_Consumer_Ping_Flow(t *testing.T) { providerProxyPort := 51199 providerPort := 51200 consumerPort := 51201 pingConfig := &PingConfig{ Interval: 10 * time.Millisecond, Timeout: 100 * time.Millisecond, } pinger := newPinger(pingConfig) go pinger.Start() defer pinger.Stop() // Create provider's UDP proxy listener to which pinger should hand off connection. // In real world this proxy represents started VPN service (WireGuard or OpenVPN). proxyBuf := make([]byte, 1024) go func() { addr, _ := net.ResolveUDPAddr("udp4", fmt.Sprintf("127.0.0.1:%d", providerProxyPort)) conn, err := net.ListenUDP("udp4", addr) assert.NoError(t, err) for { _, err := conn.Read(proxyBuf) assert.NoError(t, err) } }() // Start pinging consumer. go func() { pinger.BindServicePort("wg1", providerProxyPort) p := Params{ LocalPorts: []int{providerPort}, RemotePorts: []int{consumerPort}, IP: "127.0.0.1", ProxyPortMappingKey: "wg1", } pinger.PingTarget(p) }() // Wait some time to simulate real network delay conditions. time.Sleep(5 * pingConfig.Interval) // Start pinging provider. stop := make(chan struct{}) defer close(stop) _, _, err := pinger.PingProvider("127.0.0.1", []int{consumerPort}, []int{providerPort}, consumerPort+1) assert.NoError(t, err) assert.Contains(t, string(proxyBuf), fmt.Sprintf("continuously pinging to 127.0.0.1:%d", providerPort)) } func TestPinger_PingProvider_Timeout(t *testing.T) { pinger := newPinger(&PingConfig{ Interval: 1 * time.Millisecond, Timeout: 5 * time.Millisecond, }) providerPort := 51205 consumerPort := 51206 go func() { addr, _ := net.ResolveUDPAddr("udp4", fmt.Sprintf("127.0.0.1:%d", providerPort)) conn, err := net.ListenUDP("udp4", addr) assert.NoError(t, err) defer conn.Close() select {} }() stop := make(chan struct{}) defer close(stop) _, _, err := pinger.PingProvider("127.0.0.1", []int{consumerPort}, []int{providerPort}, 0) assert.Error(t, errNATPunchAttemptTimedOut, err) } func newPinger(config *PingConfig) NATPinger { return NewPinger(config, &mockPublisher{}) } type mockPublisher struct { } func (p mockPublisher) Publish(topic string, data interface{}) { }
1
15,878
Allocate the buffer once, outside the loop.
mysteriumnetwork-node
go
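The reviewer's point above is that the per-iteration make([]byte, 1024) added by the patch can be hoisted: a single buffer may be reused across conn.Read calls as long as only the first n bytes are converted before the next read, because string(proxyBuf[:n]) copies the data. A rough sketch of the proxy goroutine with the buffer allocated once, using the same names (t, ch, providerProxyPort) as the test above:

go func() {
    addr, _ := net.ResolveUDPAddr("udp4", fmt.Sprintf("127.0.0.1:%d", providerProxyPort))
    conn, err := net.ListenUDP("udp4", addr)
    assert.NoError(t, err)

    // Allocate the read buffer once; conn.Read overwrites it on every iteration.
    proxyBuf := make([]byte, 1024)
    for {
        n, err := conn.Read(proxyBuf)
        assert.NoError(t, err)
        // Converting to string copies the bytes, so reusing proxyBuf is safe here.
        ch <- string(proxyBuf[:n])
    }
}()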
@@ -1262,3 +1262,11 @@ TEST (network, filter) node1.network.inbound (keepalive, std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_EQ (1, node1.stats.count (nano::stat::type::message, nano::stat::detail::invalid_network)); } + +TEST (network, fill_keepalive_self) +{ + nano::system system{ 2 }; + std::array<nano::endpoint, 8> target; + system.nodes[0]->network.fill_keepalive_self (target); + ASSERT_TRUE (target[2].port () == system.nodes[1]->network.port); +}
1
#include <nano/node/transport/udp.hpp> #include <nano/test_common/network.hpp> #include <nano/test_common/system.hpp> #include <nano/test_common/testutil.hpp> #include <gtest/gtest.h> #include <boost/iostreams/stream_buffer.hpp> #include <boost/range/join.hpp> #include <boost/thread.hpp> using namespace std::chrono_literals; TEST (network, tcp_connection) { boost::asio::io_context io_ctx; boost::asio::ip::tcp::acceptor acceptor (io_ctx); auto port = nano::get_available_port (); boost::asio::ip::tcp::endpoint endpoint (boost::asio::ip::address_v4::any (), port); acceptor.open (endpoint.protocol ()); acceptor.set_option (boost::asio::ip::tcp::acceptor::reuse_address (true)); acceptor.bind (endpoint); acceptor.listen (); boost::asio::ip::tcp::socket incoming (io_ctx); std::atomic<bool> done1 (false); std::string message1; acceptor.async_accept (incoming, [&done1, &message1] (boost::system::error_code const & ec_a) { if (ec_a) { message1 = ec_a.message (); std::cerr << message1; } done1 = true; }); boost::asio::ip::tcp::socket connector (io_ctx); std::atomic<bool> done2 (false); std::string message2; connector.async_connect (boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v4::loopback (), port), [&done2, &message2] (boost::system::error_code const & ec_a) { if (ec_a) { message2 = ec_a.message (); std::cerr << message2; } done2 = true; }); while (!done1 || !done2) { io_ctx.poll (); } ASSERT_EQ (0, message1.size ()); ASSERT_EQ (0, message2.size ()); } TEST (network, construction) { auto port = nano::get_available_port (); nano::system system; system.add_node (nano::node_config (port, system.logging)); ASSERT_EQ (1, system.nodes.size ()); ASSERT_EQ (port, system.nodes[0]->network.endpoint ().port ()); } TEST (network, self_discard) { nano::node_flags node_flags; node_flags.disable_udp = false; nano::system system (1, nano::transport::transport_type::tcp, node_flags); nano::message_buffer data; data.endpoint = system.nodes[0]->network.endpoint (); ASSERT_EQ (0, system.nodes[0]->stats.count (nano::stat::type::error, nano::stat::detail::bad_sender)); system.nodes[0]->network.udp_channels.receive_action (&data); ASSERT_EQ (1, system.nodes[0]->stats.count (nano::stat::type::error, nano::stat::detail::bad_sender)); } TEST (network, send_node_id_handshake) { nano::node_flags node_flags; node_flags.disable_udp = false; nano::system system; auto node0 = system.add_node (node_flags); ASSERT_EQ (0, node0->network.size ()); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work, node_flags)); node1->start (); system.nodes.push_back (node1); auto initial (node0->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in)); auto initial_node1 (node1->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in)); auto channel (std::make_shared<nano::transport::channel_udp> (node0->network.udp_channels, node1->network.endpoint (), node1->network_params.network.protocol_version)); node0->network.send_keepalive (channel); ASSERT_EQ (0, node0->network.size ()); ASSERT_EQ (0, node1->network.size ()); ASSERT_TIMELY (10s, node1->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in) != initial_node1); ASSERT_TIMELY (10s, node0->network.size () == 0 || node1->network.size () == 1); ASSERT_TIMELY (10s, node0->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in) == 
initial + 2); ASSERT_TIMELY (10s, node0->network.size () == 1 || node1->network.size () == 1); auto list1 (node0->network.list (1)); ASSERT_EQ (node1->network.endpoint (), list1[0]->get_endpoint ()); auto list2 (node1->network.list (1)); ASSERT_EQ (node0->network.endpoint (), list2[0]->get_endpoint ()); node1->stop (); } TEST (network, send_node_id_handshake_tcp) { nano::system system (1); auto node0 (system.nodes[0]); ASSERT_EQ (0, node0->network.size ()); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work)); node1->start (); system.nodes.push_back (node1); auto initial (node0->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in)); auto initial_node1 (node1->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in)); auto initial_keepalive (node0->stats.count (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in)); std::weak_ptr<nano::node> node_w (node0); node0->network.tcp_channels.start_tcp (node1->network.endpoint (), nano::keepalive_tcp_callback (*node1)); ASSERT_EQ (0, node0->network.size ()); ASSERT_EQ (0, node1->network.size ()); ASSERT_TIMELY (10s, node0->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in) >= initial + 2); ASSERT_TIMELY (5s, node1->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in) >= initial_node1 + 2); ASSERT_TIMELY (5s, node0->stats.count (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in) >= initial_keepalive + 2); ASSERT_TIMELY (5s, node1->stats.count (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in) >= initial_keepalive + 2); ASSERT_EQ (1, node0->network.size ()); ASSERT_EQ (1, node1->network.size ()); auto list1 (node0->network.list (1)); ASSERT_EQ (nano::transport::transport_type::tcp, list1[0]->get_type ()); ASSERT_EQ (node1->network.endpoint (), list1[0]->get_endpoint ()); auto list2 (node1->network.list (1)); ASSERT_EQ (nano::transport::transport_type::tcp, list2[0]->get_type ()); ASSERT_EQ (node0->network.endpoint (), list2[0]->get_endpoint ()); node1->stop (); } TEST (network, last_contacted) { nano::system system (1); auto node0 = system.nodes[0]; ASSERT_EQ (0, node0->network.size ()); nano::node_config node1_config (nano::get_available_port (), system.logging); node1_config.tcp_incoming_connections_max = 0; // Prevent ephemeral node1->node0 channel repacement with incoming connection auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::unique_path (), node1_config, system.work)); node1->start (); system.nodes.push_back (node1); auto channel1 = nano::establish_tcp (system, *node1, node0->network.endpoint ()); ASSERT_NE (nullptr, channel1); ASSERT_TIMELY (3s, node0->network.size () == 1); // channel0 is the other side of channel1, same connection different endpoint auto channel0 = node0->network.tcp_channels.find_node_id (node1->node_id.pub); ASSERT_NE (nullptr, channel0); { // check that the endpoints are part of the same connection std::shared_ptr<nano::socket> sock0 = channel0->socket.lock (); std::shared_ptr<nano::socket> sock1 = channel1->socket.lock (); ASSERT_TRUE (sock0->local_endpoint () == sock1->remote_endpoint ()); ASSERT_TRUE (sock1->local_endpoint () == sock0->remote_endpoint ()); } // capture the state before and ensure the clock ticks at least once auto 
timestamp_before_keepalive = channel0->get_last_packet_received (); auto keepalive_count = node0->stats.count (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in); ASSERT_TIMELY (3s, std::chrono::steady_clock::now () > timestamp_before_keepalive); // send 3 keepalives // we need an extra keepalive to handle the race condition between the timestamp set and the counter increment // and we need one more keepalive to handle the possibility that there is a keepalive already in flight when we start the crucial part of the test // it is possible that there could be multiple keepalives in flight but we assume here that there will be no more than one in flight for the purposes of this test node1->network.send_keepalive (channel1); node1->network.send_keepalive (channel1); node1->network.send_keepalive (channel1); ASSERT_TIMELY (3s, node0->stats.count (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in) >= keepalive_count + 3); ASSERT_EQ (node0->network.size (), 1); auto timestamp_after_keepalive = channel0->get_last_packet_received (); ASSERT_GT (timestamp_after_keepalive, timestamp_before_keepalive); } TEST (network, multi_keepalive) { nano::system system (1); auto node0 = system.nodes[0]; ASSERT_EQ (0, node0->network.size ()); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work)); ASSERT_FALSE (node1->init_error ()); node1->start (); system.nodes.push_back (node1); ASSERT_EQ (0, node1->network.size ()); ASSERT_EQ (0, node0->network.size ()); node1->network.tcp_channels.start_tcp (node0->network.endpoint (), nano::keepalive_tcp_callback (*node1)); ASSERT_TIMELY (10s, node0->network.size () == 1 && node0->stats.count (nano::stat::type::message, nano::stat::detail::keepalive) >= 1); auto node2 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work)); ASSERT_FALSE (node2->init_error ()); node2->start (); system.nodes.push_back (node2); node2->network.tcp_channels.start_tcp (node0->network.endpoint (), nano::keepalive_tcp_callback (*node2)); ASSERT_TIMELY (10s, node1->network.size () == 2 && node0->network.size () == 2 && node2->network.size () == 2 && node0->stats.count (nano::stat::type::message, nano::stat::detail::keepalive) >= 2); } TEST (network, send_discarded_publish) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); auto block (std::make_shared<nano::send_block> (1, 1, 2, nano::keypair ().prv, 4, *system.work.generate (nano::root (1)))); { auto transaction (node1.store.tx_begin_read ()); node1.network.flood_block (block); ASSERT_EQ (nano::dev::genesis->hash (), node1.ledger.latest (transaction, nano::dev::genesis_key.pub)); ASSERT_EQ (nano::dev::genesis->hash (), node2.latest (nano::dev::genesis_key.pub)); } ASSERT_TIMELY (10s, node2.stats.count (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in) != 0); auto transaction (node1.store.tx_begin_read ()); ASSERT_EQ (nano::dev::genesis->hash (), node1.ledger.latest (transaction, nano::dev::genesis_key.pub)); ASSERT_EQ (nano::dev::genesis->hash (), node2.latest (nano::dev::genesis_key.pub)); } TEST (network, send_invalid_publish) { nano::system system (2); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); auto block (std::make_shared<nano::send_block> (1, 1, 20, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (nano::root (1)))); { 
auto transaction (node1.store.tx_begin_read ()); node1.network.flood_block (block); ASSERT_EQ (nano::dev::genesis->hash (), node1.ledger.latest (transaction, nano::dev::genesis_key.pub)); ASSERT_EQ (nano::dev::genesis->hash (), node2.latest (nano::dev::genesis_key.pub)); } ASSERT_TIMELY (10s, node2.stats.count (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in) != 0); auto transaction (node1.store.tx_begin_read ()); ASSERT_EQ (nano::dev::genesis->hash (), node1.ledger.latest (transaction, nano::dev::genesis_key.pub)); ASSERT_EQ (nano::dev::genesis->hash (), node2.latest (nano::dev::genesis_key.pub)); } TEST (network, send_valid_confirm_ack) { std::vector<nano::transport::transport_type> types{ nano::transport::transport_type::tcp, nano::transport::transport_type::udp }; for (auto & type : types) { nano::node_flags node_flags; if (type == nano::transport::transport_type::udp) { node_flags.disable_tcp_realtime = true; node_flags.disable_bootstrap_listener = true; node_flags.disable_udp = false; } nano::system system (2, type, node_flags); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); system.wallet (1)->insert_adhoc (key2.prv); nano::block_hash latest1 (node1.latest (nano::dev::genesis_key.pub)); nano::send_block block2 (latest1, key2.pub, 50, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (latest1)); nano::block_hash latest2 (node2.latest (nano::dev::genesis_key.pub)); node1.process_active (std::make_shared<nano::send_block> (block2)); // Keep polling until latest block changes ASSERT_TIMELY (10s, node2.latest (nano::dev::genesis_key.pub) != latest2); // Make sure the balance has decreased after processing the block. 
ASSERT_EQ (50, node2.balance (nano::dev::genesis_key.pub)); } } TEST (network, send_valid_publish) { std::vector<nano::transport::transport_type> types{ nano::transport::transport_type::tcp, nano::transport::transport_type::udp }; for (auto & type : types) { nano::node_flags node_flags; if (type == nano::transport::transport_type::udp) { node_flags.disable_tcp_realtime = true; node_flags.disable_bootstrap_listener = true; node_flags.disable_udp = false; } nano::system system (2, type, node_flags); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); node1.bootstrap_initiator.stop (); node2.bootstrap_initiator.stop (); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key2; system.wallet (1)->insert_adhoc (key2.prv); nano::block_hash latest1 (node1.latest (nano::dev::genesis_key.pub)); nano::send_block block2 (latest1, key2.pub, 50, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (latest1)); auto hash2 (block2.hash ()); nano::block_hash latest2 (node2.latest (nano::dev::genesis_key.pub)); node2.process_active (std::make_shared<nano::send_block> (block2)); ASSERT_TIMELY (10s, node1.stats.count (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in) != 0); ASSERT_NE (hash2, latest2); ASSERT_TIMELY (10s, node2.latest (nano::dev::genesis_key.pub) != latest2); ASSERT_EQ (50, node2.balance (nano::dev::genesis_key.pub)); } } TEST (network, send_insufficient_work_udp) { nano::system system; nano::node_flags node_flags; node_flags.disable_udp = false; auto & node1 = *system.add_node (node_flags); auto & node2 = *system.add_node (node_flags); auto block (std::make_shared<nano::send_block> (0, 1, 20, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, 0)); nano::publish publish{ nano::dev::network_params.network, block }; nano::transport::channel_udp channel (node1.network.udp_channels, node2.network.endpoint (), node1.network_params.network.protocol_version); channel.send (publish, [] (boost::system::error_code const & ec, size_t size) {}); ASSERT_EQ (0, node1.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work)); ASSERT_TIMELY (10s, node2.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work) != 0); ASSERT_EQ (1, node2.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work)); } TEST (network, send_insufficient_work) { nano::system system (2); auto & node1 = *system.nodes[0]; auto & node2 = *system.nodes[1]; // Block zero work auto block1 (std::make_shared<nano::send_block> (0, 1, 20, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, 0)); nano::publish publish1{ nano::dev::network_params.network, block1 }; auto tcp_channel (node1.network.tcp_channels.find_channel (nano::transport::map_endpoint_to_tcp (node2.network.endpoint ()))); tcp_channel->send (publish1, [] (boost::system::error_code const & ec, size_t size) {}); ASSERT_EQ (0, node1.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work)); ASSERT_TIMELY (10s, node2.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work) != 0); ASSERT_EQ (1, node2.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work)); // Legacy block work between epoch_2_recieve & epoch_1 auto block2 (std::make_shared<nano::send_block> (block1->hash (), 1, 20, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, system.work_generate_limited (block1->hash (), node1.network_params.work.epoch_2_receive, node1.network_params.work.epoch_1 - 1))); 
nano::publish publish2{ nano::dev::network_params.network, block2 }; tcp_channel->send (publish2, [] (boost::system::error_code const & ec, size_t size) {}); ASSERT_TIMELY (10s, node2.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work) != 1); ASSERT_EQ (2, node2.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work)); // Legacy block work epoch_1 auto block3 (std::make_shared<nano::send_block> (block2->hash (), 1, 20, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (block2->hash (), node1.network_params.work.epoch_2))); nano::publish publish3{ nano::dev::network_params.network, block3 }; tcp_channel->send (publish3, [] (boost::system::error_code const & ec, size_t size) {}); ASSERT_EQ (0, node2.stats.count (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in)); ASSERT_TIMELY (10s, node2.stats.count (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in) != 0); ASSERT_EQ (1, node2.stats.count (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in)); // State block work epoch_2_recieve auto block4 (std::make_shared<nano::state_block> (nano::dev::genesis_key.pub, block1->hash (), nano::dev::genesis_key.pub, 20, 1, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, system.work_generate_limited (block1->hash (), node1.network_params.work.epoch_2_receive, node1.network_params.work.epoch_1 - 1))); nano::publish publish4{ nano::dev::network_params.network, block4 }; tcp_channel->send (publish4, [] (boost::system::error_code const & ec, size_t size) {}); ASSERT_TIMELY (10s, node2.stats.count (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in) != 0); ASSERT_EQ (1, node2.stats.count (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in)); ASSERT_EQ (2, node2.stats.count (nano::stat::type::error, nano::stat::detail::insufficient_work)); } TEST (receivable_processor, confirm_insufficient_pos) { nano::system system (1); auto & node1 (*system.nodes[0]); auto block1 (std::make_shared<nano::send_block> (nano::dev::genesis->hash (), 0, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, 0)); node1.work_generate_blocking (*block1); ASSERT_EQ (nano::process_result::progress, node1.process (*block1).code); node1.scheduler.activate (nano::dev::genesis_key.pub, node1.store.tx_begin_read ()); nano::keypair key1; auto vote (std::make_shared<nano::vote> (key1.pub, key1.prv, 0, block1)); nano::confirm_ack con1{ nano::dev::network_params.network, vote }; node1.network.inbound (con1, node1.network.udp_channels.create (node1.network.endpoint ())); } TEST (receivable_processor, confirm_sufficient_pos) { nano::system system (1); auto & node1 (*system.nodes[0]); auto block1 (std::make_shared<nano::send_block> (nano::dev::genesis->hash (), 0, 0, nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, 0)); node1.work_generate_blocking (*block1); ASSERT_EQ (nano::process_result::progress, node1.process (*block1).code); node1.scheduler.activate (nano::dev::genesis_key.pub, node1.store.tx_begin_read ()); auto vote (std::make_shared<nano::vote> (nano::dev::genesis_key.pub, nano::dev::genesis_key.prv, 0, block1)); nano::confirm_ack con1{ nano::dev::network_params.network, vote }; node1.network.inbound (con1, node1.network.udp_channels.create (node1.network.endpoint ())); } TEST (receivable_processor, send_with_receive) { std::vector<nano::transport::transport_type> types{ nano::transport::transport_type::tcp, 
nano::transport::transport_type::udp }; for (auto & type : types) { nano::node_flags node_flags; if (type == nano::transport::transport_type::udp) { node_flags.disable_tcp_realtime = true; node_flags.disable_bootstrap_listener = true; node_flags.disable_udp = false; } nano::system system (2, type, node_flags); auto & node1 (*system.nodes[0]); auto & node2 (*system.nodes[1]); auto amount (std::numeric_limits<nano::uint128_t>::max ()); nano::keypair key2; system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::block_hash latest1 (node1.latest (nano::dev::genesis_key.pub)); system.wallet (1)->insert_adhoc (key2.prv); auto block1 (std::make_shared<nano::send_block> (latest1, key2.pub, amount - node1.config.receive_minimum.number (), nano::dev::genesis_key.prv, nano::dev::genesis_key.pub, *system.work.generate (latest1))); ASSERT_EQ (amount, node1.balance (nano::dev::genesis_key.pub)); ASSERT_EQ (0, node1.balance (key2.pub)); ASSERT_EQ (amount, node2.balance (nano::dev::genesis_key.pub)); ASSERT_EQ (0, node2.balance (key2.pub)); node1.process_active (block1); node1.block_processor.flush (); node2.process_active (block1); node2.block_processor.flush (); ASSERT_EQ (amount - node1.config.receive_minimum.number (), node1.balance (nano::dev::genesis_key.pub)); ASSERT_EQ (0, node1.balance (key2.pub)); ASSERT_EQ (amount - node1.config.receive_minimum.number (), node2.balance (nano::dev::genesis_key.pub)); ASSERT_EQ (0, node2.balance (key2.pub)); ASSERT_TIMELY (10s, node1.balance (key2.pub) == node1.config.receive_minimum.number () && node2.balance (key2.pub) == node1.config.receive_minimum.number ()); ASSERT_EQ (amount - node1.config.receive_minimum.number (), node1.balance (nano::dev::genesis_key.pub)); ASSERT_EQ (node1.config.receive_minimum.number (), node1.balance (key2.pub)); ASSERT_EQ (amount - node1.config.receive_minimum.number (), node2.balance (nano::dev::genesis_key.pub)); ASSERT_EQ (node1.config.receive_minimum.number (), node2.balance (key2.pub)); } } TEST (network, receive_weight_change) { nano::system system (2); system.wallet (0)->insert_adhoc (nano::dev::genesis_key.prv); nano::keypair key2; system.wallet (1)->insert_adhoc (key2.prv); { auto transaction (system.nodes[1]->wallets.tx_begin_write ()); system.wallet (1)->store.representative_set (transaction, key2.pub); } ASSERT_NE (nullptr, system.wallet (0)->send_action (nano::dev::genesis_key.pub, key2.pub, system.nodes[0]->config.receive_minimum.number ())); ASSERT_TIMELY (10s, std::all_of (system.nodes.begin (), system.nodes.end (), [&] (std::shared_ptr<nano::node> const & node_a) { return node_a->weight (key2.pub) == system.nodes[0]->config.receive_minimum.number (); })); } TEST (parse_endpoint, valid) { std::string string ("::1:24000"); nano::endpoint endpoint; ASSERT_FALSE (nano::parse_endpoint (string, endpoint)); ASSERT_EQ (boost::asio::ip::address_v6::loopback (), endpoint.address ()); ASSERT_EQ (24000, endpoint.port ()); } TEST (parse_endpoint, invalid_port) { std::string string ("::1:24a00"); nano::endpoint endpoint; ASSERT_TRUE (nano::parse_endpoint (string, endpoint)); } TEST (parse_endpoint, invalid_address) { std::string string ("::q:24000"); nano::endpoint endpoint; ASSERT_TRUE (nano::parse_endpoint (string, endpoint)); } TEST (parse_endpoint, no_address) { std::string string (":24000"); nano::endpoint endpoint; ASSERT_TRUE (nano::parse_endpoint (string, endpoint)); } TEST (parse_endpoint, no_port) { std::string string ("::1:"); nano::endpoint endpoint; ASSERT_TRUE (nano::parse_endpoint (string, endpoint)); } 
TEST (parse_endpoint, no_colon) { std::string string ("::1"); nano::endpoint endpoint; ASSERT_TRUE (nano::parse_endpoint (string, endpoint)); } TEST (network, ipv6) { boost::asio::ip::address_v6 address (boost::asio::ip::make_address_v6 ("::ffff:127.0.0.1")); ASSERT_TRUE (address.is_v4_mapped ()); nano::endpoint endpoint1 (address, 16384); std::vector<uint8_t> bytes1; { nano::vectorstream stream (bytes1); nano::write (stream, address.to_bytes ()); } ASSERT_EQ (16, bytes1.size ()); for (auto i (bytes1.begin ()), n (bytes1.begin () + 10); i != n; ++i) { ASSERT_EQ (0, *i); } ASSERT_EQ (0xff, bytes1[10]); ASSERT_EQ (0xff, bytes1[11]); std::array<uint8_t, 16> bytes2; nano::bufferstream stream (bytes1.data (), bytes1.size ()); auto error (nano::try_read (stream, bytes2)); ASSERT_FALSE (error); nano::endpoint endpoint2 (boost::asio::ip::address_v6 (bytes2), 16384); ASSERT_EQ (endpoint1, endpoint2); } TEST (network, ipv6_from_ipv4) { nano::endpoint endpoint1 (boost::asio::ip::address_v4::loopback (), 16000); ASSERT_TRUE (endpoint1.address ().is_v4 ()); nano::endpoint endpoint2 (boost::asio::ip::address_v6::v4_mapped (endpoint1.address ().to_v4 ()), 16000); ASSERT_TRUE (endpoint2.address ().is_v6 ()); } TEST (network, ipv6_bind_send_ipv4) { boost::asio::io_context io_ctx; auto port1 = nano::get_available_port (); auto port2 = nano::get_available_port (); nano::endpoint endpoint1 (boost::asio::ip::address_v6::any (), port1); nano::endpoint endpoint2 (boost::asio::ip::address_v4::any (), port2); std::array<uint8_t, 16> bytes1; auto finish1 (false); nano::endpoint endpoint3; boost::asio::ip::udp::socket socket1 (io_ctx, endpoint1); socket1.async_receive_from (boost::asio::buffer (bytes1.data (), bytes1.size ()), endpoint3, [&finish1] (boost::system::error_code const & error, size_t size_a) { ASSERT_FALSE (error); ASSERT_EQ (16, size_a); finish1 = true; }); boost::asio::ip::udp::socket socket2 (io_ctx, endpoint2); nano::endpoint endpoint5 (boost::asio::ip::address_v4::loopback (), port1); nano::endpoint endpoint6 (boost::asio::ip::address_v6::v4_mapped (boost::asio::ip::address_v4::loopback ()), port2); socket2.async_send_to (boost::asio::buffer (std::array<uint8_t, 16>{}, 16), endpoint5, [] (boost::system::error_code const & error, size_t size_a) { ASSERT_FALSE (error); ASSERT_EQ (16, size_a); }); auto iterations (0); while (!finish1) { io_ctx.poll (); ++iterations; ASSERT_LT (iterations, 200); } ASSERT_EQ (endpoint6, endpoint3); std::array<uint8_t, 16> bytes2; nano::endpoint endpoint4; socket2.async_receive_from (boost::asio::buffer (bytes2.data (), bytes2.size ()), endpoint4, [] (boost::system::error_code const & error, size_t size_a) { ASSERT_FALSE (!error); ASSERT_EQ (16, size_a); }); socket1.async_send_to (boost::asio::buffer (std::array<uint8_t, 16>{}, 16), endpoint6, [] (boost::system::error_code const & error, size_t size_a) { ASSERT_FALSE (error); ASSERT_EQ (16, size_a); }); } TEST (network, endpoint_bad_fd) { nano::system system (1); system.nodes[0]->stop (); auto endpoint (system.nodes[0]->network.endpoint ()); ASSERT_TRUE (endpoint.address ().is_loopback ()); // The endpoint is invalidated asynchronously ASSERT_TIMELY (10s, system.nodes[0]->network.endpoint ().port () == 0); } TEST (network, reserved_address) { nano::system system (1); // 0 port test ASSERT_TRUE (nano::transport::reserved_address (nano::endpoint (boost::asio::ip::make_address_v6 ("2001::"), 0))); // Valid address test ASSERT_FALSE (nano::transport::reserved_address (nano::endpoint (boost::asio::ip::make_address_v6 
("2001::"), 1))); nano::endpoint loopback (boost::asio::ip::make_address_v6 ("::1"), 1); ASSERT_FALSE (nano::transport::reserved_address (loopback)); nano::endpoint private_network_peer (boost::asio::ip::make_address_v6 ("::ffff:10.0.0.0"), 1); ASSERT_TRUE (nano::transport::reserved_address (private_network_peer, false)); ASSERT_FALSE (nano::transport::reserved_address (private_network_peer, true)); } TEST (network, ipv6_bind_subnetwork) { auto address1 (boost::asio::ip::make_address_v6 ("a41d:b7b2:8298:cf45:672e:bd1a:e7fb:f713")); auto subnet1 (boost::asio::ip::make_network_v6 (address1, 48)); ASSERT_EQ (boost::asio::ip::make_address_v6 ("a41d:b7b2:8298::"), subnet1.network ()); auto address1_subnet (nano::transport::ipv4_address_or_ipv6_subnet (address1)); ASSERT_EQ (subnet1.network (), address1_subnet); // Ipv4 should return initial address auto address2 (boost::asio::ip::make_address_v6 ("::ffff:192.168.1.1")); auto address2_subnet (nano::transport::ipv4_address_or_ipv6_subnet (address2)); ASSERT_EQ (address2, address2_subnet); } TEST (network, network_range_ipv6) { auto address1 (boost::asio::ip::make_address_v6 ("a41d:b7b2:8298:cf45:672e:bd1a:e7fb:f713")); auto subnet1 (boost::asio::ip::make_network_v6 (address1, 58)); ASSERT_EQ (boost::asio::ip::make_address_v6 ("a41d:b7b2:8298:cf40::"), subnet1.network ()); auto address2 (boost::asio::ip::make_address_v6 ("520d:2402:3d:5e65:11:f8:7c54:3f")); auto subnet2 (boost::asio::ip::make_network_v6 (address2, 33)); ASSERT_EQ (boost::asio::ip::make_address_v6 ("520d:2402:0::"), subnet2.network ()); // Default settings test auto address3 (boost::asio::ip::make_address_v6 ("a719:0f12:536e:d88a:1331:ba53:4598:04e5")); auto subnet3 (boost::asio::ip::make_network_v6 (address3, 32)); ASSERT_EQ (boost::asio::ip::make_address_v6 ("a719:0f12::"), subnet3.network ()); auto address3_subnet (nano::transport::map_address_to_subnetwork (address3)); ASSERT_EQ (subnet3.network (), address3_subnet); } TEST (network, network_range_ipv4) { auto address1 (boost::asio::ip::make_address_v6 ("::ffff:192.168.1.1")); auto subnet1 (boost::asio::ip::make_network_v6 (address1, 96 + 16)); ASSERT_EQ (boost::asio::ip::make_address_v6 ("::ffff:192.168.0.0"), subnet1.network ()); // Default settings test auto address2 (boost::asio::ip::make_address_v6 ("::ffff:80.67.148.225")); auto subnet2 (boost::asio::ip::make_network_v6 (address2, 96 + 24)); ASSERT_EQ (boost::asio::ip::make_address_v6 ("::ffff:80.67.148.0"), subnet2.network ()); auto address2_subnet (nano::transport::map_address_to_subnetwork (address2)); ASSERT_EQ (subnet2.network (), address2_subnet); } TEST (node, port_mapping) { nano::system system (1); auto node0 (system.nodes[0]); node0->port_mapping.refresh_devices (); node0->port_mapping.start (); auto end (std::chrono::steady_clock::now () + std::chrono::seconds (500)); (void)end; //while (std::chrono::steady_clock::now () < end) { ASSERT_NO_ERROR (system.poll ()); } } TEST (message_buffer_manager, one_buffer) { nano::stat stats; nano::message_buffer_manager buffer (stats, 512, 1); auto buffer1 (buffer.allocate ()); ASSERT_NE (nullptr, buffer1); buffer.enqueue (buffer1); auto buffer2 (buffer.dequeue ()); ASSERT_EQ (buffer1, buffer2); buffer.release (buffer2); auto buffer3 (buffer.allocate ()); ASSERT_EQ (buffer1, buffer3); } TEST (message_buffer_manager, two_buffers) { nano::stat stats; nano::message_buffer_manager buffer (stats, 512, 2); auto buffer1 (buffer.allocate ()); ASSERT_NE (nullptr, buffer1); auto buffer2 (buffer.allocate ()); ASSERT_NE (nullptr, 
buffer2); ASSERT_NE (buffer1, buffer2); buffer.enqueue (buffer2); buffer.enqueue (buffer1); auto buffer3 (buffer.dequeue ()); ASSERT_EQ (buffer2, buffer3); auto buffer4 (buffer.dequeue ()); ASSERT_EQ (buffer1, buffer4); buffer.release (buffer3); buffer.release (buffer4); auto buffer5 (buffer.allocate ()); ASSERT_EQ (buffer2, buffer5); auto buffer6 (buffer.allocate ()); ASSERT_EQ (buffer1, buffer6); } TEST (message_buffer_manager, one_overflow) { nano::stat stats; nano::message_buffer_manager buffer (stats, 512, 1); auto buffer1 (buffer.allocate ()); ASSERT_NE (nullptr, buffer1); buffer.enqueue (buffer1); auto buffer2 (buffer.allocate ()); ASSERT_EQ (buffer1, buffer2); } TEST (message_buffer_manager, two_overflow) { nano::stat stats; nano::message_buffer_manager buffer (stats, 512, 2); auto buffer1 (buffer.allocate ()); ASSERT_NE (nullptr, buffer1); buffer.enqueue (buffer1); auto buffer2 (buffer.allocate ()); ASSERT_NE (nullptr, buffer2); ASSERT_NE (buffer1, buffer2); buffer.enqueue (buffer2); auto buffer3 (buffer.allocate ()); ASSERT_EQ (buffer1, buffer3); auto buffer4 (buffer.allocate ()); ASSERT_EQ (buffer2, buffer4); } TEST (message_buffer_manager, one_buffer_multithreaded) { nano::stat stats; nano::message_buffer_manager buffer (stats, 512, 1); boost::thread thread ([&buffer] () { auto done (false); while (!done) { auto item (buffer.dequeue ()); done = item == nullptr; if (item != nullptr) { buffer.release (item); } } }); auto buffer1 (buffer.allocate ()); ASSERT_NE (nullptr, buffer1); buffer.enqueue (buffer1); auto buffer2 (buffer.allocate ()); ASSERT_EQ (buffer1, buffer2); buffer.stop (); thread.join (); } TEST (message_buffer_manager, many_buffers_multithreaded) { nano::stat stats; nano::message_buffer_manager buffer (stats, 512, 16); std::vector<boost::thread> threads; for (auto i (0); i < 4; ++i) { threads.push_back (boost::thread ([&buffer] () { auto done (false); while (!done) { auto item (buffer.dequeue ()); done = item == nullptr; if (item != nullptr) { buffer.release (item); } } })); } std::atomic_int count (0); for (auto i (0); i < 4; ++i) { threads.push_back (boost::thread ([&buffer, &count] () { auto done (false); for (auto i (0); !done && i < 1000; ++i) { auto item (buffer.allocate ()); done = item == nullptr; if (item != nullptr) { buffer.enqueue (item); ++count; if (count > 3000) { buffer.stop (); } } } })); } buffer.stop (); for (auto & i : threads) { i.join (); } } TEST (message_buffer_manager, stats) { nano::stat stats; nano::message_buffer_manager buffer (stats, 512, 1); auto buffer1 (buffer.allocate ()); buffer.enqueue (buffer1); buffer.allocate (); ASSERT_EQ (1, stats.count (nano::stat::type::udp, nano::stat::detail::overflow)); } TEST (tcp_listener, tcp_node_id_handshake) { nano::system system (1); auto socket (std::make_shared<nano::socket> (*system.nodes[0])); auto bootstrap_endpoint (system.nodes[0]->bootstrap.endpoint ()); auto cookie (system.nodes[0]->network.syn_cookies.assign (nano::transport::map_tcp_to_endpoint (bootstrap_endpoint))); nano::node_id_handshake node_id_handshake{ nano::dev::network_params.network, cookie, boost::none }; auto input (node_id_handshake.to_shared_const_buffer ()); std::atomic<bool> write_done (false); socket->async_connect (bootstrap_endpoint, [&input, socket, &write_done] (boost::system::error_code const & ec) { ASSERT_FALSE (ec); socket->async_write (input, [&input, &write_done] (boost::system::error_code const & ec, size_t size_a) { ASSERT_FALSE (ec); ASSERT_EQ (input.size (), size_a); write_done = true; }); }); 
ASSERT_TIMELY (5s, write_done); boost::optional<std::pair<nano::account, nano::signature>> response_zero (std::make_pair (nano::account (0), nano::signature (0))); nano::node_id_handshake node_id_handshake_response{ nano::dev::network_params.network, boost::none, response_zero }; auto output (node_id_handshake_response.to_bytes ()); std::atomic<bool> done (false); socket->async_read (output, output->size (), [&output, &done] (boost::system::error_code const & ec, size_t size_a) { ASSERT_FALSE (ec); ASSERT_EQ (output->size (), size_a); done = true; }); ASSERT_TIMELY (5s, done); } TEST (tcp_listener, tcp_listener_timeout_empty) { nano::system system (1); auto node0 (system.nodes[0]); auto socket (std::make_shared<nano::socket> (*node0)); std::atomic<bool> connected (false); socket->async_connect (node0->bootstrap.endpoint (), [&connected] (boost::system::error_code const & ec) { ASSERT_FALSE (ec); connected = true; }); ASSERT_TIMELY (5s, connected); bool disconnected (false); system.deadline_set (std::chrono::seconds (6)); while (!disconnected) { { nano::lock_guard<nano::mutex> guard (node0->bootstrap.mutex); disconnected = node0->bootstrap.connections.empty (); } ASSERT_NO_ERROR (system.poll ()); } } TEST (tcp_listener, tcp_listener_timeout_node_id_handshake) { nano::system system (1); auto node0 (system.nodes[0]); auto socket (std::make_shared<nano::socket> (*node0)); auto cookie (node0->network.syn_cookies.assign (nano::transport::map_tcp_to_endpoint (node0->bootstrap.endpoint ()))); nano::node_id_handshake node_id_handshake{ nano::dev::network_params.network, cookie, boost::none }; auto channel = std::make_shared<nano::transport::channel_tcp> (*node0, socket); socket->async_connect (node0->bootstrap.endpoint (), [&node_id_handshake, channel] (boost::system::error_code const & ec) { ASSERT_FALSE (ec); channel->send (node_id_handshake, [] (boost::system::error_code const & ec, size_t size_a) { ASSERT_FALSE (ec); }); }); ASSERT_TIMELY (5s, node0->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake) != 0); { nano::lock_guard<nano::mutex> guard (node0->bootstrap.mutex); ASSERT_EQ (node0->bootstrap.connections.size (), 1); } bool disconnected (false); system.deadline_set (std::chrono::seconds (20)); while (!disconnected) { { nano::lock_guard<nano::mutex> guard (node0->bootstrap.mutex); disconnected = node0->bootstrap.connections.empty (); } ASSERT_NO_ERROR (system.poll ()); } } TEST (network, replace_port) { nano::system system; nano::node_flags node_flags; node_flags.disable_udp = false; node_flags.disable_ongoing_telemetry_requests = true; node_flags.disable_initial_telemetry_requests = true; nano::node_config node0_config (nano::get_available_port (), system.logging); node0_config.io_threads = 8; auto node0 = system.add_node (node0_config, node_flags); ASSERT_EQ (0, node0->network.size ()); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work, node_flags)); node1->start (); system.nodes.push_back (node1); auto wrong_endpoint = nano::endpoint (node1->network.endpoint ().address (), nano::get_available_port ()); auto channel0 (node0->network.udp_channels.insert (wrong_endpoint, node1->network_params.network.protocol_version)); ASSERT_NE (nullptr, channel0); node0->network.udp_channels.modify (channel0, [&node1] (std::shared_ptr<nano::transport::channel> const & channel_a) { channel_a->set_node_id (node1->node_id.pub); }); auto peers_list (node0->network.list 
(std::numeric_limits<size_t>::max ())); ASSERT_EQ (peers_list[0]->get_node_id (), node1->node_id.pub); auto channel1 (std::make_shared<nano::transport::channel_udp> (node0->network.udp_channels, node1->network.endpoint (), node1->network_params.network.protocol_version)); ASSERT_EQ (node0->network.udp_channels.size (), 1); node0->network.send_keepalive (channel1); // On handshake, the channel is replaced ASSERT_TIMELY (5s, !node0->network.udp_channels.channel (wrong_endpoint) && node0->network.udp_channels.channel (node1->network.endpoint ())); } TEST (network, peer_max_tcp_attempts) { // Add nodes that can accept TCP connection, but not node ID handshake nano::node_flags node_flags; node_flags.disable_connection_cleanup = true; nano::system system; auto node = system.add_node (node_flags); for (auto i (0); i < node->network_params.network.max_peers_per_ip; ++i) { auto node2 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work, node_flags)); node2->start (); system.nodes.push_back (node2); // Start TCP attempt node->network.merge_peer (node2->network.endpoint ()); } ASSERT_EQ (0, node->network.size ()); ASSERT_TRUE (node->network.tcp_channels.reachout (nano::endpoint (node->network.endpoint ().address (), nano::get_available_port ()))); ASSERT_EQ (1, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_ip, nano::stat::dir::out)); } namespace nano { namespace transport { TEST (network, peer_max_tcp_attempts_subnetwork) { nano::system system (1); auto node (system.nodes[0]); for (auto i (0); i < node->network_params.network.max_peers_per_subnetwork; ++i) { auto address (boost::asio::ip::address_v6::v4_mapped (boost::asio::ip::address_v4 (0x7f000001 + i))); // 127.0.0.1 hex nano::endpoint endpoint (address, nano::get_available_port ()); ASSERT_FALSE (node->network.tcp_channels.reachout (endpoint)); } ASSERT_EQ (0, node->network.size ()); ASSERT_EQ (0, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_ip, nano::stat::dir::out)); ASSERT_TRUE (node->network.tcp_channels.reachout (nano::endpoint (boost::asio::ip::make_address_v6 ("::ffff:127.0.0.1"), nano::get_available_port ()))); ASSERT_EQ (1, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_ip, nano::stat::dir::out)); } } } TEST (network, duplicate_detection) { nano::system system; nano::node_flags node_flags; node_flags.disable_udp = false; auto & node0 (*system.add_node (node_flags)); auto & node1 (*system.add_node (node_flags)); auto udp_channel (std::make_shared<nano::transport::channel_udp> (node0.network.udp_channels, node1.network.endpoint (), node1.network_params.network.protocol_version)); nano::publish publish{ nano::dev::network_params.network, nano::dev::genesis }; // Publish duplicate detection through UDP ASSERT_EQ (0, node1.stats.count (nano::stat::type::filter, nano::stat::detail::duplicate_publish)); udp_channel->send (publish); udp_channel->send (publish); ASSERT_TIMELY (2s, node1.stats.count (nano::stat::type::filter, nano::stat::detail::duplicate_publish) == 1); // Publish duplicate detection through TCP auto tcp_channel (node0.network.tcp_channels.find_channel (nano::transport::map_endpoint_to_tcp (node1.network.endpoint ()))); ASSERT_EQ (1, node1.stats.count (nano::stat::type::filter, nano::stat::detail::duplicate_publish)); tcp_channel->send (publish); ASSERT_TIMELY (2s, node1.stats.count (nano::stat::type::filter, nano::stat::detail::duplicate_publish) == 2); } TEST (network, 
duplicate_revert_publish) { nano::system system; nano::node_flags node_flags; node_flags.block_processor_full_size = 0; auto & node (*system.add_node (node_flags)); ASSERT_TRUE (node.block_processor.full ()); nano::publish publish{ nano::dev::network_params.network, nano::dev::genesis }; std::vector<uint8_t> bytes; { nano::vectorstream stream (bytes); publish.block->serialize (stream); } // Add to the blocks filter // Should be cleared when dropping due to a full block processor, as long as the message has the optional digest attached // Test network.duplicate_detection ensures that the digest is attached when deserializing messages nano::uint128_t digest; ASSERT_FALSE (node.network.publish_filter.apply (bytes.data (), bytes.size (), &digest)); ASSERT_TRUE (node.network.publish_filter.apply (bytes.data (), bytes.size ())); auto other_node (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work)); other_node->start (); system.nodes.push_back (other_node); auto channel = nano::establish_tcp (system, *other_node, node.network.endpoint ()); ASSERT_NE (nullptr, channel); ASSERT_EQ (0, publish.digest); node.network.inbound (publish, channel); ASSERT_TRUE (node.network.publish_filter.apply (bytes.data (), bytes.size ())); publish.digest = digest; node.network.inbound (publish, channel); ASSERT_FALSE (node.network.publish_filter.apply (bytes.data (), bytes.size ())); } // The test must be completed in less than 1 second TEST (network, bandwidth_limiter) { nano::system system; nano::publish message{ nano::dev::network_params.network, nano::dev::genesis }; auto message_size = message.to_bytes ()->size (); auto message_limit = 4; // must be multiple of the number of channels nano::node_config node_config (nano::get_available_port (), system.logging); node_config.bandwidth_limit = message_limit * message_size; node_config.bandwidth_limit_burst_ratio = 1.0; auto & node = *system.add_node (node_config); auto channel1 (node.network.udp_channels.create (node.network.endpoint ())); auto channel2 (node.network.udp_channels.create (node.network.endpoint ())); // Send droppable messages for (auto i = 0; i < message_limit; i += 2) // number of channels { channel1->send (message); channel2->send (message); } // Only sent messages below limit, so we don't expect any drops ASSERT_TIMELY (1s, 0 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out)); // Send droppable message; drop stats should increase by one now channel1->send (message); ASSERT_TIMELY (1s, 1 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out)); // Send non-droppable message, i.e. 
drop stats should not increase channel2->send (message, nullptr, nano::buffer_drop_policy::no_limiter_drop); ASSERT_TIMELY (1s, 1 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out)); // change the bandwidth settings, 2 packets will be dropped node.network.set_bandwidth_params (1.1, message_size * 2); channel1->send (message); channel2->send (message); channel1->send (message); channel2->send (message); ASSERT_TIMELY (1s, 3 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out)); // change the bandwidth settings, no packet will be dropped node.network.set_bandwidth_params (4, message_size); channel1->send (message); channel2->send (message); channel1->send (message); channel2->send (message); ASSERT_TIMELY (1s, 3 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out)); node.stop (); } namespace nano { TEST (peer_exclusion, validate) { nano::peer_exclusion excluded_peers; size_t fake_peers_count = 10; auto max_size = excluded_peers.limited_size (fake_peers_count); for (auto i = 0; i < max_size + 2; ++i) { nano::tcp_endpoint endpoint (boost::asio::ip::address_v6::v4_mapped (boost::asio::ip::address_v4 (i)), 0); ASSERT_FALSE (excluded_peers.check (endpoint)); ASSERT_EQ (1, excluded_peers.add (endpoint, fake_peers_count)); ASSERT_FALSE (excluded_peers.check (endpoint)); } // The oldest one must have been removed ASSERT_EQ (max_size + 1, excluded_peers.size ()); auto & peers_by_endpoint (excluded_peers.peers.get<nano::peer_exclusion::tag_endpoint> ()); nano::tcp_endpoint oldest (boost::asio::ip::address_v6::v4_mapped (boost::asio::ip::address_v4 (0x0)), 0); ASSERT_EQ (peers_by_endpoint.end (), peers_by_endpoint.find (oldest.address ())); auto to_seconds = [] (std::chrono::steady_clock::time_point const & timepoint) { return static_cast<double> (std::chrono::duration_cast<std::chrono::seconds> (timepoint.time_since_epoch ()).count ()); }; nano::tcp_endpoint first (boost::asio::ip::address_v6::v4_mapped (boost::asio::ip::address_v4 (0x1)), 0); ASSERT_NE (peers_by_endpoint.end (), peers_by_endpoint.find (first.address ())); nano::tcp_endpoint second (boost::asio::ip::address_v6::v4_mapped (boost::asio::ip::address_v4 (0x2)), 0); ASSERT_EQ (false, excluded_peers.check (second)); ASSERT_NEAR (to_seconds (std::chrono::steady_clock::now () + excluded_peers.exclude_time_hours), to_seconds (peers_by_endpoint.find (second.address ())->exclude_until), 2); ASSERT_EQ (2, excluded_peers.add (second, fake_peers_count)); ASSERT_EQ (peers_by_endpoint.end (), peers_by_endpoint.find (first.address ())); ASSERT_NEAR (to_seconds (std::chrono::steady_clock::now () + excluded_peers.exclude_time_hours), to_seconds (peers_by_endpoint.find (second.address ())->exclude_until), 2); ASSERT_EQ (3, excluded_peers.add (second, fake_peers_count)); ASSERT_NEAR (to_seconds (std::chrono::steady_clock::now () + excluded_peers.exclude_time_hours * 3 * 2), to_seconds (peers_by_endpoint.find (second.address ())->exclude_until), 2); ASSERT_EQ (max_size, excluded_peers.size ()); // Clear many entries if there are a low number of peers ASSERT_EQ (4, excluded_peers.add (second, 0)); ASSERT_EQ (1, excluded_peers.size ()); auto component (nano::collect_container_info (excluded_peers, "")); auto composite (dynamic_cast<nano::container_info_composite *> (component.get ())); ASSERT_NE (nullptr, component); auto & children (composite->get_children ()); ASSERT_EQ (1, children.size ()); auto child_leaf 
(dynamic_cast<nano::container_info_leaf *> (children.front ().get ())); ASSERT_NE (nullptr, child_leaf); auto child_info (child_leaf->get_info ()); ASSERT_EQ ("peers", child_info.name); ASSERT_EQ (1, child_info.count); ASSERT_EQ (sizeof (decltype (excluded_peers.peers)::value_type), child_info.sizeof_element); } } TEST (network, tcp_no_connect_excluded_peers) { nano::system system (1); auto node0 (system.nodes[0]); ASSERT_EQ (0, node0->network.size ()); auto node1 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work)); node1->start (); system.nodes.push_back (node1); auto endpoint1 (node1->network.endpoint ()); auto endpoint1_tcp (nano::transport::map_endpoint_to_tcp (endpoint1)); while (!node0->network.excluded_peers.check (endpoint1_tcp)) { node0->network.excluded_peers.add (endpoint1_tcp, 1); } ASSERT_EQ (0, node0->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_excluded)); node1->network.merge_peer (node0->network.endpoint ()); ASSERT_TIMELY (5s, node0->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_excluded) >= 1); ASSERT_EQ (nullptr, node0->network.find_channel (endpoint1)); // Should not actively reachout to excluded peers ASSERT_TRUE (node0->network.reachout (endpoint1, true)); // Erasing from excluded peers should allow a connection node0->network.excluded_peers.remove (endpoint1_tcp); ASSERT_FALSE (node0->network.excluded_peers.check (endpoint1_tcp)); // Wait until there is a syn_cookie ASSERT_TIMELY (5s, node1->network.syn_cookies.cookies_size () != 0); // Manually cleanup previous attempt node1->network.cleanup (std::chrono::steady_clock::now ()); node1->network.syn_cookies.purge (std::chrono::steady_clock::now ()); // Ensure a successful connection ASSERT_EQ (0, node0->network.size ()); node1->network.merge_peer (node0->network.endpoint ()); ASSERT_TIMELY (5s, node0->network.size () == 1); } namespace nano { TEST (network, tcp_message_manager) { nano::tcp_message_manager manager (1); nano::tcp_message_item item; item.node_id = nano::account (100); ASSERT_EQ (0, manager.entries.size ()); manager.put_message (item); ASSERT_EQ (1, manager.entries.size ()); ASSERT_EQ (manager.get_message ().node_id, item.node_id); ASSERT_EQ (0, manager.entries.size ()); // Fill the queue manager.entries = decltype (manager.entries) (manager.max_entries, item); ASSERT_EQ (manager.entries.size (), manager.max_entries); // This task will wait until a message is consumed auto future = std::async (std::launch::async, [&] { manager.put_message (item); }); // This should give sufficient time to execute put_message // and prove that it waits on condition variable std::this_thread::sleep_for (CI ? 
200ms : 100ms); ASSERT_EQ (manager.entries.size (), manager.max_entries); ASSERT_EQ (manager.get_message ().node_id, item.node_id); ASSERT_NE (std::future_status::timeout, future.wait_for (1s)); ASSERT_EQ (manager.entries.size (), manager.max_entries); nano::tcp_message_manager manager2 (2); size_t message_count = 10'000; std::vector<std::thread> consumers; for (auto i = 0; i < 4; ++i) { consumers.emplace_back ([&] { for (auto i = 0; i < message_count; ++i) { ASSERT_EQ (manager.get_message ().node_id, item.node_id); } }); } std::vector<std::thread> producers; for (auto i = 0; i < 4; ++i) { producers.emplace_back ([&] { for (auto i = 0; i < message_count; ++i) { manager.put_message (item); } }); } for (auto & t : boost::range::join (producers, consumers)) { t.join (); } } } TEST (network, cleanup_purge) { auto test_start = std::chrono::steady_clock::now (); nano::system system (1); auto & node1 (*system.nodes[0]); auto node2 (std::make_shared<nano::node> (system.io_ctx, nano::get_available_port (), nano::unique_path (), system.logging, system.work)); node2->start (); system.nodes.push_back (node2); ASSERT_EQ (0, node1.network.size ()); node1.network.cleanup (test_start); ASSERT_EQ (0, node1.network.size ()); node1.network.udp_channels.insert (node2->network.endpoint (), node1.network_params.network.protocol_version); ASSERT_EQ (1, node1.network.size ()); node1.network.cleanup (test_start); ASSERT_EQ (1, node1.network.size ()); node1.network.cleanup (std::chrono::steady_clock::now ()); ASSERT_EQ (0, node1.network.size ()); std::weak_ptr<nano::node> node_w = node1.shared (); node1.network.tcp_channels.start_tcp (node2->network.endpoint (), [node_w] (std::shared_ptr<nano::transport::channel> const & channel_a) { if (auto node_l = node_w.lock ()) { node_l->network.send_keepalive (channel_a); } }); ASSERT_TIMELY (3s, node1.network.size () == 1); node1.network.cleanup (test_start); ASSERT_EQ (1, node1.network.size ()); node1.network.cleanup (std::chrono::steady_clock::now ()); ASSERT_EQ (0, node1.network.size ()); } TEST (network, loopback_channel) { nano::system system (2); auto & node1 = *system.nodes[0]; auto & node2 = *system.nodes[1]; nano::transport::channel_loopback channel1 (node1); ASSERT_EQ (channel1.get_type (), nano::transport::transport_type::loopback); ASSERT_EQ (channel1.get_endpoint (), node1.network.endpoint ()); ASSERT_EQ (channel1.get_tcp_endpoint (), nano::transport::map_endpoint_to_tcp (node1.network.endpoint ())); ASSERT_EQ (channel1.get_network_version (), node1.network_params.network.protocol_version); ASSERT_EQ (channel1.get_node_id (), node1.node_id.pub); ASSERT_EQ (channel1.get_node_id_optional ().value_or (0), node1.node_id.pub); nano::transport::channel_loopback channel2 (node2); ASSERT_TRUE (channel1 == channel1); ASSERT_FALSE (channel1 == channel2); ++node1.network.port; ASSERT_NE (channel1.get_endpoint (), node1.network.endpoint ()); } // Ensure the network filters messages with the incorrect magic number TEST (network, filter) { nano::system system{ 1 }; auto & node1 = *system.nodes[0]; nano::keepalive keepalive{ nano::dev::network_params.network }; const_cast<nano::networks &> (keepalive.header.network) = nano::networks::nano_dev_network; node1.network.inbound (keepalive, std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_EQ (0, node1.stats.count (nano::stat::type::message, nano::stat::detail::invalid_network)); const_cast<nano::networks &> (keepalive.header.network) = nano::networks::invalid; node1.network.inbound (keepalive, 
std::make_shared<nano::transport::channel_loopback> (node1)); ASSERT_EQ (1, node1.stats.count (nano::stat::type::message, nano::stat::detail::invalid_network)); }
1
16,878
It would be better to check that system.nodes[1]->network.port is somewhere in the target without specifying its exact position. But it is a very minor point and I have no strong opinion on it; I just thought I'd mention it because our tests in general encode too much implementation detail.
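A minimal sketch of the position-independent check the comment suggests, assuming the test has already deserialized a keepalive message into a local variable named keepalive whose peers container holds nano::endpoint values; the surrounding names are illustrative and not the actual test code:

// Accept the port anywhere in the peer list instead of asserting a fixed index.
// Assumes <algorithm> is included; `keepalive` and its `peers` container are illustrative.
uint16_t const port (system.nodes[1]->network.port);
auto const found (std::any_of (keepalive.peers.begin (), keepalive.peers.end (), [port] (nano::endpoint const & peer) {
	return peer.port () == port;
}));
ASSERT_TRUE (found);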
nanocurrency-nano-node
cpp
@@ -43,8 +43,11 @@ public class WebApplicationExceptionHandler implements ExceptionMapper<WebApplic switch (ex.getResponse().getStatus()) { // BadRequest case 400: + // It's strange to have these "startsWith" conditionals here. They both come from Access.java. if ( (ex.getMessage()+"").toLowerCase().startsWith("tabular data required")) { jrb.message(BundleUtil.getStringFromBundle("access.api.exception.metadata.not.available.for.nontabular.file")); + } else if ((ex.getMessage() + "").toLowerCase().startsWith("no permission to download file")) { + jrb.message("You do not have permission to download this file."); } else { jrb.message("Bad Request. The API request cannot be completed with the parameters supplied. Please check your code for typos, or consult our API guide at http://guides.dataverse.org."); jrb.request(request);
1
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package edu.harvard.iq.dataverse.api.errorhandlers; import edu.harvard.iq.dataverse.api.util.JsonResponseBuilder; import edu.harvard.iq.dataverse.util.BundleUtil; import javax.servlet.http.HttpServletRequest; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Context; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; import java.util.Optional; import java.util.logging.Level; import java.util.logging.Logger; /** * Catches all types of web application exceptions like NotFoundException, etc etc and handles them properly. */ @Provider public class WebApplicationExceptionHandler implements ExceptionMapper<WebApplicationException> { static final Logger logger = Logger.getLogger(WebApplicationExceptionHandler.class.getSimpleName()); @Context HttpServletRequest request; @Override public Response toResponse(WebApplicationException ex) { // If this is not a HTTP client or server error, just pass the response. if (ex.getResponse().getStatus() < 400) return ex.getResponse(); // Otherwise, do stuff. JsonResponseBuilder jrb = JsonResponseBuilder.error(ex.getResponse()); // See also https://en.wikipedia.org/wiki/List_of_HTTP_status_codes for a list of status codes. switch (ex.getResponse().getStatus()) { // BadRequest case 400: if ( (ex.getMessage()+"").toLowerCase().startsWith("tabular data required")) { jrb.message(BundleUtil.getStringFromBundle("access.api.exception.metadata.not.available.for.nontabular.file")); } else { jrb.message("Bad Request. The API request cannot be completed with the parameters supplied. Please check your code for typos, or consult our API guide at http://guides.dataverse.org."); jrb.request(request); } break; // Forbidden case 403: jrb.message("Not authorized to access this object via this API endpoint. Please check your code for typos, or consult our API guide at http://guides.dataverse.org."); jrb.request(request); break; // NotFound case 404: if ( (ex.getMessage()+"").toLowerCase().startsWith("datafile")) { jrb.message(ex.getMessage()); } else { jrb.message("API endpoint does not exist on this server. Please check your code for typos, or consult our API guide at http://guides.dataverse.org."); jrb.request(request); } break; // MethodNotAllowed case 405: jrb.message("API endpoint does not support this method. Consult our API guide at http://guides.dataverse.org."); jrb.request(request); break; // NotAcceptable (might be content type, charset, encoding or language) case 406: jrb.message("API endpoint does not accept your request. Consult our API guide at http://guides.dataverse.org."); jrb.request(request); jrb.requestContentType(request); break; // InternalServerError case 500: jrb.randomIncidentId(); jrb.internalError(ex); jrb.request(request); jrb.log(logger, Level.SEVERE, Optional.of(ex)); break; // ServiceUnavailable case 503: if ( (ex.getMessage()+"").toLowerCase().startsWith("datafile")) { jrb.message(ex.getMessage()); } else { jrb.message("Requested service or method not available on the requested object"); } break; default: jrb.message(ex.getMessage()); break; } // Logging for debugging. Will not double-log messages. jrb.log(logger, Level.FINEST, Optional.of(ex)); return jrb.build(); } }
1
43,966
Should this be in a bundle?
IQSS-dataverse
java
@@ -291,9 +291,8 @@ namespace OpenTelemetry.Instrumentation.Http.Implementation private static void ProcessResult(IAsyncResult asyncResult, AsyncCallback asyncCallback, Activity activity, object result, bool forceResponseCopy) { - // We could be executing on a different thread now so set the activity. - Debug.Assert(Activity.Current == null || Activity.Current == activity, "There was an unexpected active Activity on the result thread."); - if (Activity.Current == null) + // We could be executing on a different thread now so restore the activity if needed. + if (Activity.Current != activity) { Activity.Current = activity; }
1
// <copyright file="HttpWebRequestActivitySource.netfx.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> #if NETFRAMEWORK using System; using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Net; using System.Reflection; using System.Reflection.Emit; using System.Runtime.CompilerServices; using OpenTelemetry.Context.Propagation; using OpenTelemetry.Trace; namespace OpenTelemetry.Instrumentation.Http.Implementation { /// <summary> /// Hooks into the <see cref="HttpWebRequest"/> class reflectively and writes diagnostic events as requests are processed. /// </summary> /// <remarks> /// Inspired from the System.Diagnostics.DiagnosticSource.HttpHandlerDiagnosticListener class which has some bugs and feature gaps. /// See https://github.com/dotnet/runtime/pull/33732 for details. /// </remarks> internal static class HttpWebRequestActivitySource { public const string ActivitySourceName = "OpenTelemetry.HttpWebRequest"; public const string ActivityName = ActivitySourceName + ".HttpRequestOut"; internal static readonly Func<HttpWebRequest, string, IEnumerable<string>> HttpWebRequestHeaderValuesGetter = (request, name) => request.Headers.GetValues(name); internal static readonly Action<HttpWebRequest, string, string> HttpWebRequestHeaderValuesSetter = (request, name, value) => request.Headers.Add(name, value); internal static HttpWebRequestInstrumentationOptions Options = new HttpWebRequestInstrumentationOptions(); private static readonly Version Version = typeof(HttpWebRequestActivitySource).Assembly.GetName().Version; private static readonly ActivitySource WebRequestActivitySource = new ActivitySource(ActivitySourceName, Version.ToString()); // Fields for reflection private static FieldInfo connectionGroupListField; private static Type connectionGroupType; private static FieldInfo connectionListField; private static Type connectionType; private static FieldInfo writeListField; private static Func<object, IAsyncResult> writeAResultAccessor; private static Func<object, IAsyncResult> readAResultAccessor; // LazyAsyncResult & ContextAwareResult private static Func<object, AsyncCallback> asyncCallbackAccessor; private static Action<object, AsyncCallback> asyncCallbackModifier; private static Func<object, object> asyncStateAccessor; private static Action<object, object> asyncStateModifier; private static Func<object, bool> endCalledAccessor; private static Func<object, object> resultAccessor; private static Func<object, bool> isContextAwareResultChecker; // HttpWebResponse private static Func<object[], HttpWebResponse> httpWebResponseCtor; private static Func<HttpWebResponse, Uri> uriAccessor; private static Func<HttpWebResponse, object> verbAccessor; private static Func<HttpWebResponse, string> mediaTypeAccessor; private static Func<HttpWebResponse, bool> usesProxySemanticsAccessor; private static Func<HttpWebResponse, object> coreResponseDataAccessor; private static 
Func<HttpWebResponse, bool> isWebSocketResponseAccessor; private static Func<HttpWebResponse, string> connectionGroupNameAccessor; static HttpWebRequestActivitySource() { try { PrepareReflectionObjects(); PerformInjection(); } catch (Exception ex) { // If anything went wrong, just no-op. Write an event so at least we can find out. HttpInstrumentationEventSource.Log.ExceptionInitializingInstrumentation(typeof(HttpWebRequestActivitySource).FullName, ex); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void AddRequestTagsAndInstrumentRequest(HttpWebRequest request, Activity activity) { activity.DisplayName = HttpTagHelper.GetOperationNameForHttpMethod(request.Method); if (activity.IsAllDataRequested) { activity.SetTag(SemanticConventions.AttributeHttpMethod, request.Method); activity.SetTag(SemanticConventions.AttributeHttpHost, HttpTagHelper.GetHostTagValueFromRequestUri(request.RequestUri)); activity.SetTag(SemanticConventions.AttributeHttpUrl, HttpTagHelper.GetUriTagValueFromRequestUri(request.RequestUri)); if (Options.SetHttpFlavor) { activity.SetTag(SemanticConventions.AttributeHttpFlavor, HttpTagHelper.GetFlavorTagValueFromProtocolVersion(request.ProtocolVersion)); } try { Options.Enrich?.Invoke(activity, "OnStartActivity", request); } catch (Exception ex) { HttpInstrumentationEventSource.Log.EnrichmentException(ex); } } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void AddResponseTags(HttpWebResponse response, Activity activity) { if (activity.IsAllDataRequested) { activity.SetTag(SemanticConventions.AttributeHttpStatusCode, (int)response.StatusCode); activity.SetStatus(SpanHelper.ResolveSpanStatusForHttpStatusCode((int)response.StatusCode)); try { Options.Enrich?.Invoke(activity, "OnStopActivity", response); } catch (Exception ex) { HttpInstrumentationEventSource.Log.EnrichmentException(ex); } } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void AddExceptionTags(Exception exception, Activity activity) { if (!activity.IsAllDataRequested) { return; } Status status; if (exception is WebException wexc) { if (wexc.Response is HttpWebResponse response) { activity.SetTag(SemanticConventions.AttributeHttpStatusCode, (int)response.StatusCode); status = SpanHelper.ResolveSpanStatusForHttpStatusCode((int)response.StatusCode); } else { switch (wexc.Status) { case WebExceptionStatus.Timeout: case WebExceptionStatus.RequestCanceled: status = Status.Error; break; case WebExceptionStatus.SendFailure: case WebExceptionStatus.ConnectFailure: case WebExceptionStatus.SecureChannelFailure: case WebExceptionStatus.TrustFailure: case WebExceptionStatus.ServerProtocolViolation: case WebExceptionStatus.MessageLengthLimitExceeded: status = Status.Error.WithDescription(exception.Message); break; default: status = Status.Error.WithDescription(exception.Message); break; } } } else { status = Status.Error.WithDescription(exception.Message); } activity.SetStatus(status); if (Options.RecordException) { activity.RecordException(exception); } try { Options.Enrich?.Invoke(activity, "OnException", exception); } catch (Exception ex) { HttpInstrumentationEventSource.Log.EnrichmentException(ex); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void InstrumentRequest(HttpWebRequest request, ActivityContext activityContext) => Propagators.DefaultTextMapPropagator.Inject(new PropagationContext(activityContext, Baggage.Current), request, HttpWebRequestHeaderValuesSetter); [MethodImpl(MethodImplOptions.AggressiveInlining)] private static 
bool IsRequestInstrumented(HttpWebRequest request) => Propagators.DefaultTextMapPropagator.Extract(default, request, HttpWebRequestHeaderValuesGetter) != default; private static void ProcessRequest(HttpWebRequest request) { if (!WebRequestActivitySource.HasListeners() || !Options.EventFilter(request)) { // No subscribers to the ActivitySource or User provider Filter is // filtering this request. // Propagation must still be done in such cases, to allow // downstream services to continue from parent context, if any. // Eg: Parent could be the Asp.Net activity. InstrumentRequest(request, Activity.Current?.Context ?? default); return; } if (IsRequestInstrumented(request)) { // This request was instrumented by previous // ProcessRequest, such is the case with redirect responses where the same request is sent again. return; } var activity = WebRequestActivitySource.StartActivity(ActivityName, ActivityKind.Client); var activityContext = Activity.Current?.Context ?? default; // Propagation must still be done in all cases, to allow // downstream services to continue from parent context, if any. // Eg: Parent could be the Asp.Net activity. InstrumentRequest(request, activityContext); if (activity == null) { // There is a listener but it decided not to sample the current request. return; } IAsyncResult asyncContext = writeAResultAccessor(request); if (asyncContext != null) { // Flow here is for [Begin]GetRequestStream[Async]. AsyncCallbackWrapper callback = new AsyncCallbackWrapper(request, activity, asyncCallbackAccessor(asyncContext)); asyncCallbackModifier(asyncContext, callback.AsyncCallback); } else { // Flow here is for [Begin]GetResponse[Async] without a prior call to [Begin]GetRequestStream[Async]. asyncContext = readAResultAccessor(request); AsyncCallbackWrapper callback = new AsyncCallbackWrapper(request, activity, asyncCallbackAccessor(asyncContext)); asyncCallbackModifier(asyncContext, callback.AsyncCallback); } AddRequestTagsAndInstrumentRequest(request, activity); } private static void HookOrProcessResult(HttpWebRequest request) { IAsyncResult writeAsyncContext = writeAResultAccessor(request); if (writeAsyncContext == null || !(asyncCallbackAccessor(writeAsyncContext)?.Target is AsyncCallbackWrapper writeAsyncContextCallback)) { // If we already hooked into the read result during ProcessRequest or we hooked up after the fact already we don't need to do anything here. return; } // If we got here it means the user called [Begin]GetRequestStream[Async] and we have to hook the read result after the fact. IAsyncResult readAsyncContext = readAResultAccessor(request); if (readAsyncContext == null) { // We're still trying to establish the connection (no read has started). return; } // Clear our saved callback so we know not to process again. asyncCallbackModifier(writeAsyncContext, null); if (endCalledAccessor.Invoke(readAsyncContext) || readAsyncContext.CompletedSynchronously) { // We need to process the result directly because the read callback has already fired. Force a copy because response has likely already been disposed. ProcessResult(readAsyncContext, null, writeAsyncContextCallback.Activity, resultAccessor(readAsyncContext), true); return; } // Hook into the result callback if it hasn't already fired. 
AsyncCallbackWrapper callback = new AsyncCallbackWrapper(writeAsyncContextCallback.Request, writeAsyncContextCallback.Activity, asyncCallbackAccessor(readAsyncContext)); asyncCallbackModifier(readAsyncContext, callback.AsyncCallback); } private static void ProcessResult(IAsyncResult asyncResult, AsyncCallback asyncCallback, Activity activity, object result, bool forceResponseCopy) { // We could be executing on a different thread now so set the activity. Debug.Assert(Activity.Current == null || Activity.Current == activity, "There was an unexpected active Activity on the result thread."); if (Activity.Current == null) { Activity.Current = activity; } try { if (result is Exception ex) { AddExceptionTags(ex, activity); } else { HttpWebResponse response = (HttpWebResponse)result; if (forceResponseCopy || (asyncCallback == null && isContextAwareResultChecker(asyncResult))) { // For async calls (where asyncResult is ContextAwareResult)... // If no callback was set assume the user is manually calling BeginGetResponse & EndGetResponse // in which case they could dispose the HttpWebResponse before our listeners have a chance to work with it. // Disposed HttpWebResponse throws when accessing properties, so let's make a copy of the data to ensure that doesn't happen. HttpWebResponse responseCopy = httpWebResponseCtor( new object[] { uriAccessor(response), verbAccessor(response), coreResponseDataAccessor(response), mediaTypeAccessor(response), usesProxySemanticsAccessor(response), DecompressionMethods.None, isWebSocketResponseAccessor(response), connectionGroupNameAccessor(response), }); AddResponseTags(responseCopy, activity); } else { AddResponseTags(response, activity); } } } catch (Exception ex) { HttpInstrumentationEventSource.Log.FailedProcessResult(ex); } activity.Stop(); } private static void PrepareReflectionObjects() { // At any point, if the operation failed, it should just throw. The caller should catch all exceptions and swallow. Type servicePointType = typeof(ServicePoint); Assembly systemNetHttpAssembly = servicePointType.Assembly; connectionGroupListField = servicePointType.GetField("m_ConnectionGroupList", BindingFlags.Instance | BindingFlags.NonPublic); connectionGroupType = systemNetHttpAssembly?.GetType("System.Net.ConnectionGroup"); connectionListField = connectionGroupType?.GetField("m_ConnectionList", BindingFlags.Instance | BindingFlags.NonPublic); connectionType = systemNetHttpAssembly?.GetType("System.Net.Connection"); writeListField = connectionType?.GetField("m_WriteList", BindingFlags.Instance | BindingFlags.NonPublic); writeAResultAccessor = CreateFieldGetter<IAsyncResult>(typeof(HttpWebRequest), "_WriteAResult", BindingFlags.NonPublic | BindingFlags.Instance); readAResultAccessor = CreateFieldGetter<IAsyncResult>(typeof(HttpWebRequest), "_ReadAResult", BindingFlags.NonPublic | BindingFlags.Instance); // Double checking to make sure we have all the pieces initialized if (connectionGroupListField == null || connectionGroupType == null || connectionListField == null || connectionType == null || writeListField == null || writeAResultAccessor == null || readAResultAccessor == null || !PrepareAsyncResultReflectionObjects(systemNetHttpAssembly) || !PrepareHttpWebResponseReflectionObjects(systemNetHttpAssembly)) { // If anything went wrong here, just return false. There is nothing we can do. 
throw new InvalidOperationException("Unable to initialize all required reflection objects"); } } private static bool PrepareAsyncResultReflectionObjects(Assembly systemNetHttpAssembly) { Type lazyAsyncResultType = systemNetHttpAssembly?.GetType("System.Net.LazyAsyncResult"); if (lazyAsyncResultType != null) { asyncCallbackAccessor = CreateFieldGetter<AsyncCallback>(lazyAsyncResultType, "m_AsyncCallback", BindingFlags.NonPublic | BindingFlags.Instance); asyncCallbackModifier = CreateFieldSetter<AsyncCallback>(lazyAsyncResultType, "m_AsyncCallback", BindingFlags.NonPublic | BindingFlags.Instance); asyncStateAccessor = CreateFieldGetter<object>(lazyAsyncResultType, "m_AsyncState", BindingFlags.NonPublic | BindingFlags.Instance); asyncStateModifier = CreateFieldSetter<object>(lazyAsyncResultType, "m_AsyncState", BindingFlags.NonPublic | BindingFlags.Instance); endCalledAccessor = CreateFieldGetter<bool>(lazyAsyncResultType, "m_EndCalled", BindingFlags.NonPublic | BindingFlags.Instance); resultAccessor = CreateFieldGetter<object>(lazyAsyncResultType, "m_Result", BindingFlags.NonPublic | BindingFlags.Instance); } Type contextAwareResultType = systemNetHttpAssembly?.GetType("System.Net.ContextAwareResult"); if (contextAwareResultType != null) { isContextAwareResultChecker = CreateTypeChecker(contextAwareResultType); } return asyncCallbackAccessor != null && asyncCallbackModifier != null && asyncStateAccessor != null && asyncStateModifier != null && endCalledAccessor != null && resultAccessor != null && isContextAwareResultChecker != null; } private static bool PrepareHttpWebResponseReflectionObjects(Assembly systemNetHttpAssembly) { Type knownHttpVerbType = systemNetHttpAssembly?.GetType("System.Net.KnownHttpVerb"); Type coreResponseData = systemNetHttpAssembly?.GetType("System.Net.CoreResponseData"); if (knownHttpVerbType != null && coreResponseData != null) { var constructorParameterTypes = new Type[] { typeof(Uri), knownHttpVerbType, coreResponseData, typeof(string), typeof(bool), typeof(DecompressionMethods), typeof(bool), typeof(string), }; ConstructorInfo ctor = typeof(HttpWebResponse).GetConstructor( BindingFlags.NonPublic | BindingFlags.Instance, null, constructorParameterTypes, null); if (ctor != null) { httpWebResponseCtor = CreateTypeInstance<HttpWebResponse>(ctor); } } uriAccessor = CreateFieldGetter<HttpWebResponse, Uri>("m_Uri", BindingFlags.NonPublic | BindingFlags.Instance); verbAccessor = CreateFieldGetter<HttpWebResponse, object>("m_Verb", BindingFlags.NonPublic | BindingFlags.Instance); mediaTypeAccessor = CreateFieldGetter<HttpWebResponse, string>("m_MediaType", BindingFlags.NonPublic | BindingFlags.Instance); usesProxySemanticsAccessor = CreateFieldGetter<HttpWebResponse, bool>("m_UsesProxySemantics", BindingFlags.NonPublic | BindingFlags.Instance); coreResponseDataAccessor = CreateFieldGetter<HttpWebResponse, object>("m_CoreResponseData", BindingFlags.NonPublic | BindingFlags.Instance); isWebSocketResponseAccessor = CreateFieldGetter<HttpWebResponse, bool>("m_IsWebSocketResponse", BindingFlags.NonPublic | BindingFlags.Instance); connectionGroupNameAccessor = CreateFieldGetter<HttpWebResponse, string>("m_ConnectionGroupName", BindingFlags.NonPublic | BindingFlags.Instance); return httpWebResponseCtor != null && uriAccessor != null && verbAccessor != null && mediaTypeAccessor != null && usesProxySemanticsAccessor != null && coreResponseDataAccessor != null && isWebSocketResponseAccessor != null && connectionGroupNameAccessor != null; } private static void PerformInjection() { 
FieldInfo servicePointTableField = typeof(ServicePointManager).GetField("s_ServicePointTable", BindingFlags.Static | BindingFlags.NonPublic); if (servicePointTableField == null) { // If anything went wrong here, just return false. There is nothing we can do. throw new InvalidOperationException("Unable to access the ServicePointTable field"); } Hashtable originalTable = servicePointTableField.GetValue(null) as Hashtable; ServicePointHashtable newTable = new ServicePointHashtable(originalTable ?? new Hashtable()); servicePointTableField.SetValue(null, newTable); } private static Func<TClass, TField> CreateFieldGetter<TClass, TField>(string fieldName, BindingFlags flags) where TClass : class { FieldInfo field = typeof(TClass).GetField(fieldName, flags); if (field != null) { string methodName = field.ReflectedType.FullName + ".get_" + field.Name; DynamicMethod getterMethod = new DynamicMethod(methodName, typeof(TField), new[] { typeof(TClass) }, true); ILGenerator generator = getterMethod.GetILGenerator(); generator.Emit(OpCodes.Ldarg_0); generator.Emit(OpCodes.Ldfld, field); generator.Emit(OpCodes.Ret); return (Func<TClass, TField>)getterMethod.CreateDelegate(typeof(Func<TClass, TField>)); } return null; } /// <summary> /// Creates getter for a field defined in private or internal type /// repesented with classType variable. /// </summary> private static Func<object, TField> CreateFieldGetter<TField>(Type classType, string fieldName, BindingFlags flags) { FieldInfo field = classType.GetField(fieldName, flags); if (field != null) { string methodName = classType.FullName + ".get_" + field.Name; DynamicMethod getterMethod = new DynamicMethod(methodName, typeof(TField), new[] { typeof(object) }, true); ILGenerator generator = getterMethod.GetILGenerator(); generator.Emit(OpCodes.Ldarg_0); generator.Emit(OpCodes.Castclass, classType); generator.Emit(OpCodes.Ldfld, field); generator.Emit(OpCodes.Ret); return (Func<object, TField>)getterMethod.CreateDelegate(typeof(Func<object, TField>)); } return null; } /// <summary> /// Creates setter for a field defined in private or internal type /// repesented with classType variable. /// </summary> private static Action<object, TField> CreateFieldSetter<TField>(Type classType, string fieldName, BindingFlags flags) { FieldInfo field = classType.GetField(fieldName, flags); if (field != null) { string methodName = classType.FullName + ".set_" + field.Name; DynamicMethod setterMethod = new DynamicMethod(methodName, null, new[] { typeof(object), typeof(TField) }, true); ILGenerator generator = setterMethod.GetILGenerator(); generator.Emit(OpCodes.Ldarg_0); generator.Emit(OpCodes.Castclass, classType); generator.Emit(OpCodes.Ldarg_1); generator.Emit(OpCodes.Stfld, field); generator.Emit(OpCodes.Ret); return (Action<object, TField>)setterMethod.CreateDelegate(typeof(Action<object, TField>)); } return null; } /// <summary> /// Creates an "is" method for the private or internal type. 
/// </summary> private static Func<object, bool> CreateTypeChecker(Type classType) { string methodName = classType.FullName + ".typeCheck"; DynamicMethod setterMethod = new DynamicMethod(methodName, typeof(bool), new[] { typeof(object) }, true); ILGenerator generator = setterMethod.GetILGenerator(); generator.Emit(OpCodes.Ldarg_0); generator.Emit(OpCodes.Isinst, classType); generator.Emit(OpCodes.Ldnull); generator.Emit(OpCodes.Cgt_Un); generator.Emit(OpCodes.Ret); return (Func<object, bool>)setterMethod.CreateDelegate(typeof(Func<object, bool>)); } /// <summary> /// Creates an instance of T using a private or internal ctor. /// </summary> private static Func<object[], T> CreateTypeInstance<T>(ConstructorInfo constructorInfo) { Type classType = typeof(T); string methodName = classType.FullName + ".ctor"; DynamicMethod setterMethod = new DynamicMethod(methodName, classType, new Type[] { typeof(object[]) }, true); ILGenerator generator = setterMethod.GetILGenerator(); ParameterInfo[] ctorParams = constructorInfo.GetParameters(); for (int i = 0; i < ctorParams.Length; i++) { generator.Emit(OpCodes.Ldarg_0); switch (i) { case 0: generator.Emit(OpCodes.Ldc_I4_0); break; case 1: generator.Emit(OpCodes.Ldc_I4_1); break; case 2: generator.Emit(OpCodes.Ldc_I4_2); break; case 3: generator.Emit(OpCodes.Ldc_I4_3); break; case 4: generator.Emit(OpCodes.Ldc_I4_4); break; case 5: generator.Emit(OpCodes.Ldc_I4_5); break; case 6: generator.Emit(OpCodes.Ldc_I4_6); break; case 7: generator.Emit(OpCodes.Ldc_I4_7); break; case 8: generator.Emit(OpCodes.Ldc_I4_8); break; default: generator.Emit(OpCodes.Ldc_I4, i); break; } generator.Emit(OpCodes.Ldelem_Ref); Type paramType = ctorParams[i].ParameterType; generator.Emit(paramType.IsValueType ? OpCodes.Unbox_Any : OpCodes.Castclass, paramType); } generator.Emit(OpCodes.Newobj, constructorInfo); generator.Emit(OpCodes.Ret); return (Func<object[], T>)setterMethod.CreateDelegate(typeof(Func<object[], T>)); } private class HashtableWrapper : Hashtable, IEnumerable { private readonly Hashtable table; internal HashtableWrapper(Hashtable table) : base() { this.table = table; } public override int Count => this.table.Count; public override bool IsReadOnly => this.table.IsReadOnly; public override bool IsFixedSize => this.table.IsFixedSize; public override bool IsSynchronized => this.table.IsSynchronized; public override object SyncRoot => this.table.SyncRoot; public override ICollection Keys => this.table.Keys; public override ICollection Values => this.table.Values; public override object this[object key] { get => this.table[key]; set => this.table[key] = value; } public override void Add(object key, object value) { this.table.Add(key, value); } public override void Clear() { this.table.Clear(); } public override bool Contains(object key) { return this.table.Contains(key); } public override bool ContainsKey(object key) { return this.table.ContainsKey(key); } public override bool ContainsValue(object key) { return this.table.ContainsValue(key); } public override void CopyTo(Array array, int arrayIndex) { this.table.CopyTo(array, arrayIndex); } public override object Clone() { return new HashtableWrapper((Hashtable)this.table.Clone()); } IEnumerator IEnumerable.GetEnumerator() { return this.table.GetEnumerator(); } public override IDictionaryEnumerator GetEnumerator() { return this.table.GetEnumerator(); } public override void Remove(object key) { this.table.Remove(key); } } /// <summary> /// Helper class used for ServicePointManager.s_ServicePointTable. 
The goal here is to /// intercept each new ServicePoint object being added to ServicePointManager.s_ServicePointTable /// and replace its ConnectionGroupList hashtable field. /// </summary> private sealed class ServicePointHashtable : HashtableWrapper { public ServicePointHashtable(Hashtable table) : base(table) { } public override object this[object key] { get => base[key]; set { if (value is WeakReference weakRef && weakRef.IsAlive) { if (weakRef.Target is ServicePoint servicePoint) { // Replace the ConnectionGroup hashtable inside this ServicePoint object, // which allows us to intercept each new ConnectionGroup object added under // this ServicePoint. Hashtable originalTable = connectionGroupListField.GetValue(servicePoint) as Hashtable; ConnectionGroupHashtable newTable = new ConnectionGroupHashtable(originalTable ?? new Hashtable()); connectionGroupListField.SetValue(servicePoint, newTable); } } base[key] = value; } } } /// <summary> /// Helper class used for ServicePoint.m_ConnectionGroupList. The goal here is to /// intercept each new ConnectionGroup object being added to ServicePoint.m_ConnectionGroupList /// and replace its m_ConnectionList arraylist field. /// </summary> private sealed class ConnectionGroupHashtable : HashtableWrapper { public ConnectionGroupHashtable(Hashtable table) : base(table) { } public override object this[object key] { get => base[key]; set { if (connectionGroupType.IsInstanceOfType(value)) { // Replace the Connection arraylist inside this ConnectionGroup object, // which allows us to intercept each new Connection object added under // this ConnectionGroup. ArrayList originalArrayList = connectionListField.GetValue(value) as ArrayList; ConnectionArrayList newArrayList = new ConnectionArrayList(originalArrayList ?? new ArrayList()); connectionListField.SetValue(value, newArrayList); } base[key] = value; } } } /// <summary> /// Helper class used to wrap the array list object. This class itself doesn't actually /// have the array elements, but rather access another array list that's given at /// construction time. 
/// </summary> private class ArrayListWrapper : ArrayList { private ArrayList list; internal ArrayListWrapper(ArrayList list) : base() { this.list = list; } public override int Capacity { get => this.list.Capacity; set => this.list.Capacity = value; } public override int Count => this.list.Count; public override bool IsReadOnly => this.list.IsReadOnly; public override bool IsFixedSize => this.list.IsFixedSize; public override bool IsSynchronized => this.list.IsSynchronized; public override object SyncRoot => this.list.SyncRoot; public override object this[int index] { get => this.list[index]; set => this.list[index] = value; } public override int Add(object value) { return this.list.Add(value); } public override void AddRange(ICollection c) { this.list.AddRange(c); } public override int BinarySearch(object value) { return this.list.BinarySearch(value); } public override int BinarySearch(object value, IComparer comparer) { return this.list.BinarySearch(value, comparer); } public override int BinarySearch(int index, int count, object value, IComparer comparer) { return this.list.BinarySearch(index, count, value, comparer); } public override void Clear() { this.list.Clear(); } public override object Clone() { return new ArrayListWrapper((ArrayList)this.list.Clone()); } public override bool Contains(object item) { return this.list.Contains(item); } public override void CopyTo(Array array) { this.list.CopyTo(array); } public override void CopyTo(Array array, int index) { this.list.CopyTo(array, index); } public override void CopyTo(int index, Array array, int arrayIndex, int count) { this.list.CopyTo(index, array, arrayIndex, count); } public override IEnumerator GetEnumerator() { return this.list.GetEnumerator(); } public override IEnumerator GetEnumerator(int index, int count) { return this.list.GetEnumerator(index, count); } public override int IndexOf(object value) { return this.list.IndexOf(value); } public override int IndexOf(object value, int startIndex) { return this.list.IndexOf(value, startIndex); } public override int IndexOf(object value, int startIndex, int count) { return this.list.IndexOf(value, startIndex, count); } public override void Insert(int index, object value) { this.list.Insert(index, value); } public override void InsertRange(int index, ICollection c) { this.list.InsertRange(index, c); } public override int LastIndexOf(object value) { return this.list.LastIndexOf(value); } public override int LastIndexOf(object value, int startIndex) { return this.list.LastIndexOf(value, startIndex); } public override int LastIndexOf(object value, int startIndex, int count) { return this.list.LastIndexOf(value, startIndex, count); } public override void Remove(object value) { this.list.Remove(value); } public override void RemoveAt(int index) { this.list.RemoveAt(index); } public override void RemoveRange(int index, int count) { this.list.RemoveRange(index, count); } public override void Reverse(int index, int count) { this.list.Reverse(index, count); } public override void SetRange(int index, ICollection c) { this.list.SetRange(index, c); } public override ArrayList GetRange(int index, int count) { return this.list.GetRange(index, count); } public override void Sort() { this.list.Sort(); } public override void Sort(IComparer comparer) { this.list.Sort(comparer); } public override void Sort(int index, int count, IComparer comparer) { this.list.Sort(index, count, comparer); } public override object[] ToArray() { return this.list.ToArray(); } public override Array ToArray(Type type) { 
return this.list.ToArray(type); } public override void TrimToSize() { this.list.TrimToSize(); } public ArrayList Swap() { ArrayList old = this.list; this.list = new ArrayList(old.Capacity); return old; } } /// <summary> /// Helper class used for ConnectionGroup.m_ConnectionList. The goal here is to /// intercept each new Connection object being added to ConnectionGroup.m_ConnectionList /// and replace its m_WriteList arraylist field. /// </summary> private sealed class ConnectionArrayList : ArrayListWrapper { public ConnectionArrayList(ArrayList list) : base(list) { } public override int Add(object value) { if (connectionType.IsInstanceOfType(value)) { // Replace the HttpWebRequest arraylist inside this Connection object, // which allows us to intercept each new HttpWebRequest object added under // this Connection. ArrayList originalArrayList = writeListField.GetValue(value) as ArrayList; HttpWebRequestArrayList newArrayList = new HttpWebRequestArrayList(originalArrayList ?? new ArrayList()); writeListField.SetValue(value, newArrayList); } return base.Add(value); } } /// <summary> /// Helper class used for Connection.m_WriteList. The goal here is to /// intercept all new HttpWebRequest objects being added to Connection.m_WriteList /// and notify the listener about the HttpWebRequest that's about to send a request. /// It also intercepts all HttpWebRequest objects that are about to get removed from /// Connection.m_WriteList as they have completed the request. /// </summary> private sealed class HttpWebRequestArrayList : ArrayListWrapper { public HttpWebRequestArrayList(ArrayList list) : base(list) { } public override int Add(object value) { // Add before firing events so if some user code cancels/aborts the request it will be found in the outstanding list. int index = base.Add(value); if (value is HttpWebRequest request) { ProcessRequest(request); } return index; } public override void RemoveAt(int index) { object request = this[index]; base.RemoveAt(index); if (request is HttpWebRequest webRequest) { HookOrProcessResult(webRequest); } } public override void Clear() { ArrayList oldList = this.Swap(); for (int i = 0; i < oldList.Count; i++) { if (oldList[i] is HttpWebRequest request) { HookOrProcessResult(request); } } } } /// <summary> /// A closure object so our state is available when our callback executes. /// </summary> private sealed class AsyncCallbackWrapper { public AsyncCallbackWrapper(HttpWebRequest request, Activity activity, AsyncCallback originalCallback) { this.Request = request; this.Activity = activity; this.OriginalCallback = originalCallback; } public HttpWebRequest Request { get; } public Activity Activity { get; } public AsyncCallback OriginalCallback { get; } public void AsyncCallback(IAsyncResult asyncResult) { object result = resultAccessor(asyncResult); if (result is Exception || result is HttpWebResponse) { ProcessResult(asyncResult, this.OriginalCallback, this.Activity, result, false); } this.OriginalCallback?.Invoke(asyncResult); } } } } #endif
1
21,161
When Activity is lost (more precisely, ExecutionContext is lost) in the HttpModule we restore the root (HttpIn) Activity. That makes this assert invalid. I tried to fix the HttpModule so that it restores the Activity that was last running, but it is impossible to retrieve due to the way ExecutionContext works. It isn't an issue to remove the assert, but it is unnerving: any instrumentation running in IIS that relies on Activity.Current could run into trouble.
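To make the concern above concrete, here is a purely illustrative C# sketch (not the project's actual change) of the pattern under discussion: on the result/callback thread the started activity is simply made current again, without a Debug.Assert about whatever value Activity.Current was restored to.

```csharp
using System.Diagnostics;

static class ActivityRestoreSketch
{
    // Illustrative only: restore a known Activity on the callback thread instead of
    // asserting about Activity.Current, which may be the restored root (HttpIn)
    // activity when the ExecutionContext was lost in the HttpModule.
    public static void MakeCurrent(Activity activity)
    {
        if (Activity.Current == null)
        {
            Activity.Current = activity;
        }
    }
}
```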
open-telemetry-opentelemetry-dotnet
.cs
@@ -63,13 +63,16 @@ func WrapNetwork(net network.GossipNode, log logging.Logger) agreement.Network { i.net = net i.log = log + return i +} + +func (i *networkImpl) Start() { handlers := []network.TaggedMessageHandler{ {Tag: protocol.AgreementVoteTag, MessageHandler: network.HandlerFunc(i.processVoteMessage)}, {Tag: protocol.ProposalPayloadTag, MessageHandler: network.HandlerFunc(i.processProposalMessage)}, {Tag: protocol.VoteBundleTag, MessageHandler: network.HandlerFunc(i.processBundleMessage)}, } - net.RegisterHandlers(handlers) - return i + i.net.RegisterHandlers(handlers) } func messageMetadataFromHandle(h agreement.MessageHandle) *messageMetadata {
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. // Package gossip adapts the interface of network.GossipNode to // agreement.Network. package gossip import ( "context" "time" "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/metrics" ) var ( voteBufferSize = 10000 proposalBufferSize = 14 bundleBufferSize = 7 ) var messagesHandled = metrics.MakeCounter(metrics.AgreementMessagesHandled) var messagesDropped = metrics.MakeCounter(metrics.AgreementMessagesDropped) type messageMetadata struct { raw network.IncomingMessage } // networkImpl wraps network.GossipNode to provide a compatible interface with agreement. type networkImpl struct { voteCh chan agreement.Message proposalCh chan agreement.Message bundleCh chan agreement.Message net network.GossipNode log logging.Logger } // WrapNetwork adapts a network.GossipNode into an agreement.Network. func WrapNetwork(net network.GossipNode, log logging.Logger) agreement.Network { i := new(networkImpl) i.voteCh = make(chan agreement.Message, voteBufferSize) i.proposalCh = make(chan agreement.Message, proposalBufferSize) i.bundleCh = make(chan agreement.Message, bundleBufferSize) i.net = net i.log = log handlers := []network.TaggedMessageHandler{ {Tag: protocol.AgreementVoteTag, MessageHandler: network.HandlerFunc(i.processVoteMessage)}, {Tag: protocol.ProposalPayloadTag, MessageHandler: network.HandlerFunc(i.processProposalMessage)}, {Tag: protocol.VoteBundleTag, MessageHandler: network.HandlerFunc(i.processBundleMessage)}, } net.RegisterHandlers(handlers) return i } func messageMetadataFromHandle(h agreement.MessageHandle) *messageMetadata { if msg, isMsg := h.(*messageMetadata); isMsg { return msg } return nil } func (i *networkImpl) processVoteMessage(raw network.IncomingMessage) network.OutgoingMessage { return i.processMessage(raw, i.voteCh) } func (i *networkImpl) processProposalMessage(raw network.IncomingMessage) network.OutgoingMessage { return i.processMessage(raw, i.proposalCh) } func (i *networkImpl) processBundleMessage(raw network.IncomingMessage) network.OutgoingMessage { return i.processMessage(raw, i.bundleCh) } // i.e. process<Type>Message func (i *networkImpl) processMessage(raw network.IncomingMessage, submit chan<- agreement.Message) network.OutgoingMessage { metadata := &messageMetadata{raw: raw} select { case submit <- agreement.Message{MessageHandle: agreement.MessageHandle(metadata), Data: raw.Data}: // It would be slightly better to measure at de-queue // time, but that happens in many places in code and // this is much easier. 
messagesHandled.Inc(nil) default: messagesDropped.Inc(nil) } // Immediately ignore everything here, sometimes Relay/Broadcast/Disconnect later based on API handles saved from IncomingMessage return network.OutgoingMessage{Action: network.Ignore} } func (i *networkImpl) Messages(t protocol.Tag) <-chan agreement.Message { switch t { case protocol.AgreementVoteTag: return i.voteCh case protocol.ProposalPayloadTag: return i.proposalCh case protocol.VoteBundleTag: return i.bundleCh default: i.log.Panicf("bad tag! %v", t) return nil } } func (i *networkImpl) Broadcast(t protocol.Tag, data []byte) (err error) { err = i.net.Broadcast(context.Background(), t, data, false, nil) if err != nil { i.log.Infof("agreement: could not broadcast message with tag %v: %v", t, err) } return } func (i *networkImpl) Relay(h agreement.MessageHandle, t protocol.Tag, data []byte) (err error) { metadata := messageMetadataFromHandle(h) if metadata == nil { // synthentic loopback err = i.net.Broadcast(context.Background(), t, data, false, nil) if err != nil { i.log.Infof("agreement: could not (pseudo)relay message with tag %v: %v", t, err) } } else { err = i.net.Relay(context.Background(), t, data, false, metadata.raw.Sender) if err != nil { i.log.Infof("agreement: could not relay message from %v with tag %v: %v", metadata.raw.Sender, t, err) } } return } func (i *networkImpl) Disconnect(h agreement.MessageHandle) { metadata := messageMetadataFromHandle(h) if metadata == nil { // synthentic loopback // TODO warn return } i.net.Disconnect(metadata.raw.Sender) } // broadcastTimeout is currently only used by test code. // In test code we want to queue up a bunch of outbound packets and then see that they got through, so we need to wait at least a little bit for them to all go out. // Normal agreement state machine code uses GossipNode.Broadcast non-blocking and may drop outbound packets. func (i *networkImpl) broadcastTimeout(t protocol.Tag, data []byte, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() return i.net.Broadcast(ctx, t, data, true, nil) }
1
37,867
What was the point of moving the handler registration into a separate method?
algorand-go-algorand
go
@@ -30,6 +30,9 @@ func newSyncCache(state *core.BuildState, remoteOnly bool) core.Cache { if state.Config.Cache.HTTPURL != "" { mplex.caches = append(mplex.caches, newHTTPCache(state.Config)) } + if state.Config.Cache.RetrieveCommand != "" { + mplex.caches = append(mplex.caches, newCmdCache(state.Config)) + } if len(mplex.caches) == 0 { return &noopCache{} } else if len(mplex.caches) == 1 {
1
// Caching support for Please. package cache import ( "sync" "gopkg.in/op/go-logging.v1" "github.com/thought-machine/please/src/core" ) var log = logging.MustGetLogger("cache") // NewCache is the factory function for creating a cache setup from the given config. func NewCache(state *core.BuildState) core.Cache { c := newSyncCache(state, false) if state.Config.Cache.Workers > 0 { return newAsyncCache(c, state.Config) } return c } // newSyncCache creates a new cache, possibly multiplexing many underneath. func newSyncCache(state *core.BuildState, remoteOnly bool) core.Cache { mplex := &cacheMultiplexer{} if state.Config.Cache.Dir != "" && !remoteOnly { mplex.caches = append(mplex.caches, newDirCache(state.Config)) } if state.Config.Cache.HTTPURL != "" { mplex.caches = append(mplex.caches, newHTTPCache(state.Config)) } if len(mplex.caches) == 0 { return &noopCache{} } else if len(mplex.caches) == 1 { return mplex.caches[0] // Skip the extra layer of indirection } return mplex } // A cacheMultiplexer multiplexes several caches into one. // Used when we have several active (eg. http, dir). type cacheMultiplexer struct { caches []core.Cache } func (mplex cacheMultiplexer) Store(target *core.BuildTarget, key []byte, files []string) { mplex.storeUntil(target, key, files, len(mplex.caches)) } // storeUntil stores artifacts into higher priority caches than the given one. // Used after artifact retrieval to ensure we have them in eg. the directory cache after // downloading from the RPC cache. // This is a little inefficient since we could write the file to plz-out then copy it to the dir cache, // but it's hard to fix that without breaking the cache abstraction. func (mplex cacheMultiplexer) storeUntil(target *core.BuildTarget, key []byte, files []string, stopAt int) { // Attempt to store on all caches simultaneously. var wg sync.WaitGroup for i, cache := range mplex.caches { if i == stopAt { break } wg.Add(1) go func(cache core.Cache) { cache.Store(target, key, files) wg.Done() }(cache) } wg.Wait() } func (mplex cacheMultiplexer) Retrieve(target *core.BuildTarget, key []byte, files []string) bool { // Retrieve from caches sequentially; if we did them simultaneously we could // easily write the same file from two goroutines at once. for i, cache := range mplex.caches { if ok := cache.Retrieve(target, key, files); ok { // Store this into other caches mplex.storeUntil(target, key, files, i) return ok } } return false } func (mplex cacheMultiplexer) Clean(target *core.BuildTarget) { for _, cache := range mplex.caches { cache.Clean(target) } } func (mplex cacheMultiplexer) CleanAll() { for _, cache := range mplex.caches { cache.CleanAll() } } func (mplex cacheMultiplexer) Shutdown() { for _, cache := range mplex.caches { cache.Shutdown() } }
1
10,379
At this point we probably want to ensure there's a store command set.
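A minimal sketch of the guard this comment is asking for is shown below. It assumes a StoreCommand config field that mirrors the RetrieveCommand field used in the diff; that field name is an assumption, not taken from the repository.

```go
// Sketch only: enable the command-driven cache when both commands are configured.
// state.Config.Cache.StoreCommand is an assumed field name mirroring RetrieveCommand.
if state.Config.Cache.RetrieveCommand != "" && state.Config.Cache.StoreCommand != "" {
	mplex.caches = append(mplex.caches, newCmdCache(state.Config))
}
```

An alternative design would be to fail config validation loudly when only one of the two commands is set, rather than silently skipping the cache.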
thought-machine-please
go
@@ -106,8 +106,13 @@ HDPrivateKey._getDerivationIndexes = function(path) { } var indexes = steps.slice(1).map(function(step) { - var index = parseInt(step); - index += step != index.toString() ? HDPrivateKey.Hardened : 0; + var index = step ? +step : NaN; + + var isHardened = isNaN(index) && step[step.length-1] == "'"; + if (isHardened) { + index = (+(step.slice(0, -1))) + HDPrivateKey.Hardened; + } + return index; });
1
'use strict'; var assert = require('assert'); var buffer = require('buffer'); var _ = require('lodash'); var BN = require('./crypto/bn'); var Base58 = require('./encoding/base58'); var Base58Check = require('./encoding/base58check'); var Hash = require('./crypto/hash'); var Network = require('./networks'); var HDKeyCache = require('./hdkeycache'); var Point = require('./crypto/point'); var PrivateKey = require('./privatekey'); var Random = require('./crypto/random'); var errors = require('./errors'); var hdErrors = errors.HDPrivateKey; var BufferUtil = require('./util/buffer'); var JSUtil = require('./util/js'); var MINIMUM_ENTROPY_BITS = 128; var BITS_TO_BYTES = 1/8; var MAXIMUM_ENTROPY_BITS = 512; /** * Represents an instance of an hierarchically derived private key. * * More info on https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki * * @constructor * @param {string|Buffer|Object} arg */ function HDPrivateKey(arg) { /* jshint maxcomplexity: 10 */ if (arg instanceof HDPrivateKey) { return arg; } if (!(this instanceof HDPrivateKey)) { return new HDPrivateKey(arg); } if (!arg) { return this._generateRandomly(); } if (Network.get(arg)) { return this._generateRandomly(arg); } else if (_.isString(arg) || BufferUtil.isBuffer(arg)) { if (HDPrivateKey.isValidSerialized(arg)) { this._buildFromSerialized(arg); } else if (JSUtil.isValidJSON(arg)) { this._buildFromJSON(arg); } else { throw HDPrivateKey.getSerializedError(arg); } } else if (_.isObject(arg)) { this._buildFromObject(arg); } else { throw new hdErrors.UnrecognizedArgument(arg); } } /** * Verifies that a given path is valid. * * @param {string|number} arg * @param {boolean?} hardened * @return {boolean} */ HDPrivateKey.isValidPath = function(arg, hardened) { if (_.isString(arg)) { var indexes = HDPrivateKey._getDerivationIndexes(arg); return indexes !== null && _.all(indexes, HDPrivateKey.isValidPath); } if (_.isNumber(arg)) { if (arg < HDPrivateKey.Hardened && hardened === true) { arg += HDPrivateKey.Hardened; } return arg >= 0 && arg < HDPrivateKey.MaxIndex; } return false; }; /** * Internal function that splits a string path into a derivation index array. * It will return null if the string path is malformed. * It does not validate if indexes are in bounds. * * @param {string} path * @return {Array} */ HDPrivateKey._getDerivationIndexes = function(path) { var steps = path.split('/'); // Special cases: if (_.contains(HDPrivateKey.RootElementAlias, path)) { return []; } if (!_.contains(HDPrivateKey.RootElementAlias, steps[0])) { return null; } var indexes = steps.slice(1).map(function(step) { var index = parseInt(step); index += step != index.toString() ? HDPrivateKey.Hardened : 0; return index; }); return _.any(indexes, isNaN) ? null : indexes; } /** * Get a derivated child based on a string or number. * * If the first argument is a string, it's parsed as the full path of * derivation. Valid values for this argument include "m" (which returns the * same private key), "m/0/1/40/2'/1000", where the ' quote means a hardened * derivation. * * If the first argument is a number, the child with that index will be * derived. If the second argument is truthy, the hardened version will be * derived. See the example usage for clarification. 
* * @example * ```javascript * var parent = new HDPrivateKey('xprv...'); * var child_0_1_2h = parent.derive(0).derive(1).derive(2, true); * var copy_of_child_0_1_2h = parent.derive("m/0/1/2'"); * assert(child_0_1_2h.xprivkey === copy_of_child_0_1_2h); * ``` * * @param {string|number} arg * @param {boolean?} hardened */ HDPrivateKey.prototype.derive = function(arg, hardened) { if (_.isNumber(arg)) { return this._deriveWithNumber(arg, hardened); } else if (_.isString(arg)) { return this._deriveFromString(arg); } else { throw new hdErrors.InvalidDerivationArgument(arg); } }; HDPrivateKey.prototype._deriveWithNumber = function(index, hardened) { /* jshint maxstatements: 20 */ /* jshint maxcomplexity: 10 */ if (!HDPrivateKey.isValidPath(index, hardened)) { throw new hdErrors.InvalidPath(index); } hardened = index >= HDPrivateKey.Hardened ? true : hardened; if (index < HDPrivateKey.Hardened && hardened === true) { index += HDPrivateKey.Hardened; } var cached = HDKeyCache.get(this.xprivkey, index, hardened); if (cached) { return cached; } var indexBuffer = BufferUtil.integerAsBuffer(index); var data; if (hardened) { data = BufferUtil.concat([new buffer.Buffer([0]), this.privateKey.toBuffer(), indexBuffer]); } else { data = BufferUtil.concat([this.publicKey.toBuffer(), indexBuffer]); } var hash = Hash.sha512hmac(data, this._buffers.chainCode); var leftPart = BN.fromBuffer(hash.slice(0, 32), {size: 32}); var chainCode = hash.slice(32, 64); var privateKey = leftPart.add(this.privateKey.toBigNumber()).mod(Point.getN()).toBuffer({size: 32}); var derived = new HDPrivateKey({ network: this.network, depth: this.depth + 1, parentFingerPrint: this.fingerPrint, childIndex: index, chainCode: chainCode, privateKey: privateKey }); HDKeyCache.set(this.xprivkey, index, hardened, derived); return derived; }; HDPrivateKey.prototype._deriveFromString = function(path) { if (!HDPrivateKey.isValidPath(path)) { throw new hdErrors.InvalidPath(path); } var indexes = HDPrivateKey._getDerivationIndexes(path); var derived = indexes.reduce(function(prev, index) { return prev._deriveWithNumber(index); }, this); return derived; }; /** * Verifies that a given serialized private key in base58 with checksum format * is valid. * * @param {string|Buffer} data - the serialized private key * @param {string|Network=} network - optional, if present, checks that the * network provided matches the network serialized. * @return {boolean} */ HDPrivateKey.isValidSerialized = function(data, network) { return !HDPrivateKey.getSerializedError(data, network); }; /** * Checks what's the error that causes the validation of a serialized private key * in base58 with checksum to fail. * * @param {string|Buffer} data - the serialized private key * @param {string|Network=} network - optional, if present, checks that the * network provided matches the network serialized. 
* @return {errors.InvalidArgument|null} */ HDPrivateKey.getSerializedError = function(data, network) { /* jshint maxcomplexity: 10 */ if (!(_.isString(data) || BufferUtil.isBuffer(data))) { return new hdErrors.UnrecognizedArgument('Expected string or buffer'); } if (!Base58.validCharacters(data)) { return new errors.InvalidB58Char('(unknown)', data); } try { data = Base58Check.decode(data); } catch (e) { return new errors.InvalidB58Checksum(data); } if (data.length !== HDPrivateKey.DataLength) { return new hdErrors.InvalidLength(data); } if (!_.isUndefined(network)) { var error = HDPrivateKey._validateNetwork(data, network); if (error) { return error; } } return null; }; HDPrivateKey._validateNetwork = function(data, networkArg) { var network = Network.get(networkArg); if (!network) { return new errors.InvalidNetworkArgument(networkArg); } var version = data.slice(0, 4); if (BufferUtil.integerFromBuffer(version) !== network.xprivkey) { return new errors.InvalidNetwork(version); } return null; }; HDPrivateKey.fromJSON = HDPrivateKey.fromObject = HDPrivateKey.fromString = function(arg) { return new HDPrivateKey(arg); }; HDPrivateKey.prototype._buildFromJSON = function(arg) { return this._buildFromObject(JSON.parse(arg)); }; HDPrivateKey.prototype._buildFromObject = function(arg) { /* jshint maxcomplexity: 12 */ // TODO: Type validation var buffers = { version: arg.network ? BufferUtil.integerAsBuffer(Network.get(arg.network).xprivkey) : arg.version, depth: _.isNumber(arg.depth) ? BufferUtil.integerAsSingleByteBuffer(arg.depth) : arg.depth, parentFingerPrint: _.isNumber(arg.parentFingerPrint) ? BufferUtil.integerAsBuffer(arg.parentFingerPrint) : arg.parentFingerPrint, childIndex: _.isNumber(arg.childIndex) ? BufferUtil.integerAsBuffer(arg.childIndex) : arg.childIndex, chainCode: _.isString(arg.chainCode) ? BufferUtil.hexToBuffer(arg.chainCode) : arg.chainCode, privateKey: (_.isString(arg.privateKey) && JSUtil.isHexa(arg.privateKey)) ? BufferUtil.hexToBuffer(arg.privateKey) : arg.privateKey, checksum: arg.checksum ? (arg.checksum.length ? 
arg.checksum : BufferUtil.integerAsBuffer(arg.checksum)) : undefined }; return this._buildFromBuffers(buffers); }; HDPrivateKey.prototype._buildFromSerialized = function(arg) { var decoded = Base58Check.decode(arg); var buffers = { version: decoded.slice(HDPrivateKey.VersionStart, HDPrivateKey.VersionEnd), depth: decoded.slice(HDPrivateKey.DepthStart, HDPrivateKey.DepthEnd), parentFingerPrint: decoded.slice(HDPrivateKey.ParentFingerPrintStart, HDPrivateKey.ParentFingerPrintEnd), childIndex: decoded.slice(HDPrivateKey.ChildIndexStart, HDPrivateKey.ChildIndexEnd), chainCode: decoded.slice(HDPrivateKey.ChainCodeStart, HDPrivateKey.ChainCodeEnd), privateKey: decoded.slice(HDPrivateKey.PrivateKeyStart, HDPrivateKey.PrivateKeyEnd), checksum: decoded.slice(HDPrivateKey.ChecksumStart, HDPrivateKey.ChecksumEnd), xprivkey: arg }; return this._buildFromBuffers(buffers); }; HDPrivateKey.prototype._generateRandomly = function(network) { return HDPrivateKey.fromSeed(Random.getRandomBuffer(64), network); }; /** * Generate a private key from a seed, as described in BIP32 * * @param {string|Buffer} hexa * @param {*} network * @return HDPrivateKey */ HDPrivateKey.fromSeed = function(hexa, network) { /* jshint maxcomplexity: 8 */ if (JSUtil.isHexaString(hexa)) { hexa = BufferUtil.hexToBuffer(hexa); } if (!Buffer.isBuffer(hexa)) { throw new hdErrors.InvalidEntropyArgument(hexa); } if (hexa.length < MINIMUM_ENTROPY_BITS * BITS_TO_BYTES) { throw new hdErrors.InvalidEntropyArgument.NotEnoughEntropy(hexa); } if (hexa.length > MAXIMUM_ENTROPY_BITS * BITS_TO_BYTES) { throw new hdErrors.InvalidEntropyArgument.TooMuchEntropy(hexa); } var hash = Hash.sha512hmac(hexa, new buffer.Buffer('Bitcoin seed')); return new HDPrivateKey({ network: Network.get(network) || Network.defaultNetwork, depth: 0, parentFingerPrint: 0, childIndex: 0, privateKey: hash.slice(0, 32), chainCode: hash.slice(32, 64) }); }; /** * Receives a object with buffers in all the properties and populates the * internal structure * * @param {Object} arg * @param {buffer.Buffer} arg.version * @param {buffer.Buffer} arg.depth * @param {buffer.Buffer} arg.parentFingerPrint * @param {buffer.Buffer} arg.childIndex * @param {buffer.Buffer} arg.chainCode * @param {buffer.Buffer} arg.privateKey * @param {buffer.Buffer} arg.checksum * @param {string=} arg.xprivkey - if set, don't recalculate the base58 * representation * @return {HDPrivateKey} this */ HDPrivateKey.prototype._buildFromBuffers = function(arg) { /* jshint maxcomplexity: 8 */ /* jshint maxstatements: 20 */ HDPrivateKey._validateBufferArguments(arg); JSUtil.defineImmutable(this, { _buffers: arg }); var sequence = [ arg.version, arg.depth, arg.parentFingerPrint, arg.childIndex, arg.chainCode, BufferUtil.emptyBuffer(1), arg.privateKey ]; var concat = buffer.Buffer.concat(sequence); if (!arg.checksum || !arg.checksum.length) { arg.checksum = Base58Check.checksum(concat); } else { if (arg.checksum.toString() !== Base58Check.checksum(concat).toString()) { throw new errors.InvalidB58Checksum(concat); } } var xprivkey; if (!arg.xprivkey) { xprivkey = Base58Check.encode(buffer.Buffer.concat(sequence)); } else { xprivkey = arg.xprivkey; } var privateKey = new PrivateKey(BN.fromBuffer(arg.privateKey)); var publicKey = privateKey.toPublicKey(); var size = HDPrivateKey.ParentFingerPrintSize; var fingerPrint = Hash.sha256ripemd160(publicKey.toBuffer()).slice(0, size); JSUtil.defineImmutable(this, { xprivkey: xprivkey, network: Network.get(BufferUtil.integerFromBuffer(arg.version)), depth: 
BufferUtil.integerFromSingleByteBuffer(arg.depth), privateKey: privateKey, publicKey: publicKey, fingerPrint: fingerPrint }); var HDPublicKey = require('./hdpublickey'); var hdPublicKey = new HDPublicKey(this); JSUtil.defineImmutable(this, { hdPublicKey: hdPublicKey, xpubkey: hdPublicKey.xpubkey }); return this; }; HDPrivateKey._validateBufferArguments = function(arg) { var checkBuffer = function(name, size) { var buff = arg[name]; assert(BufferUtil.isBuffer(buff), name + ' argument is not a buffer'); assert( buff.length === size, name + ' has not the expected size: found ' + buff.length + ', expected ' + size ); }; checkBuffer('version', HDPrivateKey.VersionSize); checkBuffer('depth', HDPrivateKey.DepthSize); checkBuffer('parentFingerPrint', HDPrivateKey.ParentFingerPrintSize); checkBuffer('childIndex', HDPrivateKey.ChildIndexSize); checkBuffer('chainCode', HDPrivateKey.ChainCodeSize); checkBuffer('privateKey', HDPrivateKey.PrivateKeySize); if (arg.checksum && arg.checksum.length) { checkBuffer('checksum', HDPrivateKey.CheckSumSize); } }; /** * Returns the string representation of this private key (a string starting * with "xprv..." * * @return string */ HDPrivateKey.prototype.toString = function() { return this.xprivkey; }; /** * Returns the console representation of this extended private key. * @return string */ HDPrivateKey.prototype.inspect = function() { return '<HDPrivateKey: ' + this.xprivkey + '>'; }; /** * Returns a plain object with a representation of this private key. * * Fields include:<ul> * <li> network: either 'livenet' or 'testnet' * <li> depth: a number ranging from 0 to 255 * <li> fingerPrint: a number ranging from 0 to 2^32-1, taken from the hash of the * <li> associated public key * <li> parentFingerPrint: a number ranging from 0 to 2^32-1, taken from the hash * <li> of this parent's associated public key or zero. 
* <li> childIndex: the index from which this child was derived (or zero) * <li> chainCode: an hexa string representing a number used in the derivation * <li> privateKey: the private key associated, in hexa representation * <li> xprivkey: the representation of this extended private key in checksum * <li> base58 format * <li> checksum: the base58 checksum of xprivkey * </ul> * @return {Object} */ HDPrivateKey.prototype.toObject = function toObject() { return { network: Network.get(BufferUtil.integerFromBuffer(this._buffers.version)).name, depth: BufferUtil.integerFromSingleByteBuffer(this._buffers.depth), fingerPrint: BufferUtil.integerFromBuffer(this.fingerPrint), parentFingerPrint: BufferUtil.integerFromBuffer(this._buffers.parentFingerPrint), childIndex: BufferUtil.integerFromBuffer(this._buffers.childIndex), chainCode: BufferUtil.bufferToHex(this._buffers.chainCode), privateKey: this.privateKey.toBuffer().toString('hex'), checksum: BufferUtil.integerFromBuffer(this._buffers.checksum), xprivkey: this.xprivkey }; }; HDPrivateKey.prototype.toJSON = function toJSON() { return JSON.stringify(this.toObject()); }; HDPrivateKey.DefaultDepth = 0; HDPrivateKey.DefaultFingerprint = 0; HDPrivateKey.DefaultChildIndex = 0; HDPrivateKey.Hardened = 0x80000000; HDPrivateKey.MaxIndex = 2 * HDPrivateKey.Hardened; HDPrivateKey.RootElementAlias = ['m', 'M', 'm\'', 'M\'']; HDPrivateKey.VersionSize = 4; HDPrivateKey.DepthSize = 1; HDPrivateKey.ParentFingerPrintSize = 4; HDPrivateKey.ChildIndexSize = 4; HDPrivateKey.ChainCodeSize = 32; HDPrivateKey.PrivateKeySize = 32; HDPrivateKey.CheckSumSize = 4; HDPrivateKey.DataLength = 78; HDPrivateKey.SerializedByteSize = 82; HDPrivateKey.VersionStart = 0; HDPrivateKey.VersionEnd = HDPrivateKey.VersionStart + HDPrivateKey.VersionSize; HDPrivateKey.DepthStart = HDPrivateKey.VersionEnd; HDPrivateKey.DepthEnd = HDPrivateKey.DepthStart + HDPrivateKey.DepthSize; HDPrivateKey.ParentFingerPrintStart = HDPrivateKey.DepthEnd; HDPrivateKey.ParentFingerPrintEnd = HDPrivateKey.ParentFingerPrintStart + HDPrivateKey.ParentFingerPrintSize; HDPrivateKey.ChildIndexStart = HDPrivateKey.ParentFingerPrintEnd; HDPrivateKey.ChildIndexEnd = HDPrivateKey.ChildIndexStart + HDPrivateKey.ChildIndexSize; HDPrivateKey.ChainCodeStart = HDPrivateKey.ChildIndexEnd; HDPrivateKey.ChainCodeEnd = HDPrivateKey.ChainCodeStart + HDPrivateKey.ChainCodeSize; HDPrivateKey.PrivateKeyStart = HDPrivateKey.ChainCodeEnd + 1; HDPrivateKey.PrivateKeyEnd = HDPrivateKey.PrivateKeyStart + HDPrivateKey.PrivateKeySize; HDPrivateKey.ChecksumStart = HDPrivateKey.PrivateKeyEnd; HDPrivateKey.ChecksumEnd = HDPrivateKey.ChecksumStart + HDPrivateKey.CheckSumSize; assert(HDPrivateKey.ChecksumEnd === HDPrivateKey.SerializedByteSize); module.exports = HDPrivateKey;
1
13,886
This code is way too complicated for what it does. I don't want to be a PITA, but could you consider rewriting it in a simpler way? I'm talking about the whole `_getDerivationIndexes` function.
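For illustration only, a flatter version of the step parsing might look like the sketch below. It reuses `steps` and `HDPrivateKey.Hardened` from the existing function and keeps the caller's `_.any(indexes, isNaN)` rejection, so it is a sketch of intent rather than the project's final code.

```javascript
var indexes = steps.slice(1).map(function(step) {
  // A trailing apostrophe marks a hardened index.
  var hardened = step[step.length - 1] === "'";
  var digits = hardened ? step.slice(0, -1) : step;
  if (!/^\d+$/.test(digits)) {
    return NaN; // the caller already maps any NaN entry to a null result
  }
  var index = parseInt(digits, 10);
  return hardened ? index + HDPrivateKey.Hardened : index;
});
```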
bitpay-bitcore
js
@@ -151,6 +151,10 @@ namespace Nethermind.Vault.JsonRpc { try { + if (message.StartsWith("0x")) + { + throw new Exception($"Vault message should not be in hex; message: {message}"); + } string result = await _vaultService.Sign(Guid.Parse(vaultId), Guid.Parse(keyId), message); return ResultWrapper<string>.Success(result); }
1
// Copyright (c) 2020 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Linq; using Nethermind.Logging; using System.Threading.Tasks; using Nethermind.JsonRpc; using provide.Model.Vault; namespace Nethermind.Vault.JsonRpc { public class VaultModule : IVaultModule { private readonly ILogger _logger; private readonly IVaultService _vaultService; public VaultModule(IVaultService vaultService, ILogManager logManager) { _vaultService = vaultService ?? throw new ArgumentNullException(nameof(vaultService)); _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); } public async Task<ResultWrapper<Key>> vault_createKey(string vaultId, Key key) { Key result = await _vaultService.CreateKey(Guid.Parse(vaultId), key); return ResultWrapper<Key>.Success(result); } public async Task<ResultWrapper<Secret>> vault_createSecret(string vaultId, Secret secret) { try { Secret result = await _vaultService.CreateSecret(Guid.Parse(vaultId), secret); return ResultWrapper<Secret>.Success(result); } catch (Exception e) { return ResultWrapper<Secret>.Fail(e); } } public async Task<ResultWrapper<provide.Model.Vault.Vault>> vault_createVault(provide.Model.Vault.Vault vault) { try { provide.Model.Vault.Vault vaultId = await _vaultService.CreateVault(vault); return ResultWrapper<provide.Model.Vault.Vault>.Success(vaultId); } catch (Exception e) { return ResultWrapper<provide.Model.Vault.Vault>.Fail(e); } } public async Task<ResultWrapper<provide.Model.Vault.Vault>> vault_deleteVault(string vaultId) { try { provide.Model.Vault.Vault result = await _vaultService.DeleteVault(Guid.Parse(vaultId)); return ResultWrapper<provide.Model.Vault.Vault>.Success(result); } catch (Exception e) { return ResultWrapper<provide.Model.Vault.Vault>.Fail(e); } } public async Task<ResultWrapper<bool>> vault_deleteKey(string vaultId, string keyId) { try { await _vaultService.DeleteKey(Guid.Parse(vaultId), Guid.Parse(keyId)); return ResultWrapper<bool>.Success(true); } catch (Exception e) { return ResultWrapper<bool>.Fail(e); } } public async Task<ResultWrapper<bool>> vault_deleteSecret(string vaultId, string secretId) { try { await _vaultService.DeleteSecret(Guid.Parse(vaultId), Guid.Parse(secretId)); return ResultWrapper<bool>.Success(true); } catch (Exception e) { return ResultWrapper<bool>.Fail(e); } } public async Task<ResultWrapper<string[]>> vault_listVaults() { try { var result = await _vaultService.ListVaultIds(); return ResultWrapper<string[]>.Success(result.Select(id => id.ToString()).ToArray()); } catch (Exception e) { return ResultWrapper<string[]>.Fail(e); } } public async Task<ResultWrapper<Key[]>> vault_listKeys(string vaultId) { try { var keys = await _vaultService.ListKeys(Guid.Parse(vaultId)); return ResultWrapper<Key[]>.Success(keys.ToArray()); } catch 
(Exception e) { return ResultWrapper<Key[]>.Fail(e); } } public async Task<ResultWrapper<Secret[]>> vault_listSecrets(string vaultId) { try { var secrets = await _vaultService.ListSecrets(Guid.Parse(vaultId)); return ResultWrapper<Secret[]>.Success(secrets.ToArray()); } catch (Exception e) { return ResultWrapper<Secret[]>.Fail(e); } } public async Task<ResultWrapper<string>> vault_signMessage(string vaultId, string keyId, string message) { try { string result = await _vaultService.Sign(Guid.Parse(vaultId), Guid.Parse(keyId), message); return ResultWrapper<string>.Success(result); } catch (Exception e) { return ResultWrapper<string>.Fail(e); } } public async Task<ResultWrapper<bool>> vault_verifySignature( string vaultId, string keyId, string message, string signature) { try { bool result = await _vaultService.Verify(Guid.Parse(vaultId), Guid.Parse(keyId), message, signature); return ResultWrapper<bool>.Success(result); } catch (Exception e) { return ResultWrapper<bool>.Fail(e); } } public async Task<ResultWrapper<bool>> vault_setToken(string token) { try { await _vaultService.ResetToken(token); return ResultWrapper<bool>.Success(true); } catch (Exception e) { return ResultWrapper<bool>.Fail(e); } } public async Task<ResultWrapper<bool>> vault_configure(string scheme, string host, string path, string token) { try { await _vaultService.Reset(scheme, host, path, token); return ResultWrapper<bool>.Success(true); } catch (Exception e) { return ResultWrapper<bool>.Fail(e); } } } }
1
24,572
Don't throw the base Exception type; specify a more precise exception type.
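As a hedged illustration of this suggestion, the hex check from the diff could throw something like ArgumentException instead of the base Exception; the exact type to use is a project convention and is assumed here.

```csharp
if (message.StartsWith("0x"))
{
    // Sketch: a more specific exception type than System.Exception.
    throw new ArgumentException($"Vault message should not be in hex; message: {message}", nameof(message));
}
```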
NethermindEth-nethermind
.cs
@@ -5,6 +5,16 @@ import torch def cast_tensor_type(inputs, src_type, dst_type): + """Recursive converted Tensor in inputs from src_type to dst_type. + + Args: + inputs: Inputs that to be casted. + src_type (torch.dtype): Source type.. + dst_type (torch.dtype): Destination type. + + Returns: + The same type with inputs, but all contained Tensors have been cast. + """ if isinstance(inputs, torch.Tensor): return inputs.to(dst_type) elif isinstance(inputs, str):
1
from collections import abc import numpy as np import torch def cast_tensor_type(inputs, src_type, dst_type): if isinstance(inputs, torch.Tensor): return inputs.to(dst_type) elif isinstance(inputs, str): return inputs elif isinstance(inputs, np.ndarray): return inputs elif isinstance(inputs, abc.Mapping): return type(inputs)({ k: cast_tensor_type(v, src_type, dst_type) for k, v in inputs.items() }) elif isinstance(inputs, abc.Iterable): return type(inputs)( cast_tensor_type(item, src_type, dst_type) for item in inputs) else: return inputs
1
20,489
Recursive -> Recursively; converted -> convert
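Applying the two wording fixes from this comment, the docstring summary would read as in the sketch below (only the summary line changes; the rest of the docstring is elided):

```python
def cast_tensor_type(inputs, src_type, dst_type):
    """Recursively convert Tensor in inputs from src_type to dst_type."""
    ...
```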
open-mmlab-mmdetection
py
@@ -947,7 +947,8 @@ class CSharpGenerator : public BaseGenerator { } // JVM specifications restrict default constructor params to be < 255. // Longs and doubles take up 2 units, so we set the limit to be < 127. - if (has_no_struct_fields && num_fields && num_fields < 127) { + if ((has_no_struct_fields || opts.generate_object_based_api) && + num_fields && num_fields < 127) { struct_has_create = true; // Generate a table constructor of the form: // public static int createName(FlatBufferBuilder builder, args...)
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // independent from idl_parser, since this code is not needed for most clients #include "flatbuffers/code_generators.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" #include "flatbuffers/util.h" #if defined(FLATBUFFERS_CPP98_STL) # include <cctype> #endif // defined(FLATBUFFERS_CPP98_STL) namespace flatbuffers { static TypedFloatConstantGenerator CSharpFloatGen("Double.", "Single.", "NaN", "PositiveInfinity", "NegativeInfinity"); static CommentConfig comment_config = { nullptr, "///", nullptr, }; namespace csharp { class CSharpGenerator : public BaseGenerator { public: CSharpGenerator(const Parser &parser, const std::string &path, const std::string &file_name) : BaseGenerator(parser, path, file_name, "", ".", "cs"), cur_name_space_(nullptr) {} CSharpGenerator &operator=(const CSharpGenerator &); bool generate() { std::string one_file_code; cur_name_space_ = parser_.current_namespace_; for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { std::string enumcode; auto &enum_def = **it; if (!parser_.opts.one_file) cur_name_space_ = enum_def.defined_namespace; GenEnum(enum_def, &enumcode, parser_.opts); if (parser_.opts.one_file) { one_file_code += enumcode; } else { if (!SaveType(enum_def.name, *enum_def.defined_namespace, enumcode, false)) return false; } } for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { std::string declcode; auto &struct_def = **it; if (!parser_.opts.one_file) cur_name_space_ = struct_def.defined_namespace; GenStruct(struct_def, &declcode, parser_.opts); if (parser_.opts.one_file) { one_file_code += declcode; } else { if (!SaveType(struct_def.name, *struct_def.defined_namespace, declcode, true)) return false; } } if (parser_.opts.one_file) { return SaveType(file_name_, *parser_.current_namespace_, one_file_code, true); } return true; } // Save out the generated code for a single class while adding // declaration boilerplate. 
bool SaveType(const std::string &defname, const Namespace &ns, const std::string &classcode, bool needs_includes) const { if (!classcode.length()) return true; std::string code = "// <auto-generated>\n" "// " + std::string(FlatBuffersGeneratedWarning()) + "\n" "// </auto-generated>\n\n"; std::string namespace_name = FullNamespace(".", ns); if (!namespace_name.empty()) { code += "namespace " + namespace_name + "\n{\n\n"; } if (needs_includes) { code += "using global::System;\n"; code += "using global::System.Collections.Generic;\n"; code += "using global::FlatBuffers;\n\n"; } code += classcode; if (!namespace_name.empty()) { code += "\n}\n"; } auto filename = NamespaceDir(ns) + defname + ".cs"; return SaveFile(filename.c_str(), code, false); } const Namespace *CurrentNameSpace() const { return cur_name_space_; } std::string GenTypeBasic(const Type &type, bool enableLangOverrides) const { // clang-format off static const char * const csharp_typename[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, ...) \ #NTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; // clang-format on if (enableLangOverrides) { if (IsEnum(type)) return WrapInNameSpace(*type.enum_def); if (type.base_type == BASE_TYPE_STRUCT) { return "Offset<" + WrapInNameSpace(*type.struct_def) + ">"; } } return csharp_typename[type.base_type]; } inline std::string GenTypeBasic(const Type &type) const { return GenTypeBasic(type, true); } std::string GenTypePointer(const Type &type) const { switch (type.base_type) { case BASE_TYPE_STRING: return "string"; case BASE_TYPE_VECTOR: return GenTypeGet(type.VectorType()); case BASE_TYPE_STRUCT: return WrapInNameSpace(*type.struct_def); case BASE_TYPE_UNION: return "TTable"; default: return "Table"; } } std::string GenTypeGet(const Type &type) const { return IsScalar(type.base_type) ? GenTypeBasic(type) : (IsArray(type) ? GenTypeGet(type.VectorType()) : GenTypePointer(type)); } std::string GenOffsetType(const StructDef &struct_def) const { return "Offset<" + WrapInNameSpace(struct_def) + ">"; } std::string GenOffsetConstruct(const StructDef &struct_def, const std::string &variable_name) const { return "new Offset<" + WrapInNameSpace(struct_def) + ">(" + variable_name + ")"; } // Casts necessary to correctly read serialized data std::string DestinationCast(const Type &type) const { if (IsSeries(type)) { return DestinationCast(type.VectorType()); } else { if (IsEnum(type)) return "(" + WrapInNameSpace(*type.enum_def) + ")"; } return ""; } // Cast statements for mutator method parameters. // In Java, parameters representing unsigned numbers need to be cast down to // their respective type. For example, a long holding an unsigned int value // would be cast down to int before being put onto the buffer. In C#, one cast // directly cast an Enum to its underlying type, which is essential before // putting it onto the buffer. std::string SourceCast(const Type &type) const { if (IsSeries(type)) { return SourceCast(type.VectorType()); } else { if (IsEnum(type)) return "(" + GenTypeBasic(type, false) + ")"; } return ""; } std::string SourceCastBasic(const Type &type) const { return IsScalar(type.base_type) ? SourceCast(type) : ""; } std::string GenEnumDefaultValue(const FieldDef &field) const { auto &value = field.value; FLATBUFFERS_ASSERT(value.type.enum_def); auto &enum_def = *value.type.enum_def; auto enum_val = enum_def.FindByValue(value.constant); return enum_val ? (WrapInNameSpace(enum_def) + "." 
+ enum_val->name) : value.constant; } std::string GenDefaultValue(const FieldDef &field, bool enableLangOverrides) const { auto &value = field.value; if (enableLangOverrides) { // handles both enum case and vector of enum case if (value.type.enum_def != nullptr && value.type.base_type != BASE_TYPE_UNION) { return GenEnumDefaultValue(field); } } auto longSuffix = ""; switch (value.type.base_type) { case BASE_TYPE_BOOL: return value.constant == "0" ? "false" : "true"; case BASE_TYPE_ULONG: return value.constant; case BASE_TYPE_UINT: case BASE_TYPE_LONG: return value.constant + longSuffix; default: if (IsFloat(value.type.base_type)) return CSharpFloatGen.GenFloatConstant(field); else return value.constant; } } std::string GenDefaultValue(const FieldDef &field) const { return GenDefaultValue(field, true); } std::string GenDefaultValueBasic(const FieldDef &field, bool enableLangOverrides) const { auto &value = field.value; if (!IsScalar(value.type.base_type)) { if (enableLangOverrides) { switch (value.type.base_type) { case BASE_TYPE_STRING: return "default(StringOffset)"; case BASE_TYPE_STRUCT: return "default(Offset<" + WrapInNameSpace(*value.type.struct_def) + ">)"; case BASE_TYPE_VECTOR: return "default(VectorOffset)"; default: break; } } return "0"; } return GenDefaultValue(field, enableLangOverrides); } std::string GenDefaultValueBasic(const FieldDef &field) const { return GenDefaultValueBasic(field, true); } void GenEnum(EnumDef &enum_def, std::string *code_ptr, const IDLOptions &opts) const { std::string &code = *code_ptr; if (enum_def.generated) return; // Generate enum definitions of the form: // public static (final) int name = value; // In Java, we use ints rather than the Enum feature, because we want them // to map directly to how they're used in C/C++ and file formats. // That, and Java Enums are expensive, and not universally liked. GenComment(enum_def.doc_comment, code_ptr, &comment_config); if (opts.cs_gen_json_serializer && opts.generate_object_based_api) { code += "[Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters." "StringEnumConverter))]\n"; } // In C# this indicates enumeration values can be treated as bit flags. if (enum_def.attributes.Lookup("bit_flags")) { code += "[System.FlagsAttribute]\n"; } if (enum_def.attributes.Lookup("private")) { code += "internal "; } else { code += "public "; } code += "enum " + enum_def.name; code += " : " + GenTypeBasic(enum_def.underlying_type, false); code += "\n{\n"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { auto &ev = **it; GenComment(ev.doc_comment, code_ptr, &comment_config, " "); code += " "; code += ev.name + " = "; code += enum_def.ToString(ev); code += ",\n"; } // Close the class code += "};\n\n"; if (opts.generate_object_based_api) { GenEnum_ObjectAPI(enum_def, code_ptr, opts); } } bool HasUnionStringValue(const EnumDef &enum_def) const { if (!enum_def.is_union) return false; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { auto &val = **it; if (val.union_type.base_type == BASE_TYPE_STRING) { return true; } } return false; } // Returns the function name that is able to read a value of the given type. 
std::string GenGetter(const Type &type) const { switch (type.base_type) { case BASE_TYPE_STRING: return "__p.__string"; case BASE_TYPE_STRUCT: return "__p.__struct"; case BASE_TYPE_UNION: return "__p.__union"; case BASE_TYPE_VECTOR: return GenGetter(type.VectorType()); case BASE_TYPE_ARRAY: return GenGetter(type.VectorType()); default: { std::string getter = "__p.bb.Get"; if (type.base_type == BASE_TYPE_BOOL) { getter = "0!=" + getter; } else if (GenTypeBasic(type, false) != "byte") { getter += MakeCamel(GenTypeBasic(type, false)); } return getter; } } } // Returns the function name that is able to read a value of the given type. std::string GenGetterForLookupByKey(flatbuffers::FieldDef *key_field, const std::string &data_buffer, const char *num = nullptr) const { auto type = key_field->value.type; auto dest_mask = ""; auto dest_cast = DestinationCast(type); auto getter = data_buffer + ".Get"; if (GenTypeBasic(type, false) != "byte") { getter += MakeCamel(GenTypeBasic(type, false)); } getter = dest_cast + getter + "(" + GenOffsetGetter(key_field, num) + ")" + dest_mask; return getter; } // Direct mutation is only allowed for scalar fields. // Hence a setter method will only be generated for such fields. std::string GenSetter(const Type &type) const { if (IsScalar(type.base_type)) { std::string setter = "__p.bb.Put"; if (GenTypeBasic(type, false) != "byte" && type.base_type != BASE_TYPE_BOOL) { setter += MakeCamel(GenTypeBasic(type, false)); } return setter; } else { return ""; } } // Returns the method name for use with add/put calls. std::string GenMethod(const Type &type) const { return IsScalar(type.base_type) ? MakeCamel(GenTypeBasic(type, false)) : (IsStruct(type) ? "Struct" : "Offset"); } // Recursively generate arguments for a constructor, to deal with nested // structs. void GenStructArgs(const StructDef &struct_def, std::string *code_ptr, const char *nameprefix, size_t array_count = 0) const { std::string &code = *code_ptr; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; const auto &field_type = field.value.type; const auto array_field = IsArray(field_type); const auto &type = array_field ? field_type.VectorType() : field_type; const auto array_cnt = array_field ? (array_count + 1) : array_count; if (IsStruct(type)) { // Generate arguments for a struct inside a struct. To ensure names // don't clash, and to make it obvious these arguments are constructing // a nested struct, prefix the name with the field name. GenStructArgs(*field_type.struct_def, code_ptr, (nameprefix + (field.name + "_")).c_str(), array_cnt); } else { code += ", "; code += GenTypeBasic(type); if (array_cnt > 0) { code += "["; for (size_t i = 1; i < array_cnt; i++) code += ","; code += "]"; } code += " "; code += nameprefix; code += MakeCamel(field.name, true); } } } // Recusively generate struct construction statements of the form: // builder.putType(name); // and insert manual padding. 
void GenStructBody(const StructDef &struct_def, std::string *code_ptr, const char *nameprefix, size_t index = 0, bool in_array = false) const { std::string &code = *code_ptr; std::string indent((index + 1) * 2, ' '); code += indent + " builder.Prep("; code += NumToString(struct_def.minalign) + ", "; code += NumToString(struct_def.bytesize) + ");\n"; for (auto it = struct_def.fields.vec.rbegin(); it != struct_def.fields.vec.rend(); ++it) { auto &field = **it; const auto &field_type = field.value.type; if (field.padding) { code += indent + " builder.Pad("; code += NumToString(field.padding) + ");\n"; } if (IsStruct(field_type)) { GenStructBody(*field_type.struct_def, code_ptr, (nameprefix + (field.name + "_")).c_str(), index, in_array); } else { const auto &type = IsArray(field_type) ? field_type.VectorType() : field_type; const auto index_var = "_idx" + NumToString(index); if (IsArray(field_type)) { code += indent + " for (int " + index_var + " = "; code += NumToString(field_type.fixed_length); code += "; " + index_var + " > 0; " + index_var + "--) {\n"; in_array = true; } if (IsStruct(type)) { GenStructBody(*field_type.struct_def, code_ptr, (nameprefix + (field.name + "_")).c_str(), index + 1, in_array); } else { code += IsArray(field_type) ? " " : ""; code += indent + " builder.Put"; code += GenMethod(type) + "("; code += SourceCast(type); auto argname = nameprefix + MakeCamel(field.name, true); code += argname; size_t array_cnt = index + (IsArray(field_type) ? 1 : 0); if (array_cnt > 0) { code += "["; for (size_t i = 0; in_array && i < array_cnt; i++) { code += "_idx" + NumToString(i) + "-1"; if (i != (array_cnt - 1)) code += ","; } code += "]"; } code += ");\n"; } if (IsArray(field_type)) { code += indent + " }\n"; } } } } std::string GenOffsetGetter(flatbuffers::FieldDef *key_field, const char *num = nullptr) const { std::string key_offset = "Table.__offset(" + NumToString(key_field->value.offset) + ", "; if (num) { key_offset += num; key_offset += ".Value, builder.DataBuffer)"; } else { key_offset += "bb.Length"; key_offset += " - tableOffset, bb)"; } return key_offset; } std::string GenLookupKeyGetter(flatbuffers::FieldDef *key_field) const { std::string key_getter = " "; key_getter += "int tableOffset = Table."; key_getter += "__indirect(vectorLocation + 4 * (start + middle)"; key_getter += ", bb);\n "; if (key_field->value.type.base_type == BASE_TYPE_STRING) { key_getter += "int comp = Table."; key_getter += "CompareStrings("; key_getter += GenOffsetGetter(key_field); key_getter += ", byteKey, bb);\n"; } else { auto get_val = GenGetterForLookupByKey(key_field, "bb"); key_getter += "int comp = " + get_val + ".CompareTo(key);\n"; } return key_getter; } std::string GenKeyGetter(flatbuffers::FieldDef *key_field) const { std::string key_getter = ""; auto data_buffer = "builder.DataBuffer"; if (key_field->value.type.base_type == BASE_TYPE_STRING) { key_getter += "Table.CompareStrings("; key_getter += GenOffsetGetter(key_field, "o1") + ", "; key_getter += GenOffsetGetter(key_field, "o2") + ", " + data_buffer + ")"; } else { auto field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o1"); key_getter += field_getter; field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o2"); key_getter += ".CompareTo(" + field_getter + ")"; } return key_getter; } void GenStruct(StructDef &struct_def, std::string *code_ptr, const IDLOptions &opts) const { if (struct_def.generated) return; std::string &code = *code_ptr; // Generate a struct accessor class, with methods of the form: // 
public type name() { return bb.getType(i + offset); } // or for tables of the form: // public type name() { // int o = __offset(offset); return o != 0 ? bb.getType(o + i) : default; // } GenComment(struct_def.doc_comment, code_ptr, &comment_config); if (struct_def.attributes.Lookup("private")) { code += "internal "; } else { code += "public "; } if (struct_def.attributes.Lookup("csharp_partial")) { // generate a partial class for this C# struct/table code += "partial "; } code += "struct " + struct_def.name; code += " : IFlatbufferObject"; code += "\n{\n"; code += " private "; code += struct_def.fixed ? "Struct" : "Table"; code += " __p;\n"; code += " public ByteBuffer ByteBuffer { get { return __p.bb; } }\n"; if (!struct_def.fixed) { // Generate verson check method. // Force compile time error if not using the same version runtime. code += " public static void ValidateVersion() {"; code += " FlatBufferConstants."; code += "FLATBUFFERS_1_12_0(); "; code += "}\n"; // Generate a special accessor for the table that when used as the root // of a FlatBuffer std::string method_name = "GetRootAs" + struct_def.name; std::string method_signature = " public static " + struct_def.name + " " + method_name; // create convenience method that doesn't require an existing object code += method_signature + "(ByteBuffer _bb) "; code += "{ return " + method_name + "(_bb, new " + struct_def.name + "()); }\n"; // create method that allows object reuse code += method_signature + "(ByteBuffer _bb, " + struct_def.name + " obj) { "; code += "return (obj.__assign(_bb.GetInt(_bb.Position"; code += ") + _bb.Position"; code += ", _bb)); }\n"; if (parser_.root_struct_def_ == &struct_def) { if (parser_.file_identifier_.length()) { // Check if a buffer has the identifier. code += " public static "; code += "bool " + struct_def.name; code += "BufferHasIdentifier(ByteBuffer _bb) { return "; code += "Table.__has_identifier(_bb, \""; code += parser_.file_identifier_; code += "\"); }\n"; } } } // Generate the __init method that sets the field in a pre-existing // accessor object. This is to allow object reuse. code += " public void __init(int _i, ByteBuffer _bb) "; code += "{ "; code += "__p = new "; code += struct_def.fixed ? 
"Struct" : "Table"; code += "(_i, _bb); "; code += "}\n"; code += " public " + struct_def.name + " __assign(int _i, ByteBuffer _bb) "; code += "{ __init(_i, _bb); return this; }\n\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; GenComment(field.doc_comment, code_ptr, &comment_config, " "); std::string type_name = GenTypeGet(field.value.type); std::string type_name_dest = GenTypeGet(field.value.type); std::string conditional_cast = ""; std::string optional = ""; if (!struct_def.fixed && (field.value.type.base_type == BASE_TYPE_STRUCT || field.value.type.base_type == BASE_TYPE_UNION || (field.value.type.base_type == BASE_TYPE_VECTOR && (field.value.type.element == BASE_TYPE_STRUCT || field.value.type.element == BASE_TYPE_UNION)))) { optional = "?"; conditional_cast = "(" + type_name_dest + optional + ")"; } std::string dest_mask = ""; std::string dest_cast = DestinationCast(field.value.type); std::string src_cast = SourceCast(field.value.type); std::string method_start = " public " + type_name_dest + optional + " " + MakeCamel(field.name, true); std::string obj = "(new " + type_name + "())"; // Most field accessors need to retrieve and test the field offset first, // this is the prefix code for that: auto offset_prefix = IsArray(field.value.type) ? " { return " : (" { int o = __p.__offset(" + NumToString(field.value.offset) + "); return o != 0 ? "); // Generate the accessors that don't do object reuse. if (field.value.type.base_type == BASE_TYPE_STRUCT) { } else if (field.value.type.base_type == BASE_TYPE_VECTOR && field.value.type.element == BASE_TYPE_STRUCT) { } else if (field.value.type.base_type == BASE_TYPE_UNION || (field.value.type.base_type == BASE_TYPE_VECTOR && field.value.type.VectorType().base_type == BASE_TYPE_UNION)) { method_start += "<TTable>"; type_name = type_name_dest; } std::string getter = dest_cast + GenGetter(field.value.type); code += method_start; std::string default_cast = ""; // only create default casts for c# scalars or vectors of scalars if ((IsScalar(field.value.type.base_type) || (field.value.type.base_type == BASE_TYPE_VECTOR && IsScalar(field.value.type.element)))) { // For scalars, default value will be returned by GetDefaultValue(). // If the scalar is an enum, GetDefaultValue() returns an actual c# enum // that doesn't need to be casted. However, default values for enum // elements of vectors are integer literals ("0") and are still casted // for clarity. if (field.value.type.enum_def == nullptr || field.value.type.base_type == BASE_TYPE_VECTOR) { default_cast = "(" + type_name_dest + ")"; } } std::string member_suffix = "; "; if (IsScalar(field.value.type.base_type)) { code += " { get"; member_suffix += "} "; if (struct_def.fixed) { code += " { return " + getter; code += "(__p.bb_pos + "; code += NumToString(field.value.offset) + ")"; code += dest_mask; } else { code += offset_prefix + getter; code += "(o + __p.bb_pos)" + dest_mask; code += " : " + default_cast; code += GenDefaultValue(field); } } else { switch (field.value.type.base_type) { case BASE_TYPE_STRUCT: code += " { get"; member_suffix += "} "; if (struct_def.fixed) { code += " { return " + obj + ".__assign(" + "__p."; code += "bb_pos + " + NumToString(field.value.offset) + ", "; code += "__p.bb)"; } else { code += offset_prefix + conditional_cast; code += obj + ".__assign("; code += field.value.type.struct_def->fixed ? 
"o + __p.bb_pos" : "__p.__indirect(o + __p.bb_pos)"; code += ", __p.bb) : null"; } break; case BASE_TYPE_STRING: code += " { get"; member_suffix += "} "; code += offset_prefix + getter + "(o + " + "__p."; code += "bb_pos) : null"; break; case BASE_TYPE_ARRAY: FLATBUFFERS_FALLTHROUGH(); // fall thru case BASE_TYPE_VECTOR: { auto vectortype = field.value.type.VectorType(); if (vectortype.base_type == BASE_TYPE_UNION) { conditional_cast = "(TTable?)"; getter += "<TTable>"; } code += "("; if (vectortype.base_type == BASE_TYPE_STRUCT) { getter = obj + ".__assign"; } else if (vectortype.base_type == BASE_TYPE_UNION) { } code += "int j)"; const auto body = offset_prefix + conditional_cast + getter + "("; if (vectortype.base_type == BASE_TYPE_UNION) { code += " where TTable : struct, IFlatbufferObject" + body; } else { code += body; } std::string index = "__p."; if (IsArray(field.value.type)) { index += "bb_pos + " + NumToString(field.value.offset) + " + "; } else { index += "__vector(o) + "; } index += "j * " + NumToString(InlineSize(vectortype)); if (vectortype.base_type == BASE_TYPE_STRUCT) { code += vectortype.struct_def->fixed ? index : "__p.__indirect(" + index + ")"; code += ", __p.bb"; } else { code += index; } code += ")" + dest_mask; if (!IsArray(field.value.type)) { code += " : "; code += field.value.type.element == BASE_TYPE_BOOL ? "false" : (IsScalar(field.value.type.element) ? default_cast + "0" : "null"); } if (vectortype.base_type == BASE_TYPE_UNION && HasUnionStringValue(*vectortype.enum_def)) { code += member_suffix; code += "}\n"; code += " public string " + MakeCamel(field.name, true) + "AsString(int j)"; code += offset_prefix + GenGetter(Type(BASE_TYPE_STRING)); code += "(" + index + ") : null"; } break; } case BASE_TYPE_UNION: code += "() where TTable : struct, IFlatbufferObject"; code += offset_prefix + "(TTable?)" + getter; code += "<TTable>(o + __p.bb_pos) : null"; if (HasUnionStringValue(*field.value.type.enum_def)) { code += member_suffix; code += "}\n"; code += " public string " + MakeCamel(field.name, true) + "AsString()"; code += offset_prefix + GenGetter(Type(BASE_TYPE_STRING)); code += "(o + __p.bb_pos) : null"; } break; default: FLATBUFFERS_ASSERT(0); } } code += member_suffix; code += "}\n"; if (field.value.type.base_type == BASE_TYPE_VECTOR) { code += " public int " + MakeCamel(field.name, true); code += "Length"; code += " { get"; code += offset_prefix; code += "__p.__vector_len(o) : 0; "; code += "} "; code += "}\n"; // See if we should generate a by-key accessor. if (field.value.type.element == BASE_TYPE_STRUCT && !field.value.type.struct_def->fixed) { auto &sd = *field.value.type.struct_def; auto &fields = sd.fields.vec; for (auto kit = fields.begin(); kit != fields.end(); ++kit) { auto &key_field = **kit; if (key_field.key) { auto qualified_name = WrapInNameSpace(sd); code += " public " + qualified_name + "? "; code += MakeCamel(field.name, true) + "ByKey("; code += GenTypeGet(key_field.value.type) + " key)"; code += offset_prefix; code += qualified_name + ".__lookup_by_key("; code += "__p.__vector(o), key, "; code += "__p.bb) : null; "; code += "}\n"; break; } } } } // Generate a ByteBuffer accessor for strings & vectors of scalars. 
if ((field.value.type.base_type == BASE_TYPE_VECTOR && IsScalar(field.value.type.VectorType().base_type)) || field.value.type.base_type == BASE_TYPE_STRING) { code += "#if ENABLE_SPAN_T\n"; code += " public Span<" + GenTypeBasic(field.value.type.VectorType()) + "> Get"; code += MakeCamel(field.name, true); code += "Bytes() { return "; code += "__p.__vector_as_span<" + GenTypeBasic(field.value.type.VectorType()) + ">("; code += NumToString(field.value.offset); code += ", " + NumToString(SizeOf(field.value.type.VectorType().base_type)); code += "); }\n"; code += "#else\n"; code += " public ArraySegment<byte>? Get"; code += MakeCamel(field.name, true); code += "Bytes() { return "; code += "__p.__vector_as_arraysegment("; code += NumToString(field.value.offset); code += "); }\n"; code += "#endif\n"; // For direct blockcopying the data into a typed array code += " public "; code += GenTypeBasic(field.value.type.VectorType()); code += "[] Get"; code += MakeCamel(field.name, true); code += "Array() { "; if (IsEnum(field.value.type.VectorType())) { // Since __vector_as_array does not work for enum types, // fill array using an explicit loop. code += "int o = __p.__offset("; code += NumToString(field.value.offset); code += "); if (o == 0) return null; int p = "; code += "__p.__vector(o); int l = "; code += "__p.__vector_len(o); "; code += GenTypeBasic(field.value.type.VectorType()); code += "[] a = new "; code += GenTypeBasic(field.value.type.VectorType()); code += "[l]; for (int i = 0; i < l; i++) { a[i] = " + getter; code += "(p + i * "; code += NumToString(InlineSize(field.value.type.VectorType())); code += "); } return a;"; } else { code += "return "; code += "__p.__vector_as_array<"; code += GenTypeBasic(field.value.type.VectorType()); code += ">("; code += NumToString(field.value.offset); code += ");"; } code += " }\n"; } // generate object accessors if is nested_flatbuffer if (field.nested_flatbuffer) { auto nested_type_name = WrapInNameSpace(*field.nested_flatbuffer); auto nested_method_name = MakeCamel(field.name, true) + "As" + field.nested_flatbuffer->name; auto get_nested_method_name = nested_method_name; get_nested_method_name = "Get" + nested_method_name; conditional_cast = "(" + nested_type_name + "?)"; obj = "(new " + nested_type_name + "())"; code += " public " + nested_type_name + "? "; code += get_nested_method_name + "("; code += ") { int o = __p.__offset("; code += NumToString(field.value.offset) + "); "; code += "return o != 0 ? " + conditional_cast + obj + ".__assign("; code += "__p."; code += "__indirect(__p.__vector(o)), "; code += "__p.bb) : null; }\n"; } // Generate mutators for scalar fields or vectors of scalars. if (parser_.opts.mutable_buffer) { auto is_series = (IsSeries(field.value.type)); const auto &underlying_type = is_series ? field.value.type.VectorType() : field.value.type; // Boolean parameters have to be explicitly converted to byte // representation. auto setter_parameter = underlying_type.base_type == BASE_TYPE_BOOL ? "(byte)(" + field.name + " ? 1 : 0)" : field.name; auto mutator_prefix = MakeCamel("mutate", true); // A vector mutator also needs the index of the vector element it should // mutate. auto mutator_params = (is_series ? "(int j, " : "(") + GenTypeGet(underlying_type) + " " + field.name + ") { "; auto setter_index = is_series ? "__p." + (IsArray(field.value.type) ? "bb_pos + " + NumToString(field.value.offset) : "__vector(o)") + +" + j * " + NumToString(InlineSize(underlying_type)) : (struct_def.fixed ? 
"__p.bb_pos + " + NumToString(field.value.offset) : "o + __p.bb_pos"); if (IsScalar(underlying_type.base_type) && !IsUnion(field.value.type)) { code += " public "; code += struct_def.fixed ? "void " : "bool "; code += mutator_prefix + MakeCamel(field.name, true); code += mutator_params; if (struct_def.fixed) { code += GenSetter(underlying_type) + "(" + setter_index + ", "; code += src_cast + setter_parameter + "); }\n"; } else { code += "int o = __p.__offset("; code += NumToString(field.value.offset) + ");"; code += " if (o != 0) { " + GenSetter(underlying_type); code += "(" + setter_index + ", " + src_cast + setter_parameter + "); return true; } else { return false; } }\n"; } } } if (parser_.opts.java_primitive_has_method && IsScalar(field.value.type.base_type) && !struct_def.fixed) { auto vt_offset_constant = " public static final int VT_" + MakeScreamingCamel(field.name) + " = " + NumToString(field.value.offset) + ";"; code += vt_offset_constant; code += "\n"; } } code += "\n"; auto struct_has_create = false; std::set<flatbuffers::FieldDef *> field_has_create_set; flatbuffers::FieldDef *key_field = nullptr; if (struct_def.fixed) { struct_has_create = true; // create a struct constructor function code += " public static " + GenOffsetType(struct_def) + " "; code += "Create"; code += struct_def.name + "(FlatBufferBuilder builder"; GenStructArgs(struct_def, code_ptr, ""); code += ") {\n"; GenStructBody(struct_def, code_ptr, ""); code += " return "; code += GenOffsetConstruct(struct_def, "builder.Offset"); code += ";\n }\n"; } else { // Generate a method that creates a table in one go. This is only possible // when the table has no struct fields, since those have to be created // inline, and there's no way to do so in Java. bool has_no_struct_fields = true; int num_fields = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; if (IsStruct(field.value.type)) { has_no_struct_fields = false; } else { num_fields++; } } // JVM specifications restrict default constructor params to be < 255. // Longs and doubles take up 2 units, so we set the limit to be < 127. if (has_no_struct_fields && num_fields && num_fields < 127) { struct_has_create = true; // Generate a table constructor of the form: // public static int createName(FlatBufferBuilder builder, args...) code += " public static " + GenOffsetType(struct_def) + " "; code += "Create" + struct_def.name; code += "(FlatBufferBuilder builder"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; code += ",\n "; code += GenTypeBasic(field.value.type); code += " "; code += field.name; if (!IsScalar(field.value.type.base_type)) code += "Offset"; code += " = "; code += GenDefaultValueBasic(field); } code += ") {\n builder."; code += "StartTable("; code += NumToString(struct_def.fields.vec.size()) + ");\n"; for (size_t size = struct_def.sortbysize ? 
sizeof(largest_scalar_t) : 1; size; size /= 2) { for (auto it = struct_def.fields.vec.rbegin(); it != struct_def.fields.vec.rend(); ++it) { auto &field = **it; if (!field.deprecated && (!struct_def.sortbysize || size == SizeOf(field.value.type.base_type))) { code += " " + struct_def.name + "."; code += "Add"; code += MakeCamel(field.name) + "(builder, " + field.name; if (!IsScalar(field.value.type.base_type)) code += "Offset"; code += ");\n"; } } } code += " return " + struct_def.name + "."; code += "End" + struct_def.name; code += "(builder);\n }\n\n"; } // Generate a set of static methods that allow table construction, // of the form: // public static void addName(FlatBufferBuilder builder, short name) // { builder.addShort(id, name, default); } // Unlike the Create function, these always work. code += " public static void Start"; code += struct_def.name; code += "(FlatBufferBuilder builder) { builder."; code += "StartTable("; code += NumToString(struct_def.fields.vec.size()) + "); }\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; if (field.key) key_field = &field; code += " public static void Add"; code += MakeCamel(field.name); code += "(FlatBufferBuilder builder, "; code += GenTypeBasic(field.value.type); auto argname = MakeCamel(field.name, false); if (!IsScalar(field.value.type.base_type)) argname += "Offset"; code += " " + argname + ") { builder.Add"; code += GenMethod(field.value.type) + "("; code += NumToString(it - struct_def.fields.vec.begin()) + ", "; code += SourceCastBasic(field.value.type); code += argname; if (!IsScalar(field.value.type.base_type) && field.value.type.base_type != BASE_TYPE_UNION) { code += ".Value"; } code += ", "; code += GenDefaultValue(field, false); code += "); }\n"; if (field.value.type.base_type == BASE_TYPE_VECTOR) { auto vector_type = field.value.type.VectorType(); auto alignment = InlineAlignment(vector_type); auto elem_size = InlineSize(vector_type); if (!IsStruct(vector_type)) { field_has_create_set.insert(&field); code += " public static VectorOffset "; code += "Create"; code += MakeCamel(field.name); code += "Vector(FlatBufferBuilder builder, "; code += GenTypeBasic(vector_type) + "[] data) "; code += "{ builder.StartVector("; code += NumToString(elem_size); code += ", data.Length, "; code += NumToString(alignment); code += "); for (int i = data."; code += "Length - 1; i >= 0; i--) builder."; code += "Add"; code += GenMethod(vector_type); code += "("; code += SourceCastBasic(vector_type); code += "data[i]"; if ((vector_type.base_type == BASE_TYPE_STRUCT || vector_type.base_type == BASE_TYPE_STRING)) code += ".Value"; code += "); return "; code += "builder.EndVector(); }\n"; code += " public static VectorOffset "; code += "Create"; code += MakeCamel(field.name); code += "VectorBlock(FlatBufferBuilder builder, "; code += GenTypeBasic(vector_type) + "[] data) "; code += "{ builder.StartVector("; code += NumToString(elem_size); code += ", data.Length, "; code += NumToString(alignment); code += "); builder.Add(data); return builder.EndVector(); }\n"; } // Generate a method to start a vector, data to be added manually // after. 
code += " public static void Start"; code += MakeCamel(field.name); code += "Vector(FlatBufferBuilder builder, int numElems) "; code += "{ builder.StartVector("; code += NumToString(elem_size); code += ", numElems, " + NumToString(alignment); code += "); }\n"; } } code += " public static " + GenOffsetType(struct_def) + " "; code += "End" + struct_def.name; code += "(FlatBufferBuilder builder) {\n int o = builder."; code += "EndTable();\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (!field.deprecated && field.required) { code += " builder.Required(o, "; code += NumToString(field.value.offset); code += "); // " + field.name + "\n"; } } code += " return " + GenOffsetConstruct(struct_def, "o") + ";\n }\n"; if (parser_.root_struct_def_ == &struct_def) { std::string size_prefix[] = { "", "SizePrefixed" }; for (int i = 0; i < 2; ++i) { code += " public static void "; code += "Finish" + size_prefix[i] + struct_def.name; code += "Buffer(FlatBufferBuilder builder, " + GenOffsetType(struct_def); code += " offset) {"; code += " builder.Finish" + size_prefix[i] + "(offset"; code += ".Value"; if (parser_.file_identifier_.length()) code += ", \"" + parser_.file_identifier_ + "\""; code += "); }\n"; } } } // Only generate key compare function for table, // because `key_field` is not set for struct if (struct_def.has_key && !struct_def.fixed) { FLATBUFFERS_ASSERT(key_field); code += "\n public static VectorOffset "; code += "CreateSortedVectorOf" + struct_def.name; code += "(FlatBufferBuilder builder, "; code += "Offset<" + struct_def.name + ">"; code += "[] offsets) {\n"; code += " Array.Sort(offsets, (Offset<" + struct_def.name + "> o1, Offset<" + struct_def.name + "> o2) => " + GenKeyGetter(key_field); code += ");\n"; code += " return builder.CreateVectorOfTables(offsets);\n }\n"; code += "\n public static " + struct_def.name + "?"; code += " __lookup_by_key("; code += "int vectorLocation, "; code += GenTypeGet(key_field->value.type); code += " key, ByteBuffer bb) {\n"; if (key_field->value.type.base_type == BASE_TYPE_STRING) { code += " byte[] byteKey = "; code += "System.Text.Encoding.UTF8.GetBytes(key);\n"; } code += " int span = "; code += "bb.GetInt(vectorLocation - 4);\n"; code += " int start = 0;\n"; code += " while (span != 0) {\n"; code += " int middle = span / 2;\n"; code += GenLookupKeyGetter(key_field); code += " if (comp > 0) {\n"; code += " span = middle;\n"; code += " } else if (comp < 0) {\n"; code += " middle++;\n"; code += " start += middle;\n"; code += " span -= middle;\n"; code += " } else {\n"; code += " return "; code += "new " + struct_def.name + "()"; code += ".__assign(tableOffset, bb);\n"; code += " }\n }\n"; code += " return null;\n"; code += " }\n"; } if (opts.generate_object_based_api) { GenPackUnPack_ObjectAPI(struct_def, code_ptr, opts, struct_has_create, field_has_create_set); } code += "};\n\n"; if (opts.generate_object_based_api) { GenStruct_ObjectAPI(struct_def, code_ptr, opts); } } void GenVectorAccessObject(StructDef &struct_def, std::string *code_ptr) const { auto &code = *code_ptr; // Generate a vector of structs accessor class. code += "\n"; code += " "; if (!struct_def.attributes.Lookup("private")) code += "public "; code += "static struct Vector : BaseVector\n{\n"; // Generate the __assign method that sets the field in a pre-existing // accessor object. This is to allow object reuse. 
std::string method_indent = " "; code += method_indent + "public Vector "; code += "__assign(int _vector, int _element_size, ByteBuffer _bb) { "; code += "__reset(_vector, _element_size, _bb); return this; }\n\n"; auto type_name = struct_def.name; auto method_start = method_indent + "public " + type_name + " Get"; // Generate the accessors that don't do object reuse. code += method_start + "(int j) { return Get"; code += "(new " + type_name + "(), j); }\n"; code += method_start + "(" + type_name + " obj, int j) { "; code += " return obj.__assign("; code += struct_def.fixed ? "__p.__element(j)" : "__p.__indirect(__p.__element(j), bb)"; code += ", __p.bb); }\n"; // See if we should generate a by-key accessor. if (!struct_def.fixed) { auto &fields = struct_def.fields.vec; for (auto kit = fields.begin(); kit != fields.end(); ++kit) { auto &key_field = **kit; if (key_field.key) { auto nullable_annotation = parser_.opts.gen_nullable ? "@Nullable " : ""; code += method_indent + nullable_annotation; code += "public " + type_name + "? "; code += "GetByKey("; code += GenTypeGet(key_field.value.type) + " key) { "; code += " return __lookup_by_key(null, "; code += "__p.__vector(), key, "; code += "__p.bb); "; code += "}\n"; code += method_indent + nullable_annotation; code += "public " + type_name + "?" + " "; code += "GetByKey("; code += type_name + "? obj, "; code += GenTypeGet(key_field.value.type) + " key) { "; code += " return __lookup_by_key(obj, "; code += "__p.__vector(), key, "; code += "__p.bb); "; code += "}\n"; break; } } } code += " }\n"; } void GenEnum_ObjectAPI(EnumDef &enum_def, std::string *code_ptr, const IDLOptions &opts) const { auto &code = *code_ptr; if (enum_def.generated) return; if (!enum_def.is_union) return; if (enum_def.attributes.Lookup("private")) { code += "internal "; } else { code += "public "; } auto union_name = enum_def.name + "Union"; code += "class " + union_name + " {\n"; // Type code += " public " + enum_def.name + " Type { get; set; }\n"; // Value code += " public object Value { get; set; }\n"; code += "\n"; // Constructor code += " public " + union_name + "() {\n"; code += " this.Type = " + enum_def.name + "." + enum_def.Vals()[0]->name + ";\n"; code += " this.Value = null;\n"; code += " }\n\n"; // As<T> code += " public T As<T>() where T : class { return this.Value as T; }\n"; // As for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { auto &ev = **it; if (ev.union_type.base_type == BASE_TYPE_NONE) continue; auto type_name = GenTypeGet_ObjectAPI(ev.union_type, opts); if (ev.union_type.base_type == BASE_TYPE_STRUCT && ev.union_type.struct_def->attributes.Lookup("private")) { code += " internal "; } else { code += " public "; } code += type_name + " As" + ev.name + "() { return this.As<" + type_name + ">(); }\n"; } code += "\n"; // Pack() code += " public static int Pack(FlatBuffers.FlatBufferBuilder builder, " + union_name + " _o) {\n"; code += " switch (_o.Type) {\n"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { auto &ev = **it; if (ev.union_type.base_type == BASE_TYPE_NONE) { code += " default: return 0;\n"; } else { code += " case " + enum_def.name + "." 
+ ev.name + ": return "; if (ev.union_type.base_type == BASE_TYPE_STRING) { code += "builder.CreateString(_o.As" + ev.name + "()).Value;\n"; } else { code += GenTypeGet(ev.union_type) + ".Pack(builder, _o.As" + ev.name + "()).Value;\n"; } } } code += " }\n"; code += " }\n"; code += "}\n\n"; // JsonConverter if (opts.cs_gen_json_serializer) { if (enum_def.attributes.Lookup("private")) { code += "internal "; } else { code += "public "; } code += "class " + union_name + "_JsonConverter : Newtonsoft.Json.JsonConverter {\n"; code += " public override bool CanConvert(System.Type objectType) {\n"; code += " return objectType == typeof(" + union_name + ") || objectType == typeof(System.Collections.Generic.List<" + union_name + ">);\n"; code += " }\n"; code += " public override void WriteJson(Newtonsoft.Json.JsonWriter writer, " "object value, " "Newtonsoft.Json.JsonSerializer serializer) {\n"; code += " var _olist = value as System.Collections.Generic.List<" + union_name + ">;\n"; code += " if (_olist != null) {\n"; code += " writer.WriteStartArray();\n"; code += " foreach (var _o in _olist) { this.WriteJson(writer, _o, " "serializer); }\n"; code += " writer.WriteEndArray();\n"; code += " } else {\n"; code += " this.WriteJson(writer, value as " + union_name + ", serializer);\n"; code += " }\n"; code += " }\n"; code += " public void WriteJson(Newtonsoft.Json.JsonWriter writer, " + union_name + " _o, " "Newtonsoft.Json.JsonSerializer serializer) {\n"; code += " if (_o == null) return;\n"; code += " serializer.Serialize(writer, _o.Value);\n"; code += " }\n"; code += " public override object ReadJson(Newtonsoft.Json.JsonReader " "reader, " "System.Type objectType, " "object existingValue, Newtonsoft.Json.JsonSerializer serializer) " "{\n"; code += " var _olist = existingValue as System.Collections.Generic.List<" + union_name + ">;\n"; code += " if (_olist != null) {\n"; code += " for (var _j = 0; _j < _olist.Count; ++_j) {\n"; code += " reader.Read();\n"; code += " _olist[_j] = this.ReadJson(reader, _olist[_j], " "serializer);\n"; code += " }\n"; code += " reader.Read();\n"; code += " return _olist;\n"; code += " } else {\n"; code += " return this.ReadJson(reader, existingValue as " + union_name + ", serializer);\n"; code += " }\n"; code += " }\n"; code += " public " + union_name + " ReadJson(Newtonsoft.Json.JsonReader reader, " + union_name + " _o, Newtonsoft.Json.JsonSerializer serializer) {\n"; code += " if (_o == null) return null;\n"; code += " switch (_o.Type) {\n"; for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) { auto &ev = **it; if (ev.union_type.base_type == BASE_TYPE_NONE) { code += " default: break;\n"; } else { auto type_name = GenTypeGet_ObjectAPI(ev.union_type, opts); code += " case " + enum_def.name + "." + ev.name + ": _o.Value = serializer.Deserialize<" + type_name + ">(reader); break;\n"; } } code += " }\n"; code += " return _o;\n"; code += " }\n"; code += "}\n\n"; } } std::string GenTypeName_ObjectAPI(const std::string &name, const IDLOptions &opts) const { return opts.object_prefix + name + opts.object_suffix; } void GenUnionUnPack_ObjectAPI(const EnumDef &enum_def, std::string *code_ptr, const std::string &camel_name, bool is_vector) const { auto &code = *code_ptr; std::string varialbe_name = "_o." 
+ camel_name; std::string type_suffix = ""; std::string func_suffix = "()"; std::string indent = " "; if (is_vector) { varialbe_name = "_o_" + camel_name; type_suffix = "(_j)"; func_suffix = "(_j)"; indent = " "; } if (is_vector) { code += indent + "var " + varialbe_name + " = new "; } else { code += indent + varialbe_name + " = new "; } code += WrapInNameSpace(enum_def) + "Union();\n"; code += indent + varialbe_name + ".Type = this." + camel_name + "Type" + type_suffix + ";\n"; code += indent + "switch (this." + camel_name + "Type" + type_suffix + ") {\n"; for (auto eit = enum_def.Vals().begin(); eit != enum_def.Vals().end(); ++eit) { auto &ev = **eit; if (ev.union_type.base_type == BASE_TYPE_NONE) { code += indent + " default: break;\n"; } else { code += indent + " case " + WrapInNameSpace(enum_def) + "." + ev.name + ":\n"; code += indent + " " + varialbe_name + ".Value = this." + camel_name; if (ev.union_type.base_type == BASE_TYPE_STRING) { code += "AsString" + func_suffix + ";\n"; } else { code += "<" + GenTypeGet(ev.union_type) + ">" + func_suffix; code += ".HasValue ? this." + camel_name; code += "<" + GenTypeGet(ev.union_type) + ">" + func_suffix + ".Value.UnPack() : null;\n"; } code += indent + " break;\n"; } } code += indent + "}\n"; if (is_vector) { code += indent + "_o." + camel_name + ".Add(" + varialbe_name + ");\n"; } } void GenPackUnPack_ObjectAPI( StructDef &struct_def, std::string *code_ptr, const IDLOptions &opts, bool struct_has_create, const std::set<FieldDef *> &field_has_create) const { auto &code = *code_ptr; auto struct_name = GenTypeName_ObjectAPI(struct_def.name, opts); // UnPack() code += " public " + struct_name + " UnPack() {\n"; code += " var _o = new " + struct_name + "();\n"; code += " this.UnPackTo(_o);\n"; code += " return _o;\n"; code += " }\n"; // UnPackTo() code += " public void UnPackTo(" + struct_name + " _o) {\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; auto camel_name = MakeCamel(field.name); auto start = " _o." + camel_name + " = "; switch (field.value.type.base_type) { case BASE_TYPE_STRUCT: { auto fixed = struct_def.fixed && field.value.type.struct_def->fixed; if (fixed) { code += start + "this." + camel_name + ".UnPack();\n"; } else { code += start + "this." + camel_name + ".HasValue ? this." + camel_name + ".Value.UnPack() : null;\n"; } break; } case BASE_TYPE_ARRAY: { auto type_name = GenTypeGet_ObjectAPI(field.value.type, opts); auto length_str = NumToString(field.value.type.fixed_length); auto unpack_method = field.value.type.struct_def == nullptr ? "" : field.value.type.struct_def->fixed ? ".UnPack()" : "?.UnPack()"; code += start + "new " + type_name.substr(0, type_name.length() - 1) + length_str + "];\n"; code += " for (var _j = 0; _j < " + length_str + "; ++_j) { _o." + camel_name + "[_j] = this." + camel_name + "(_j)" + unpack_method + "; }\n"; break; } case BASE_TYPE_VECTOR: if (field.value.type.element == BASE_TYPE_UNION) { code += start + "new " + GenTypeGet_ObjectAPI(field.value.type, opts) + "();\n"; code += " for (var _j = 0; _j < this." + camel_name + "Length; ++_j) {\n"; GenUnionUnPack_ObjectAPI(*field.value.type.enum_def, code_ptr, camel_name, true); code += " }\n"; } else if (field.value.type.element != BASE_TYPE_UTYPE) { auto fixed = field.value.type.struct_def == nullptr; code += start + "new " + GenTypeGet_ObjectAPI(field.value.type, opts) + "();\n"; code += " for (var _j = 0; _j < this." 
+ camel_name + "Length; ++_j) {"; code += "_o." + camel_name + ".Add("; if (fixed) { code += "this." + camel_name + "(_j)"; } else { code += "this." + camel_name + "(_j).HasValue ? this." + camel_name + "(_j).Value.UnPack() : null"; } code += ");}\n"; } break; case BASE_TYPE_UTYPE: break; case BASE_TYPE_UNION: { GenUnionUnPack_ObjectAPI(*field.value.type.enum_def, code_ptr, camel_name, false); break; } default: { code += start + "this." + camel_name + ";\n"; break; } } } code += " }\n"; // Pack() code += " public static " + GenOffsetType(struct_def) + " Pack(FlatBufferBuilder builder, " + struct_name + " _o) {\n"; code += " if (_o == null) return default(" + GenOffsetType(struct_def) + ");\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; auto camel_name = MakeCamel(field.name); // pre switch (field.value.type.base_type) { case BASE_TYPE_STRUCT: { if (!field.value.type.struct_def->fixed) { code += " var _" + field.name + " = _o." + camel_name + " == null ? default(" + GenOffsetType(*field.value.type.struct_def) + ") : " + GenTypeGet(field.value.type) + ".Pack(builder, _o." + camel_name + ");\n"; } break; } case BASE_TYPE_STRING: { std::string create_string = field.shared ? "CreateSharedString" : "CreateString"; code += " var _" + field.name + " = _o." + camel_name + " == null ? default(StringOffset) : " "builder." + create_string + "(_o." + camel_name + ");\n"; break; } case BASE_TYPE_VECTOR: { if (field_has_create.find(&field) != field_has_create.end()) { auto property_name = camel_name; auto gen_for_loop = true; std::string array_name = "__" + field.name; std::string array_type = ""; std::string to_array = ""; switch (field.value.type.element) { case BASE_TYPE_STRING: { std::string create_string = field.shared ? "CreateSharedString" : "CreateString"; array_type = "StringOffset"; to_array += "builder." + create_string + "(_o." + property_name + "[_j])"; break; } case BASE_TYPE_STRUCT: array_type = "Offset<" + GenTypeGet(field.value.type) + ">"; to_array = GenTypeGet(field.value.type) + ".Pack(builder, _o." + property_name + "[_j])"; break; case BASE_TYPE_UTYPE: property_name = camel_name.substr(0, camel_name.size() - 4); array_type = WrapInNameSpace(*field.value.type.enum_def); to_array = "_o." + property_name + "[_j].Type"; break; case BASE_TYPE_UNION: array_type = "int"; to_array = WrapInNameSpace(*field.value.type.enum_def) + "Union.Pack(builder, _o." + property_name + "[_j])"; break; default: gen_for_loop = false; break; } code += " var _" + field.name + " = default(VectorOffset);\n"; code += " if (_o." + property_name + " != null) {\n"; if (gen_for_loop) { code += " var " + array_name + " = new " + array_type + "[_o." + property_name + ".Count];\n"; code += " for (var _j = 0; _j < " + array_name + ".Length; ++_j) { "; code += array_name + "[_j] = " + to_array + "; }\n"; } else { code += " var " + array_name + " = _o." + property_name + ".ToArray();\n"; } code += " _" + field.name + " = Create" + camel_name + "Vector(builder, " + array_name + ");\n"; code += " }\n"; } else { auto pack_method = field.value.type.struct_def == nullptr ? "builder.Add" + GenMethod(field.value.type.VectorType()) + "(_o." + camel_name + "[_j]);" : GenTypeGet(field.value.type) + ".Pack(builder, _o." + camel_name + "[_j]);"; code += " var _" + field.name + " = default(VectorOffset);\n"; code += " if (_o." + camel_name + " != null) {\n"; code += " Start" + camel_name + "Vector(builder, _o." 
+ camel_name + ".Count);\n"; code += " for (var _j = _o." + camel_name + ".Count - 1; _j >= 0; --_j) { " + pack_method + " }\n"; code += " _" + field.name + " = builder.EndVector();\n"; code += " }\n"; } break; } case BASE_TYPE_ARRAY: { if (field.value.type.struct_def != nullptr) { std::vector<std::string> name_vec; name_vec.push_back(field.name); std::vector<int> array_length_vec; array_length_vec.push_back(field.value.type.fixed_length); GenArrayPackDecl_ObjectAPI(*field.value.type.struct_def, code_ptr, name_vec, array_length_vec); } else { code += " var _" + field.name + " = _o." + camel_name + ";\n"; } break; } case BASE_TYPE_UNION: { code += " var _" + field.name + "_type = _o." + camel_name + " == null ? " + WrapInNameSpace(*field.value.type.enum_def) + ".NONE : " + "_o." + camel_name + ".Type;\n"; code += " var _" + field.name + " = _o." + camel_name + " == null ? 0 : " + GenTypeGet_ObjectAPI(field.value.type, opts) + ".Pack(builder, _o." + camel_name + ");\n"; break; } default: break; } } if (struct_has_create) { // Create code += " return Create" + struct_def.name + "(\n"; code += " builder"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; auto camel_name = MakeCamel(field.name); switch (field.value.type.base_type) { case BASE_TYPE_STRUCT: { if (struct_def.fixed) { GenStructArgs_ObjectAPI(*field.value.type.struct_def, code_ptr, " _o." + camel_name + "."); } else { code += ",\n"; if (field.value.type.struct_def->fixed) { code += " " + GenTypeGet(field.value.type) + ".Pack(builder, _o." + camel_name + ")"; } else { code += " _" + field.name; } } break; } case BASE_TYPE_ARRAY: { if (field.value.type.struct_def != nullptr) { GenArrayPackCall_ObjectAPI(*field.value.type.struct_def, code_ptr, " _" + field.name + "_"); } else { code += ",\n"; code += " _" + field.name; } break; } case BASE_TYPE_UNION: FLATBUFFERS_FALLTHROUGH(); // fall thru case BASE_TYPE_UTYPE: FLATBUFFERS_FALLTHROUGH(); // fall thru case BASE_TYPE_STRING: FLATBUFFERS_FALLTHROUGH(); // fall thru case BASE_TYPE_VECTOR: { code += ",\n"; code += " _" + field.name; break; } default: // scalar code += ",\n"; code += " _o." + camel_name; break; } } code += ");\n"; } else { // Start, End code += " Start" + struct_def.name + "(builder);\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; auto camel_name = MakeCamel(field.name); switch (field.value.type.base_type) { case BASE_TYPE_STRUCT: { if (field.value.type.struct_def->fixed) { code += " Add" + camel_name + "(builder, " + GenTypeGet(field.value.type) + ".Pack(builder, _o." + camel_name + "));\n"; } else { code += " Add" + camel_name + "(builder, _" + field.name + ");\n"; } break; } case BASE_TYPE_STRING: FLATBUFFERS_FALLTHROUGH(); // fall thru case BASE_TYPE_ARRAY: FLATBUFFERS_FALLTHROUGH(); // fall thru case BASE_TYPE_VECTOR: { code += " Add" + camel_name + "(builder, _" + field.name + ");\n"; break; } case BASE_TYPE_UTYPE: break; case BASE_TYPE_UNION: { code += " Add" + camel_name + "Type(builder, _" + field.name + "_type);\n"; code += " Add" + camel_name + "(builder, _" + field.name + ");\n"; break; } // scalar default: { code += " Add" + camel_name + "(builder, _o." 
+ camel_name + ");\n"; break; } } } code += " return End" + struct_def.name + "(builder);\n"; } code += " }\n"; } void GenStructArgs_ObjectAPI(const StructDef &struct_def, std::string *code_ptr, std::string prefix) const { auto &code = *code_ptr; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; const auto &field_type = field.value.type; if (IsStruct(field_type)) { GenStructArgs_ObjectAPI(*field_type.struct_def, code_ptr, prefix + "." + MakeCamel(field.name) + "."); } else { code += ",\n"; code += prefix + MakeCamel(field.name); } } } void GenArrayPackDecl_ObjectAPI(const StructDef &struct_def, std::string *code_ptr, std::vector<std::string> name_vec, std::vector<int> array_length_vec) const { auto &code = *code_ptr; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; auto is_array = IsArray(field.value.type); const auto &field_type = is_array ? field.value.type.VectorType() : field.value.type; if (!IsStruct(field_type)) { auto tmp_name_vec = name_vec; tmp_name_vec.push_back(field.name); auto tmp_array_length_vec = array_length_vec; if (is_array) { tmp_array_length_vec.push_back(field_type.fixed_length); } std::string name; for (size_t tmp_name_index = 0; tmp_name_index < tmp_name_vec.size(); ++tmp_name_index) { name += "_" + tmp_name_vec[tmp_name_index]; } code += " var " + name + " = new " + GenTypeBasic(field_type) + "["; code += NumToString(tmp_array_length_vec[0]); for (size_t i = 1; i < tmp_array_length_vec.size(); ++i) { auto array_length = tmp_array_length_vec[i]; code += "," + NumToString(array_length); } code += "];\n"; code += " "; // initialize array for (size_t i = 0; i < tmp_array_length_vec.size(); ++i) { auto array_length = tmp_array_length_vec[i]; auto idx = "idx" + NumToString(i); code += "for (var " + idx + " = 0; " + idx + " < " + NumToString(array_length) + "; ++" + idx + ") {"; } code += name + "[idx0"; for (size_t i = 1; i < tmp_array_length_vec.size(); ++i) { auto idx = "idx" + NumToString(i); code += "," + idx; } code += "] = _o"; for (size_t i = 0; i < tmp_array_length_vec.size(); ++i) { auto idx = "idx" + NumToString(i); code += "." + MakeCamel(tmp_name_vec[i]) + "[" + idx + "]"; } if (!is_array) { code += "." 
+ MakeCamel(field.name); } code += ";"; for (size_t i = 0; i < tmp_array_length_vec.size(); ++i) { code += "}"; } code += "\n"; } } } void GenArrayPackCall_ObjectAPI(const StructDef &struct_def, std::string *code_ptr, std::string prefix) const { auto &code = *code_ptr; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; const auto &field_type = field.value.type; if (IsStruct(field_type)) { GenArrayPackCall_ObjectAPI(*field_type.struct_def, code_ptr, prefix + field.name + "_"); } else { code += ",\n"; code += prefix + field.name; } } } std::string GenTypeGet_ObjectAPI(flatbuffers::Type type, const IDLOptions &opts) const { auto type_name = GenTypeGet(type); // Replace to ObjectBaseAPI Type Name switch (type.base_type) { case BASE_TYPE_STRUCT: FLATBUFFERS_FALLTHROUGH(); // fall thru case BASE_TYPE_ARRAY: FLATBUFFERS_FALLTHROUGH(); // fall thru case BASE_TYPE_VECTOR: { if (type.struct_def != nullptr) { auto type_name_length = type.struct_def->name.length(); auto new_type_name = GenTypeName_ObjectAPI(type.struct_def->name, opts); type_name.replace(type_name.length() - type_name_length, type_name_length, new_type_name); } else if (type.element == BASE_TYPE_UNION) { type_name = WrapInNameSpace(*type.enum_def) + "Union"; } break; } case BASE_TYPE_UNION: { type_name = WrapInNameSpace(*type.enum_def) + "Union"; break; } default: break; } switch (type.base_type) { case BASE_TYPE_ARRAY: { type_name = type_name + "[]"; break; } case BASE_TYPE_VECTOR: { type_name = "List<" + type_name + ">"; break; } default: break; } return type_name; } void GenStruct_ObjectAPI(StructDef &struct_def, std::string *code_ptr, const IDLOptions &opts) const { auto &code = *code_ptr; if (struct_def.attributes.Lookup("private")) { code += "internal "; } else { code += "public "; } if (struct_def.attributes.Lookup("csharp_partial")) { // generate a partial class for this C# struct/table code += "partial "; } auto class_name = GenTypeName_ObjectAPI(struct_def.name, opts); code += "class " + class_name; code += "\n{\n"; // Generate Properties for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; if (field.value.type.base_type == BASE_TYPE_UTYPE) continue; if (field.value.type.element == BASE_TYPE_UTYPE) continue; auto type_name = GenTypeGet_ObjectAPI(field.value.type, opts); auto camel_name = MakeCamel(field.name, true); if (opts.cs_gen_json_serializer) { if (IsUnion(field.value.type)) { auto utype_name = WrapInNameSpace(*field.value.type.enum_def); code += " [Newtonsoft.Json.JsonProperty(\"" + field.name + "_type\")]\n"; if (field.value.type.base_type == BASE_TYPE_VECTOR) { code += " private " + utype_name + "[] " + camel_name + "Type {\n"; code += " get {\n"; code += " if (this." + camel_name + " == null) return null;\n"; code += " var _o = new " + utype_name + "[this." + camel_name + ".Count];\n"; code += " for (var _j = 0; _j < _o.Length; ++_j) { _o[_j] = " "this." + camel_name + "[_j].Type; }\n"; code += " return _o;\n"; code += " }\n"; code += " set {\n"; code += " this." + camel_name + " = new List<" + utype_name + "Union>();\n"; code += " for (var _j = 0; _j < value.Length; ++_j) {\n"; code += " var _o = new " + utype_name + "Union();\n"; code += " _o.Type = value[_j];\n"; code += " this." 
+ camel_name + ".Add(_o);\n"; code += " }\n"; code += " }\n"; code += " }\n"; } else { code += " private " + utype_name + " " + camel_name + "Type {\n"; code += " get {\n"; code += " return this." + camel_name + " != null ? this." + camel_name + ".Type : " + utype_name + ".NONE;\n"; code += " }\n"; code += " set {\n"; code += " this." + camel_name + " = new " + utype_name + "Union();\n"; code += " this." + camel_name + ".Type = value;\n"; code += " }\n"; code += " }\n"; } } code += " [Newtonsoft.Json.JsonProperty(\"" + field.name + "\")]\n"; if (IsUnion(field.value.type)) { auto union_name = (field.value.type.base_type == BASE_TYPE_VECTOR) ? GenTypeGet_ObjectAPI(field.value.type.VectorType(), opts) : type_name; code += " [Newtonsoft.Json.JsonConverter(typeof(" + union_name + "_JsonConverter))]\n"; } if (field.attributes.Lookup("hash")) { code += " [Newtonsoft.Json.JsonIgnore()]\n"; } } code += " public " + type_name + " " + camel_name + " { get; set; }\n"; } // Generate Constructor code += "\n"; code += " public " + class_name + "() {\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; if (field.value.type.base_type == BASE_TYPE_UTYPE) continue; if (field.value.type.element == BASE_TYPE_UTYPE) continue; code += " this." + MakeCamel(field.name) + " = "; auto type_name = GenTypeGet_ObjectAPI(field.value.type, opts); if (IsScalar(field.value.type.base_type)) { code += GenDefaultValue(field) + ";\n"; } else { switch (field.value.type.base_type) { case BASE_TYPE_STRUCT: { if (IsStruct(field.value.type)) { code += "new " + type_name + "();\n"; } else { code += "null;\n"; } break; } case BASE_TYPE_ARRAY: { code += "new " + type_name.substr(0, type_name.length() - 1) + NumToString(field.value.type.fixed_length) + "];\n"; break; } default: { code += "null;\n"; break; } } } } code += " }\n"; // Generate Serialization if (opts.cs_gen_json_serializer && parser_.root_struct_def_ == &struct_def) { code += "\n"; code += " public static " + class_name + " DeserializeFromJson(string jsonText) {\n"; code += " return Newtonsoft.Json.JsonConvert.DeserializeObject<" + class_name + ">(jsonText);\n"; code += " }\n"; code += " public string SerializeToJson() {\n"; code += " return Newtonsoft.Json.JsonConvert.SerializeObject(this, " "Newtonsoft.Json.Formatting.Indented);\n"; code += " }\n"; } if (parser_.root_struct_def_ == &struct_def) { code += " public static " + class_name + " DeserializeFromBinary(byte[] fbBuffer) {\n"; code += " return " + struct_def.name + ".GetRootAs" + struct_def.name + "(new ByteBuffer(fbBuffer)).UnPack();\n"; code += " }\n"; code += " public byte[] SerializeToBinary() {\n"; code += " var fbb = new FlatBufferBuilder(0x10000);\n"; code += " fbb.Finish(" + struct_def.name + ".Pack(fbb, this).Value);\n"; code += " return fbb.DataBuffer.ToSizedArray();\n"; code += " }\n"; } code += "}\n\n"; } // This tracks the current namespace used to determine if a type need to be // prefixed by its namespace const Namespace *cur_name_space_; }; } // namespace csharp bool GenerateCSharp(const Parser &parser, const std::string &path, const std::string &file_name) { csharp::CSharpGenerator generator(parser, path, file_name); return generator.generate(); } } // namespace flatbuffers
1
17,932
Why is this object API only? I think the reason we didn't do this before is that this code was shared with Java, which doesn't have value structs.
google-flatbuffers
java
@@ -432,7 +432,7 @@ func acsWsURL(endpoint, cluster, containerInstanceArn string, taskEngine engine. query.Set("agentVersion", version.Version) query.Set("seqNum", "1") if dockerVersion, err := taskEngine.Version(); err == nil { - query.Set("dockerVersion", "DockerVersion: "+dockerVersion) + query.Set("dockerVersion", dockerVersion) } query.Set(sendCredentialsURLParameterName, acsSessionState.getSendCredentialsURLParameter()) return acsURL + "?" + query.Encode()
1
// Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package handler deals with appropriately reacting to all ACS messages as well // as maintaining the connection to ACS. package handler import ( "context" "io" "net/url" "strconv" "strings" "time" acsclient "github.com/aws/amazon-ecs-agent/agent/acs/client" "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" updater "github.com/aws/amazon-ecs-agent/agent/acs/update_handler" "github.com/aws/amazon-ecs-agent/agent/api" "github.com/aws/amazon-ecs-agent/agent/config" rolecredentials "github.com/aws/amazon-ecs-agent/agent/credentials" "github.com/aws/amazon-ecs-agent/agent/engine" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" "github.com/aws/amazon-ecs-agent/agent/eventhandler" "github.com/aws/amazon-ecs-agent/agent/eventstream" "github.com/aws/amazon-ecs-agent/agent/statemanager" "github.com/aws/amazon-ecs-agent/agent/utils/retry" "github.com/aws/amazon-ecs-agent/agent/utils/ttime" "github.com/aws/amazon-ecs-agent/agent/version" "github.com/aws/amazon-ecs-agent/agent/wsclient" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/cihub/seelog" ) const ( // heartbeatTimeout is the maximum time to wait between heartbeats // without disconnecting heartbeatTimeout = 1 * time.Minute heartbeatJitter = 1 * time.Minute // wsRWTimeout is the duration of read and write deadline for the // websocket connection wsRWTimeout = 2*heartbeatTimeout + heartbeatJitter inactiveInstanceReconnectDelay = 1 * time.Hour connectionBackoffMin = 250 * time.Millisecond connectionBackoffMax = 2 * time.Minute connectionBackoffJitter = 0.2 connectionBackoffMultiplier = 1.5 // payloadMessageBufferSize is the maximum number of payload messages // to queue up without having handled previous ones. payloadMessageBufferSize = 10 // sendCredentialsURLParameterName is the name of the URL parameter // in the ACS URL that is used to indicate if ACS should send // credentials for all tasks on establishing the connection sendCredentialsURLParameterName = "sendCredentials" inactiveInstanceExceptionPrefix = "InactiveInstanceException:" ) // Session defines an interface for handler's long-lived connection with ACS. type Session interface { Start() error } // session encapsulates all arguments needed by the handler to connect to ACS // and to handle messages received by ACS. The Session.Start() method can be used // to start processing messages from ACS. 
type session struct { containerInstanceARN string credentialsProvider *credentials.Credentials agentConfig *config.Config deregisterInstanceEventStream *eventstream.EventStream taskEngine engine.TaskEngine ecsClient api.ECSClient state dockerstate.TaskEngineState stateManager statemanager.StateManager credentialsManager rolecredentials.Manager taskHandler *eventhandler.TaskHandler ctx context.Context cancel context.CancelFunc backoff retry.Backoff resources sessionResources _heartbeatTimeout time.Duration _heartbeatJitter time.Duration _inactiveInstanceReconnectDelay time.Duration } // sessionResources defines the resource creator interface for starting // a session with ACS. This interface is intended to define methods // that create resources used to establish the connection to ACS // It is confined to just the createACSClient() method for now. It can be // extended to include the acsWsURL() and newDisconnectionTimer() methods // when needed // The goal is to make it easier to test and inject dependencies type sessionResources interface { // createACSClient creates a new websocket client createACSClient(url string, cfg *config.Config) wsclient.ClientServer sessionState } // acsSessionResources implements resource creator and session state interfaces // to create resources needed to connect to ACS and to record session state // for the same type acsSessionResources struct { credentialsProvider *credentials.Credentials // sendCredentials is used to set the 'sendCredentials' URL parameter // used to connect to ACS // It is set to 'true' for the very first successful connection on // agent start. It is set to false for all successive connections sendCredentials bool } // sessionState defines state recorder interface for the // session established with ACS. It can be used to record and // retrieve data shared across multiple connections to ACS type sessionState interface { // connectedToACS callback indicates that the client has // connected to ACS connectedToACS() // getSendCredentialsURLParameter retrieves the value for // the 'sendCredentials' URL parameter getSendCredentialsURLParameter() string } // NewSession creates a new Session object func NewSession(ctx context.Context, config *config.Config, deregisterInstanceEventStream *eventstream.EventStream, containerInstanceArn string, credentialsProvider *credentials.Credentials, ecsClient api.ECSClient, taskEngineState dockerstate.TaskEngineState, stateManager statemanager.StateManager, taskEngine engine.TaskEngine, credentialsManager rolecredentials.Manager, taskHandler *eventhandler.TaskHandler) Session { resources := newSessionResources(credentialsProvider) backoff := retry.NewExponentialBackoff(connectionBackoffMin, connectionBackoffMax, connectionBackoffJitter, connectionBackoffMultiplier) derivedContext, cancel := context.WithCancel(ctx) return &session{ agentConfig: config, deregisterInstanceEventStream: deregisterInstanceEventStream, containerInstanceARN: containerInstanceArn, credentialsProvider: credentialsProvider, ecsClient: ecsClient, state: taskEngineState, stateManager: stateManager, taskEngine: taskEngine, credentialsManager: credentialsManager, taskHandler: taskHandler, ctx: derivedContext, cancel: cancel, backoff: backoff, resources: resources, _heartbeatTimeout: heartbeatTimeout, _heartbeatJitter: heartbeatJitter, _inactiveInstanceReconnectDelay: inactiveInstanceReconnectDelay, } } // Start starts the session. It'll forever keep trying to connect to ACS unless // the context is cancelled. 
// // If the context is cancelled, Start() would return with the error code returned // by the context. // If the instance is deregistered, Start() would emit an event to the // deregister-instance event stream and sets the connection backoff time to 1 hour. func (acsSession *session) Start() error { // connectToACS channel is used to indicate the intent to connect to ACS // It's processed by the select loop to connect to ACS connectToACS := make(chan struct{}) // This is required to trigger the first connection to ACS. Subsequent // connections are triggered by the handleACSError() method go func() { connectToACS <- struct{}{} }() for { select { case <-connectToACS: seelog.Debugf("Received connect to ACS message") // Start a session with ACS acsError := acsSession.startSessionOnce() // Session with ACS was stopped with some error, start processing the error isInactiveInstance := isInactiveInstanceError(acsError) if isInactiveInstance { // If the instance was deregistered, send an event to the event stream // for the same seelog.Debug("Container instance is deregistered, notifying listeners") err := acsSession.deregisterInstanceEventStream.WriteToEventStream(struct{}{}) if err != nil { seelog.Debugf("Failed to write to deregister container instance event stream, err: %v", err) } } if shouldReconnectWithoutBackoff(acsError) { // If ACS closed the connection, there's no need to backoff, // reconnect immediately seelog.Infof("ACS Websocket connection closed for a valid reason: %v", acsError) acsSession.backoff.Reset() sendEmptyMessageOnChannel(connectToACS) } else { // Disconnected unexpectedly from ACS, compute backoff duration to // reconnect reconnectDelay := acsSession.computeReconnectDelay(isInactiveInstance) seelog.Infof("Reconnecting to ACS in: %s", reconnectDelay.String()) waitComplete := acsSession.waitForDuration(reconnectDelay) if waitComplete { // If the context was not cancelled and we've waited for the // wait duration without any errors, send the message to the channel // to reconnect to ACS seelog.Info("Done waiting; reconnecting to ACS") sendEmptyMessageOnChannel(connectToACS) } else { // Wait was interrupted. We expect the session to close as canceling // the session context is the only way to end up here. Print a message // to indicate the same seelog.Info("Interrupted waiting for reconnect delay to elapse; Expect session to close") } } case <-acsSession.ctx.Done(): seelog.Debugf("ACS session context cancelled") return acsSession.ctx.Err() } } } // startSessionOnce creates a session with ACS and handles requests using the passed // in arguments func (acsSession *session) startSessionOnce() error { acsEndpoint, err := acsSession.ecsClient.DiscoverPollEndpoint(acsSession.containerInstanceARN) if err != nil { seelog.Errorf("acs: unable to discover poll endpoint, err: %v", err) return err } url := acsWsURL(acsEndpoint, acsSession.agentConfig.Cluster, acsSession.containerInstanceARN, acsSession.taskEngine, acsSession.resources) client := acsSession.resources.createACSClient(url, acsSession.agentConfig) defer client.Close() return acsSession.startACSSession(client) } // startACSSession starts a session with ACS. It adds request handlers for various // kinds of messages expected from ACS. 
It returns on server disconnection or when // the context is cancelled func (acsSession *session) startACSSession(client wsclient.ClientServer) error { cfg := acsSession.agentConfig refreshCredsHandler := newRefreshCredentialsHandler(acsSession.ctx, cfg.Cluster, acsSession.containerInstanceARN, client, acsSession.credentialsManager, acsSession.taskEngine) defer refreshCredsHandler.clearAcks() refreshCredsHandler.start() defer refreshCredsHandler.stop() client.AddRequestHandler(refreshCredsHandler.handlerFunc()) // Add handler to ack task ENI attach message eniAttachHandler := newAttachTaskENIHandler( acsSession.ctx, cfg.Cluster, acsSession.containerInstanceARN, client, acsSession.state, acsSession.stateManager, ) eniAttachHandler.start() defer eniAttachHandler.stop() client.AddRequestHandler(eniAttachHandler.handlerFunc()) // Add handler to ack instance ENI attach message instanceENIAttachHandler := newAttachInstanceENIHandler( acsSession.ctx, cfg.Cluster, acsSession.containerInstanceARN, client, acsSession.state, acsSession.stateManager, ) instanceENIAttachHandler.start() defer instanceENIAttachHandler.stop() client.AddRequestHandler(instanceENIAttachHandler.handlerFunc()) // Add request handler for handling payload messages from ACS payloadHandler := newPayloadRequestHandler( acsSession.ctx, acsSession.taskEngine, acsSession.ecsClient, cfg.Cluster, acsSession.containerInstanceARN, client, acsSession.stateManager, refreshCredsHandler, acsSession.credentialsManager, acsSession.taskHandler) // Clear the acks channel on return because acks of messageids don't have any value across sessions defer payloadHandler.clearAcks() payloadHandler.start() defer payloadHandler.stop() client.AddRequestHandler(payloadHandler.handlerFunc()) // Ignore heartbeat messages; anyMessageHandler gets 'em client.AddRequestHandler(func(*ecsacs.HeartbeatMessage) {}) updater.AddAgentUpdateHandlers(client, cfg, acsSession.stateManager, acsSession.taskEngine) err := client.Connect() if err != nil { seelog.Errorf("Error connecting to ACS: %v", err) return err } seelog.Info("Connected to ACS endpoint") // Start inactivity timer for closing the connection timer := newDisconnectionTimer(client, acsSession.heartbeatTimeout(), acsSession.heartbeatJitter()) // Any message from the server resets the disconnect timeout client.SetAnyRequestHandler(anyMessageHandler(timer, client)) defer timer.Stop() acsSession.resources.connectedToACS() backoffResetTimer := time.AfterFunc( retry.AddJitter(acsSession.heartbeatTimeout(), acsSession.heartbeatJitter()), func() { // If we do not have an error connecting and remain connected for at // least 1 or so minutes, reset the backoff. This prevents disconnect // errors that only happen infrequently from damaging the reconnect // delay as significantly. acsSession.backoff.Reset() }) defer backoffResetTimer.Stop() serveErr := make(chan error, 1) go func() { serveErr <- client.Serve() }() for { select { case <-acsSession.ctx.Done(): // Stop receiving and sending messages from and to ACS when // the context received from the main function is canceled seelog.Infof("ACS session context cancelled.") return acsSession.ctx.Err() case err := <-serveErr: // Stop receiving and sending messages from and to ACS when // client.Serve returns an error. 
This can happen when the // the connection is closed by ACS or the agent seelog.Infof("ACS connection closed: %v", err) return err } } } func (acsSession *session) computeReconnectDelay(isInactiveInstance bool) time.Duration { if isInactiveInstance { return acsSession._inactiveInstanceReconnectDelay } return acsSession.backoff.Duration() } // waitForDuration waits for the specified duration of time. If the wait is interrupted, // it returns a false value. Else, it returns true, indicating completion of wait time. func (acsSession *session) waitForDuration(delay time.Duration) bool { reconnectTimer := time.NewTimer(delay) select { case <-reconnectTimer.C: return true case <-acsSession.ctx.Done(): reconnectTimer.Stop() return false } } func (acsSession *session) heartbeatTimeout() time.Duration { return acsSession._heartbeatTimeout } func (acsSession *session) heartbeatJitter() time.Duration { return acsSession._heartbeatJitter } // createACSClient creates the ACS Client using the specified URL func (acsResources *acsSessionResources) createACSClient(url string, cfg *config.Config) wsclient.ClientServer { return acsclient.New(url, cfg, acsResources.credentialsProvider, wsRWTimeout) } // connectedToACS records a successful connection to ACS // It sets sendCredentials to false on such an event func (acsResources *acsSessionResources) connectedToACS() { acsResources.sendCredentials = false } // getSendCredentialsURLParameter gets the value to be set for the // 'sendCredentials' URL parameter func (acsResources *acsSessionResources) getSendCredentialsURLParameter() string { return strconv.FormatBool(acsResources.sendCredentials) } func newSessionResources(credentialsProvider *credentials.Credentials) sessionResources { return &acsSessionResources{ credentialsProvider: credentialsProvider, sendCredentials: true, } } // acsWsURL returns the websocket url for ACS given the endpoint func acsWsURL(endpoint, cluster, containerInstanceArn string, taskEngine engine.TaskEngine, acsSessionState sessionState) string { acsURL := endpoint if endpoint[len(endpoint)-1] != '/' { acsURL += "/" } acsURL += "ws" query := url.Values{} query.Set("clusterArn", cluster) query.Set("containerInstanceArn", containerInstanceArn) query.Set("agentHash", version.GitHashString()) query.Set("agentVersion", version.Version) query.Set("seqNum", "1") if dockerVersion, err := taskEngine.Version(); err == nil { query.Set("dockerVersion", "DockerVersion: "+dockerVersion) } query.Set(sendCredentialsURLParameterName, acsSessionState.getSendCredentialsURLParameter()) return acsURL + "?" + query.Encode() } // newDisconnectionTimer creates a new time object, with a callback to // disconnect from ACS on inactivity func newDisconnectionTimer(client wsclient.ClientServer, timeout time.Duration, jitter time.Duration) ttime.Timer { timer := time.AfterFunc(retry.AddJitter(timeout, jitter), func() { seelog.Warn("ACS Connection hasn't had any activity for too long; closing connection") if err := client.Close(); err != nil { seelog.Warnf("Error disconnecting: %v", err) } seelog.Info("Disconnected from ACS") }) return timer } // anyMessageHandler handles any server message. 
Any server message means the // connection is active and thus the heartbeat disconnect should not occur func anyMessageHandler(timer ttime.Timer, client wsclient.ClientServer) func(interface{}) { return func(interface{}) { seelog.Debug("ACS activity occurred") // Reset read deadline as there's activity on the channel if err := client.SetReadDeadline(time.Now().Add(wsRWTimeout)); err != nil { seelog.Warnf("Unable to extend read deadline for ACS connection: %v", err) } // Reset heartbeat timer timer.Reset(retry.AddJitter(heartbeatTimeout, heartbeatJitter)) } } func shouldReconnectWithoutBackoff(acsError error) bool { return acsError == nil || acsError == io.EOF } func isInactiveInstanceError(acsError error) bool { return acsError != nil && strings.HasPrefix(acsError.Error(), inactiveInstanceExceptionPrefix) } // sendEmptyMessageOnChannel sends an empty message using a go-routine on the // specified channel func sendEmptyMessageOnChannel(channel chan<- struct{}) { go func() { channel <- struct{}{} }() }
1
23,103
Just to confirm: the backend will handle both the old format and the new format?
aws-amazon-ecs-agent
go
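The reviewer's question above is easier to judge with the wire format in hand. The following is a minimal, standalone Go sketch (not part of the agent) that only reuses the query-building call shown in the patch and in acsWsURL above; the version string "1.13.1" is an assumed example value, not something taken from the repository.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical value; in the agent this comes from taskEngine.Version().
	dockerVersion := "1.13.1"

	oldQuery := url.Values{}
	oldQuery.Set("dockerVersion", "DockerVersion: "+dockerVersion) // format before the patch

	newQuery := url.Values{}
	newQuery.Set("dockerVersion", dockerVersion) // format after the patch

	fmt.Println(oldQuery.Encode()) // dockerVersion=DockerVersion%3A+1.13.1
	fmt.Println(newQuery.Encode()) // dockerVersion=1.13.1
}

If the backend ever matched on the full prefixed value, it would have to accept both encodings during the transition, which is what the review comment is asking about.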
@@ -91,7 +91,7 @@ namespace Datadog.Trace } private static T? ParseEnum<T>(IHeadersCollection headers, string headerName) - where T : struct + where T : struct, Enum { var headerValues = headers.GetValues(headerName).ToList();
1
using System; using System.Globalization; using System.Linq; using Datadog.Trace.Headers; using Datadog.Trace.Logging; namespace Datadog.Trace { internal class SpanContextPropagator { private const NumberStyles NumberStyles = System.Globalization.NumberStyles.Integer; private static readonly CultureInfo InvariantCulture = CultureInfo.InvariantCulture; private static readonly ILog Log = LogProvider.For<SpanContextPropagator>(); private SpanContextPropagator() { } public static SpanContextPropagator Instance { get; } = new SpanContextPropagator(); /// <summary> /// Propagates the specified context by adding new headers to a <see cref="IHeadersCollection"/>. /// This locks the sampling priority for <paramref name="context"/>. /// </summary> /// <param name="context">A <see cref="SpanContext"/> value that will be propagated into <paramref name="headers"/>.</param> /// <param name="headers">A <see cref="IHeadersCollection"/> to add new headers to.</param> public void Inject(SpanContext context, IHeadersCollection headers) { if (context == null) { throw new ArgumentNullException(nameof(context)); } if (headers == null) { throw new ArgumentNullException(nameof(headers)); } // lock sampling priority when span propagates. context.TraceContext?.LockSamplingPriority(); headers.Set(HttpHeaderNames.TraceId, context.TraceId.ToString(InvariantCulture)); headers.Set(HttpHeaderNames.ParentId, context.SpanId.ToString(InvariantCulture)); var samplingPriority = (int?)(context.TraceContext?.SamplingPriority ?? context.SamplingPriority); headers.Set( HttpHeaderNames.SamplingPriority, samplingPriority?.ToString(InvariantCulture)); } /// <summary> /// Extracts a <see cref="SpanContext"/> from the values found in the specified headers. /// </summary> /// <param name="headers">The headers that contain the values to be extracted.</param> /// <returns>A new <see cref="SpanContext"/> that contains the values obtained from <paramref name="headers"/>.</returns> public SpanContext Extract(IHeadersCollection headers) { if (headers == null) { throw new ArgumentNullException(nameof(headers)); } var traceId = ParseUInt64(headers, HttpHeaderNames.TraceId); if (traceId == 0) { // a valid traceId is required to use distributed tracing return null; } var parentId = ParseUInt64(headers, HttpHeaderNames.ParentId); var samplingPriority = ParseEnum<SamplingPriority>(headers, HttpHeaderNames.SamplingPriority); return new SpanContext(traceId, parentId, samplingPriority); } private static ulong ParseUInt64(IHeadersCollection headers, string headerName) { var headerValues = headers.GetValues(headerName).ToList(); if (headerValues.Count > 0) { foreach (string headerValue in headerValues) { if (ulong.TryParse(headerValue, NumberStyles, InvariantCulture, out var result)) { return result; } } Log.InfoFormat("Could not parse {0} headers: {1}", headerName, string.Join(",", headerValues)); } return 0; } private static T? ParseEnum<T>(IHeadersCollection headers, string headerName) where T : struct { var headerValues = headers.GetValues(headerName).ToList(); if (headerValues.Count > 0) { foreach (string headerValue in headerValues) { if (int.TryParse(headerValue, NumberStyles, InvariantCulture, out var result) && Enum.IsDefined(typeof(T), result)) { return (T)Enum.ToObject(typeof(T), result); } } Log.InfoFormat("Could not parse {0} headers: {1}", headerName, string.Join(",", headerValues)); } return default; } } }
1
14,824
huh, look at that, I wasn't aware you could constrain a type by Enum
DataDog-dd-trace-dotnet
.cs
@@ -229,6 +229,15 @@ PYBIND11_MODULE(ADIOS2_PYTHON_MODULE_NAME, m) const bool opBool = adios ? true : false; return opBool; }) + .def(pybind11::init<const bool>(), + "adios2 module starting point " + "non-MPI, constructs an ADIOS class " + "object", + pybind11::arg("debugMode") = true) + .def(pybind11::init<const std::string &, const bool>(), + "adios2 module starting point non-MPI, constructs an ADIOS class " + "object", + pybind11::arg("configFile"), pybind11::arg("debugMode") = true) #if ADIOS2_USE_MPI .def(pybind11::init<const adios2::py11::MPI4PY_Comm, const bool>(), "adios2 module starting point, constructs an ADIOS class object",
1
/* * Distributed under the OSI-approved Apache License, Version 2.0. See * accompanying file Copyright.txt for details. * * py11glue.cpp * * Created on: Mar 16, 2017 * Author: William F Godoy [email protected] */ #include <pybind11/pybind11.h> #include <pybind11/stl.h> #include <sstream> #include <stdexcept> #include <adios2.h> #if ADIOS2_USE_MPI #include <mpi4py/mpi4py.h> #endif #include "py11ADIOS.h" #include "py11Attribute.h" #include "py11Engine.h" #include "py11File.h" #include "py11IO.h" #include "py11Operator.h" #include "py11Variable.h" #if ADIOS2_USE_MPI namespace pybind11 { namespace detail { template <> struct type_caster<adios2::py11::MPI4PY_Comm> { public: /** * This macro establishes the name 'MPI4PY_Comm' in * function signatures and declares a local variable * 'value' of type MPI4PY_Comm */ PYBIND11_TYPE_CASTER(adios2::py11::MPI4PY_Comm, _("MPI4PY_Comm")); /** * Conversion part 1 (Python->C++): convert a PyObject into a MPI4PY_Comm * instance or return false upon failure. The second argument * indicates whether implicit conversions should be applied. */ bool load(handle src, bool) { // Import mpi4py if it does not exist. if (!PyMPIComm_Get) { if (import_mpi4py() < 0) { throw std::runtime_error( "ERROR: mpi4py not loaded correctly\n"); /* Python 2.X */ } } // If src is not actually a MPI4PY communicator, the next // call returns nullptr, and we return false to indicate the conversion // failed. MPI_Comm *mpiCommPtr = PyMPIComm_Get(src.ptr()); if (mpiCommPtr == nullptr) { return false; } value.comm = *mpiCommPtr; return true; } }; } // namespace detail } // namespace pybind11 #endif #if ADIOS2_USE_MPI adios2::py11::File OpenMPI(const std::string &name, const std::string mode, adios2::py11::MPI4PY_Comm comm, const std::string enginetype) { return adios2::py11::File(name, mode, comm, enginetype); } adios2::py11::File OpenConfigMPI(const std::string &name, const std::string mode, adios2::py11::MPI4PY_Comm comm, const std::string &configfile, const std::string ioinconfigfile) { return adios2::py11::File(name, mode, comm, configfile, ioinconfigfile); } #endif adios2::py11::File Open(const std::string &name, const std::string mode, const std::string enginetype) { return adios2::py11::File(name, mode, enginetype); } adios2::py11::File OpenConfig(const std::string &name, const std::string mode, const std::string configfile, const std::string ioinconfigfile) { return adios2::py11::File(name, mode, configfile, ioinconfigfile); } PYBIND11_MODULE(ADIOS2_PYTHON_MODULE_NAME, m) { m.attr("DebugON") = true; m.attr("DebugOFF") = false; m.attr("ConstantDims") = true; m.attr("VariableDims") = false; m.attr("LocalValueDim") = adios2::LocalValueDim; m.attr("GlobalValue") = false; m.attr("LocalValue") = true; m.attr("__version__") = ADIOS2_VERSION_STR; // enum classes pybind11::enum_<adios2::Mode>(m, "Mode") .value("Write", adios2::Mode::Write) .value("Read", adios2::Mode::Read) .value("Append", adios2::Mode::Append) .value("Deferred", adios2::Mode::Deferred) .value("Sync", adios2::Mode::Sync) .export_values(); pybind11::enum_<adios2::ShapeID>(m, "ShapeID") .value("Unknown", adios2::ShapeID::Unknown) .value("GlobalValue", adios2::ShapeID::GlobalValue) .value("GlobalArray", adios2::ShapeID::GlobalArray) .value("LocalValue", adios2::ShapeID::LocalValue) .value("LocalArray", adios2::ShapeID::LocalArray) .export_values(); pybind11::enum_<adios2::StepMode>(m, "StepMode") .value("Append", adios2::StepMode::Append) .value("Update", adios2::StepMode::Update) .value("Read", adios2::StepMode::Read) 
.export_values(); pybind11::enum_<adios2::StepStatus>(m, "StepStatus") .value("OK", adios2::StepStatus::OK) .value("NotReady", adios2::StepStatus::NotReady) .value("EndOfStream", adios2::StepStatus::EndOfStream) .value("OtherError", adios2::StepStatus::OtherError) .export_values(); #if ADIOS2_USE_MPI m.def("open", &OpenMPI, pybind11::arg("name"), pybind11::arg("mode"), pybind11::arg("comm"), pybind11::arg("engine_type") = "BPFile", R"md( Simple API MPI open, based on python IO. Allows for passing parameters in source code. Parameters name stream name mode "w" : write, "r" : read, "a" : append (append not yet supported) comm (mpi4py) MPI communicator engine_type adios2 engine type, default=BPFile Returns file (adios2 stream) handler to adios File for the simple Python API )md"); m.def("open", &OpenConfigMPI, pybind11::arg("name"), pybind11::arg("mode"), pybind11::arg("comm"), pybind11::arg("config_file"), pybind11::arg("io_in_config_file"), R"md( Simple API MPI open, based on python IO. Allows for passing a runtime configuration file in xml format and the name of the io element related to the returning File. Parameters name stream name mode "w" : write, "r" : read, "a" : append (append not yet supported) comm (mpi4py) MPI communicator config_file adios2 runtime configuration file name, in xml format io_in_config_file io element in configfile related to returning File Returns file (adios2 stream) handler to adios File for the simple Python API )md"); #endif m.def("open", &Open, "High-level API, file object open", pybind11::arg("name"), pybind11::arg("mode"), pybind11::arg("engine_type") = "BPFile"); m.def("open", &OpenConfig, "High-level API, file object open with a runtime config file", pybind11::arg("name"), pybind11::arg("mode"), pybind11::arg("config_file"), pybind11::arg("io_in_config_file")); pybind11::class_<adios2::py11::ADIOS>(m, "ADIOS") // Python 2 .def("__nonzero__", [](const adios2::py11::ADIOS &adios) { const bool opBool = adios ? true : false; return opBool; }) // Python 3 .def("__bool__", [](const adios2::py11::ADIOS &adios) { const bool opBool = adios ? 
true : false; return opBool; }) #if ADIOS2_USE_MPI .def(pybind11::init<const adios2::py11::MPI4PY_Comm, const bool>(), "adios2 module starting point, constructs an ADIOS class object", pybind11::arg("comm"), pybind11::arg("debugMode") = true) .def(pybind11::init<const std::string &, const adios2::py11::MPI4PY_Comm, const bool>(), "adios2 module starting point, constructs an ADIOS class object", pybind11::arg("configFile"), pybind11::arg("comm"), pybind11::arg("debugMode") = true) #endif .def(pybind11::init<const bool>(), "adios2 module starting point " "non-MPI, constructs an ADIOS class " "object", pybind11::arg("debugMode") = true) .def(pybind11::init<const std::string &, const bool>(), "adios2 module starting point non-MPI, constructs an ADIOS class " "object", pybind11::arg("configFile"), pybind11::arg("debugMode") = true) .def("DeclareIO", &adios2::py11::ADIOS::DeclareIO, "spawn IO object component returning a IO object with a unique " "name, throws an exception if IO with the same name is declared " "twice") .def("AtIO", &adios2::py11::ADIOS::AtIO, "returns an IO object " "previously defined IO object " "with DeclareIO, throws " "an exception if not found") .def("DefineOperator", &adios2::py11::ADIOS::DefineOperator) .def("InquireOperator", &adios2::py11::ADIOS::InquireOperator) .def("FlushAll", &adios2::py11::ADIOS::FlushAll, "flushes all engines in all spawned IO objects") .def("RemoveIO", &adios2::py11::ADIOS::RemoveIO, "DANGER ZONE: remove a particular IO by name, creates dangling " "objects to parameters, variable, attributes, engines created " "with removed IO") .def("RemoveAllIOs", &adios2::py11::ADIOS::RemoveAllIOs, "DANGER ZONE: remove all IOs in current ADIOS object, creates " "dangling objects to parameters, variable, attributes, engines " "created with removed IO"); pybind11::class_<adios2::py11::IO>(m, "IO") // Python 2 .def("__nonzero__", [](const adios2::py11::IO &io) { const bool opBool = io ? true : false; return opBool; }) // Python 3 .def("__bool__", [](const adios2::py11::IO &io) { const bool opBool = io ? 
true : false; return opBool; }) .def("SetEngine", &adios2::py11::IO::SetEngine) .def("SetParameters", &adios2::py11::IO::SetParameters, pybind11::arg("parameters") = adios2::Params()) .def("SetParameter", &adios2::py11::IO::SetParameter) .def("Parameters", &adios2::py11::IO::Parameters) .def("AddTransport", &adios2::py11::IO::AddTransport, pybind11::arg("type"), pybind11::arg("parameters") = adios2::Params()) .def("DefineVariable", (adios2::py11::Variable(adios2::py11::IO::*)( const std::string &, const pybind11::array &, const adios2::Dims &, const adios2::Dims &, const adios2::Dims &, const bool)) & adios2::py11::IO::DefineVariable, pybind11::return_value_policy::move, pybind11::arg("name"), pybind11::arg("array"), pybind11::arg("shape") = adios2::Dims(), pybind11::arg("start") = adios2::Dims(), pybind11::arg("count") = adios2::Dims(), pybind11::arg("isConstantDims") = false) .def( "DefineVariable", (adios2::py11::Variable(adios2::py11::IO::*)(const std::string &)) & adios2::py11::IO::DefineVariable, pybind11::return_value_policy::move, pybind11::arg("name")) .def("InquireVariable", &adios2::py11::IO::InquireVariable, pybind11::return_value_policy::move) .def("InquireAttribute", &adios2::py11::IO::InquireAttribute, pybind11::return_value_policy::move) .def("DefineAttribute", (adios2::py11::Attribute(adios2::py11::IO::*)( const std::string &, const pybind11::array &, const std::string &, const std::string)) & adios2::py11::IO::DefineAttribute, pybind11::arg("name"), pybind11::arg("array"), pybind11::arg("variable_name") = "", pybind11::arg("separator") = "/", pybind11::return_value_policy::move) .def("DefineAttribute", (adios2::py11::Attribute(adios2::py11::IO::*)( const std::string &, const std::string &, const std::string &, const std::string)) & adios2::py11::IO::DefineAttribute, pybind11::arg("name"), pybind11::arg("stringValue"), pybind11::arg("variable_name") = "", pybind11::arg("separator") = "/", pybind11::return_value_policy::move) .def("DefineAttribute", (adios2::py11::Attribute(adios2::py11::IO::*)( const std::string &, const std::vector<std::string> &, const std::string &, const std::string)) & adios2::py11::IO::DefineAttribute, pybind11::arg("name"), pybind11::arg("strings"), pybind11::arg("variable_name") = "", pybind11::arg("separator") = "/", pybind11::return_value_policy::move) .def("Open", (adios2::py11::Engine(adios2::py11::IO::*)( const std::string &, const int)) & adios2::py11::IO::Open) #if ADIOS2_USE_MPI .def("Open", (adios2::py11::Engine(adios2::py11::IO::*)( const std::string &, const int, adios2::py11::MPI4PY_Comm comm)) & adios2::py11::IO::Open) #endif .def("AvailableVariables", &adios2::py11::IO::AvailableVariables) .def("AvailableAttributes", &adios2::py11::IO::AvailableAttributes) .def("FlushAll", &adios2::py11::IO::FlushAll) .def("EngineType", &adios2::py11::IO::EngineType) .def("RemoveVariable", &adios2::py11::IO::RemoveVariable) .def("RemoveAllVariables", &adios2::py11::IO::RemoveAllVariables) .def("RemoveAttribute", &adios2::py11::IO::RemoveAttribute) .def("RemoveAllAttributes", &adios2::py11::IO::RemoveAllAttributes); pybind11::class_<adios2::py11::Variable>(m, "Variable") // Python 2 .def("__nonzero__", [](const adios2::py11::Variable &variable) { const bool opBool = variable ? true : false; return opBool; }) // Python 3 .def("__bool__", [](const adios2::py11::Variable &variable) { const bool opBool = variable ? 
true : false; return opBool; }) .def("SetShape", &adios2::py11::Variable::SetShape) .def("SetBlockSelection", &adios2::py11::Variable::SetBlockSelection) .def("SetSelection", &adios2::py11::Variable::SetSelection) .def("SetStepSelection", &adios2::py11::Variable::SetStepSelection) .def("SelectionSize", &adios2::py11::Variable::SelectionSize) .def("Name", &adios2::py11::Variable::Name) .def("Type", &adios2::py11::Variable::Type) .def("Sizeof", &adios2::py11::Variable::Sizeof) .def("ShapeID", &adios2::py11::Variable::ShapeID) .def("Shape", &adios2::py11::Variable::Shape, pybind11::arg("step") = adios2::EngineCurrentStep) .def("Start", &adios2::py11::Variable::Start) .def("Count", &adios2::py11::Variable::Count) .def("Steps", &adios2::py11::Variable::Steps) .def("StepsStart", &adios2::py11::Variable::StepsStart) .def("BlockID", &adios2::py11::Variable::BlockID) .def("AddOperation", &adios2::py11::Variable::AddOperation) .def("Operations", &adios2::py11::Variable::Operations); pybind11::class_<adios2::py11::Attribute>(m, "Attribute") // Python 2 .def("__nonzero__", [](const adios2::py11::Attribute &attribute) { const bool opBool = attribute ? true : false; return opBool; }) // Python 3 .def("__bool__", [](const adios2::py11::Attribute &attribute) { const bool opBool = attribute ? true : false; return opBool; }) .def("Name", &adios2::py11::Attribute::Name) .def("Type", &adios2::py11::Attribute::Type) .def("DataString", &adios2::py11::Attribute::DataString) .def("Data", &adios2::py11::Attribute::Data); pybind11::class_<adios2::py11::Engine>(m, "Engine") // Python 2 .def("__nonzero__", [](const adios2::py11::Engine &engine) { const bool opBool = engine ? true : false; return opBool; }) // Python 3 .def("__bool__", [](const adios2::py11::Engine &engine) { const bool opBool = engine ? 
true : false; return opBool; }) .def("BeginStep", (adios2::StepStatus(adios2::py11::Engine::*)( const adios2::StepMode, const float)) & adios2::py11::Engine::BeginStep, pybind11::arg("mode"), pybind11::arg("timeoutSeconds") = -1.f, pybind11::return_value_policy::move) .def("BeginStep", (adios2::StepStatus(adios2::py11::Engine::*)()) & adios2::py11::Engine::BeginStep, pybind11::return_value_policy::move) .def("Put", (void (adios2::py11::Engine::*)(adios2::py11::Variable, const pybind11::array &, const adios2::Mode launch)) & adios2::py11::Engine::Put, pybind11::arg("variable"), pybind11::arg("array"), pybind11::arg("launch") = adios2::Mode::Deferred) .def("Put", (void (adios2::py11::Engine::*)(adios2::py11::Variable, const std::string &)) & adios2::py11::Engine::Put) .def("PerformPuts", &adios2::py11::Engine::PerformPuts) .def("Get", (void (adios2::py11::Engine::*)(adios2::py11::Variable, pybind11::array &, const adios2::Mode launch)) & adios2::py11::Engine::Get, pybind11::arg("variable"), pybind11::arg("array"), pybind11::arg("launch") = adios2::Mode::Deferred) .def("Get", (void (adios2::py11::Engine::*)(adios2::py11::Variable, std::string &, const adios2::Mode launch)) & adios2::py11::Engine::Get, pybind11::arg("variable"), pybind11::arg("string"), pybind11::arg("launch") = adios2::Mode::Deferred) .def("PerformGets", &adios2::py11::Engine::PerformGets) .def("EndStep", &adios2::py11::Engine::EndStep) .def("Flush", &adios2::py11::Engine::Flush) .def("Close", &adios2::py11::Engine::Close, pybind11::arg("transportIndex") = -1) .def("CurrentStep", &adios2::py11::Engine::CurrentStep) .def("Name", &adios2::py11::Engine::Name) .def("Type", &adios2::py11::Engine::Type) .def("Steps", &adios2::py11::Engine::Steps) .def("LockWriterDefinitions", &adios2::py11::Engine::LockWriterDefinitions) .def("LockReaderSelections", &adios2::py11::Engine::LockReaderSelections) .def("BlocksInfo", &adios2::py11::Engine::BlocksInfo); pybind11::class_<adios2::py11::Operator>(m, "Operator") // Python 2 .def("__nonzero__", [](const adios2::py11::Operator &op) { const bool opBool = op ? true : false; return opBool; }) // Python 3 .def("__bool__", [](const adios2::py11::Operator &op) { const bool opBool = op ? true : false; return opBool; }) .def("Type", &adios2::py11::Operator::Type) .def("SetParameter", &adios2::py11::Operator::SetParameter) .def("Parameters", &adios2::py11::Operator::Parameters); pybind11::class_<adios2::py11::File>(m, "File") .def("__repr__", [](const adios2::py11::File &stream) { return "<adios2.file named '" + stream.m_Name + "' and mode '" + stream.m_Mode + "'>"; }) // enter and exit are defined for the with-as operator in Python .def("__enter__", [](const adios2::py11::File &stream) { return stream; }) .def("__exit__", [](adios2::py11::File &stream, pybind11::args) { stream.Close(); }) .def("__iter__", [](adios2::py11::File &stream) { return stream; }, pybind11::keep_alive<0, 1>()) .def("__next__", [](adios2::py11::File &stream) { if (!stream.GetStep()) { throw pybind11::stop_iteration(); } return stream; }) .def("set_parameter", &adios2::py11::File::SetParameter, pybind11::arg("key"), pybind11::arg("value"), R"md( Sets a single parameter. Overwrites value if key exists. Parameters key input parameter key value parameter value )md") .def("set_parameters", &adios2::py11::File::SetParameters, pybind11::arg("parameters"), R"md( Sets parameters using a dictionary. Removes any previous parameter. 
Parameters parameters input key/value parameters value parameter value )md") .def("add_transport", &adios2::py11::File::AddTransport, pybind11::return_value_policy::move, pybind11::arg("type"), pybind11::arg("parameters") = adios2::Params(), R"md( Adds a transport and its parameters to current IO. Must be supported by current engine type. Parameters type must be a supported transport type for current engine. parameters acceptable parameters for a particular transport CAN'T use the keywords "Transport" or "transport" in key Returns transport_index handler to added transport )md") .def("available_variables", &adios2::py11::File::AvailableVariables, pybind11::return_value_policy::move, pybind11::arg("keys") = std::vector<std::string>(), R"md( Returns a 2-level dictionary with variable information. Read mode only. Parameters keys list of variable information keys to be extracted (case insensitive) keys=['AvailableStepsCount','Type','Max','Min','SingleValue','Shape'] keys=['Name'] returns only the variable names as 1st-level keys leave empty to return all possible keys Returns variables dictionary key variable name value variable information dictionary )md") .def("available_attributes", &adios2::py11::File::AvailableAttributes, pybind11::return_value_policy::move, R"md( Returns a 2-level dictionary with attribute information. Read mode only. Returns attributes dictionary key attribute name value attribute information dictionary )md") .def("write", (void (adios2::py11::File::*)( const std::string &, const pybind11::array &, const adios2::Dims &, const adios2::Dims &, const adios2::Dims &, const bool)) & adios2::py11::File::Write, pybind11::arg("name"), pybind11::arg("array"), pybind11::arg("shape") = adios2::Dims(), pybind11::arg("start") = adios2::Dims(), pybind11::arg("count") = adios2::Dims(), pybind11::arg("end_step") = false, R"md( writes a self-describing array (numpy) variable Parameters name variable name array variable data values shape variable global MPI dimensions. Pass empty numpy array for local variables. start variable offset for current MPI rank. Pass empty numpy array for local variables. count variable dimension for current MPI rank. Pass a numpy array for local variables. end_step end current step, begin next step and flush (default = false). )md") .def("write", (void (adios2::py11::File::*)( const std::string &, const pybind11::array &, const adios2::Dims &, const adios2::Dims &, const adios2::Dims &, const adios2::vParams &, const bool)) & adios2::py11::File::Write, pybind11::arg("name"), pybind11::arg("array"), pybind11::arg("shape"), pybind11::arg("start"), pybind11::arg("count"), pybind11::arg("operations"), pybind11::arg("end_step") = false, R"md( writes a self-describing array (numpy) variable with operations e.g. compression: 'zfp', 'mgard', 'sz' Parameters name variable name array variable data values shape variable global MPI dimensions. Pass empty numpy array for local variables. start variable offset for current MPI rank. Pass empty numpy array for local variables. count variable dimension for current MPI rank. Pass a numpy array for local variables. end_step end current step, begin next step and flush (default = false). 
)md") .def("write", (void (adios2::py11::File::*)(const std::string &, const pybind11::array &, const bool, const bool)) & adios2::py11::File::Write, pybind11::arg("name"), pybind11::arg("array"), pybind11::arg("local_value") = false, pybind11::arg("end_step") = false, R"md( writes a self-describing single value array (numpy) variable Parameters name variable name array variable data single value local_value true: local value, false: global value end_step end current step, begin next step and flush (default = false). )md") .def("write", (void (adios2::py11::File::*)(const std::string &, const std::string &, const bool, const bool)) & adios2::py11::File::Write, pybind11::arg("name"), pybind11::arg("string"), pybind11::arg("local_value") = false, pybind11::arg("end_step") = false, R"md( writes a self-describing single value string variable Parameters name variable name string variable data single value local_value true: local value, false: global value end_step end current step, begin next step and flush (default = false). )md") .def("write_attribute", (void (adios2::py11::File::*)( const std::string &, const pybind11::array &, const std::string &, const std::string, const bool)) & adios2::py11::File::WriteAttribute, pybind11::arg("name"), pybind11::arg("array"), pybind11::arg("variable_name") = "", pybind11::arg("separator") = "/", pybind11::arg("end_step") = false, R"md( writes a self-describing single value array (numpy) variable Parameters name attribute name array attribute numpy array data variable_name if attribute is associated with a variable separator concatenation string between variable_name and attribute e.g. variable_name + separator + name ("var/attr") Not used if variable_name is empty end_step end current step, begin next step and flush (default = false). )md") .def("write_attribute", (void (adios2::py11::File::*)( const std::string &, const std::string &, const std::string &, const std::string, const bool)) & adios2::py11::File::WriteAttribute, pybind11::arg("name"), pybind11::arg("string_value"), pybind11::arg("variable_name") = "", pybind11::arg("separator") = "/", pybind11::arg("end_step") = false, R"md( writes a self-describing single value array (numpy) variable Parameters name attribute name string_value attribute single string variable_name if attribute is associated with a variable separator concatenation string between variable_name and attribute e.g. variable_name + separator + name ("var/attr") Not used if variable_name is empty end_step end current step, begin next step and flush (default = false). )md") .def("write_attribute", (void (adios2::py11::File::*)( const std::string &, const std::vector<std::string> &, const std::string &, const std::string, const bool)) & adios2::py11::File::WriteAttribute, pybind11::arg("name"), pybind11::arg("string_array"), pybind11::arg("variable_name") = "", pybind11::arg("separator") = "/", pybind11::arg("end_step") = false, R"md( writes a self-describing single value array (numpy) variable Parameters name attribute name string_array attribute string array variable_name if attribute is associated with a variable separator concatenation string between variable_name and attribute e.g. variable_name + separator + name ("var/attr") Not used if variable_name is empty end_step end current step, begin next step and flush (default = false). 
)md") .def("read_string", (std::vector<std::string>(adios2::py11::File::*)( const std::string &, const size_t)) & adios2::py11::File::ReadString, pybind11::return_value_policy::take_ownership, pybind11::arg("name"), pybind11::arg("block_id") = 0, R"md( Reads string value for current step (use for streaming mode step by step) Parameters name string variable name block_id required for local variables Returns list data string values. For global values: returns 1 element For local values: returns n-block elements )md") .def("read_string", (std::vector<std::string>(adios2::py11::File::*)( const std::string &, const size_t, const size_t, const size_t)) & adios2::py11::File::ReadString, pybind11::return_value_policy::take_ownership, pybind11::arg("name"), pybind11::arg("step_start"), pybind11::arg("step_count"), pybind11::arg("block_id") = 0, R"md( Reads string value for a certain step (random access mode) Parameters name string variable name step_start variable step start step_count variable number of steps to read from step_start block_id required for local variables Returns string data string values for a certain step range. )md") .def("read", (pybind11::array(adios2::py11::File::*)(const std::string &, const size_t)) & adios2::py11::File::Read, pybind11::return_value_policy::take_ownership, pybind11::arg("name"), pybind11::arg("block_id") = 0, R"md( Reads entire variable for current step (streaming mode step by step) Parameters name variable name block_id required for local array variables Returns array values of variable name for current step. Single values will have a shape={1} numpy array )md") .def("read", (pybind11::array(adios2::py11::File::*)( const std::string &, const adios2::Dims &, const adios2::Dims &, const size_t)) & adios2::py11::File::Read, pybind11::return_value_policy::take_ownership, pybind11::arg("name"), pybind11::arg("start") = adios2::Dims(), pybind11::arg("count") = adios2::Dims(), pybind11::arg("block_id") = 0, R"md( Reads a selection piece in dimension for current step (streaming mode step by step) Parameters name variable name start variable local offset selection (defaults to (0, 0, ...) 
count variable local dimension selection from start defaults to whole array for GlobalArrays, or selected Block size for LocalArrays block_id required for local array variables Returns array values of variable name for current step empty if exception is thrown )md") .def( "read", (pybind11::array(adios2::py11::File::*)( const std::string &, const adios2::Dims &, const adios2::Dims &, const size_t, const size_t, const size_t)) & adios2::py11::File::Read, pybind11::return_value_policy::take_ownership, pybind11::arg("name"), pybind11::arg("start"), pybind11::arg("count"), pybind11::arg("step_start"), pybind11::arg("step_count"), pybind11::arg("block_id") = 0, R"md( Random access read allowed to select steps, only valid with File Engines Parameters name variable to be read start variable offset dimensions count variable local dimensions from offset step_start variable step start step_count variable number of steps to read from step_start block_id required for local array variables Returns array resulting array from selection )md") .def("read_attribute", (pybind11::array(adios2::py11::File::*)( const std::string &, const std::string &, const std::string)) & adios2::py11::File::ReadAttribute, pybind11::return_value_policy::take_ownership, pybind11::arg("name"), pybind11::arg("variable_name") = "", pybind11::arg("separator") = "/", R"md( Reads a numpy based attribute Parameters name attribute name variable_name if attribute is associated with a variable separator concatenation string between variable_name and attribute e.g. variable_name + separator + name (var/attr) Not used if variable_name is empty Returns array resulting array attribute data )md") .def("read_attribute_string", (std::vector<std::string>(adios2::py11::File::*)( const std::string &, const std::string &, const std::string)) & adios2::py11::File::ReadAttributeString, pybind11::return_value_policy::take_ownership, pybind11::arg("name"), pybind11::arg("variable_name") = "", pybind11::arg("separator") = "/", R"md( Read a string attribute Parameters name attribute name variable_name if attribute is associated with a variable separator concatenation string between variable_name and attribute e.g. variable_name + separator + name (var/attr) Not used if variable_name is empty Returns list resulting string list attribute data)md") .def("end_step", &adios2::py11::File::EndStep, R"md( Write mode: advances to the next step. Convenient when declaring variable attributes as advancing to the next step is not attached to any variable. Read mode: in streaming mode releases the current step (no effect in file based engines) )md") .def("close", &adios2::py11::File::Close, R"md( Closes file, thus becoming unreachable. Not required if using open in a with-as statement. Required in all other cases per-open to avoid resource leaks. )md") .def("current_step", &adios2::py11::File::CurrentStep, R"md( Inspect current step when using for-in loops, read mode only Returns current step )md") .def("steps", &adios2::py11::File::Steps, R"md( Inspect available number of steps, for file engines, read mode only Returns steps )md"); }
1
14,176
Even though the debug parameter is effectively ignored, should this default to `false` instead of `true`?
ornladios-ADIOS2
cpp
@@ -0,0 +1,17 @@ +/* Copyright 2016 Google Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.api.codegen.viewmodel; + +public interface ParamView {}
1
1
16,098
What is the purpose of this interface? Unlike ParamDocView, it doesn't seem to be used anywhere to enable polymorphism
googleapis-gapic-generator
java
@@ -1,12 +1,11 @@ package startupshutdown import ( - "log" - "os" - "os/exec" "strings" + "github.com/google/uuid" "github.com/mholt/caddy" + "github.com/mholt/caddy/onevent/hook" ) func init() {
1
package startupshutdown import ( "log" "os" "os/exec" "strings" "github.com/mholt/caddy" ) func init() { caddy.RegisterPlugin("startup", caddy.Plugin{Action: Startup}) caddy.RegisterPlugin("shutdown", caddy.Plugin{Action: Shutdown}) } // Startup registers a startup callback to execute during server start. func Startup(c *caddy.Controller) error { return registerCallback(c, c.OnFirstStartup) } // Shutdown registers a shutdown callback to execute during server stop. func Shutdown(c *caddy.Controller) error { return registerCallback(c, c.OnFinalShutdown) } // registerCallback registers a callback function to execute by // using c to parse the directive. It registers the callback // to be executed using registerFunc. func registerCallback(c *caddy.Controller, registerFunc func(func() error)) error { var funcs []func() error for c.Next() { args := c.RemainingArgs() if len(args) == 0 { return c.ArgErr() } nonblock := false if len(args) > 1 && args[len(args)-1] == "&" { // Run command in background; non-blocking nonblock = true args = args[:len(args)-1] } command, args, err := caddy.SplitCommandAndArgs(strings.Join(args, " ")) if err != nil { return c.Err(err.Error()) } fn := func() error { cmd := exec.Command(command, args...) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if nonblock { log.Printf("[INFO] Nonblocking Command:\"%s %s\"", command, strings.Join(args, " ")) return cmd.Start() } log.Printf("[INFO] Blocking Command:\"%s %s\"", command, strings.Join(args, " ")) return cmd.Run() } funcs = append(funcs, fn) } return c.OncePerServerBlock(func() error { for _, fn := range funcs { registerFunc(fn) } return nil }) }
1
11,373
We added a library to generate a UUID for the request_id directive ("github.com/nu7hatch/gouuid"); can we use the same one for what you want rather than adding another dependency? Or, if the Google one is better, can we remove the nu7hatch library?
caddyserver-caddy
go
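To make the dependency question above concrete, here is a minimal, hypothetical Go sketch (not taken from either Caddy change) showing how a random v4 UUID is produced with each of the two libraries the comment mentions; only the import paths and constructor names come from the libraries' public APIs, everything else is illustrative.

package main

import (
	"fmt"

	googleuuid "github.com/google/uuid"
	nu7hatch "github.com/nu7hatch/gouuid"
)

func main() {
	// github.com/google/uuid: New() returns a UUID value directly
	// (NewRandom() is the error-returning variant).
	g := googleuuid.New()
	fmt.Println("google/uuid:    ", g.String())

	// github.com/nu7hatch/gouuid: NewV4() returns (*UUID, error).
	n, err := nu7hatch.NewV4()
	if err != nil {
		panic(err)
	}
	fmt.Println("nu7hatch/gouuid:", n.String())
}

For generating random identifiers the two are functionally interchangeable, so the review comment is really about settling on a single dependency rather than about capability.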
@@ -161,16 +161,13 @@ func (p *Pinger) PingProvider(ip string, localPorts, remotePorts []int, proxyPor return localPort, remotePort, err } -func (p *Pinger) ping(conn *net.UDPConn, ttl int, stop <-chan struct{}) error { - // Windows detects that 1 TTL is too low and throws an exception during send - i := 0 - +func (p *Pinger) ping(conn *net.UDPConn, remoteAddr *net.UDPAddr, ttl int, stop <-chan struct{}) error { err := ipv4.NewConn(conn).SetTTL(ttl) if err != nil { return errors.Wrap(err, "pinger setting ttl failed") } - for { + for i := 1; time.Duration(i)*p.pingConfig.Interval < p.pingConfig.Timeout; i++ { select { case <-stop: return nil
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package traversal import ( "fmt" "net" "sync" "time" "github.com/mysteriumnetwork/node/core/port" "github.com/mysteriumnetwork/node/eventbus" "github.com/mysteriumnetwork/node/nat/event" "github.com/pkg/errors" "github.com/rs/zerolog/log" "golang.org/x/net/ipv4" ) // StageName represents hole-punching stage of NAT traversal const StageName = "hole_punching" var ( errNATPunchAttemptStopped = errors.New("NAT punch attempt stopped") errNATPunchAttemptTimedOut = errors.New("NAT punch attempt timed out") ) // NATProviderPinger pings provider and optionally hands off connection to consumer proxy. type NATProviderPinger interface { PingProvider(ip string, localPorts, remotePorts []int, proxyPort int) (localPort, remotePort int, err error) } // NATPinger is responsible for pinging nat holes type NATPinger interface { NATProviderPinger PingTarget(Params) BindServicePort(key string, port int) Start() Stop() SetProtectSocketCallback(SocketProtect func(socket int) bool) Valid() bool } // PingConfig represents NAT pinger config. type PingConfig struct { Interval time.Duration Timeout time.Duration } // DefaultPingConfig returns default NAT pinger config. 
func DefaultPingConfig() *PingConfig { return &PingConfig{ Interval: 200 * time.Millisecond, Timeout: 10 * time.Second, } } // Pinger represents NAT pinger structure type Pinger struct { pingConfig *PingConfig pingTarget chan Params stop chan struct{} stopNATProxy chan struct{} once sync.Once natProxy *natProxy eventPublisher eventbus.Publisher } // PortSupplier provides port needed to run a service on type PortSupplier interface { Acquire() (port.Port, error) } // NewPinger returns Pinger instance func NewPinger(pingConfig *PingConfig, publisher eventbus.Publisher) NATPinger { return &Pinger{ pingConfig: pingConfig, pingTarget: make(chan Params), stop: make(chan struct{}), stopNATProxy: make(chan struct{}), natProxy: newNATProxy(), eventPublisher: publisher, } } // Params contains session parameters needed to NAT ping remote peer type Params struct { RemotePorts []int LocalPorts []int IP string ProxyPortMappingKey string } // Start starts NAT pinger and waits for PingTarget to ping func (p *Pinger) Start() { log.Info().Msg("Starting a NAT pinger") for { select { case <-p.stop: log.Info().Msg("NAT pinger is stopped") return case params := <-p.pingTarget: go p.pingTargetConsumer(params) } } } // Stop stops pinger loop func (p *Pinger) Stop() { p.once.Do(func() { close(p.stopNATProxy) close(p.stop) }) } // PingProvider pings provider determined by destination provided in sessionConfig func (p *Pinger) PingProvider(ip string, localPorts, remotePorts []int, proxyPort int) (localPort, remotePort int, err error) { log.Info().Msg("NAT pinging to provider") stop := make(chan struct{}) defer close(stop) conn, err := p.multiPing(ip, localPorts, remotePorts, 128, stop) if err != nil { log.Err(err).Msg("Failed to ping remote peer") return 0, 0, err } if addr, ok := conn.LocalAddr().(*net.UDPAddr); ok { localPort = addr.Port } if addr, ok := conn.RemoteAddr().(*net.UDPAddr); ok { remotePort = addr.Port } if proxyPort > 0 { consumerAddr := fmt.Sprintf("127.0.0.1:%d", proxyPort) log.Info().Msg("Handing connection to consumer NATProxy: " + consumerAddr) p.stopNATProxy = p.natProxy.consumerHandOff(consumerAddr, conn) } else { conn.Close() } return localPort, remotePort, err } func (p *Pinger) ping(conn *net.UDPConn, ttl int, stop <-chan struct{}) error { // Windows detects that 1 TTL is too low and throws an exception during send i := 0 err := ipv4.NewConn(conn).SetTTL(ttl) if err != nil { return errors.Wrap(err, "pinger setting ttl failed") } for { select { case <-stop: return nil case <-p.stop: return nil case <-time.After(p.pingConfig.Interval): log.Debug().Msgf("Pinging %s from %s...", conn.RemoteAddr().String(), conn.LocalAddr().String()) _, err := conn.Write([]byte("continuously pinging to " + conn.RemoteAddr().String())) if err != nil { p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildFailureEvent(StageName, err)) return errors.Wrap(err, "pinging request failed") } i++ if time.Duration(i)*p.pingConfig.Interval > p.pingConfig.Timeout { err := errors.New("timeout while waiting for ping ack, trying to continue") p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildFailureEvent(StageName, err)) return err } } } } func (p *Pinger) getConnection(ip string, remotePort int, localPort int) (*net.UDPConn, error) { udpAddr, err := net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", ip, remotePort)) if err != nil { return nil, err } log.Info().Msg("Remote socket: " + udpAddr.String()) conn, err := net.DialUDP("udp", &net.UDPAddr{Port: localPort}, udpAddr) if err != nil { return nil, err } 
log.Info().Msg("Local socket: " + conn.LocalAddr().String()) return conn, nil } // PingTarget relays ping target address data func (p *Pinger) PingTarget(target Params) { select { case p.pingTarget <- target: return case <-p.stop: return // do not block if ping target is not received case <-time.After(100 * time.Millisecond): log.Info().Msgf("Ping target timeout: %v", target) return } } // BindServicePort register service port to forward connection to func (p *Pinger) BindServicePort(key string, port int) { p.natProxy.registerServicePort(key, port) } func (p *Pinger) pingReceiver(conn *net.UDPConn, stop <-chan struct{}) error { timeout := time.After(p.pingConfig.Timeout) buf := make([]byte, bufferLen) for { select { case <-timeout: return errNATPunchAttemptTimedOut case <-stop: return errNATPunchAttemptStopped case <-p.stop: return errNATPunchAttemptStopped default: // Add read deadline to prevent possible conn.Read hang when remote peer doesn't send ping ack. conn.SetReadDeadline(time.Now().Add(p.pingConfig.Timeout * 2)) n, err := conn.Read(buf) // Reset read deadline. conn.SetReadDeadline(time.Time{}) if err != nil || n == 0 { log.Error().Err(err).Msgf("Failed to read remote peer: %s - attempting to continue", conn.RemoteAddr().String()) continue } log.Info().Msgf("Remote peer data received: %s, len: %d", string(buf[:n]), n) return nil } } } // SetProtectSocketCallback sets socket protection callback to be called when new socket is created in consumer NATProxy func (p *Pinger) SetProtectSocketCallback(socketProtect func(socket int) bool) { p.natProxy.setProtectSocketCallback(socketProtect) } // Valid returns that this pinger is a valid pinger func (p *Pinger) Valid() bool { return true } func (p *Pinger) pingTargetConsumer(params Params) { log.Info().Msgf("Pinging peer with: %+v", params) if params.ProxyPortMappingKey == "" { log.Error().Msg("Service proxy connection port mapping key is missing") return } stop := make(chan struct{}) defer close(stop) conn, err := p.multiPing(params.IP, params.LocalPorts, params.RemotePorts, 2, stop) if err != nil { log.Err(err).Msg("Failed to ping remote peer") return } err = ipv4.NewConn(conn).SetTTL(128) if err != nil { log.Error().Err(err).Msg("Failed to set connection TTL") return } conn.Write([]byte("Using this connection")) p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildSuccessfulEvent(StageName)) log.Info().Msg("Ping received, waiting for a new connection") go p.natProxy.handOff(params.ProxyPortMappingKey, conn) } func (p *Pinger) multiPing(ip string, localPorts, remotePorts []int, initialTTL int, stop <-chan struct{}) (*net.UDPConn, error) { if len(localPorts) != len(remotePorts) { return nil, errors.New("number of local and remote ports does not match") } type res struct { conn *net.UDPConn err error } ch := make(chan res, len(localPorts)) for i := range localPorts { go func(i int) { conn, err := p.singlePing(ip, localPorts[i], remotePorts[i], initialTTL+i, stop) ch <- res{conn, err} }(i) } // First response wins. Other are not important. r := <-ch return r.conn, r.err } func (p *Pinger) singlePing(remoteIP string, localPort, remotePort, ttl int, stop <-chan struct{}) (*net.UDPConn, error) { conn, err := p.getConnection(remoteIP, remotePort, localPort) if err != nil { return nil, errors.Wrap(err, "failed to get connection") } go func() { err := p.ping(conn, ttl, stop) if err != nil { log.Warn().Err(err).Msg("Error while pinging") } }() err = p.pingReceiver(conn, stop) return conn, errors.Wrap(err, "ping receiver error") }
1
15,864
Shouldn't this be `i * Interval`? You're multiplying nanos with a Duration, and I'm not sure what the result is :)
mysteriumnetwork-node
go
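As a side note on the `time.Duration(i) * p.pingConfig.Interval` question above, the following self-contained Go sketch (not part of the patch) shows what that expression evaluates to: time.Duration is an int64 count of nanoseconds, so converting the loop counter and multiplying is numerically the same as repeating the interval i times, while a plain int * time.Duration would not compile at all.

package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 200 * time.Millisecond
	timeout := 10 * time.Second

	// time.Duration is an int64 number of nanoseconds, so Duration(i)*interval
	// yields i repetitions of interval even though it reads like "nanos * Duration".
	for i := 1; i <= 3; i++ {
		fmt.Println(i, time.Duration(i)*interval) // 200ms, 400ms, 600ms
	}

	// The loop condition in the patch therefore behaves like "i*Interval < Timeout":
	fmt.Println(time.Duration(49)*interval < timeout) // true  (9.8s < 10s)
	fmt.Println(time.Duration(50)*interval < timeout) // false (10s is not < 10s)

	// Writing `i * interval` with an int variable i does not compile:
	// "invalid operation: mismatched types int and time.Duration".
}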
@@ -2513,7 +2513,7 @@ stack_alloc(size_t size, byte *min_addr) #else /* For UNIX we just mark it as inaccessible. */ if (!DYNAMO_OPTION(guard_pages)) - make_unwritable(guard, PAGE_SIZE); + set_protection(guard, PAGE_SIZE, MEMPROT_READ); #endif }
1
/* ********************************************************** * Copyright (c) 2010-2017 Google, Inc. All rights reserved. * Copyright (c) 2001-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2001 Hewlett-Packard Company */ /* * heap.c - heap manager */ #include "globals.h" #include <string.h> /* for memcpy */ #include <limits.h> #include "fragment.h" /* for struct sizes */ #include "link.h" /* for struct sizes */ #include "instr.h" /* for struct sizes */ #include "fcache.h" /* fcache_low_on_memory */ #ifdef DEBUG # include "hotpatch.h" /* To handle leak for case 9593. */ #endif #ifdef CLIENT_INTERFACE # include "instrument.h" #endif #ifdef HEAP_ACCOUNTING # ifndef DEBUG # error HEAP_ACCOUNTING requires DEBUG # endif #endif #ifdef DEBUG_MEMORY /* on by default but higher than general asserts */ # define CHKLVL_MEMFILL CHKLVL_DEFAULT #endif extern bool vm_areas_exited; /*************************************************************************** * we cannot use malloc in the middle of interpreting the client program * because we could be in the middle of interpreting malloc, which is not * always reentrant * * We have a virtual memory manager which makes sure memory is * reserved within the application address space so that we don't have * to fight with the application. We call os_heap_reserve to allocate * virtual space in a single consecutive region. We later use * os_heap_commit to get committed memory in large chunks and manage * the chunks using a simple scheme of free lists of different sizes. * The virtual memory manager has to store out of band information about * used and free blocks, since of course there is no real memory to use. * The chunks (heap units) store in band extra information both for * used and free. 
However, in the allocated blocks within a unit we * don't need to store any information since heap_free passes in the * size; we store the next pointers for the free lists at the start of * the free blocks themselves. We have one large reservation for most of * our allocations, and yet another for allocations that we do not * plan on ever freeing up on detach - the only unavoidable tombstones * are those for thread private code system calls that may be stuck on * callbacks. In case we run out of reserved memory we do fall back * on requests from the OS, but any of these may fail if we are * competing with the application. * * looking at dynamo behavior as of Jan 2001, most heap_alloc requests are * for < 128 bytes, very few for larger, so we have a bunch of fixed-size * blocks of small sizes * * the UINT_MAX size is a variable-length block, we keep one byte to store * the size (again storing the next pointer when free at the start of * what we pass to the user) */ static const uint BLOCK_SIZES[] = { 8, /* for instr bits */ #ifndef X64 /* for x64 future_fragment_t is 24 bytes (could be 20 if we could put flags last) */ sizeof(future_fragment_t), /* 12 (24 x64) */ #endif /* we have a lot of size 16 requests for IR but they are transient */ 24, /* fcache empties and vm_area_t are now 20, vm area extras still 24 */ /* 40 dbg / 36 rel: */ ALIGN_FORWARD(sizeof(fragment_t) + sizeof(indirect_linkstub_t), HEAP_ALIGNMENT), #if defined(X64) || defined(CUSTOM_EXIT_STUBS) sizeof(instr_t), /* 64 (96 x64) */ sizeof(fragment_t) + sizeof(direct_linkstub_t) + sizeof(cbr_fallthrough_linkstub_t), /* 68 dbg / 64 rel, 112 x64 */ /* all other bb/trace buckets are 8 larger but in same order */ #else sizeof(fragment_t) + sizeof(direct_linkstub_t) + sizeof(cbr_fallthrough_linkstub_t), /* 60 dbg / 56 rel */ sizeof(instr_t), /* 64 */ #endif /* we keep this bucket even though only 10% or so of normal bbs * hit this. 
* FIXME: release == instr_t here so a small waste when walking buckets */ ALIGN_FORWARD(sizeof(fragment_t) + 2*sizeof(direct_linkstub_t), HEAP_ALIGNMENT), /* 68 dbg / 64 rel (128 x64) */ ALIGN_FORWARD(sizeof(trace_t) + 2*sizeof(direct_linkstub_t) + sizeof(uint), HEAP_ALIGNMENT), /* 80 dbg / 76 rel (148 x64 => 152) */ /* FIXME: measure whether should put in indirect mixes as well */ ALIGN_FORWARD(sizeof(trace_t) + 3*sizeof(direct_linkstub_t) + sizeof(uint), HEAP_ALIGNMENT), /* 96 dbg / 92 rel (180 x64 => 184) */ ALIGN_FORWARD(sizeof(trace_t) + 5*sizeof(direct_linkstub_t) + sizeof(uint), HEAP_ALIGNMENT), /* 128 dbg / 124 rel (244 x64 => 248) */ 256, 512, UINT_MAX /* variable-length */ }; #define BLOCK_TYPES (sizeof(BLOCK_SIZES)/sizeof(uint)) #ifdef DEBUG /* FIXME: would be nice to have these stats per HEAPACCT category */ /* These are ints only b/c we used to do non-atomic adds and wanted to * gracefully handle underflow to negative values */ DECLARE_NEVERPROT_VAR(static int block_total_count[BLOCK_TYPES], {0}); DECLARE_NEVERPROT_VAR(static int block_count[BLOCK_TYPES], {0}); DECLARE_NEVERPROT_VAR(static int block_peak_count[BLOCK_TYPES], {0}); DECLARE_NEVERPROT_VAR(static int block_wasted[BLOCK_TYPES], {0}); DECLARE_NEVERPROT_VAR(static int block_peak_wasted[BLOCK_TYPES], {0}); DECLARE_NEVERPROT_VAR(static int block_align_pad[BLOCK_TYPES], {0}); DECLARE_NEVERPROT_VAR(static int block_peak_align_pad[BLOCK_TYPES], {0}); DECLARE_NEVERPROT_VAR(static bool out_of_vmheap_once, false); #endif /* variable-length: we steal one int for the size */ #define HEADER_SIZE (sizeof(size_t)) /* VARIABLE_SIZE is assignable */ #define VARIABLE_SIZE(p) (*(size_t *)((p)-HEADER_SIZE)) #define MEMSET_HEADER(p, value) VARIABLE_SIZE(p) = HEAP_TO_PTR_UINT(value) #define GET_VARIABLE_ALLOCATION_SIZE(p) (VARIABLE_SIZE(p) + HEADER_SIZE) /* heap is allocated in units * we start out with a small unit, then each additional unit we * need doubles in size, up to a maximum, we default to 32kb initial size * (24kb useful with guard pages), max size defaults to 64kb (56kb useful with * guard pages), we keep the max small to save memory, it doesn't seem to be * perf hit! Though with guard pages we are wasting quite a bit of reserved * (though not committed) space */ /* the only big things global heap is used for are pc sampling * hash table and sideline sampling hash table -- if none of those * are in use, 16KB should be plenty, we default to 32kb since guard * pages are on by default (gives 24kb useful) max size is same as for * normal heap units. */ /* the old defaults were 32kb (usable) for initial thread private and 16kb * (usable) for initial global, changed to simplify the logic for allocating * in multiples of the os allocation granularity. The new defaults prob. * make more sense with the shared cache then the old did anyways. */ /* restrictions - * any guard pages are included in the size, size must be > UNITOVERHEAD * for best performance sizes should be of the form * 2^n * page_size (where n is a positve integer) and max should be a multiple * of the os allocation granularity so that with enough doublings we are * reserving memory in multiples of the allocation granularity and not wasting * any virtual address space (beyond our guard pages) */ #define HEAP_UNIT_MIN_SIZE DYNAMO_OPTION(initial_heap_unit_size) #define HEAP_UNIT_MAX_SIZE INTERNAL_OPTION(max_heap_unit_size) #define GLOBAL_UNIT_MIN_SIZE DYNAMO_OPTION(initial_global_heap_unit_size) #define GUARD_PAGE_ADJUSTMENT (dynamo_options.guard_pages ? 
2 * PAGE_SIZE : 0) /* gets usable space in the unit */ #define UNITROOM(u) ((size_t) (u->end_pc - u->start_pc)) #define UNIT_RESERVED_ROOM(u) (u->reserved_end_pc - u->start_pc) /* we keep the heap_unit_t header at top of the unit, this macro calculates * the committed size of the unit by adding header size to available size */ #define UNIT_COMMIT_SIZE(u) (UNITROOM(u) + sizeof(heap_unit_t)) #define UNIT_RESERVED_SIZE(u) (UNIT_RESERVED_ROOM(u) + sizeof(heap_unit_t)) #define UNIT_ALLOC_START(u) (u->start_pc - sizeof(heap_unit_t)) #define UNIT_GET_START_PC(u) (byte*)(((ptr_uint_t)u) + sizeof(heap_unit_t)) #define UNIT_COMMIT_END(u) (u->end_pc) #define UNIT_RESERVED_END(u) (u->reserved_end_pc) /* gets the allocated size of the unit (reserved size + guard pages) */ #define UNITALLOC(u) (UNIT_RESERVED_SIZE(u) + GUARD_PAGE_ADJUSTMENT) /* gets unit overhead, includes the reserved (guard pages) and committed * (sizeof(heap_unit_t)) portions */ #define UNITOVERHEAD (sizeof(heap_unit_t) + GUARD_PAGE_ADJUSTMENT) /* any alloc request larger than this needs a special unit */ #define MAXROOM (HEAP_UNIT_MAX_SIZE - UNITOVERHEAD) /* maximum valid allocation (to guard against internal integer overflows) */ #define MAX_VALID_HEAP_ALLOCATION INT_MAX /* thread-local heap structure * this struct is kept at top of unit itself, not in separate allocation */ typedef struct _heap_unit_t { heap_pc start_pc; /* start address of heap storage */ heap_pc end_pc; /* open-ended end address of heap storage */ heap_pc cur_pc; /* open-ended current end of allocated storage */ heap_pc reserved_end_pc; /* open-ended end of reserved (not nec committed) memory */ bool in_vmarea_list; /* perf opt for delayed batch vmarea updating */ #ifdef DEBUG int id; /* # of this unit */ #endif struct _heap_unit_t *next_local; /* used to link thread's units */ struct _heap_unit_t *next_global; /* used to link all units */ struct _heap_unit_t *prev_global; /* used to link all units */ } heap_unit_t; #ifdef HEAP_ACCOUNTING typedef struct _heap_acct_t { size_t alloc_reuse[ACCT_LAST]; size_t alloc_new[ACCT_LAST]; size_t cur_usage[ACCT_LAST]; size_t max_usage[ACCT_LAST]; size_t max_single[ACCT_LAST]; uint num_alloc[ACCT_LAST]; } heap_acct_t; #endif /* FIXME (case 6336): rename to heap_t: * a heap_t is a collection of units with the same properties * to reflect that this is used for more than just thread-private memory. * Also rename the "tu" vars to "h" */ typedef struct _thread_units_t { heap_unit_t *top_unit; /* start of linked list of heap units */ heap_unit_t *cur_unit; /* current unit in heap list */ heap_pc free_list[BLOCK_TYPES]; #ifdef DEBUG int num_units; /* total # of heap units */ #endif dcontext_t *dcontext; /* back pointer to owner */ bool writable; /* remember state of heap protection */ #ifdef HEAP_ACCOUNTING heap_acct_t acct; #endif } thread_units_t; /* We separate out heap memory used for fragments, linking, and vmarea multi-entries * both to enable resetting memory and for safety for unlink flushing in the presence * of clean calls out of the cache that might allocate IR memory (which does not * use nonpersistent heap). Any client actions that involve fragments or linking * should require couldbelinking status, which makes them safe wrt unlink flushing. * Xref DrMi#1791. 
*/ #define SEPARATE_NONPERSISTENT_HEAP() \ (DYNAMO_OPTION(enable_reset) IF_CLIENT_INTERFACE(|| true)) /* per-thread structure: */ typedef struct _thread_heap_t { thread_units_t *local_heap; thread_units_t *nonpersistent_heap; } thread_heap_t; /* global, unique thread-shared structure: * FIXME: give this name to thread_units_t, and name this AllHeapUnits */ typedef struct _heap_t { heap_unit_t *units; /* list of all allocated units */ heap_unit_t *dead; /* list of deleted units ready for re-allocation */ /* FIXME: num_dead duplicates stats->heap_num_free, but we want num_dead * for release build too, so it's separate...can we do better? */ uint num_dead; } heap_t; /* no synch needed since only written once */ static bool heap_exiting = false; #ifdef DEBUG DECLARE_NEVERPROT_VAR(static bool ever_beyond_vmm, false); #endif /* Lock used only for managing heap units, not for normal thread-local alloc. * Must be recursive due to circular dependencies between vmareas and global heap. * Furthermore, always grab dynamo_vm_areas_lock() before grabbing this lock, * to make DR areas update and heap alloc/free atomic! */ DECLARE_CXTSWPROT_VAR(static recursive_lock_t heap_unit_lock, INIT_RECURSIVE_LOCK(heap_unit_lock)); /* N.B.: if these two locks are ever owned at the same time, the convention is * that global_alloc_lock MUST be grabbed first, to avoid deadlocks */ /* separate lock for global heap access to avoid contention between local unit * creation and global heap alloc * must be recursive so that heap_vmareas_synch_units can hold it and heap_unit_lock * up front to avoid deadlocks, and still allow vmareas to global_alloc -- * BUT we do NOT want global_alloc() to be able to recurse! * FIXME: either find a better solution to the heap_vmareas_synch_units deadlock * that is as efficient, or find a way to assert that the only recursion is * from heap_vmareas_synch_units to global_alloc */ DECLARE_CXTSWPROT_VAR(static recursive_lock_t global_alloc_lock, INIT_RECURSIVE_LOCK(global_alloc_lock)); #if defined(DEBUG) && defined(HEAP_ACCOUNTING) && defined(HOT_PATCHING_INTERFACE) static int get_special_heap_header_size(void); #endif vm_area_vector_t *landing_pad_areas; /* PR 250294 */ #ifdef WINDOWS /* i#939: we steal space from ntdll's +rx segment */ static app_pc lpad_temp_writable_start; static size_t lpad_temp_writable_size; static void release_landing_pad_mem(void); #endif /* Indicates whether should back out of a global alloc/free and grab the * DR areas lock first, to retry */ static bool safe_to_allocate_or_free_heap_units() { return ((!self_owns_recursive_lock(&global_alloc_lock) && !self_owns_recursive_lock(&heap_unit_lock)) || self_owns_dynamo_vm_area_lock()); } /* indicates a dynamo vm area remove was delayed * protected by the heap_unit_lock */ DECLARE_FREQPROT_VAR(static bool dynamo_areas_pending_remove, false); #ifdef HEAP_ACCOUNTING const char * whichheap_name[] = { /* max length for aligned output is length of "BB Fragments" */ "BB Fragments", "Coarse Links", "Future Frag", "Frag Tables", "IBL Tables", "Traces", "FC Empties", "Vm Multis", "IR", "RCT Tables", "VM Areas", "Symbols", # ifdef SIDELINE "Sideline", # endif "TH Counter", "Tombstone", "Hot Patching", "Thread Mgt", "Memory Mgt", "Stats", "SpecialHeap", # ifdef CLIENT_INTERFACE "Client", # endif "Lib Dup", "Clean Call", /* NOTE: Add your heap name here */ "Other", }; /* Since using a lock for these stats adds a lot of contention, we * follow a two-pronged strategy: * 1) For accurate stats we add a thread's final stats to the global 
only * when it is cleaned up. But, this prevents global stats from being * available in the middle of a run or if a run is not cleaned up nicely. * 2) We have a set of heap_accounting stats for incremental global stats * that are available at any time, yet racy and so may be off a little. */ /* all set to 0 is only initialization we need */ DECLARE_NEVERPROT_VAR(static thread_units_t global_racy_units, {0}); /* macro to get the type abstracted */ # define ACCOUNT_FOR_ALLOC_HELPER(type, tu, which, alloc_sz, ask_sz) do { \ (tu)->acct.type[which] += alloc_sz; \ (tu)->acct.num_alloc[which]++; \ (tu)->acct.cur_usage[which] += alloc_sz; \ if ((tu)->acct.cur_usage[which] > (tu)->acct.max_usage[which]) \ (tu)->acct.max_usage[which] = (tu)->acct.cur_usage[which]; \ if (ask_sz > (tu)->acct.max_single[which]) \ (tu)->acct.max_single[which] = ask_sz; \ } while (0) # define ACCOUNT_FOR_ALLOC(type, tu, which, alloc_sz, ask_sz) do { \ STATS_ADD_PEAK(heap_claimed, alloc_sz); \ ACCOUNT_FOR_ALLOC_HELPER(type, tu, which, alloc_sz, ask_sz); \ ACCOUNT_FOR_ALLOC_HELPER(type, &global_racy_units, which, \ alloc_sz, ask_sz); \ } while (0) # define ACCOUNT_FOR_FREE(tu, which, size) do { \ STATS_SUB(heap_claimed, (size)); \ (tu)->acct.cur_usage[which] -= size; \ global_racy_units.acct.cur_usage[which] -= size; \ } while (0) #else # define ACCOUNT_FOR_ALLOC(type, tu, which, alloc_sz, ask_sz) # define ACCOUNT_FOR_FREE(tu, which, size) #endif typedef byte *vm_addr_t; #ifdef X64 /* designates the closed interval within which we must allocate DR heap space */ static byte *heap_allowable_region_start = (byte *)PTR_UINT_0; static byte *heap_allowable_region_end = (byte *)POINTER_MAX; /* used only to protect read/write access to the must_reach_* static variables in * request_region_be_heap_reachable() */ DECLARE_CXTSWPROT_VAR(static mutex_t request_region_be_heap_reachable_lock, INIT_LOCK_FREE(request_region_be_heap_reachable_lock)); /* Request that the supplied region be 32bit offset reachable from the DR heap. Should * be called before vmm_heap_init() so we can place the DR heap to meet these constraints. * Can also be called post vmm_heap_init() but at that point acts as an assert that the * supplied region is reachable since the heap is already reserved. * * Must be called at least once up front, for the -heap_in_lower_4GB code here * to kick in! 
*/ void request_region_be_heap_reachable(byte *start, size_t size) { /* initialize so will be overridden on first call; protected by the * request_region_be_heap_reachable_lock */ static byte *must_reach_region_start = (byte *)POINTER_MAX; static byte *must_reach_region_end = (byte *)PTR_UINT_0; /* closed */ LOG(GLOBAL, LOG_HEAP, 2, "Adding must-be-reachable-from-heap region "PFX"-"PFX"\n" "Existing must-be-reachable region "PFX"-"PFX"\n" "Existing allowed range "PFX"-"PFX"\n", start, start+size, must_reach_region_start, must_reach_region_end, heap_allowable_region_start, heap_allowable_region_end); ASSERT(!POINTER_OVERFLOW_ON_ADD(start, size)); ASSERT(size > 0); mutex_lock(&request_region_be_heap_reachable_lock); if (start < must_reach_region_start) { byte *allowable_end_tmp; SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); must_reach_region_start = start; allowable_end_tmp = REACHABLE_32BIT_END(must_reach_region_start, must_reach_region_end); /* PR 215395 - add in absolute address reachability */ if (DYNAMO_OPTION(heap_in_lower_4GB) && allowable_end_tmp > ( byte *)POINTER_MAX_32BIT) { allowable_end_tmp = (byte *)POINTER_MAX_32BIT; } /* Write assumed to be atomic so we don't have to hold a lock to use * heap_allowable_region_end. */ heap_allowable_region_end = allowable_end_tmp; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } if (start + size - 1 > must_reach_region_end) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); must_reach_region_end = start + size - 1; /* closed */ /* Write assumed to be atomic so we don't have to hold a lock to use * heap_allowable_region_start. */ heap_allowable_region_start = REACHABLE_32BIT_START(must_reach_region_start, must_reach_region_end); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } ASSERT(must_reach_region_start <= must_reach_region_end); /* correctness check */ /* verify can be addressed absolutely (if required), correctness check */ ASSERT(!DYNAMO_OPTION(heap_in_lower_4GB) || heap_allowable_region_end <= (byte *)POINTER_MAX_32BIT); mutex_unlock(&request_region_be_heap_reachable_lock); LOG(GLOBAL, LOG_HEAP, 1, "Added must-be-reachable-from-heap region "PFX"-"PFX"\n" "New must-be-reachable region "PFX"-"PFX"\n" "New allowed range "PFX"-"PFX"\n", start, start+size, must_reach_region_start, must_reach_region_end, heap_allowable_region_start, heap_allowable_region_end); /* Reachability checks (xref PR 215395, note since we currently can't directly * control where DR/client dlls are loaded these could fire if rebased). */ ASSERT(heap_allowable_region_start <= must_reach_region_start && "x64 reachability contraints not satisfiable"); ASSERT(must_reach_region_end <= heap_allowable_region_end && "x64 reachability contraints not satisfiable"); /* Handle release build failure. */ if (heap_allowable_region_start > must_reach_region_start || must_reach_region_end > heap_allowable_region_end) { /* FIXME - in a released product we may want to detach or something else less * drastic than triggering a FATAL_USAGE_ERROR. */ FATAL_USAGE_ERROR(HEAP_CONTRAINTS_UNSATISFIABLE, 2, get_application_name(), get_application_pid()); } } void vmcode_get_reachable_region(byte **region_start OUT, byte **region_end OUT) { /* We track sub-page for more accuracy on additional constraints, and * align when asked about it. 
*/ if (region_start != NULL) *region_start = (byte *) ALIGN_FORWARD(heap_allowable_region_start, PAGE_SIZE); if (region_end != NULL) *region_end = (byte *) ALIGN_BACKWARD(heap_allowable_region_end, PAGE_SIZE); } #endif /* forward declarations of static functions */ static void threadunits_init(dcontext_t *dcontext, thread_units_t *tu, size_t size); /* dcontext only used for debugging */ static void threadunits_exit(thread_units_t *tu, dcontext_t *dcontext); static void *common_heap_alloc(thread_units_t *tu, size_t size HEAPACCT(which_heap_t which)); static bool common_heap_free(thread_units_t *tu, void *p, size_t size HEAPACCT(which_heap_t which)); static void release_real_memory(void *p, size_t size, bool remove_vm, which_vmm_t which); static void release_guarded_real_memory(vm_addr_t p, size_t size, bool remove_vm, bool guarded, which_vmm_t which); typedef enum { /* I - Init, Interop - first allocation failed * check for incompatible kernel drivers */ OOM_INIT = 0x1, /* R - Reserve - out of virtual reservation * * increase -vm_size to reserve more memory */ OOM_RESERVE = 0x2, /* C - Commit - systemwide page file limit, or current process job limit hit * Increase pagefile size, check for memory leak in any application. * * FIXME: possible automatic actions * if systemwide failure we may want to wait if transient * FIXME: if in a job latter we want to detect and just die * (though after freeing as much memory as we can) */ OOM_COMMIT = 0x4, /* E - Extending Commit - same reasons as Commit * as a possible workaround increasing -heap_commit_increment * may make expose us to commit-ing less frequently, * On the other hand committing smaller chunks has a higher * chance of getting through when there is very little memory. * * FIXME: not much more informative than OOM_COMMIT */ OOM_EXTEND = 0x8, } oom_source_t; static void report_low_on_memory(oom_source_t source, heap_error_code_t os_error_code); enum { /* maximum 512MB for 32-bit, 1GB for 64-bit */ MAX_VMM_HEAP_UNIT_SIZE = IF_X64_ELSE(1024*1024*1024, 512*1024*1024), /* We should normally have only one large unit, so this is in fact * the maximum we should count on in one process */ }; /* minimum will be used only if an invalid option is set */ #define MIN_VMM_HEAP_UNIT_SIZE DYNAMO_OPTION(vmm_block_size) typedef struct { vm_addr_t start_addr; /* base virtual address */ vm_addr_t end_addr; /* noninclusive virtual memory range [start,end) */ vm_addr_t alloc_start; /* base allocation virtual address */ size_t alloc_size; /* allocation size */ /* for 64-bit do we want to shift to size_t to allow a larger region? * if so must update the bitmap_t routines */ uint num_blocks; /* total number of blocks in virtual allocation */ mutex_t lock; /* write access to the rest of the fields is protected */ /* We make an assumption about the bitmap_t implementation being static therefore we don't grab locks on read accesses. Anyways, currently the bitmap_t is used with no write intent only for ASSERTs. */ uint num_free_blocks; /* currently free blocks */ /* Bitmap uses 2KB static data for granularity 64KB and static maximum 1GB on Windows, * and 32KB on Linux where granularity is 4KB. These amounts are halved for * 32-bit, so 1KB Windows and 16KB Linux. 
*/ /* Since we expect only two of these, for now it is ok for users to have static max rather than dynamically allocating with exact size - however this field is left last in the structure in case we do want to save some memory */ bitmap_element_t blocks[BITMAP_INDEX(MAX_VMM_HEAP_UNIT_SIZE / MIN_VMM_BLOCK_SIZE)]; } vm_heap_t; /* We keep our heap management structs on the heap for selfprot (case 8074). * Note that we do have static structs for bootstrapping and we later move * the data here. */ typedef struct _heap_management_t { /* high-level management */ /* we reserve only a single vm_heap_t for guaranteed allocation, * we fall back to OS when run out of reservation space */ vm_heap_t vmheap; heap_t heap; /* thread-shared heaps: */ thread_units_t global_units; thread_units_t global_nonpersistent_units; bool global_heap_writable; thread_units_t global_unprotected_units; } heap_management_t; /* For bootstrapping until we can allocate our real heapmgt (case 8074). * temp_heapmgt.lock is initialized in vmm_heap_unit_init(). */ static heap_management_t temp_heapmgt; static heap_management_t *heapmgt = &temp_heapmgt; /* initial value until alloced */ static bool vmm_heap_exited = false; /* FIXME: used only to thwart stack_free from trying, should change the interface for the last stack */ static inline uint vmm_addr_to_block(vm_heap_t *vmh, vm_addr_t p) { ASSERT(CHECK_TRUNCATE_TYPE_uint((p - vmh->start_addr) / DYNAMO_OPTION(vmm_block_size))); return (uint) ((p - vmh->start_addr) / DYNAMO_OPTION(vmm_block_size)); } static inline vm_addr_t vmm_block_to_addr(vm_heap_t *vmh, uint block) { ASSERT(block >=0 && block < vmh->num_blocks); return (vm_addr_t)(vmh->start_addr + block * DYNAMO_OPTION(vmm_block_size)); } static bool vmm_in_same_block(vm_addr_t p1, vm_addr_t p2) { return vmm_addr_to_block(&heapmgt->vmheap, p1) == vmm_addr_to_block(&heapmgt->vmheap, p2); } #if defined(DEBUG) && defined(INTERNAL) static void vmm_dump_map(vm_heap_t *vmh) { uint i; bitmap_element_t *b = vmh->blocks; uint bitmap_size = vmh->num_blocks; uint last_i = 0; bool is_used = bitmap_test(b, 0) == 0; LOG(GLOBAL, LOG_HEAP, 3, "vmm_dump_map("PFX")\n", vmh); /* raw dump first - if you really want binary dump use windbg's dyd */ DOLOG(4, LOG_HEAP, { dump_buffer_as_bytes(GLOBAL, b, BITMAP_INDEX(bitmap_size)*sizeof(bitmap_element_t), DUMP_RAW|DUMP_ADDRESS); }); LOG(GLOBAL, LOG_HEAP, 1, "\nvmm_dump_map("PFX") virtual regions\n", vmh); # define VMM_DUMP_MAP_LOG(i, last_i) \ LOG(GLOBAL, LOG_HEAP, 1, PFX"-"PFX" size=%d %s\n", vmm_block_to_addr(vmh, last_i), \ vmm_block_to_addr(vmh, i-1) + DYNAMO_OPTION(vmm_block_size) - 1, \ (i-last_i)*DYNAMO_OPTION(vmm_block_size), \ is_used ? 
"reserved" : "free"); for (i=0; i < bitmap_size; i++) { /* start counting at free/used boundaries */ if (is_used != (bitmap_test(b, i) == 0)) { VMM_DUMP_MAP_LOG(i, last_i); is_used = (bitmap_test(b, i) == 0); last_i = i; } } VMM_DUMP_MAP_LOG(bitmap_size, last_i); } #endif /* DEBUG */ void print_vmm_heap_data(file_t outf) { mutex_lock(&heapmgt->vmheap.lock); print_file(outf, "VM heap: addr range "PFX"--"PFX", # free blocks %d\n", heapmgt->vmheap.start_addr, heapmgt->vmheap.end_addr, heapmgt->vmheap.num_free_blocks); mutex_unlock(&heapmgt->vmheap.lock); } static inline void vmm_heap_initialize_unusable(vm_heap_t *vmh) { vmh->start_addr = vmh->end_addr = NULL; vmh->num_free_blocks = vmh->num_blocks = 0; } static void vmm_heap_unit_init(vm_heap_t *vmh, size_t size) { ptr_uint_t preferred = 0; heap_error_code_t error_code = 0; ASSIGN_INIT_LOCK_FREE(vmh->lock, vmh_lock); size = ALIGN_FORWARD(size, DYNAMO_OPTION(vmm_block_size)); ASSERT(size <= MAX_VMM_HEAP_UNIT_SIZE); vmh->alloc_size = size; vmh->start_addr = NULL; if (size == 0) { vmm_heap_initialize_unusable(&heapmgt->vmheap); return; } #ifdef X64 /* -heap_in_lower_4GB takes top priority and has already set heap_allowable_region_*. * Next comes -vm_base_near_app. */ if (DYNAMO_OPTION(vm_base_near_app)) { /* Required for STATIC_LIBRARY: must be near app b/c clients are there. * Non-static: still a good idea for fewer rip-rel manglings. * Asking for app base means we'll prefer before the app, which * has less of an impact on its heap. */ app_pc app_base = get_application_base(); app_pc app_end = get_application_end(); /* To avoid ignoring -vm_base and -vm_max_offset we fall through to that * code if the app base is near -vm_base. */ if (!REL32_REACHABLE(app_base, (app_pc)DYNAMO_OPTION(vm_base)) || !REL32_REACHABLE(app_base, (app_pc)DYNAMO_OPTION(vm_base) + DYNAMO_OPTION(vm_max_offset))) { byte *reach_base = MAX(REACHABLE_32BIT_START(app_base, app_end), heap_allowable_region_start); byte *reach_end = MIN(REACHABLE_32BIT_END(app_base, app_end), heap_allowable_region_end); if (reach_base < reach_end) { vmh->alloc_start = os_heap_reserve_in_region ((void *)ALIGN_FORWARD(reach_base, PAGE_SIZE), (void *)ALIGN_BACKWARD(reach_end, PAGE_SIZE), size + DYNAMO_OPTION(vmm_block_size), &error_code, true/*+x*/); if (vmh->alloc_start != NULL) { vmh->start_addr = (heap_pc) ALIGN_FORWARD(vmh->alloc_start, DYNAMO_OPTION(vmm_block_size)); request_region_be_heap_reachable(app_base, app_end - app_base); } } } } #endif /* X64 */ /* Next we try the -vm_base value plus a random offset. */ if (vmh->start_addr == NULL) { /* Out of 32 bits = 12 bits are page offset, windows wastes 4 more * since its allocation base is 64KB, and if we want to stay * safely in say 0x20000000-0x2fffffff we're left with only 12 * bits of randomness - which may be too little. On the other * hand changing any of the lower 16 bits will make our bugs * non-deterministic. 
*/ /* Make sure we don't waste the lower bits from our random number */ preferred = (DYNAMO_OPTION(vm_base) + get_random_offset(DYNAMO_OPTION(vm_max_offset) / DYNAMO_OPTION(vmm_block_size)) * DYNAMO_OPTION(vmm_block_size)); preferred = ALIGN_FORWARD(preferred, DYNAMO_OPTION(vmm_block_size)); /* overflow check: w/ vm_base shouldn't happen so debug-only check */ ASSERT(!POINTER_OVERFLOW_ON_ADD(preferred, size)); /* let's assume a single chunk is sufficient to reserve */ #ifdef X64 if ((byte *)preferred < heap_allowable_region_start || (byte *)preferred + size > heap_allowable_region_end) { error_code = HEAP_ERROR_NOT_AT_PREFERRED; } else { #endif vmh->alloc_start = os_heap_reserve((void*)preferred, size, &error_code, true/*+x*/); vmh->start_addr = vmh->alloc_start; LOG(GLOBAL, LOG_HEAP, 1, "vmm_heap_unit_init preferred="PFX" got start_addr="PFX"\n", preferred, vmh->start_addr); #ifdef X64 } #endif } while (vmh->start_addr == NULL && DYNAMO_OPTION(vm_allow_not_at_base)) { /* Since we prioritize low-4GB or near-app over -vm_base, we do not * syslog or assert here */ /* need extra size to ensure alignment */ vmh->alloc_size = size + DYNAMO_OPTION(vmm_block_size); #ifdef X64 /* PR 215395, make sure allocation satisfies heap reachability contraints */ vmh->alloc_start = os_heap_reserve_in_region ((void *)ALIGN_FORWARD(heap_allowable_region_start, PAGE_SIZE), (void *)ALIGN_BACKWARD(heap_allowable_region_end, PAGE_SIZE), size + DYNAMO_OPTION(vmm_block_size), &error_code, true/*+x*/); #else vmh->alloc_start = (heap_pc) os_heap_reserve(NULL, size + DYNAMO_OPTION(vmm_block_size), &error_code, true/*+x*/); #endif vmh->start_addr = (heap_pc) ALIGN_FORWARD(vmh->alloc_start, DYNAMO_OPTION(vmm_block_size)); LOG(GLOBAL, LOG_HEAP, 1, "vmm_heap_unit_init unable to allocate at preferred=" PFX" letting OS place sz=%dM addr="PFX"\n", preferred, size/(1024*1024), vmh->start_addr); if (vmh->alloc_start == NULL && DYNAMO_OPTION(vm_allow_smaller)) { /* Just a little smaller might fit */ size_t sub = (size_t) ALIGN_FORWARD(size/16, 1024*1024); SYSLOG_INTERNAL_WARNING_ONCE("Full size vmm heap allocation failed"); if (size > sub) size -= sub; else break; } else break; } #ifdef X64 /* ensure future out-of-block heap allocations are reachable from this allocation */ if (vmh->start_addr != NULL) { ASSERT(vmh->start_addr >= heap_allowable_region_start && !POINTER_OVERFLOW_ON_ADD(vmh->start_addr, size) && vmh->start_addr + size <= heap_allowable_region_end); request_region_be_heap_reachable(vmh->start_addr, size); } #endif if (vmh->start_addr == 0) { vmm_heap_initialize_unusable(vmh); /* we couldn't even reserve initial virtual memory - we're out of luck */ /* XXX case 7373: make sure we tag as a potential * interoperability issue, in staging mode we should probably * get out from the process since we haven't really started yet */ report_low_on_memory(OOM_INIT, error_code); ASSERT_NOT_REACHED(); } vmh->end_addr = vmh->start_addr + size; ASSERT_TRUNCATE(vmh->num_blocks, uint, size / DYNAMO_OPTION(vmm_block_size)); vmh->num_blocks = (uint) (size / DYNAMO_OPTION(vmm_block_size)); vmh->num_free_blocks = vmh->num_blocks; LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_unit_init ["PFX","PFX") total=%d free=%d\n", vmh->start_addr, vmh->end_addr, vmh->num_blocks, vmh->num_free_blocks); /* make sure static bitmap_t size is properly aligned on block boundaries */ ASSERT(ALIGNED(MAX_VMM_HEAP_UNIT_SIZE, DYNAMO_OPTION(vmm_block_size))); bitmap_initialize_free(vmh->blocks, vmh->num_blocks); DOLOG(1, LOG_HEAP, { vmm_dump_map(vmh); }); 
ASSERT(bitmap_check_consistency(vmh->blocks, vmh->num_blocks, vmh->num_free_blocks)); } static void vmm_heap_unit_exit(vm_heap_t *vmh) { LOG(GLOBAL, LOG_HEAP, 1, "vmm_heap_unit_exit ["PFX","PFX") total=%d free=%d\n", vmh->start_addr, vmh->end_addr, vmh->num_blocks, vmh->num_free_blocks); /* we assume single thread in DR at this point */ DELETE_LOCK(vmh->lock); if (vmh->start_addr == NULL) return; DOLOG(1, LOG_HEAP, { vmm_dump_map(vmh); }); ASSERT(bitmap_check_consistency(vmh->blocks, vmh->num_blocks, vmh->num_free_blocks)); ASSERT(vmh->num_blocks * DYNAMO_OPTION(vmm_block_size) == (ptr_uint_t)(vmh->end_addr - vmh->start_addr)); /* In case there are no tombstones we can just free the unit and * that is what we'll do, otherwise it will stay up forever. */ bool free_heap = vmh->num_free_blocks == vmh->num_blocks; #ifdef UNIX /* On unix there's no fear of leftover tombstones, and as long as we're * doing a detach we can be sure our stack is not actually in the heap. */ if (doing_detach) { DODEBUG({ byte *sp; GET_STACK_PTR(sp); ASSERT(!(sp >= vmh->start_addr && sp < vmh->end_addr)); }); free_heap = true; } #endif if (free_heap) { heap_error_code_t error_code; os_heap_free(vmh->alloc_start, vmh->alloc_size, &error_code); ASSERT(error_code == HEAP_ERROR_SUCCESS); } else { /* FIXME: doing nothing for now - we only care about this in * detach scenarios where we should try to clean up from the * virtual address space */ } vmm_heap_initialize_unusable(vmh); } /* Returns whether within the region we reserved from the OS for doling * out internally via our vm_heap_t; asserts that the address was also * logically reserved within the vm_heap_t. */ static bool vmm_is_reserved_unit(vm_heap_t *vmh, vm_addr_t p, size_t size) { size = ALIGN_FORWARD(size, DYNAMO_OPTION(vmm_block_size)); if (p < vmh->start_addr || vmh->end_addr < p/*overflow*/ || vmh->end_addr < (p + size)) return false; ASSERT(CHECK_TRUNCATE_TYPE_uint(size/DYNAMO_OPTION(vmm_block_size))); ASSERT(bitmap_are_reserved_blocks(vmh->blocks, vmh->num_blocks, vmm_addr_to_block(vmh, p), (uint)size/DYNAMO_OPTION(vmm_block_size))); return true; } /* Returns whether entirely within the region we reserved from the OS for doling * out internally via our vm_heap_t */ bool is_vmm_reserved_address(byte *pc, size_t size) { ASSERT(heapmgt != NULL); /* Case 10293: we don't call vmm_is_reserved_unit to avoid its * assert, which we want to maintain for callers only dealing with * DR-allocated addresses, while this routine is called w/ random * addresses */ return (heapmgt != NULL && heapmgt->vmheap.start_addr != NULL && pc >= heapmgt->vmheap.start_addr && !POINTER_OVERFLOW_ON_ADD(pc, size) && (pc + size) <= heapmgt->vmheap.end_addr); } void get_vmm_heap_bounds(byte **heap_start/*OUT*/, byte **heap_end/*OUT*/) { ASSERT(heapmgt != NULL); ASSERT(heap_start != NULL && heap_end != NULL); *heap_start = heapmgt->vmheap.start_addr; *heap_end = heapmgt->vmheap.end_addr; } /* i#774: eventually we'll split vmheap from vmcode. For now, vmcode queries * refer to the single vmheap reservation. */ byte * vmcode_get_start(void) { byte *start, *end; get_vmm_heap_bounds(&start, &end); return start; } byte * vmcode_get_end(void) { byte *start, *end; get_vmm_heap_bounds(&start, &end); return end; } byte * vmcode_unreachable_pc(void) { #ifdef X86_64 /* This is used to indicate something that is unreachable from *everything* * for DR_CLEANCALL_INDIRECT, so ideally we want to not just provide an * address that vmcode can't reach. * We use a non-canonical address for x86_64. 
*/ return (byte *)0x8000000100000000ULL; #else /* This is not really used for aarch* so we just go with vmcode reachability. */ ptr_uint_t start, end; get_vmm_heap_bounds((byte **)&start, (byte **)&end); if (start > INT_MAX) return NULL; else { /* We do not use -1 to avoid wraparound from thinking it's reachable. */ return (byte *)end + INT_MAX + PAGE_SIZE; } #endif } bool rel32_reachable_from_vmcode(byte *tgt) { #ifdef X64 /* To handle beyond-vmm-reservation allocs, we must compare to the allowable * heap range and not just the vmcode range (i#1479). */ ptr_int_t new_offs = (tgt > heap_allowable_region_start) ? (tgt - heap_allowable_region_start) : (heap_allowable_region_end - tgt); ASSERT(vmcode_get_start() >= heap_allowable_region_start); ASSERT(vmcode_get_end() <= heap_allowable_region_end+1/*closed*/); return REL32_REACHABLE_OFFS(new_offs); #else return true; #endif } static inline void vmm_update_block_stats(which_vmm_t which, uint num_blocks, bool add) { /* XXX: find some way to make a stats array */ if (add) { if (which == VMM_HEAP) RSTATS_ADD_PEAK(vmm_blocks_heap, num_blocks); else if (which == VMM_CACHE) RSTATS_ADD_PEAK(vmm_blocks_cache, num_blocks); else if (which == VMM_STACK) RSTATS_ADD_PEAK(vmm_blocks_stack, num_blocks); else if (which == VMM_SPECIAL_HEAP) RSTATS_ADD_PEAK(vmm_blocks_special_heap, num_blocks); else if (which == VMM_SPECIAL_MMAP) RSTATS_ADD_PEAK(vmm_blocks_special_mmap, num_blocks); } else { if (which == VMM_HEAP) RSTATS_SUB(vmm_blocks_heap, num_blocks); else if (which == VMM_CACHE) RSTATS_SUB(vmm_blocks_cache, num_blocks); else if (which == VMM_STACK) RSTATS_SUB(vmm_blocks_stack, num_blocks); else if (which == VMM_SPECIAL_HEAP) RSTATS_SUB(vmm_blocks_special_heap, num_blocks); else if (which == VMM_SPECIAL_MMAP) RSTATS_SUB(vmm_blocks_special_mmap, num_blocks); } } /* Reservations here are done with DYNAMO_OPTION(vmm_block_size) alignment * (e.g. 64KB) but the caller is not forced to request at that * alignment. We explicitly synchronize reservations and decommits * within the vm_heap_t. * Returns NULL if the VMMHeap is full or too fragmented to satisfy * the request. */ static vm_addr_t vmm_heap_reserve_blocks(vm_heap_t *vmh, size_t size_in, which_vmm_t which) { vm_addr_t p; uint request; uint first_block; size_t size; size = ALIGN_FORWARD(size_in, DYNAMO_OPTION(vmm_block_size)); ASSERT_TRUNCATE(request, uint, size/DYNAMO_OPTION(vmm_block_size)); request = (uint) size/DYNAMO_OPTION(vmm_block_size); LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_reserve_blocks: size=%d => %d in blocks=%d free_blocks~=%d\n", size_in, size, request, vmh->num_free_blocks); mutex_lock(&vmh->lock); if (vmh->num_free_blocks < request) { mutex_unlock(&vmh->lock); return NULL; } first_block = bitmap_allocate_blocks(vmh->blocks, vmh->num_blocks, request); if (first_block != BITMAP_NOT_FOUND) { vmh->num_free_blocks -= request; } mutex_unlock(&vmh->lock); if (first_block != BITMAP_NOT_FOUND) { p = vmm_block_to_addr(vmh, first_block); RSTATS_ADD_PEAK(vmm_vsize_used, size); STATS_ADD_PEAK(vmm_vsize_blocks_used, request); STATS_ADD_PEAK(vmm_vsize_wasted, size - size_in); vmm_update_block_stats(which, request, true/*add*/); DOSTATS({ if (request > 1) { STATS_INC(vmm_multi_block_allocs); STATS_ADD(vmm_multi_blocks, request); } }); } else { p = NULL; } LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_reserve_blocks: size=%d blocks=%d p="PFX"\n", size, request, p); DOLOG(5, LOG_HEAP, { vmm_dump_map(vmh); }); return p; } /* We explicitly synchronize reservations and decommits within the vm_heap_t. 
* Update bookkeeping information about the freed region. */ static void vmm_heap_free_blocks(vm_heap_t *vmh, vm_addr_t p, size_t size_in, which_vmm_t which) { uint first_block = vmm_addr_to_block(vmh, p); uint request; size_t size; size = ALIGN_FORWARD(size_in, DYNAMO_OPTION(vmm_block_size)); ASSERT_TRUNCATE(request, uint, size/DYNAMO_OPTION(vmm_block_size)); request = (uint) size/DYNAMO_OPTION(vmm_block_size); LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_free_blocks: size=%d blocks=%d p="PFX"\n", size, request, p); mutex_lock(&vmh->lock); bitmap_free_blocks(vmh->blocks, vmh->num_blocks, first_block, request); vmh->num_free_blocks += request; mutex_unlock(&vmh->lock); ASSERT(vmh->num_free_blocks <= vmh->num_blocks); RSTATS_SUB(vmm_vsize_used, size); STATS_SUB(vmm_vsize_blocks_used, request); vmm_update_block_stats(which, request, false/*sub*/); STATS_SUB(vmm_vsize_wasted, size - size_in); } /* This is the proper interface for the rest of heap.c to the os_heap_* functions */ /* place all the local-scope static vars (from DO_THRESHOLD) into .fspdata to avoid * protection changes */ START_DATA_SECTION(FREQ_PROTECTED_SECTION, "w"); static bool at_reset_at_vmm_limit() { return (DYNAMO_OPTION(reset_at_vmm_percent_free_limit) != 0 && 100 * heapmgt->vmheap.num_free_blocks < DYNAMO_OPTION(reset_at_vmm_percent_free_limit) * heapmgt->vmheap.num_blocks) || (DYNAMO_OPTION(reset_at_vmm_free_limit) != 0 && heapmgt->vmheap.num_free_blocks * DYNAMO_OPTION(vmm_block_size) < DYNAMO_OPTION(reset_at_vmm_free_limit)); } /* Reserve virtual address space without committing swap space for it */ static vm_addr_t vmm_heap_reserve(size_t size, heap_error_code_t *error_code, bool executable, which_vmm_t which) { vm_addr_t p; /* should only be used on sizable aligned pieces */ ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE)); ASSERT(!OWN_MUTEX(&reset_pending_lock)); if (DYNAMO_OPTION(vm_reserve)) { /* FIXME: should we make this an external option? */ if (INTERNAL_OPTION(vm_use_last) || (DYNAMO_OPTION(switch_to_os_at_vmm_reset_limit) && at_reset_at_vmm_limit())) { DO_ONCE({ if (DYNAMO_OPTION(reset_at_switch_to_os_at_vmm_limit)) schedule_reset(RESET_ALL); DOCHECK(1, { if (!INTERNAL_OPTION(vm_use_last)) { ASSERT_CURIOSITY(false && "running low on vm reserve"); } }); /* FIXME - for our testing would be nice to have some release build * notification of this ... */ }); DODEBUG(ever_beyond_vmm = true;); #ifdef X64 /* PR 215395, make sure allocation satisfies heap reachability contraints */ p = os_heap_reserve_in_region ((void *)ALIGN_FORWARD(heap_allowable_region_start, PAGE_SIZE), (void *)ALIGN_BACKWARD(heap_allowable_region_end, PAGE_SIZE), size, error_code, executable); /* ensure future heap allocations are reachable from this allocation */ if (p != NULL) request_region_be_heap_reachable(p, size); #else p = os_heap_reserve(NULL, size, error_code, executable); #endif if (p != NULL) return p; LOG(GLOBAL, LOG_HEAP, 1, "vmm_heap_reserve: failed "PFX"\n", *error_code); } if (at_reset_at_vmm_limit()) { /* We're running low on our reservation, trigger a reset */ if (schedule_reset(RESET_ALL)) { STATS_INC(reset_low_vmm_count); DO_THRESHOLD_SAFE(DYNAMO_OPTION(report_reset_vmm_threshold), FREQ_PROTECTED_SECTION, {/* < max - nothing */}, {/* >= max */ /* FIXME - do we want to report more then once to give some idea of * how much thrashing there is? 
*/ DO_ONCE({ SYSLOG_CUSTOM_NOTIFY(SYSLOG_WARNING, MSG_LOW_ON_VMM_MEMORY, 2, "Potentially thrashing on low virtual " "memory resetting.", get_application_name(), get_application_pid()); /* want QA to notice */ ASSERT_CURIOSITY(false && "vmm heap limit reset thrashing"); }); }); } } p = vmm_heap_reserve_blocks(&heapmgt->vmheap, size, which); LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_reserve: size=%d p="PFX"\n", size, p); if (p) return p; DO_ONCE({ DODEBUG({ out_of_vmheap_once = true; }); if (!INTERNAL_OPTION(skip_out_of_vm_reserve_curiosity)) { /* this maybe unsafe for early services w.r.t. case 666 */ SYSLOG_INTERNAL_WARNING("Out of vmheap reservation - reserving %dKB." "Falling back onto OS allocation", size/1024); ASSERT_CURIOSITY(false && "Out of vmheap reservation"); } /* This actually-out trigger is only trying to help issues like a * thread-private configuration being a memory hog (and thus we use up * our reserve). Reset needs memory, and this is asynchronous, so no * guarantees here anyway (the app may have already reserved all memory * beyond our reservation, see sqlsrvr.exe and cisvc.exe for ex.) which is * why we have -reset_at_vmm_threshold to make reset success more likely. */ if (DYNAMO_OPTION(reset_at_vmm_full)) { schedule_reset(RESET_ALL); } }); } /* if we fail to allocate from our reservation we fall back to the OS */ DODEBUG(ever_beyond_vmm = true;); #ifdef X64 /* PR 215395, make sure allocation satisfies heap reachability contraints */ p = os_heap_reserve_in_region ((void *)ALIGN_FORWARD(heap_allowable_region_start, PAGE_SIZE), (void *)ALIGN_BACKWARD(heap_allowable_region_end, PAGE_SIZE), size, error_code, executable); /* ensure future heap allocations are reachable from this allocation */ if (p != NULL) request_region_be_heap_reachable(p, size); #else p = os_heap_reserve(NULL, size, error_code, executable); #endif return p; } /* Commit previously reserved pages, returns false when out of memory * This is here just to complement the vmm interface, in fact it is * almost an alias for os_heap_commit. (If we had strict types then * here we'd convert a vm_addr_t into a heap_pc.) */ static inline bool vmm_heap_commit(vm_addr_t p, size_t size, uint prot, heap_error_code_t *error_code) { bool res = os_heap_commit(p, size, prot, error_code); size_t commit_used, commit_limit; ASSERT(!OWN_MUTEX(&reset_pending_lock)); if ((DYNAMO_OPTION(reset_at_commit_percent_free_limit) != 0 || DYNAMO_OPTION(reset_at_commit_free_limit) != 0) && os_heap_get_commit_limit(&commit_used, &commit_limit)) { size_t commit_left = commit_limit - commit_used; ASSERT(commit_used <= commit_limit); /* FIXME - worry about overflow in the multiplies below? With 4kb pages isn't * an issue till 160GB of committable memory. */ if ((DYNAMO_OPTION(reset_at_commit_free_limit) != 0 && commit_left < DYNAMO_OPTION(reset_at_commit_free_limit) / PAGE_SIZE) || (DYNAMO_OPTION(reset_at_commit_percent_free_limit) != 0 && 100 * commit_left < DYNAMO_OPTION(reset_at_commit_percent_free_limit) * commit_limit)) { /* Machine is getting low on memory, trigger a reset */ /* FIXME - if we aren't the ones hogging committed memory (rougue app) then * do we want a version of reset that doesn't de-commit our already grabbed * memory to avoid someone else stealing it (or perhaps keep just a minimal * level to ensure we make some progress)? */ /* FIXME - the commit limit is for the whole system; we have no good way of * telling if we're running in a job and if so what the commit limit for the * job is. 
*/ /* FIXME - if a new process is started under dr while the machine is already * past the threshold we will just spin resetting here and not make any * progress, may be better to only reset when we have a reasonable amount of * non-persistent memory to free (so that we can at least make some progress * before resetting again). */ /* FIXME - the threshold is calculated at the current page file size, but * it's possible that the pagefile is expandable (dependent on disk space of * course) and thus we're preventing a potentially beneficial (to us) * upsizing of the pagefile here. See "HKLM\SYSTEM\CCS\Control\Session * Manager\Memory Management" for the initial/max size of the various page * files (query SystemPagefileInformation only gets you the current size). */ /* xref case 345 on fixmes (and link to wiki discussion) */ if (schedule_reset(RESET_ALL)) { STATS_INC(reset_low_commit_count); DO_THRESHOLD_SAFE(DYNAMO_OPTION(report_reset_commit_threshold), FREQ_PROTECTED_SECTION, {/* < max - nothing */}, {/* >= max */ /* FIXME - do we want to report more than once to give some idea of * how much thrashing there is? */ DO_ONCE({ SYSLOG_CUSTOM_NOTIFY(SYSLOG_WARNING, MSG_LOW_ON_COMMITTABLE_MEMORY, 2, "Potentially thrashing on low committable " "memory resetting.", get_application_name(), get_application_pid()); /* want QA to notice */ ASSERT_CURIOSITY(false && "commit limit reset thrashing"); }); }); } } } if (!res && DYNAMO_OPTION(oom_timeout) != 0) { DEBUG_DECLARE(heap_error_code_t old_error_code = *error_code;) ASSERT(old_error_code != HEAP_ERROR_SUCCESS); /* check whether worth retrying */ if (!os_heap_systemwide_overcommit(*error_code)) { /* FIXME: we should check whether current process is the hog */ /* unless we have used the memory, there is still a * minuscule chance another thread will free up some or * will attempt suicide, so could retry even if current * process has a leak */ ASSERT_NOT_IMPLEMENTED(false); /* retry */ } SYSLOG_INTERNAL_WARNING("vmm_heap_commit oom: timeout and retry"); /* let's hope a memory hog dies in the meantime */ os_timeout(DYNAMO_OPTION(oom_timeout)); res = os_heap_commit(p, size, prot, error_code); DODEBUG({ if (res) { SYSLOG_INTERNAL_WARNING("vmm_heap_commit retried, got away! old="PFX " new="PFX"\n", old_error_code, *error_code); } else { SYSLOG_INTERNAL_WARNING("vmm_heap_commit retrying, no luck. old="PFX " new="PFX"\n", old_error_code, *error_code); } }); } return res; } /* back to normal section */ END_DATA_SECTION() /* Free previously reserved and possibly committed memory. If it is * within the memory managed by the virtual memory manager, we only * decommit back to the OS and remove the vmm reservation. * Keep in mind that this can be called on units that are not fully * committed, e.g.
guard pages are added to this - as long as the * os_heap_decommit interface can handle this we're OK */ static void vmm_heap_free(vm_addr_t p, size_t size, heap_error_code_t *error_code, which_vmm_t which) { LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_free: size=%d p="PFX" is_reserved=%d\n", size, p, vmm_is_reserved_unit(&heapmgt->vmheap, p, size)); /* the memory doesn't have to be within our VM reserve if it was allocated as an extra OS call when we ran out */ if (DYNAMO_OPTION(vm_reserve)) { if (vmm_is_reserved_unit(&heapmgt->vmheap, p, size)) { os_heap_decommit(p, size, error_code); vmm_heap_free_blocks(&heapmgt->vmheap, p, size, which); LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_free: freed size=%d p="PFX"\n", size, p); return; } else { /* FIXME: check if this is stack_free getting in the way, then ignore it */ /* FIXME: could do this by overriding the meaning of the vmheap fields after cleanup to a different combination such that start_pc = end_pc = NULL */ /* FIXME: see vmm_heap_unit_exit for the current stack_free problem */ if (vmm_heap_exited) { *error_code = HEAP_ERROR_SUCCESS; return; } } } os_heap_free(p, size, error_code); } static void vmm_heap_decommit(vm_addr_t p, size_t size, heap_error_code_t *error_code) { LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_decommit: size=%d p="PFX" is_reserved=%d\n", size, p, vmm_is_reserved_unit(&heapmgt->vmheap, p, size)); os_heap_decommit(p, size, error_code); /* nothing to be done to vmm blocks */ } /* Caller is required to handle thread synchronization and to update dynamo vm areas. * size must be PAGE_SIZE-aligned. * Returns NULL if it fails to allocate memory! */ static void * vmm_heap_alloc(size_t size, uint prot, heap_error_code_t *error_code, which_vmm_t which) { vm_addr_t p = vmm_heap_reserve(size, error_code, TEST(MEMPROT_EXEC, prot), which); if (!p) return NULL; /* out of reserved memory */ if (!vmm_heap_commit(p, size, prot, error_code)) return NULL; /* out of committed memory */ return p; } /* virtual memory manager initialization */ void vmm_heap_init() { IF_WINDOWS(ASSERT(DYNAMO_OPTION(vmm_block_size) == OS_ALLOC_GRANULARITY)); #ifdef X64 /* add reachable regions before we allocate the heap, xref PR 215395 */ /* i#774, i#901: we no longer need the DR library or ntdll.dll to be * reachable by the vmheap reservation. But, for -heap_in_lower_4GB, * we must call request_region_be_heap_reachable() up front. * This is a hard requirement so we set it prior to locating the vmm region. */ if (DYNAMO_OPTION(heap_in_lower_4GB)) request_region_be_heap_reachable(0, 0x80000000); #endif if (DYNAMO_OPTION(vm_reserve)) { vmm_heap_unit_init(&heapmgt->vmheap, DYNAMO_OPTION(vm_size)); } } void vmm_heap_exit() { /* virtual memory manager exit */ if (DYNAMO_OPTION(vm_reserve)) { /* FIXME: we have three regions that are not explicitly * deallocated: current stack, init stack, global_do_syscall */ DOCHECK(1, { uint perstack = ALIGN_FORWARD_UINT(DYNAMO_OPTION(stack_size) + (DYNAMO_OPTION(guard_pages) ? (2*PAGE_SIZE) : (DYNAMO_OPTION(stack_guard_pages) ? PAGE_SIZE : 0)), DYNAMO_OPTION(vmm_block_size)) / DYNAMO_OPTION(vmm_block_size); uint unfreed_blocks = perstack * 1 /* initstack */ + /* current stack */ perstack * ((doing_detach IF_APP_EXPORTS(|| dr_api_exit)) ?
0 : 1); /* FIXME: on detach arch_thread_exit should explicitly mark as left behind all TPCs needed so then we can assert even for detach */ ASSERT(IF_WINDOWS(doing_detach || ) /* not deterministic when detaching */ heapmgt->vmheap.num_free_blocks == heapmgt->vmheap.num_blocks - unfreed_blocks || /* >=, not ==, b/c if we hit the vmm limit the cur dstack * could be outside of vmm (i#1164). */ ((ever_beyond_vmm /* This also happens for dstacks up high for DrMi#1723. */ IF_WINDOWS(|| get_os_version() >= WINDOWS_VERSION_8_1)) && heapmgt->vmheap.num_free_blocks >= heapmgt->vmheap.num_blocks - unfreed_blocks)); }); /* FIXME: On process exit we are currently executing off a * stack in this region so we cannot free the whole allocation. * FIXME: Any tombstone allocations will have to use a * different interface than the generic heap_mmap() which is * sometimes used to leave things behind. FIXME: Currently * we'll leave behind the whole vm unit if any tombstones are * left - which in fact is always the case, no matter whether * thread private code needs to be left or not. * global_do_syscall 32 byte allocation should be part of our * dll and won't have to be left. * The current stack is the main problem because it is later * cleaned up in cleanup_and_terminate by calling stack_free which * in turn gets all the way to vmm_heap_free. Therefore we add an * explicit test for vmm_heap_exited, so that we can otherwise free * bookkeeping information and delete the lock now. * Potential solution to most of these problems is to have * cleanup_and_terminate call vmm_heap_exit when cleaning up * the process, or to just leave the vm mapping behind and * simply pass a different argument to stack_free. */ vmm_heap_unit_exit(&heapmgt->vmheap); vmm_heap_exited = true; } } /* checks for compatibility among heap options, returns true if * modified the value of any options to make them compatible */ bool heap_check_option_compatibility() { bool ret = false; ret = check_param_bounds(&dynamo_options.vm_size, MIN_VMM_HEAP_UNIT_SIZE, MAX_VMM_HEAP_UNIT_SIZE, "vm_size") || ret; #ifdef INTERNAL /* if max_heap_unit_size is too small you may get a funny message * "initial_heap_unit_size must be >= 8229 and <= 4096" but in * release build we will take the min and then complain about * max_heap_unit_size and set it to the min also, so it all works * out w/o needing an extra check() call. */ /* case 7626: don't short-circuit checks, as later ones may be needed */ ret = check_param_bounds(&dynamo_options.initial_heap_unit_size, /* if have units smaller than a page we end up * allocating 64KB chunks for "oversized" units * for just about every alloc! so round up to * at least a page. 
*/ ALIGN_FORWARD(UNITOVERHEAD + 1, (uint)PAGE_SIZE), HEAP_UNIT_MAX_SIZE, "initial_heap_unit_size") || ret; ret = check_param_bounds(&dynamo_options.initial_global_heap_unit_size, ALIGN_FORWARD(UNITOVERHEAD + 1, (uint)PAGE_SIZE), HEAP_UNIT_MAX_SIZE, "initial_global_heap_unit_size") || ret; ret = check_param_bounds(&dynamo_options.max_heap_unit_size, MAX(HEAP_UNIT_MIN_SIZE, GLOBAL_UNIT_MIN_SIZE), INT_MAX, "max_heap_unit_size") || ret; #endif return ret; } /* thread-shared initialization that should be repeated after a reset */ void heap_reset_init() { if (SEPARATE_NONPERSISTENT_HEAP()) { threadunits_init(GLOBAL_DCONTEXT, &heapmgt->global_nonpersistent_units, GLOBAL_UNIT_MIN_SIZE); } } /* initialization */ void heap_init() { int i; uint prev_sz = 0; LOG(GLOBAL, LOG_TOP|LOG_HEAP, 2, "Heap bucket sizes are:\n"); /* make sure we'll preserve alignment */ ASSERT(ALIGNED(HEADER_SIZE, HEAP_ALIGNMENT)); /* make sure free list pointers will fit */ ASSERT(BLOCK_SIZES[0] >= sizeof(heap_pc*)); /* since sizes depend on size of structs, make sure they're in order */ for (i = 0; i < BLOCK_TYPES; i++) { ASSERT(BLOCK_SIZES[i] > prev_sz); /* we assume all of our heap allocs are aligned */ ASSERT(i == BLOCK_TYPES-1 || ALIGNED(BLOCK_SIZES[i], HEAP_ALIGNMENT)); prev_sz = BLOCK_SIZES[i]; LOG(GLOBAL, LOG_TOP|LOG_HEAP, 2, "\t%d bytes\n", BLOCK_SIZES[i]); } /* we assume writes to some static vars are atomic, * i.e., the vars don't cross cache lines. they shouldn't since * they should all be 4-byte-aligned in the data segment. * FIXME: ensure that release build aligns ok? * I would be quite surprised if static vars were not 4-byte-aligned! */ ASSERT(ALIGN_BACKWARD(&heap_exiting, CACHE_LINE_SIZE()) == ALIGN_BACKWARD(&heap_exiting + 1, CACHE_LINE_SIZE())); ASSERT(ALIGN_BACKWARD(&heap_unit_lock.owner, CACHE_LINE_SIZE()) == ALIGN_BACKWARD(&heap_unit_lock.owner + 1, CACHE_LINE_SIZE())); /* For simplicity we go through our normal heap mechanism to allocate * our post-init heapmgt struct */ ASSERT(heapmgt == &temp_heapmgt); heapmgt->global_heap_writable = true; /* this is relied on in global_heap_alloc */ threadunits_init(GLOBAL_DCONTEXT, &heapmgt->global_units, GLOBAL_UNIT_MIN_SIZE); heapmgt = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, heap_management_t, ACCT_MEM_MGT, PROTECTED); memset(heapmgt, 0, sizeof(*heapmgt)); ASSERT(sizeof(temp_heapmgt) == sizeof(*heapmgt)); memcpy(heapmgt, &temp_heapmgt, sizeof(temp_heapmgt)); threadunits_init(GLOBAL_DCONTEXT, &heapmgt->global_unprotected_units, GLOBAL_UNIT_MIN_SIZE); heap_reset_init(); #ifdef WINDOWS /* PR 250294: As part of 64-bit hook work, hook reachability was addressed * using landing pads (see win32/callback.c for more explanation). Landing * pad areas are a type of special heap, so they should be initialized * during heap init. * Each landing pad area has its own allocation pointer, so they shouldn't * be merged automatically. */ VMVECTOR_ALLOC_VECTOR(landing_pad_areas, GLOBAL_DCONTEXT, VECTOR_SHARED | VECTOR_NEVER_MERGE, landing_pad_areas_lock); #endif } /* need to not remove from vmareas on process exit -- vmareas has already exited! 
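* (really_free_unit() below therefore passes false for remove_vm, leaving the DR vm areas list untouched.)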
*/ static void really_free_unit(heap_unit_t *u) { RSTATS_SUB(heap_capacity, UNIT_COMMIT_SIZE(u)); STATS_ADD(heap_reserved_only, (stats_int_t)(UNIT_COMMIT_SIZE(u) - UNIT_RESERVED_SIZE(u))); /* remember that u itself is inside unit, not separately allocated */ release_guarded_real_memory((vm_addr_t)u, UNIT_RESERVED_SIZE(u), false/*do not update DR areas now*/, true, VMM_HEAP); } /* Free all thread-shared state not critical to forward progress; * heap_reset_init() will be called before continuing. */ void heap_reset_free() { heap_unit_t *u, *next_u; /* FIXME: share some code w/ heap_exit -- currently only called by reset */ ASSERT(DYNAMO_OPTION(enable_reset)); /* we must grab this lock before heap_unit_lock to avoid rank * order violations when freeing */ dynamo_vm_areas_lock(); /* for combining stats into global_units we need this lock * FIXME: remove if we go to separate stats sum location */ DODEBUG({ acquire_recursive_lock(&global_alloc_lock); }); acquire_recursive_lock(&heap_unit_lock); LOG(GLOBAL, LOG_HEAP, 1, "Pre-reset, global heap unit stats:\n"); /* FIXME: free directly rather than putting on dead list first */ threadunits_exit(&heapmgt->global_nonpersistent_units, GLOBAL_DCONTEXT); /* free all dead units */ u = heapmgt->heap.dead; while (u != NULL) { next_u = u->next_global; LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing dead unit "PFX"-"PFX" [-"PFX"]\n", u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u)); RSTATS_DEC(heap_num_free); really_free_unit(u); u = next_u; } heapmgt->heap.dead = NULL; heapmgt->heap.num_dead = 0; release_recursive_lock(&heap_unit_lock); DODEBUG({ release_recursive_lock(&global_alloc_lock); }); dynamo_vm_areas_unlock(); } /* atexit cleanup */ void heap_exit() { heap_unit_t *u, *next_u; heap_management_t *temp; heap_exiting = true; /* FIXME: we shouldn't need either lock if executed last */ dynamo_vm_areas_lock(); acquire_recursive_lock(&heap_unit_lock); #ifdef WINDOWS release_landing_pad_mem(); /* PR 250294 */ #endif LOG(GLOBAL, LOG_HEAP, 1, "Global unprotected heap unit stats:\n"); threadunits_exit(&heapmgt->global_unprotected_units, GLOBAL_DCONTEXT); if (SEPARATE_NONPERSISTENT_HEAP()) { LOG(GLOBAL, LOG_HEAP, 1, "Global nonpersistent heap unit stats:\n"); threadunits_exit(&heapmgt->global_nonpersistent_units, GLOBAL_DCONTEXT); } /* Now we need to go back to the static struct to clean up */ ASSERT(heapmgt != &temp_heapmgt); memcpy(&temp_heapmgt, heapmgt, sizeof(temp_heapmgt)); temp = heapmgt; heapmgt = &temp_heapmgt; HEAP_TYPE_FREE(GLOBAL_DCONTEXT, temp, heap_management_t, ACCT_MEM_MGT, PROTECTED); LOG(GLOBAL, LOG_HEAP, 1, "Global heap unit stats:\n"); threadunits_exit(&heapmgt->global_units, GLOBAL_DCONTEXT); /* free heap for all unfreed units */ LOG(GLOBAL, LOG_HEAP, 1, "Unfreed units:\n"); u = heapmgt->heap.units; while (u != NULL) { next_u = u->next_global; LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing live unit "PFX"-"PFX" [-"PFX"]\n", u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u)); RSTATS_DEC(heap_num_live); really_free_unit(u); u = next_u; } heapmgt->heap.units = NULL; u = heapmgt->heap.dead; while (u != NULL) { next_u = u->next_global; LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing dead unit "PFX"-"PFX" [-"PFX"]\n", u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u)); RSTATS_DEC(heap_num_free); really_free_unit(u); u = next_u; } heapmgt->heap.dead = NULL; release_recursive_lock(&heap_unit_lock); dynamo_vm_areas_unlock(); DELETE_RECURSIVE_LOCK(heap_unit_lock); DELETE_RECURSIVE_LOCK(global_alloc_lock); #ifdef X64 DELETE_LOCK(request_region_be_heap_reachable_lock); #endif if (doing_detach) heapmgt = 
&temp_heapmgt; } void heap_post_exit() { heap_exiting = false; } /* FIXME: * detect if the app is who we're fighting for memory, if so, don't * free memory, else the app will just keep grabbing more. * need a test for hitting 2GB (or 3GB!) user mode limit. */ static void heap_low_on_memory() { /* free some memory! */ heap_unit_t *u, *next_u; size_t freed = 0; LOG(GLOBAL, LOG_CACHE|LOG_STATS, 1, "heap_low_on_memory: about to free dead list units\n"); /* WARNING: this routine is called at arbitrary allocation failure points, * so we have to be careful what locks we grab * However, no allocation site can hold a lock weaker in rank than * heap_unit_lock, b/c it could deadlock on the allocation itself! * So we're safe. */ /* must grab this lock prior to heap_unit_lock if affecting DR vm areas * this is recursive so ok if we ran out of memory while holding DR vm area lock */ ASSERT(safe_to_allocate_or_free_heap_units()); dynamo_vm_areas_lock(); acquire_recursive_lock(&heap_unit_lock); u = heapmgt->heap.dead; while (u != NULL) { next_u = u->next_global; freed += UNIT_COMMIT_SIZE(u); /* FIXME: if out of committed pages only, could keep our reservations */ LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing dead unit "PFX"-"PFX" [-"PFX"]\n", u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u)); RSTATS_DEC(heap_num_free); really_free_unit(u); u = next_u; heapmgt->heap.num_dead--; } heapmgt->heap.dead = NULL; release_recursive_lock(&heap_unit_lock); dynamo_vm_areas_unlock(); LOG(GLOBAL, LOG_CACHE|LOG_STATS, 1, "heap_low_on_memory: freed %d KB\n", freed/1024); /* FIXME: we don't keep a list of guard pages, which we may decide to throw * out or compact at this time. */ /* FIXME: should also fix up the allocator to look in other free lists * of sizes larger than asked for, we may have plenty of memory available * in other lists! see comments in common_heap_alloc */ } static const char* get_oom_source_name(oom_source_t source) { /* currently only single character codenames, * (still as a string though) */ const char *code_name = "?"; switch (source) { case OOM_INIT : code_name = "I"; break; case OOM_RESERVE : code_name = "R"; break; case OOM_COMMIT : code_name = "C"; break; case OOM_EXTEND : code_name = "E"; break; default: ASSERT_NOT_REACHED(); } return code_name; } static bool silent_oom_for_process(oom_source_t source) { if (TESTANY(OOM_COMMIT|OOM_EXTEND, source) && !IS_STRING_OPTION_EMPTY(silent_commit_oom_list)) { bool onlist; const char *process_name = get_short_name(get_application_name()); string_option_read_lock(); onlist = check_filter_with_wildcards(DYNAMO_OPTION(silent_commit_oom_list), process_name); string_option_read_unlock(); if (onlist) { SYSLOG_INTERNAL_WARNING("not reporting last words of executable %s", process_name); return true; } } return false; } /* oom_source_t identifies the action we were taking, os_error_code is * the returned value from the last system call - opaque at this OS * independent layer. 
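* This routine does not return: it terminates the process via os_terminate().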
*/ static void report_low_on_memory(oom_source_t source, heap_error_code_t os_error_code) { if (TESTANY(DYNAMO_OPTION(silent_oom_mask), source) || silent_oom_for_process(source)) { SYSLOG_INTERNAL_WARNING("Mostly silent OOM: %s "PFX".\n", get_oom_source_name(source), os_error_code); /* still produce an ldmp for internal use */ if (TEST(DUMPCORE_OUT_OF_MEM_SILENT, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Out of memory, silently aborting program."); } else { const char *oom_source_code = get_oom_source_name(source); char status_hex[19]; /* FIXME: for 64bit hex need 16+NULL */ /* note 0x prefix added by the syslog */ snprintf(status_hex, BUFFER_SIZE_ELEMENTS(status_hex), PFX, /* FIXME: 32bit */ os_error_code); NULL_TERMINATE_BUFFER(status_hex); /* SYSLOG first */ SYSLOG_CUSTOM_NOTIFY(SYSLOG_CRITICAL, MSG_OUT_OF_MEMORY, 4, "Out of memory. Program aborted.", get_application_name(), get_application_pid(), oom_source_code, status_hex ); /* FIXME: case 7306 can't specify arguments in SYSLOG_CUSTOM_NOTIFY */ SYSLOG_INTERNAL_WARNING("OOM Status: %s %s", oom_source_code, status_hex); /* XXX: case 7296 - ldmp even if we have decided not to produce an event above */ if (TEST(DUMPCORE_OUT_OF_MEM, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Out of memory, aborting program."); /* pass only status code to XML where we should have a stack dump and callstack */ report_diagnostics("Out of memory", status_hex, NO_VIOLATION_BAD_INTERNAL_STATE); } os_terminate(NULL, TERMINATE_PROCESS); ASSERT_NOT_REACHED(); } /* update statistics for committed memory, and add to vm_areas */ static inline void account_for_memory(void *p, size_t size, uint prot, bool add_vm, bool image _IF_DEBUG(const char *comment)) { RSTATS_ADD_PEAK(memory_capacity, size); /* case 3045: areas inside the vmheap reservation are not added to the list * for clients that use DR-allocated memory, we have get_memory_info() * query from the OS to see inside */ if (vmm_is_reserved_unit(&heapmgt->vmheap, p, size)) { return; } if (add_vm) { add_dynamo_vm_area(p, ((app_pc)p) + size, prot, image _IF_DEBUG(comment)); } else { /* due to circular dependencies bet vmareas and global heap we do not call * add_dynamo_vm_area here, instead we indicate that something has changed */ mark_dynamo_vm_areas_stale(); /* NOTE: 'prot' info is lost about this region, but is needed in * heap_vmareas_synch_units to update all_memory_areas. Currently * heap_create_unit is the only place that passes 'false' with prot rw-. */ ASSERT(TESTALL(MEMPROT_READ | MEMPROT_WRITE, prot)); } } /* remove_vm MUST be false iff this is heap memory, which is updated separately */ static void update_dynamo_areas_on_release(app_pc start, app_pc end, bool remove_vm) { if (!vm_areas_exited && !heap_exiting) { /* avoid problems when exiting */ /* case 3045: areas inside the vmheap reservation are not added to the list * for clients that use DR-allocated memory, we have get_memory_info() * query from the OS to see inside */ if (vmm_is_reserved_unit(&heapmgt->vmheap, start, end - start)) { return; } if (remove_vm) { remove_dynamo_vm_area(start, end); } else { /* Due to cyclic dependencies bet heap and vmareas we cannot remove * incrementally. The pending set is protected by the same lock * needed to synch the vm areas, so we will never mis-identify free * memory as DR memory. 
*/ mark_dynamo_vm_areas_stale(); dynamo_areas_pending_remove = true; } } } bool lockwise_safe_to_allocate_memory() { /* check whether it's safe to hold a lock that normally can be held * for memory allocation -- i.e., check whether we hold the * global_alloc_lock */ return !self_owns_recursive_lock(&global_alloc_lock); } /* we indirect all os memory requests through here so we have a central place * to handle the out-of-memory condition. * add_vm MUST be false iff this is heap memory, which is updated separately. */ static void * get_real_memory(size_t size, uint prot, bool add_vm, which_vmm_t which _IF_DEBUG(const char *comment)) { void *p; heap_error_code_t error_code; /* must round up to page sizes, else vmm_heap_alloc assert triggers */ size = ALIGN_FORWARD(size, PAGE_SIZE); /* memory alloc/dealloc and updating DR list must be atomic */ dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ p = vmm_heap_alloc(size, prot, &error_code, which); if (p == NULL) { SYSLOG_INTERNAL_WARNING_ONCE("Out of memory -- cannot reserve or " "commit %dKB. Trying to recover.", size/1024); /* we should be ok here, shouldn't come in here holding global_alloc_lock * or heap_unit_lock w/o first having grabbed DR areas lock */ ASSERT(safe_to_allocate_or_free_heap_units()); heap_low_on_memory(); fcache_low_on_memory(); /* try again * FIXME: have more sophisticated strategy of freeing a little, then getting * more drastic with each subsequent failure * FIXME: can only free live fcache units for current thread w/ current * impl...should we wait a while and try again if out of memory, hoping * other threads have freed some?!?! */ p = vmm_heap_alloc(size, prot, &error_code, which); if (p == NULL) { report_low_on_memory(OOM_RESERVE, error_code); } SYSLOG_INTERNAL_WARNING_ONCE("Out of memory -- but still alive after " "emergency free."); } account_for_memory(p, size, prot, add_vm, false _IF_DEBUG(comment)); dynamo_vm_areas_unlock(); return p; } static void release_memory_and_update_areas(app_pc p, size_t size, bool decommit, bool remove_vm, which_vmm_t which) { heap_error_code_t error_code; /* these two operations need to be atomic wrt DR area updates */ dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ /* ref case 3035, we must remove from dynamo_areas before we free in case * we end up allocating memory in the process of removing the area * (we don't want to end up getting the memory we just freed since that * would lead to errors in the list when we finally did remove it) */ update_dynamo_areas_on_release(p, p + size, remove_vm); if (decommit) vmm_heap_decommit(p, size, &error_code); else vmm_heap_free(p, size, &error_code, which); ASSERT(error_code == HEAP_ERROR_SUCCESS); dynamo_vm_areas_unlock(); } /* remove_vm MUST be false iff this is heap memory, which is updated separately */ static void release_real_memory(void *p, size_t size, bool remove_vm, which_vmm_t which) { /* must round up to page sizes for vmm_heap_free */ size = ALIGN_FORWARD(size, PAGE_SIZE); release_memory_and_update_areas((app_pc)p, size, false/*free*/, remove_vm, which); /* avoid problem w/ being called by cleanup_and_terminate after dynamo_process_exit */ if (IF_DEBUG_ELSE(!dynamo_exited_log_and_stats, true)) RSTATS_SUB(memory_capacity, size); } static void extend_commitment(vm_addr_t p, size_t size, uint prot, bool initial_commit, which_vmm_t which) { heap_error_code_t error_code; ASSERT(ALIGNED(p, PAGE_SIZE)); size = ALIGN_FORWARD(size, PAGE_SIZE); if (!vmm_heap_commit(p, size, prot, &error_code)) { 
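/* Commit failed: as in get_real_memory(), free dead heap and fcache units and retry once before reporting out-of-memory. */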
SYSLOG_INTERNAL_WARNING_ONCE("Out of memory - cannot extend commit " "%dKB. Trying to recover.", size/1024); heap_low_on_memory(); fcache_low_on_memory(); /* see low-memory ideas in get_real_memory */ if (!vmm_heap_commit(p, size, prot, &error_code)) { report_low_on_memory(initial_commit ? OOM_COMMIT : OOM_EXTEND, error_code); } SYSLOG_INTERNAL_WARNING_ONCE("Out of memory in extend - still alive " "after emergency free."); } } /* A wrapper around get_real_memory that adds a guard page on each side of the * requested unit. These should consume only uncommitted virtual address and * should not use any physical memory. * add_vm MUST be false iff this is heap memory, which is updated separately. * Non-NULL min_addr is only supported for stack allocations (DrMi#1723). */ static vm_addr_t get_guarded_real_memory(size_t reserve_size, size_t commit_size, uint prot, bool add_vm, bool guarded, byte *min_addr, which_vmm_t which _IF_DEBUG(const char *comment)) { vm_addr_t p = NULL; uint guard_size = (uint)PAGE_SIZE; heap_error_code_t error_code; bool try_vmm = true; ASSERT(reserve_size >= commit_size); if (!guarded || !dynamo_options.guard_pages) { if (reserve_size == commit_size) return get_real_memory(reserve_size, prot, add_vm, which _IF_DEBUG(comment)); guard_size = 0; } reserve_size = ALIGN_FORWARD(reserve_size, PAGE_SIZE); commit_size = ALIGN_FORWARD(commit_size, PAGE_SIZE); reserve_size += 2* guard_size; /* add top and bottom guards */ /* memory alloc/dealloc and updating DR list must be atomic */ dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ #ifdef WINDOWS /* DrMi#1723: if we swap TEB stack fields, a client (or a DR app mem touch) * can trigger an app guard * page. We have to ensure that the kernel will update TEB.StackLimit in that * case, which requires our dstack to be higher than the app stack. * This results in more fragmentation and larger dynamo_areas so we avoid * if we can. We could consider a 2nd vm_reserve region just for stacks. */ if (SWAP_TEB_STACKBASE() && (!DYNAMO_OPTION(vm_reserve) && min_addr > NULL) || (DYNAMO_OPTION(vm_reserve) && min_addr > heapmgt->vmheap.start_addr)) { try_vmm = false; } #endif if (try_vmm) p = vmm_heap_reserve(reserve_size, &error_code, TEST(MEMPROT_EXEC, prot), which); #if defined(WINDOWS) && defined(CLIENT_INTERFACE) if (!try_vmm || p < (vm_addr_t)min_addr) { if (p != NULL) vmm_heap_free(p, reserve_size, &error_code, which); p = os_heap_reserve_in_region ((void *)ALIGN_FORWARD(min_addr, PAGE_SIZE), (void *)PAGE_START(POINTER_MAX), reserve_size, &error_code, TEST(MEMPROT_EXEC, prot)); /* No reason to update heap-reachable b/c stack doesn't need to reach * (min_addr != NULL assumed to be stack). */ ASSERT(!DYNAMO_OPTION(stack_shares_gencode)); /* would break reachability */ /* If it fails we can't do much: we fall back to within-vmm, if possible, * and rely on our other best-effort TEB.StackLimit updating checks * (check_app_stack_limit()). */ if (p == NULL) { SYSLOG_INTERNAL_WARNING_ONCE("Unable to allocate dstack above app stack"); if (!try_vmm) { p = vmm_heap_reserve(reserve_size, &error_code, TEST(MEMPROT_EXEC, prot), which); } } } #endif if (p == NULL) { /* Very unlikely to happen: we have to reach at least 2GB reserved memory. */ SYSLOG_INTERNAL_WARNING_ONCE("Out of memory - cannot reserve %dKB. 
" "Trying to recover.", reserve_size/1024); heap_low_on_memory(); fcache_low_on_memory(); p = vmm_heap_reserve(reserve_size, &error_code, TEST(MEMPROT_EXEC, prot), which); if (p == NULL) { report_low_on_memory(OOM_RESERVE, error_code); } SYSLOG_INTERNAL_WARNING_ONCE("Out of memory on reserve - but still " "alive after emergency free."); } /* includes guard pages if add_vm -- else, heap_vmareas_synch_units() will * add guard pages in by assuming one page on each side of every heap unit * if dynamo_options.guard_pages */ account_for_memory((void *)p, reserve_size, prot, add_vm, false _IF_DEBUG(comment)); dynamo_vm_areas_unlock(); STATS_ADD_PEAK(reserved_memory_capacity, reserve_size); STATS_ADD_PEAK(guard_pages, 2); p += guard_size; extend_commitment(p, commit_size, prot, true /* initial commit */, which); return p; } /* A wrapper around get_release_memory that also frees the guard pages on each * side of the requested unit. remove_vm MUST be false iff this is heap memory, * which is updated separately. */ static void release_guarded_real_memory(vm_addr_t p, size_t size, bool remove_vm, bool guarded, which_vmm_t which) { if (!guarded || !dynamo_options.guard_pages) { release_real_memory(p, size, remove_vm, which); return; } size = ALIGN_FORWARD(size, PAGE_SIZE); size += PAGE_SIZE * 2; /* add top and bottom guards */ p -= PAGE_SIZE; release_memory_and_update_areas((app_pc)p, size, false/*free*/, remove_vm, which); /* avoid problem w/ being called by cleanup_and_terminate after dynamo_process_exit */ if (IF_DEBUG_ELSE(!dynamo_exited_log_and_stats, true)) { RSTATS_SUB(memory_capacity, size); STATS_SUB(reserved_memory_capacity, size); STATS_ADD(guard_pages, -2); } } /* use heap_mmap to allocate large chunks of executable memory * it's mainly used to allocate our fcache units */ void * heap_mmap_ex(size_t reserve_size, size_t commit_size, uint prot, bool guarded, which_vmm_t which) { /* XXX i#774: when we split vmheap and vmcode, if MEMPROT_EXEC is requested * here (or this is a call from a client, for reachability * compatibility), put it in vmcode; else in vmheap. */ void *p = get_guarded_real_memory(reserve_size, commit_size, prot, true, guarded, NULL, which _IF_DEBUG("heap_mmap")); #ifdef DEBUG_MEMORY if (TEST(MEMPROT_WRITE, prot)) memset(p, HEAP_ALLOCATED_BYTE, commit_size); #endif /* We rely on this for freeing _post_stack in absence of dcontext */ ASSERT(!DYNAMO_OPTION(vm_reserve) || !DYNAMO_OPTION(stack_shares_gencode) || (ptr_uint_t)p - (guarded ? (GUARD_PAGE_ADJUSTMENT/2) : 0) == ALIGN_BACKWARD(p, DYNAMO_OPTION(vmm_block_size)) || at_reset_at_vmm_limit()); LOG(GLOBAL, LOG_HEAP, 2, "heap_mmap: %d bytes [/ %d] @ "PFX"\n", commit_size, reserve_size, p); STATS_ADD_PEAK(mmap_capacity, commit_size); STATS_ADD_PEAK(mmap_reserved_only, (reserve_size - commit_size)); return p; } /* use heap_mmap to allocate large chunks of executable memory * it's mainly used to allocate our fcache units */ void * heap_mmap_reserve(size_t reserve_size, size_t commit_size, which_vmm_t which) { /* heap_mmap always marks as executable */ return heap_mmap_ex(reserve_size, commit_size, MEMPROT_EXEC|MEMPROT_READ|MEMPROT_WRITE, true, which); } /* It is up to the caller to ensure commit_size is a page size multiple, * and that it does not extend beyond the initial reservation. 
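* heap_mmap_retract_commitment() below performs the matching de-commit.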
*/ void heap_mmap_extend_commitment(void *p, size_t commit_size, which_vmm_t which) { extend_commitment(p, commit_size, MEMPROT_EXEC|MEMPROT_READ|MEMPROT_WRITE, false /*not initial commit*/, which); STATS_SUB(mmap_reserved_only, commit_size); STATS_ADD_PEAK(mmap_capacity, commit_size); #ifdef DEBUG_MEMORY memset(p, HEAP_ALLOCATED_BYTE, commit_size); #endif } /* De-commits from a committed region. */ void heap_mmap_retract_commitment(void *retract_start, size_t decommit_size, which_vmm_t which) { heap_error_code_t error_code; ASSERT(ALIGNED(decommit_size, PAGE_SIZE)); vmm_heap_decommit(retract_start, decommit_size, &error_code); STATS_ADD(mmap_reserved_only, decommit_size); STATS_ADD_PEAK(mmap_capacity, -(stats_int_t)decommit_size); } /* Allocates executable memory in the same allocation region as this thread's * stack, to save address space (case 9474). */ void * heap_mmap_reserve_post_stack(dcontext_t *dcontext, size_t reserve_size, size_t commit_size, which_vmm_t which) { void *p; byte *stack_reserve_end = NULL; heap_error_code_t error_code; size_t available = 0; uint prot; bool known_stack = false; ASSERT(reserve_size > 0 && commit_size < reserve_size); /* 1.5 * guard page adjustment since we'll share the middle one */ if (DYNAMO_OPTION(stack_size) + reserve_size + GUARD_PAGE_ADJUSTMENT + GUARD_PAGE_ADJUSTMENT / 2 > DYNAMO_OPTION(vmm_block_size)) { /* there's not enough room to share the allocation block, stack is too big */ LOG(GLOBAL, LOG_HEAP, 1, "Not enough room to allocate 0x%08x bytes post stack " "of size 0x%08x\n", reserve_size, DYNAMO_OPTION(stack_size)); return heap_mmap_reserve(reserve_size, commit_size, which); } if (DYNAMO_OPTION(stack_shares_gencode) && /* FIXME: we could support this w/o vm_reserve, or when beyond * the reservation, but we don't bother */ DYNAMO_OPTION(vm_reserve) && dcontext != GLOBAL_DCONTEXT && dcontext != NULL) { stack_reserve_end = dcontext->dstack + GUARD_PAGE_ADJUSTMENT/2; #if defined(UNIX) && !defined(HAVE_MEMINFO) prot = 0; /* avoid compiler warning: should only need inside if */ if (!dynamo_initialized) { /* memory info is not yet set up. 
since so early we only support * post-stack if inside vmm (won't be true only for pathological * tiny vmm sizes) */ if (vmm_is_reserved_unit(&heapmgt->vmheap, stack_reserve_end, reserve_size)) { known_stack = true; available = reserve_size; } else known_stack = false; } else #elif defined(UNIX) /* the all_memory_areas list doesn't keep details inside vmheap */ known_stack = get_memory_info_from_os(stack_reserve_end, NULL, &available, &prot); #else known_stack = get_memory_info(stack_reserve_end, NULL, &available, &prot); #endif /* If ever out of vmheap, then may have free space beyond stack, * which we could support but don't (see FIXME above) */ ASSERT(out_of_vmheap_once || (known_stack && available >= reserve_size && prot == 0)); } if (!known_stack || /* if -no_vm_reserve will short-circuit so no vmh deref danger */ !vmm_in_same_block(dcontext->dstack, /* we do want a guard page at the end */ stack_reserve_end + reserve_size) || available < reserve_size) { ASSERT(!DYNAMO_OPTION(stack_shares_gencode) || !DYNAMO_OPTION(vm_reserve) || out_of_vmheap_once); DOLOG(1, LOG_HEAP, { if (known_stack && available < reserve_size) { LOG(GLOBAL, LOG_HEAP, 1, "heap_mmap_reserve_post_stack: avail %d < needed %d\n", available, reserve_size); } }); STATS_INC(mmap_no_share_stack_region); return heap_mmap_reserve(reserve_size, commit_size, which); } ASSERT(DYNAMO_OPTION(vm_reserve)); ASSERT(stack_reserve_end != NULL); prot = MEMPROT_EXEC|MEMPROT_READ|MEMPROT_WRITE; /* memory alloc/dealloc and updating DR list must be atomic */ dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ /* We share the stack's end guard page as our start guard page */ if (vmm_is_reserved_unit(&heapmgt->vmheap, stack_reserve_end, reserve_size)) { /* Memory is already reserved with OS */ p = stack_reserve_end; } else { p = os_heap_reserve(stack_reserve_end, reserve_size, &error_code, true/*+x*/); #ifdef X64 /* ensure future heap allocations are reachable from this allocation * (this will also verify that this region meets reachability requirements) */ if (p != NULL) request_region_be_heap_reachable(p, reserve_size); #endif if (p == NULL) { ASSERT_NOT_REACHED(); LOG(GLOBAL, LOG_HEAP, 1, "heap_mmap_reserve_post_stack: reserve failed "PFX"\n", error_code); dynamo_vm_areas_unlock(); STATS_INC(mmap_no_share_stack_region); return heap_mmap_reserve(reserve_size, commit_size, which); } ASSERT(error_code == HEAP_ERROR_SUCCESS); } if (!vmm_heap_commit(p, commit_size, prot, &error_code)) { ASSERT_NOT_REACHED(); LOG(GLOBAL, LOG_HEAP, 1, "heap_mmap_reserve_post_stack: commit failed "PFX"\n", error_code); if (!vmm_is_reserved_unit(&heapmgt->vmheap, stack_reserve_end, reserve_size)) { os_heap_free(p, reserve_size, &error_code); ASSERT(error_code == HEAP_ERROR_SUCCESS); } dynamo_vm_areas_unlock(); STATS_INC(mmap_no_share_stack_region); return heap_mmap_reserve(reserve_size, commit_size, which); } account_for_memory(p, reserve_size, prot, true/*add now*/, false _IF_DEBUG("heap_mmap_reserve_post_stack")); dynamo_vm_areas_unlock(); /* We rely on this for freeing in absence of dcontext */ ASSERT((ptr_uint_t)p - GUARD_PAGE_ADJUSTMENT/2 != ALIGN_BACKWARD(p, DYNAMO_OPTION(vmm_block_size))); #ifdef DEBUG_MEMORY memset(p, HEAP_ALLOCATED_BYTE, commit_size); #endif LOG(GLOBAL, LOG_HEAP, 2, "heap_mmap w/ stack: %d bytes [/ %d] @ "PFX"\n", commit_size, reserve_size, p); STATS_ADD_PEAK(mmap_capacity, commit_size); STATS_ADD_PEAK(mmap_reserved_only, (reserve_size - commit_size)); STATS_INC(mmap_share_stack_region); return p; } /* De-commits memory that 
was allocated in the same allocation region as this * thread's stack (case 9474). */ void heap_munmap_post_stack(dcontext_t *dcontext, void *p, size_t reserve_size, which_vmm_t which) { /* We would require a valid dcontext and compare to the stack reserve end, * but on detach we have no dcontext, so we instead use block alignment. */ DOCHECK(1, { if (dcontext != NULL && dcontext != GLOBAL_DCONTEXT && DYNAMO_OPTION(vm_reserve) && DYNAMO_OPTION(stack_shares_gencode)) { bool at_stack_end = (p == dcontext->dstack + GUARD_PAGE_ADJUSTMENT/2); bool at_block_start = ((ptr_uint_t)p - GUARD_PAGE_ADJUSTMENT/2 == ALIGN_BACKWARD(p, DYNAMO_OPTION(vmm_block_size))); ASSERT((at_stack_end && !at_block_start) || (!at_stack_end && at_block_start)); } }); if (!DYNAMO_OPTION(vm_reserve) || !DYNAMO_OPTION(stack_shares_gencode) || (ptr_uint_t)p - GUARD_PAGE_ADJUSTMENT/2 == ALIGN_BACKWARD(p, DYNAMO_OPTION(vmm_block_size))) { heap_munmap(p, reserve_size, which); } else { /* Detach makes it a pain to pass in the commit size so * we use the reserve size, which works fine. */ release_memory_and_update_areas((app_pc)p, reserve_size, true/*decommit*/, true/*update now*/, which); LOG(GLOBAL, LOG_HEAP, 2, "heap_munmap_post_stack: %d bytes @ "PFX"\n", reserve_size, p); STATS_SUB(mmap_capacity, reserve_size); STATS_SUB(mmap_reserved_only, reserve_size); } } /* use heap_mmap to allocate large chunks of executable memory * it's mainly used to allocate our fcache units */ void * heap_mmap(size_t size, which_vmm_t which) { return heap_mmap_reserve(size, size, which); } /* free memory-mapped storage */ void heap_munmap_ex(void *p, size_t size, bool guarded, which_vmm_t which) { #ifdef DEBUG_MEMORY /* can't set to HEAP_UNALLOCATED_BYTE since really not in our address * space anymore */ #endif release_guarded_real_memory((vm_addr_t)p, size, true/*update DR areas immediately*/, guarded, which); DOSTATS({ /* avoid problem w/ being called by cleanup_and_terminate after * dynamo_process_exit */ if (!dynamo_exited_log_and_stats) { LOG(GLOBAL, LOG_HEAP, 2, "heap_munmap: %d bytes @ "PFX"\n", size, p); STATS_SUB(mmap_capacity, size); STATS_SUB(mmap_reserved_only, size); } }); } /* free memory-mapped storage */ void heap_munmap(void *p, size_t size, which_vmm_t which) { heap_munmap_ex(p, size, true/*guarded*/, which); } /* use stack_alloc to build a stack -- it returns TOS * For -stack_guard_pages, also allocates an extra page * on the bottom and uses it to detect overflows when accessed. */ void * stack_alloc(size_t size, byte *min_addr) { void *p; /* we reserve and commit at once for now * FIXME case 2330: commit-on-demand could allow larger max sizes w/o * hurting us in the common case */ size_t alloc_size = size; if (!DYNAMO_OPTION(guard_pages) && DYNAMO_OPTION(stack_guard_pages)) alloc_size += PAGE_SIZE; p = get_guarded_real_memory(alloc_size, alloc_size, MEMPROT_READ|MEMPROT_WRITE, true, true, min_addr, VMM_STACK _IF_DEBUG("stack_alloc")); if (!DYNAMO_OPTION(guard_pages) && DYNAMO_OPTION(stack_guard_pages)) p = (byte *)p + PAGE_SIZE; #ifdef DEBUG_MEMORY memset(p, HEAP_ALLOCATED_BYTE, size); #endif if (DYNAMO_OPTION(stack_guard_pages)) { /* XXX: maybe we should make this option a count of how many pages, to catch * overflow that uses a large stride and skips over one page (UNIX-only * since Windows code always uses chkstk to trigger guard pages).
*/ /* We place a guard on UNIX signal stacks too: although we can't report * such overflows, we'd rather have a clear crash than memory corruption * from clobbering whatever memory is below the stack. */ /* mark the bottom page non-accessible to trap stack overflow */ byte *guard = (byte *)p - PAGE_SIZE; #ifdef WINDOWS /* Only a committed page can be a guard page. */ /* XXX: this doesn't work well with -vm_reserve where the kernel will * auto-expand the stack into adjacent allocations below the stack. */ heap_error_code_t error_code; if (vmm_heap_commit(guard, PAGE_SIZE, MEMPROT_READ|MEMPROT_WRITE, &error_code)) mark_page_as_guard(guard); #else /* For UNIX we just mark it as inaccessible. */ if (!DYNAMO_OPTION(guard_pages)) make_unwritable(guard, PAGE_SIZE); #endif } RSTATS_ADD_PEAK(stack_capacity, size); /* stack grows from high to low */ return (void *) ((ptr_uint_t)p + size); } /* free stack storage */ void stack_free(void *p, size_t size) { size_t alloc_size; if (size == 0) size = DYNAMORIO_STACK_SIZE; alloc_size = size; p = (void *) ((vm_addr_t)p - size); if (!DYNAMO_OPTION(guard_pages) && DYNAMO_OPTION(stack_guard_pages)) { alloc_size += PAGE_SIZE; p = (byte *)p - PAGE_SIZE; } release_guarded_real_memory((vm_addr_t)p, alloc_size, true/*update DR areas immediately*/, true, VMM_STACK); if (IF_DEBUG_ELSE(!dynamo_exited_log_and_stats, true)) RSTATS_SUB(stack_capacity, size); } /* only checks initstack and current dcontext * does not check any dstacks on the callback stack (win32) */ bool is_stack_overflow(dcontext_t *dcontext, byte *sp) { /* ASSUMPTION: size of stack is DYNAMORIO_STACK_SIZE = dynamo_options.stack_size * Currently sideline violates that for a thread stack, and we have separated * -signal_stack_size, but all dstacks and initstack should be this size. */ byte *bottom = dcontext->dstack - DYNAMORIO_STACK_SIZE; if (!DYNAMO_OPTION(stack_guard_pages) && !DYNAMO_OPTION(guard_pages)) return false; /* see if in bottom guard page of dstack */ if (sp >= bottom - PAGE_SIZE && sp < bottom) return true; /* now check the initstack */ bottom = initstack - DYNAMORIO_STACK_SIZE; if (sp >= bottom - PAGE_SIZE && sp < bottom) return true; return false; } byte * map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot, map_flags_t map_flags) { byte *view; /* memory alloc/dealloc and updating DR list must be atomic */ dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ view = os_map_file(f, size, offs, addr, prot, map_flags); if (view != NULL) { STATS_ADD_PEAK(file_map_capacity, *size); account_for_memory((void *)view, *size, prot, true/*add now*/, true/*image*/ _IF_DEBUG("map_file")); } dynamo_vm_areas_unlock(); return view; } bool unmap_file(byte *map, size_t size) { bool success; ASSERT(map != NULL && ALIGNED(map, PAGE_SIZE)); size = ALIGN_FORWARD(size, PAGE_SIZE); /* memory alloc/dealloc and updating DR list must be atomic */ dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ success = os_unmap_file(map, size); if (success) { /* Only update the all_memory_areas on success. * It should still be atomic to the outside observers. 
*/ update_dynamo_areas_on_release(map, map+size, true/*remove now*/); STATS_SUB(file_map_capacity, size); } dynamo_vm_areas_unlock(); return success; } /* We cannot incrementally keep dynamo vm area list up to date due to * circular dependencies bet vmareas and global heap (trust me, I've tried * to support it with reentrant routines and recursive locks, the hard part * is getting add_vm_area to be reentrant or to queue up adding areas, * I think this solution is much more elegant, plus it avoids race conditions * between DR memory allocation and the vmareas list by ensuring the list * is up to date at the exact time of each query). * Instead we on-demand walk the units. * Freed units can usually be removed incrementally, except when we * hold the heap_unit_lock when we run out of memory -- when we set * a flag telling the caller of this routine to remove all heap areas * from the vm list prior to calling us to add the real ones back in. * Re-adding everyone is the simplest policy, so we don't have to keep * track of who's been added. * The caller is assumed to hold the dynamo vm areas write lock. */ void heap_vmareas_synch_units() { heap_unit_t *u, *next; /* make sure to add guard page on each side, as well */ uint offs = (dynamo_options.guard_pages) ? (uint)PAGE_SIZE : 0; /* we again have circular dependence w/ vmareas if it happens to need a * new unit in the course of adding these areas, so we use a recursive lock! * furthermore, we need to own the global lock now, to avoid deadlock with * another thread who does global_alloc and then needs a new unit! * which means that the global_alloc lock must be recursive since vmareas * may need to global_alloc... */ /* if chance could own both locks, must grab both now * always grab global_alloc first, then we won't have deadlocks */ acquire_recursive_lock(&global_alloc_lock); acquire_recursive_lock(&heap_unit_lock); if (dynamo_areas_pending_remove) { dynamo_areas_pending_remove = false; remove_dynamo_heap_areas(); /* When heap units are removed from the dynamo_area, they should be * marked so. See case 4196. */ for (u = heapmgt->heap.units; u != NULL; u = u->next_global) u->in_vmarea_list = false; for (u = heapmgt->heap.dead; u != NULL; u = u->next_global) u->in_vmarea_list = false; } for (u = heapmgt->heap.units; u != NULL; u = next) { app_pc start = (app_pc)u - offs; /* support un-aligned heap reservation end: PR 415269 (though as * part of that PR we shouldn't have un-aligned anymore) */ app_pc end_align = (app_pc) ALIGN_FORWARD(UNIT_RESERVED_END(u), PAGE_SIZE); app_pc end = end_align + offs; /* u can be moved to dead list, so cache the next link; case 4196. */ next = u->next_global; /* case 3045: areas inside the vmheap reservation are not added to the list */ if (!u->in_vmarea_list && !vmm_is_reserved_unit(&heapmgt->vmheap, start, end - start)) { /* case 4196 if next is used by dynamo_vmareas then next * may become dead if vector is resized, then u should be * alive and u->next_global should be reset AFTER add */ bool next_may_die = /* keep breaking abstractions */ is_dynamo_area_buffer(UNIT_GET_START_PC(next)); /* dynamo_areas.buf vector may get resized and u can either * go to the dead unit list, or it can be released back to * the OS. We'll mark it as being in vmarea list to avoid * re-adding when going through dead one's, and we'll mark * _before_ the potential free. 
If dynamo_areas.buf is * freed back to the OS we'll have another iteration in * update_dynamo_vm_areas() until we get fully * synchronized, so we don't need to worry about the * inconsistency. */ u->in_vmarea_list = true; add_dynamo_heap_vm_area(start, end, true, false _IF_DEBUG("heap unit")); /* NOTE: Since we could mark_dynamo_vm_areas_stale instead of adding to * it, we may lose prot info about this unit. * FIXME: Currently, this is done only at one place, which allocates unit * as MEMPROT_READ | MEMPROT_WRITE. If other places are added, then this * needs to change. */ update_all_memory_areas((app_pc)u, end_align, MEMPROT_READ | MEMPROT_WRITE, DR_MEMTYPE_DATA); /* unit */ if (offs != 0) { /* guard pages */ update_all_memory_areas((app_pc)u - offs, (app_pc)u, MEMPROT_NONE, DR_MEMTYPE_DATA); update_all_memory_areas(end_align, end, MEMPROT_NONE, DR_MEMTYPE_DATA); } if (next_may_die) { STATS_INC(num_vmareas_resize_synch); /* if next was potentially on dead row, then current * should still be live and point to the next live */ next = u->next_global; } } } for (u = heapmgt->heap.dead; u != NULL; u = next) { app_pc start = (app_pc)u - offs; /* support un-aligned heap reservation end: PR 415269 (though as * part of that PR we shouldn't have un-aligned anymore) */ app_pc end_align = (app_pc) ALIGN_FORWARD(UNIT_RESERVED_END(u), PAGE_SIZE); app_pc end = end_align + offs; /* u can be moved to live list, so cache the next link; case 4196. */ next = u->next_global; /* case 3045: areas inside the vmheap reservation are not added to the list */ if (!u->in_vmarea_list && !vmm_is_reserved_unit(&heapmgt->vmheap, start, end - start)) { u->in_vmarea_list = true; add_dynamo_heap_vm_area(start, end, true, false _IF_DEBUG("dead heap unit")); update_all_memory_areas((app_pc)u, end_align, MEMPROT_READ | MEMPROT_WRITE, DR_MEMTYPE_DATA); /* unit */ if (offs != 0) { /* guard pages */ update_all_memory_areas(start, (app_pc)u, MEMPROT_NONE, DR_MEMTYPE_DATA); update_all_memory_areas(end_align, end, MEMPROT_NONE, DR_MEMTYPE_DATA); } /* case 4196 if next was put back on live list for * dynamo_areas.buf vector, then next will no longer be a * valid iterator over dead list */ /* keep breaking abstractions */ if (is_dynamo_area_buffer(UNIT_GET_START_PC(next))) { STATS_INC(num_vmareas_resize_synch); ASSERT_NOT_TESTED(); next = u->next_global; } } } release_recursive_lock(&heap_unit_lock); release_recursive_lock(&global_alloc_lock); } /* shared between global and global_unprotected */ static void * common_global_heap_alloc(thread_units_t *tu, size_t size HEAPACCT(which_heap_t which)) { void *p; acquire_recursive_lock(&global_alloc_lock); p = common_heap_alloc(tu, size HEAPACCT(which)); release_recursive_lock(&global_alloc_lock); if (p == NULL) { /* circular dependence solution: we need to hold DR lock before * global alloc lock -- so we back out, grab it, and retry */ dynamo_vm_areas_lock(); acquire_recursive_lock(&global_alloc_lock); p = common_heap_alloc(tu, size HEAPACCT(which)); release_recursive_lock(&global_alloc_lock); dynamo_vm_areas_unlock(); } ASSERT(p != NULL); return p; } /* shared between global and global_unprotected */ static void common_global_heap_free(thread_units_t *tu, void *p, size_t size HEAPACCT(which_heap_t which)) { bool ok; if (p == NULL) { ASSERT(false && "attempt to free NULL"); return; } acquire_recursive_lock(&global_alloc_lock); ok = common_heap_free(tu, p, size HEAPACCT(which)); release_recursive_lock(&global_alloc_lock); if (!ok) { /* circular dependence solution: we need to hold DR lock 
before * global alloc lock -- so we back out, grab it, and retry */ dynamo_vm_areas_lock(); acquire_recursive_lock(&global_alloc_lock); ok = common_heap_free(tu, p, size HEAPACCT(which)); release_recursive_lock(&global_alloc_lock); dynamo_vm_areas_unlock(); } ASSERT(ok); } /* these functions use the global heap instead of a thread's heap: */ void * global_heap_alloc(size_t size HEAPACCT(which_heap_t which)) { void *p; #ifdef CLIENT_INTERFACE /* We pay the cost of this branch to support using DR's decode routines from the * regular DR library and not just drdecode, to support libraries that would use * drdecode but that also have to work with full DR (i#2499). */ if (heapmgt == &temp_heapmgt && /* We prevent recursion by checking for a field that heap_init writes. */ !heapmgt->global_heap_writable) { standalone_init(); } #endif p = common_global_heap_alloc(&heapmgt->global_units, size HEAPACCT(which)); ASSERT(p != NULL); LOG(GLOBAL, LOG_HEAP, 6, "\nglobal alloc: "PFX" (%d bytes)\n", p, size); return p; } void global_heap_free(void *p, size_t size HEAPACCT(which_heap_t which)) { common_global_heap_free(&heapmgt->global_units, p, size HEAPACCT(which)); LOG(GLOBAL, LOG_HEAP, 6, "\nglobal free: "PFX" (%d bytes)\n", p, size); } /* reallocate area: allocates new_num elements of element_size; if ptr is NULL, acts like global_heap_alloc; copies old_num elements of the given size into the new area */ /* FIXME: do a heap_realloc and a special_heap_realloc too */ void * global_heap_realloc(void *ptr, size_t old_num, size_t new_num, size_t element_size HEAPACCT(which_heap_t which)) { void *new_area = global_heap_alloc(new_num * element_size HEAPACCT(which)); if (ptr) { memcpy(new_area, ptr, (old_num < new_num ? old_num : new_num) * element_size); global_heap_free(ptr, old_num * element_size HEAPACCT(which)); } return new_area; } /* size does not include guard pages (if any) and is reserved, but only * DYNAMO_OPTION(heap_commit_increment) is committed up front */ static heap_unit_t * heap_create_unit(thread_units_t *tu, size_t size, bool must_be_new) { heap_unit_t *u = NULL, *dead = NULL, *prev_dead = NULL; bool new_unit = false; /* we do not restrict size to unit max as we have to make larger-than-max * units for oversized requests */ /* modifying heap list and DR areas must be atomic, and must grab * DR area lock before heap_unit_lock */ ASSERT(safe_to_allocate_or_free_heap_units()); dynamo_vm_areas_lock(); /* take from dead list if possible */ acquire_recursive_lock(&heap_unit_lock); /* FIXME: need to unprotect units that we're going to perform * {next,prev}_global assignments to -- but need to know whether * to re-protect -- do all at once, or each we need? add a writable * flag to heap_unit_t?
*/ if (!must_be_new) { for (dead = heapmgt->heap.dead; dead != NULL && UNIT_RESERVED_SIZE(dead) < size; prev_dead = dead, dead = dead->next_global) ; } if (dead != NULL) { if (prev_dead == NULL) heapmgt->heap.dead = dead->next_global; else prev_dead->next_global = dead->next_global; u = dead; heapmgt->heap.num_dead--; RSTATS_DEC(heap_num_free); release_recursive_lock(&heap_unit_lock); LOG(GLOBAL, LOG_HEAP, 2, "Re-using dead heap unit: "PFX"-"PFX" %d KB (need %d KB)\n", u, ((byte*)u)+size, UNIT_RESERVED_SIZE(u)/1024, size/1024); } else { size_t commit_size = DYNAMO_OPTION(heap_commit_increment); release_recursive_lock(&heap_unit_lock); /* do not hold while asking for memory */ /* create new unit */ ASSERT(commit_size <= size); u = (heap_unit_t *) get_guarded_real_memory(size, commit_size, MEMPROT_READ|MEMPROT_WRITE, false, true, NULL, VMM_HEAP _IF_DEBUG("")); new_unit = true; /* FIXME: handle low memory conditions by freeing units, + fcache units? */ ASSERT(u); LOG(GLOBAL, LOG_HEAP, 2, "New heap unit: "PFX"-"PFX"\n", u, ((byte*)u)+size); /* u is kept at top of unit itself, so displace start pc */ u->start_pc = (heap_pc) (((ptr_uint_t)u) + sizeof(heap_unit_t)); u->end_pc = ((heap_pc)u) + commit_size; u->reserved_end_pc = ((heap_pc)u) + size; u->in_vmarea_list = false; RSTATS_ADD_PEAK(heap_capacity, commit_size); /* FIXME: heap sizes are not always page-aligned so stats will be off */ STATS_ADD_PEAK(heap_reserved_only, (u->reserved_end_pc - u->end_pc)); } RSTATS_ADD_PEAK(heap_num_live, 1); u->cur_pc = u->start_pc; u->next_local = NULL; DODEBUG({ u->id = tu->num_units; tu->num_units++; }); acquire_recursive_lock(&heap_unit_lock); u->next_global = heapmgt->heap.units; if (heapmgt->heap.units != NULL) heapmgt->heap.units->prev_global = u; u->prev_global = NULL; heapmgt->heap.units = u; release_recursive_lock(&heap_unit_lock); dynamo_vm_areas_unlock(); #ifdef DEBUG_MEMORY DOCHECK(CHKLVL_MEMFILL, memset(u->start_pc, HEAP_UNALLOCATED_BYTE, u->end_pc - u->start_pc);); #endif return u; } /* dcontext only used to determine whether a global unit or not */ static void heap_free_unit(heap_unit_t *unit, dcontext_t *dcontext) { heap_unit_t *u, *prev_u; #ifdef DEBUG_MEMORY /* Unit should already be set to all HEAP_UNALLOCATED by the individual * frees and the free list cleanup, verify. */ /* NOTE - this assert fires if any memory in the unit wasn't freed. This * would include memory allocated ACCT_TOMBSTONE (which we don't currently * use). Using ACCT_TOMBSTONE is dangerous since we will still free the * unit here (say at proc or thread exit) even if there are ACCT_TOMBSTONE * allocations in it. */ /* Note, this memset check is done only on the special heap unit header, * not on the unit itself - FIXME: case 10434. Maybe we should embed the * special heap unit header in the first special heap unit itself. */ /* The hotp_only leak relaxation below is for case 9588 & 9593. */ DOCHECK(CHKLVL_MEMFILL, { CLIENT_ASSERT(IF_HOTP(hotp_only_contains_leaked_trampoline (unit->start_pc, unit->end_pc - unit->start_pc) ||) /* i#157: private loader => system lib allocs come here => * they don't always clean up. we have to relax here, but our * threadunits_exit checks should find all leaks anyway. */ heapmgt->global_units.acct.cur_usage[ACCT_LIBDUP] > 0 || is_region_memset_to_char(unit->start_pc, unit->end_pc - unit->start_pc, HEAP_UNALLOCATED_BYTE) /* don't assert when client does premature exit as it's * hard for Extension libs, etc. 
to clean up in such situations */ IF_CLIENT_INTERFACE(|| client_requested_exit), "memory leak detected"); }); #endif /* modifying heap list and DR areas must be atomic, and must grab * DR area lock before heap_unit_lock */ ASSERT(safe_to_allocate_or_free_heap_units()); dynamo_vm_areas_lock(); acquire_recursive_lock(&heap_unit_lock); /* FIXME: need to unprotect units that we're going to perform * {next,prev}_global assignments too -- but need to know whether * to re-protect -- do all at once, or each we need? add a writable * flag to heap_unit_t? */ /* remove from live list */ if (unit->prev_global != NULL) { unit->prev_global->next_global = unit->next_global; } else heapmgt->heap.units = unit->next_global; if (unit->next_global != NULL) { unit->next_global->prev_global = unit->prev_global; } /* prev_global is not used in the dead list */ unit->prev_global = NULL; RSTATS_DEC(heap_num_live); /* heuristic: don't keep around more dead units than max(5, 1/4 num threads) * FIXME: share the policy with the fcache dead unit policy * also, don't put special larger-than-max units on free list -- though * we do now have support for doing so (after PR 415269) */ if (UNITALLOC(unit) <= HEAP_UNIT_MAX_SIZE && (heapmgt->heap.num_dead < 5 || heapmgt->heap.num_dead * 4U <= (uint) get_num_threads())) { /* Keep dead list sorted small-to-large to avoid grabbing large * when can take small and then needing to allocate when only * have small left. Helps out with lots of small threads. */ for (u = heapmgt->heap.dead, prev_u = NULL; u != NULL && UNIT_RESERVED_SIZE(u) < UNIT_RESERVED_SIZE(unit); prev_u = u, u = u->next_global) ; if (prev_u == NULL) { unit->next_global = heapmgt->heap.dead; heapmgt->heap.dead = unit; } else { unit->next_global = u; prev_u->next_global = unit; } heapmgt->heap.num_dead++; release_recursive_lock(&heap_unit_lock); RSTATS_ADD_PEAK(heap_num_free, 1); } else { /* don't need to hold this while freeing since still hold DR areas lock */ release_recursive_lock(&heap_unit_lock); LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing excess dead unit "PFX"-"PFX" [-"PFX"]\n", unit, UNIT_COMMIT_END(unit), UNIT_RESERVED_END(unit)); really_free_unit(unit); } /* FIXME: shrink lock-held path if we see contention */ dynamo_vm_areas_unlock(); } #ifdef DEBUG_MEMORY static heap_unit_t * find_heap_unit(thread_units_t *tu, heap_pc p, size_t size) { /* FIXME (case 6198): this is a perf hit in debug builds. But, we can't use * a new vmvector b/c of circular dependences. Proposal: use custom data * field of vm_area_t in dynamo_areas list for heap entries to store a pointer * to the heap_unit_t struct, and add a backpointer to the owning thread_units_t * in heap_unit_t. Then have to make sure it's ok lock-wise to query the * dynamo_areas in the middle of an alloc or a free. It should be but for * global alloc and free we will have to grab the dynamo_areas lock up front * every time instead of the very rare times now when we need a new unit. 
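 * (As the code stands, the lookup below is just a linear walk of this thread's
 * local unit list, checking whether [p, p+size) falls inside a unit's
 * committed range.)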
*/ heap_unit_t *unit; ASSERT(!POINTER_OVERFLOW_ON_ADD(p, size)); /* should not overflow */ for (unit = tu->top_unit; unit != NULL && (p < unit->start_pc || p+size > unit->end_pc); unit = unit->next_local); return unit; } #endif static void threadunits_init(dcontext_t *dcontext, thread_units_t *tu, size_t size) { int i; DODEBUG({ tu->num_units = 0; }); tu->top_unit = heap_create_unit(tu, size - GUARD_PAGE_ADJUSTMENT, false/*can reuse*/); tu->cur_unit = tu->top_unit; tu->dcontext = dcontext; tu->writable = true; #ifdef HEAP_ACCOUNTING memset(&tu->acct, 0, sizeof(tu->acct)); #endif for (i=0; i<BLOCK_TYPES; i++) tu->free_list[i] = NULL; } #ifdef HEAP_ACCOUNTING # define MAX_5_DIGIT 99999 static void print_tu_heap_statistics(thread_units_t *tu, file_t logfile, const char *prefix) { int i; size_t total = 0, cur = 0; LOG(logfile, LOG_HEAP|LOG_STATS, 1, "%s heap breakdown:\n", prefix); for (i = 0; i < ACCT_LAST; i++) { /* print out cur since this is done periodically, not just at end */ LOG(logfile, LOG_HEAP|LOG_STATS, 1, "%12s: cur=%5"SZFC"K, max=%5"SZFC"K, #=%7d, 1=", whichheap_name[i], tu->acct.cur_usage[i]/1024, tu->acct.max_usage[i]/1024, tu->acct.num_alloc[i]); if (tu->acct.max_single[i] <= MAX_5_DIGIT) LOG(logfile, LOG_HEAP|LOG_STATS, 1, "%5"SZFC, tu->acct.max_single[i]); else { LOG(logfile, LOG_HEAP|LOG_STATS, 1, "%4"SZFC"K", tu->acct.max_single[i]/1024); } LOG(logfile, LOG_HEAP|LOG_STATS, 1, ", new=%5"SZFC"K, re=%5"SZFC"K\n", tu->acct.alloc_new[i]/1024, tu->acct.alloc_reuse[i]/1024); total += tu->acct.max_usage[i]; cur += tu->acct.cur_usage[i]; } LOG(logfile, LOG_HEAP|LOG_STATS, 1, "Total cur usage: %6"SZFC" KB\n", cur/1024); LOG(logfile, LOG_HEAP|LOG_STATS, 1, "Total max (not nec. all used simult.): %6"SZFC" KB\n", total/1024); } void print_heap_statistics() { /* just do cur thread, don't try to walk all threads */ dcontext_t *dcontext = get_thread_private_dcontext(); DOSTATS({ uint i; LOG(GLOBAL, LOG_STATS, 1, "Heap bucket usage counts and wasted memory:\n"); for (i=0; i<BLOCK_TYPES; i++) { LOG(GLOBAL, LOG_STATS|LOG_HEAP, 1, "%2d %3d count=%9u peak_count=%9u peak_wasted=%9u peak_align=%9u\n", i, BLOCK_SIZES[i], block_total_count[i], block_peak_count[i], block_peak_wasted[i], block_peak_align_pad[i]); } }); if (dcontext != NULL) { thread_heap_t *th = (thread_heap_t *) dcontext->heap_field; if (th != NULL) { /* may not be initialized yet */ print_tu_heap_statistics(th->local_heap, THREAD, "Thread"); if (SEPARATE_NONPERSISTENT_HEAP()) { ASSERT(th->nonpersistent_heap != NULL); print_tu_heap_statistics(th->nonpersistent_heap, THREAD, "Thread non-persistent"); } } } if (SEPARATE_NONPERSISTENT_HEAP()) { print_tu_heap_statistics(&heapmgt->global_nonpersistent_units, GLOBAL, "Non-persistent global units"); } print_tu_heap_statistics(&global_racy_units, GLOBAL, "Racy Up-to-date Process"); print_tu_heap_statistics(&heapmgt->global_units, GLOBAL, "Updated-at-end Process (max is total of maxes)"); } static void add_heapacct_to_global_stats(heap_acct_t *acct) { /* add this thread's stats to the accurate (non-racy) global stats * FIXME: this gives a nice in-one-place total, but loses the * global-heap-only stats -- perhaps should add a total_units stats * to capture total and leave global alone here? 
*/ uint i; acquire_recursive_lock(&global_alloc_lock); for (i = 0; i < ACCT_LAST; i++) { heapmgt->global_units.acct.alloc_reuse[i] += acct->alloc_reuse[i]; heapmgt->global_units.acct.alloc_new[i] += acct->alloc_new[i]; heapmgt->global_units.acct.cur_usage[i] += acct->cur_usage[i]; /* FIXME: these maxes are now not simultaneous max but sum-of-maxes */ heapmgt->global_units.acct.max_usage[i] += acct->max_usage[i]; heapmgt->global_units.acct.max_single[i] += acct->max_single[i]; heapmgt->global_units.acct.num_alloc[i] += acct->num_alloc[i]; } release_recursive_lock(&global_alloc_lock); } #endif /* dcontext only used for debugging */ static void threadunits_exit(thread_units_t *tu, dcontext_t *dcontext) { heap_unit_t *u, *next_u; #ifdef DEBUG size_t total_heap_used = 0; # ifdef HEAP_ACCOUNTING int j; # endif #endif #ifdef DEBUG_MEMORY /* verify and clear (for later asserts) the free list */ uint i; for (i = 0; i < BLOCK_TYPES; i++) { heap_pc p, next_p; for (p = tu->free_list[i]; p != NULL; p = next_p) { next_p = *(heap_pc *)p; /* clear the pointer to the next free for later asserts */ *(heap_pc *)p = (heap_pc) HEAP_UNALLOCATED_PTR_UINT; DOCHECK(CHKLVL_MEMFILL, { if (i < BLOCK_TYPES-1) { CLIENT_ASSERT(is_region_memset_to_char(p, BLOCK_SIZES[i], HEAP_UNALLOCATED_BYTE), "memory corruption detected"); } else { /* variable sized blocks */ CLIENT_ASSERT(is_region_memset_to_char(p, VARIABLE_SIZE(p), HEAP_UNALLOCATED_BYTE), "memory corruption detected"); /* clear the header for later asserts */ MEMSET_HEADER(p, HEAP_UNALLOCATED); } }); } tu->free_list[i] = NULL; } #endif u = tu->top_unit; while (u != NULL) { DOLOG(1, LOG_HEAP|LOG_STATS, { size_t num_used = u->cur_pc - u->start_pc; total_heap_used += num_used; LOG(THREAD, LOG_HEAP|LOG_STATS, 1, "Heap unit %d @"PFX"-"PFX" [-"PFX"] ("SZFMT" [/"SZFMT"] KB): used " SZFMT" bytes\n", u->id, u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u), (UNIT_COMMIT_SIZE(u))/1024, (UNIT_RESERVED_SIZE(u))/1024, num_used); }); next_u = u->next_local; heap_free_unit(u, dcontext); u = next_u; } LOG(THREAD, LOG_HEAP|LOG_STATS, 1, "\tTotal heap used: "SZFMT" KB\n", total_heap_used/1024); #if defined(DEBUG) && defined(HEAP_ACCOUNTING) /* FIXME: separate scopes: smaller functions for DEBUG_MEMORY x HEAP_ACCOUNTING */ for (j = 0; j < ACCT_LAST; j++) { size_t usage = tu->acct.cur_usage[j]; if (usage > 0) { LOG(THREAD, LOG_HEAP|LOG_STATS, 1, "WARNING: %s "SZFMT" bytes not freed!\n", whichheap_name[j], tu->acct.cur_usage[j]); # ifdef HOT_PATCHING_INTERFACE /* known leaks for case 9593 */ if (DYNAMO_OPTION(hotp_only) && ((j == ACCT_SPECIAL && usage == (size_t)hotp_only_tramp_bytes_leaked) || /* +4 is for the allocation's header; internal to heap mgt. */ (j == ACCT_MEM_MGT && usage == (size_t)(get_special_heap_header_size() + 4) && hotp_only_tramp_bytes_leaked > 0))) continue; # endif if (j != ACCT_TOMBSTONE /* known leak */ && /* i#157: private loader => system lib allocs come here => * they don't always clean up */ j != ACCT_LIBDUP && INTERNAL_OPTION(heap_accounting_assert)) { SYSLOG_INTERNAL_ERROR("memory leak: %s "SZFMT" bytes not freed", whichheap_name[j], tu->acct.cur_usage[j]); /* Don't assert when client does premature exit as it's * hard for Extension libs, etc. to clean up in such situations: */ CLIENT_ASSERT(IF_CLIENT_INTERFACE(client_requested_exit ||) false, "memory leak detected"); } } } if (tu != &heapmgt->global_units) add_heapacct_to_global_stats(&tu->acct); DOLOG(1, LOG_HEAP|LOG_STATS, { print_tu_heap_statistics(tu, THREAD, dcontext == GLOBAL_DCONTEXT ? 
"Process" : "Thread"); }); #endif /* defined(DEBUG) && defined(HEAP_ACCOUNTING) */ } void heap_thread_reset_init(dcontext_t *dcontext) { thread_heap_t *th = (thread_heap_t *) dcontext->heap_field; if (SEPARATE_NONPERSISTENT_HEAP()) { ASSERT(th->nonpersistent_heap != NULL); threadunits_init(dcontext, th->nonpersistent_heap, DYNAMO_OPTION(initial_heap_nonpers_size)); } } void heap_thread_init(dcontext_t *dcontext) { thread_heap_t *th = (thread_heap_t *) global_heap_alloc(sizeof(thread_heap_t) HEAPACCT(ACCT_MEM_MGT)); dcontext->heap_field = (void *) th; th->local_heap = (thread_units_t *) global_heap_alloc(sizeof(thread_units_t) HEAPACCT(ACCT_MEM_MGT)); threadunits_init(dcontext, th->local_heap, HEAP_UNIT_MIN_SIZE); if (SEPARATE_NONPERSISTENT_HEAP()) { th->nonpersistent_heap = (thread_units_t *) global_heap_alloc(sizeof(thread_units_t) HEAPACCT(ACCT_MEM_MGT)); } else th->nonpersistent_heap = NULL; heap_thread_reset_init(dcontext); } void heap_thread_reset_free(dcontext_t *dcontext) { thread_heap_t *th = (thread_heap_t *) dcontext->heap_field; if (SEPARATE_NONPERSISTENT_HEAP()) { ASSERT(th->nonpersistent_heap != NULL); /* FIXME: free directly rather than sending to dead list for * heap_reset_free() to free! * FIXME: for reset, don't free last unit so don't have to * recreate in reset_init() */ threadunits_exit(th->nonpersistent_heap, dcontext); } } void heap_thread_exit(dcontext_t *dcontext) { thread_heap_t *th = (thread_heap_t *) dcontext->heap_field; threadunits_exit(th->local_heap, dcontext); heap_thread_reset_free(dcontext); global_heap_free(th->local_heap, sizeof(thread_units_t) HEAPACCT(ACCT_MEM_MGT)); if (SEPARATE_NONPERSISTENT_HEAP()) { ASSERT(th->nonpersistent_heap != NULL); global_heap_free(th->nonpersistent_heap, sizeof(thread_units_t) HEAPACCT(ACCT_MEM_MGT)); } global_heap_free(th, sizeof(thread_heap_t) HEAPACCT(ACCT_MEM_MGT)); } #if defined(DEBUG_MEMORY) && defined(DEBUG) void print_free_list(thread_units_t *tu, int i) { void *p; int len = 0; dcontext_t *dcontext = tu->dcontext; LOG(THREAD, LOG_HEAP, 1, "Free list for size %d (== %d bytes):\n", i, BLOCK_SIZES[i]); p = (void *) tu->free_list[i]; while (p != NULL) { LOG(THREAD, LOG_HEAP, 1, "\tp = "PFX"\n", p); len++; p = *((char **)p); } LOG(THREAD, LOG_HEAP, 1, "Total length is %d\n", len); } #endif /* Used for both heap_unit_t and special_heap_unit_t. * Returns the amount it increased the unit by, so caller should increment * end_pc. * Both end_pc and reserved_end_pc are assumed to be open-ended! 
*/ static size_t common_heap_extend_commitment(heap_pc cur_pc, heap_pc end_pc, heap_pc reserved_end_pc, size_t size_need, uint prot) { if (end_pc < reserved_end_pc && !POINTER_OVERFLOW_ON_ADD(cur_pc, size_need)) { /* extend commitment if have more reserved */ size_t commit_size = DYNAMO_OPTION(heap_commit_increment); /* simpler to just not support taking very last page in address space */ if (POINTER_OVERFLOW_ON_ADD(end_pc, commit_size)) return 0; if (cur_pc + size_need > end_pc + commit_size) { commit_size = ALIGN_FORWARD(cur_pc + size_need - (ptr_uint_t)end_pc, PAGE_SIZE); } if (end_pc + commit_size > reserved_end_pc || POINTER_OVERFLOW_ON_ADD(end_pc, commit_size)/*overflow seen in PR 518644 */) { /* commit anyway before caller moves on to new unit so that * we keep an invariant that all units but the current one * are fully committed, so our algorithm for looking at the end * of prior units holds */ commit_size = reserved_end_pc - end_pc; } ASSERT(!POINTER_OVERFLOW_ON_ADD(end_pc, commit_size) && end_pc + commit_size <= reserved_end_pc); extend_commitment(end_pc, commit_size, prot, false /* extension */, VMM_HEAP); #ifdef DEBUG_MEMORY memset(end_pc, HEAP_UNALLOCATED_BYTE, commit_size); #endif /* caller should do end_pc += commit_size */ RSTATS_ADD_PEAK(heap_capacity, commit_size); /* FIXME: heap sizes are not always page-aligned so stats will be off */ STATS_SUB(heap_reserved_only, commit_size); ASSERT(end_pc <= reserved_end_pc); return commit_size; } else return 0; } static void heap_unit_extend_commitment(heap_unit_t *u, size_t size_need, uint prot) { u->end_pc += common_heap_extend_commitment(u->cur_pc, u->end_pc, u->reserved_end_pc, size_need, prot); } /* allocate storage on the DR heap * returns NULL iff caller needs to grab dynamo_vm_areas_lock() and retry */ static void* common_heap_alloc(thread_units_t *tu, size_t size HEAPACCT(which_heap_t which)) { heap_unit_t *u = tu->cur_unit; heap_pc p = NULL; int bucket = 0; size_t alloc_size, aligned_size; #if defined(DEBUG_MEMORY) && defined(DEBUG) size_t check_alloc_size; dcontext_t *dcontext = tu->dcontext; /* DrMem i#999: private libs can be heap-intensive and our checks here * can have a prohibitive perf cost! */ uint chklvl = CHKLVL_MEMFILL + (IF_HEAPACCT_ELSE(which == ACCT_LIBDUP ? 1 : 0, 0)); ASSERT_CURIOSITY(which != ACCT_TOMBSTONE && "Do you really need to use ACCT_TOMBSTONE? (potentially dangerous)"); #endif ASSERT(size > 0); /* we don't want to pay check cost in release */ ASSERT(size < MAX_VALID_HEAP_ALLOCATION && "potential integer overflow"); /* we prefer to crash than having heap overflows */ if (size > MAX_VALID_HEAP_ALLOCATION) { /* This routine can currently accommodate without integer * overflows sizes up to UINT_MAX - sizeof(heap_unit_t), but * INT_MAX should be more than enough. * * Caller will likely crash, but that is better than a heap * overflow, where a crash would be the best we can hope for. */ return NULL; } /* NOTE - all of our buckets are sized to preserve alignment, so this can't change * which bucket is used. 
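 * For example (illustrative only; the real BLOCK_SIZES table is defined
 * earlier in this file): a 52-byte request is first rounded up to a multiple
 * of HEAP_ALIGNMENT, then the smallest bucket whose BLOCK_SIZES entry covers
 * that aligned size is chosen; only a request larger than every fixed-size
 * bucket lands in the final variable-size bucket and pays the extra
 * HEADER_SIZE.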
*/ aligned_size = ALIGN_FORWARD(size, HEAP_ALIGNMENT); while (aligned_size > BLOCK_SIZES[bucket]) bucket++; if (bucket == BLOCK_TYPES-1) alloc_size = aligned_size + HEADER_SIZE; else alloc_size = BLOCK_SIZES[bucket]; ASSERT(size <= alloc_size); #ifdef DEBUG_MEMORY /* case 10292: use original calculated size for later check */ check_alloc_size = alloc_size; #endif if (alloc_size > MAXROOM) { /* too big for normal unit, build a special unit just for this allocation */ /* don't need alloc_size or even aligned_size, just need size */ heap_unit_t *new_unit, *prev; /* we page-align to avoid wasting space if unit gets reused later */ size_t unit_size = ALIGN_FORWARD(size + sizeof(heap_unit_t), PAGE_SIZE); ASSERT(size < unit_size && "overflow"); if (!safe_to_allocate_or_free_heap_units()) { /* circular dependence solution: we need to hold DR lock before * global alloc lock -- so we back out, grab it, and then come back */ return NULL; } /* Can reuse a dead unit if large enough: we'll just not use any * excess size until this is freed and put back on dead list. * (Currently we don't put oversized units on dead list though.) */ new_unit = heap_create_unit(tu, unit_size, false/*can be reused*/); /* we want to commit the whole alloc right away */ heap_unit_extend_commitment(new_unit, size, MEMPROT_READ|MEMPROT_WRITE); prev = tu->top_unit; alloc_size = size; /* should we include page-alignment? */ /* insert prior to cur unit (new unit will be full, so keep cur unit * where it is) */ while (prev != u && prev->next_local != u) { ASSERT(prev != NULL && prev->next_local != NULL); prev = prev->next_local; } if (prev == u) { ASSERT(prev == tu->top_unit); tu->top_unit = new_unit; } else prev->next_local = new_unit; new_unit->next_local = u; #ifdef DEBUG_MEMORY LOG(THREAD, LOG_HEAP, 3, "\tCreating new oversized heap unit %d (%d [/%d] KB)\n", new_unit->id, UNIT_COMMIT_SIZE(new_unit)/1024, UNIT_RESERVED_SIZE(new_unit)/1024); #endif p = new_unit->start_pc; new_unit->cur_pc += size; ACCOUNT_FOR_ALLOC(alloc_new, tu, which, size, size); /* use alloc_size? 
*/ goto done_allocating; } if (tu->free_list[bucket] != NULL) { if (bucket == BLOCK_TYPES-1) { /* variable-length blocks, try to find one big enough */ size_t sz; heap_pc next = tu->free_list[bucket]; heap_pc prev; do { prev = p; p = next; /* aligned_size is written right _before_ next pointer */ sz = VARIABLE_SIZE(next); next = *((heap_pc*)p); } while (aligned_size > sz && next != NULL); if (aligned_size <= sz) { ASSERT(ALIGNED(next, HEAP_ALIGNMENT)); /* found one, extract from free list */ if (p == tu->free_list[bucket]) tu->free_list[bucket] = next; else *((heap_pc *)prev) = next; #ifdef DEBUG_MEMORY LOG(THREAD, LOG_HEAP, 2, "Variable-size block: allocating "PFX" (%d bytes [%d aligned] in " "%d block)\n", p, size, aligned_size, sz); /* ensure memory we got from the free list is in a heap unit */ DOCHECK(CHKLVL_DEFAULT, { /* expensive check */ ASSERT(find_heap_unit(tu, p, sz) != NULL); }); #endif ASSERT(ALIGNED(sz, HEAP_ALIGNMENT)); alloc_size = sz + HEADER_SIZE; ACCOUNT_FOR_ALLOC(alloc_reuse, tu, which, alloc_size, aligned_size); } else { /* no free block big enough available */ p = NULL; } } else { /* fixed-length free block available */ p = tu->free_list[bucket]; tu->free_list[bucket] = *((heap_pc *)p); ASSERT(ALIGNED(tu->free_list[bucket], HEAP_ALIGNMENT)); #ifdef DEBUG_MEMORY /* ensure memory we got from the free list is in a heap unit */ DOCHECK(CHKLVL_DEFAULT, { /* expensive check */ ASSERT(find_heap_unit(tu, p, alloc_size) != NULL); }); #endif ACCOUNT_FOR_ALLOC(alloc_reuse, tu, which, alloc_size, aligned_size); } } if (p == NULL) { /* no free blocks, grab a new one */ /* FIXME: if no more heap but lots of larger blocks available, * should use the larger blocks instead of failing! */ /* see if room for allocation size */ ASSERT(ALIGNED(u->cur_pc, HEAP_ALIGNMENT)); ASSERT(ALIGNED(alloc_size, HEAP_ALIGNMENT)); if (u->cur_pc + alloc_size > u->end_pc || POINTER_OVERFLOW_ON_ADD(u->cur_pc, alloc_size)/*xref PR 495961*/) { /* We either have to extend the current unit or, failing that, * allocate a new unit. */ if (!safe_to_allocate_or_free_heap_units()) { /* circular dependence solution: we need to hold dynamo areas * lock before global alloc lock in case we end up adding a new * unit or we hit oom (which may free units) while extending * the commitment -- so we back out, grab it, and then come * back. */ return NULL; } /* try to extend if possible */ heap_unit_extend_commitment(u, alloc_size, MEMPROT_READ|MEMPROT_WRITE); /* check again after extending commit */ if (u->cur_pc + alloc_size > u->end_pc || POINTER_OVERFLOW_ON_ADD(u->cur_pc, alloc_size)/*xref PR 495961*/) { /* no room, look for room at end of previous units * FIXME: instead should put end of unit space on free list!
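 * (The fallback below first walks earlier normal-sized units looking for
 * enough tail room, and only if none has space does it create a new unit of
 * roughly double the previous unit's size, capped at HEAP_UNIT_MAX_SIZE.)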
*/ heap_unit_t *prev = tu->top_unit; while (1) { /* make sure we do NOT steal space from oversized units, * who though they may have extra space from alignment * may be freed wholesale when primary alloc is freed */ if (UNITALLOC(prev) <= HEAP_UNIT_MAX_SIZE && !POINTER_OVERFLOW_ON_ADD(prev->cur_pc, alloc_size) && prev->cur_pc + alloc_size <= prev->end_pc) { tu->cur_unit = prev; u = prev; break; } if (prev->next_local == NULL) { /* no room anywhere, so create new unit * double size of old unit (until hit max size) */ heap_unit_t *new_unit; size_t unit_size; unit_size = UNITALLOC(u) * 2; while (unit_size < alloc_size + UNITOVERHEAD) unit_size *= 2; if (unit_size > HEAP_UNIT_MAX_SIZE) unit_size = HEAP_UNIT_MAX_SIZE; /* size for heap_create_unit doesn't include any guard * pages */ ASSERT(unit_size > UNITOVERHEAD); ASSERT(unit_size > (size_t) GUARD_PAGE_ADJUSTMENT); unit_size -= GUARD_PAGE_ADJUSTMENT; new_unit = heap_create_unit(tu, unit_size, false/*can reuse*/); prev->next_local = new_unit; #ifdef DEBUG_MEMORY LOG(THREAD, LOG_HEAP, 2, "\tCreating new heap unit %d (%d [/%d] KB)\n", new_unit->id, UNIT_COMMIT_SIZE(new_unit)/1024, UNIT_RESERVED_SIZE(new_unit)/1024); #endif /* use new unit for all future non-free-list allocations * we'll try to use the free room at the end of the old unit(s) * only when we next run out of room */ tu->cur_unit = new_unit; u = new_unit; /* may need to extend now if alloc_size is large */ heap_unit_extend_commitment(u, alloc_size, MEMPROT_READ|MEMPROT_WRITE); /* otherwise would have been bigger than MAXROOM */ ASSERT(alloc_size <= (ptr_uint_t) (u->end_pc - u->cur_pc)); break; } prev = prev->next_local; } } } p = u->cur_pc; if (bucket == BLOCK_TYPES-1) { /* we keep HEADER_SIZE bytes to store the size */ p += HEADER_SIZE; VARIABLE_SIZE(p) = aligned_size; } u->cur_pc += alloc_size; ACCOUNT_FOR_ALLOC(alloc_new, tu, which, alloc_size, aligned_size); } DOSTATS({ /* do this before done_allocating: want to ignore special-unit allocs */ ATOMIC_ADD(int, block_count[bucket], 1); ATOMIC_ADD(int, block_total_count[bucket], 1); /* FIXME: should atomically store inc-ed val in temp to avoid races w/ max */ ATOMIC_MAX(int, block_peak_count[bucket], block_count[bucket]); ASSERT(CHECK_TRUNCATE_TYPE_uint(alloc_size - aligned_size)); ATOMIC_ADD(int, block_wasted[bucket], (int) (alloc_size - aligned_size)); /* FIXME: should atomically store val in temp to avoid races w/ max */ ATOMIC_MAX(int, block_peak_wasted[bucket], block_wasted[bucket]); if (aligned_size > size) { ASSERT(CHECK_TRUNCATE_TYPE_uint(aligned_size - size)); ATOMIC_ADD(int, block_align_pad[bucket], (int) (aligned_size - size)); /* FIXME: should atomically store val in temp to avoid races w/ max */ ATOMIC_MAX(int, block_peak_align_pad[bucket], block_align_pad[bucket]); STATS_ADD_PEAK(heap_align, aligned_size - size); LOG(GLOBAL, LOG_STATS, 5, "alignment mismatch: %s ask %d, aligned is %d -> %d pad\n", IF_HEAPACCT_ELSE(whichheap_name[which], ""), size, aligned_size, aligned_size-size); } if (bucket == BLOCK_TYPES-1) { STATS_ADD(heap_headers, HEADER_SIZE); STATS_INC(heap_allocs_variable); } else { STATS_INC(heap_allocs_buckets); if (alloc_size > aligned_size) { STATS_ADD_PEAK(heap_bucket_pad, alloc_size - aligned_size); LOG(GLOBAL, LOG_STATS, 5, "bucket mismatch: %s ask (aligned) %d, got %d, -> %d\n", IF_HEAPACCT_ELSE(whichheap_name[which], ""), aligned_size, alloc_size, alloc_size-aligned_size); } } }); done_allocating: #ifdef DEBUG_MEMORY if (bucket == BLOCK_TYPES-1 && check_alloc_size <= MAXROOM) { /* verify is unallocated 
memory, skip possible free list next pointer */ DOCHECK(chklvl, { CLIENT_ASSERT(is_region_memset_to_char (p+sizeof(heap_pc *), (alloc_size-HEADER_SIZE)-sizeof(heap_pc *), HEAP_UNALLOCATED_BYTE), "memory corruption detected"); }); LOG(THREAD, LOG_HEAP, 6, "\nalloc var "PFX"-"PFX" %d bytes, ret "PFX"-"PFX" %d bytes\n", p-HEADER_SIZE, p-HEADER_SIZE+alloc_size, alloc_size, p, p+size, size); /* there can only be extra padding if we took off of the free list */ DOCHECK(chklvl, memset(p+size, HEAP_PAD_BYTE, (alloc_size-HEADER_SIZE)-size);); } else { /* verify is unallocated memory, skip possible free list next pointer */ DOCHECK(chklvl, { CLIENT_ASSERT(is_region_memset_to_char (p+sizeof(heap_pc *), alloc_size-sizeof(heap_pc *), HEAP_UNALLOCATED_BYTE), "memory corruption detected"); }); LOG(THREAD, LOG_HEAP, 6, "\nalloc fix or oversize "PFX"-"PFX" %d bytes, ret "PFX"-"PFX" %d bytes\n", p, p+alloc_size, alloc_size, p, p+size, size); DOCHECK(chklvl, memset(p+size, HEAP_PAD_BYTE, alloc_size-size);); } DOCHECK(chklvl, memset(p, HEAP_ALLOCATED_BYTE, size);); # ifdef HEAP_ACCOUNTING LOG(THREAD, LOG_HEAP, 6, "\t%s\n", whichheap_name[which]); # endif #endif return (void*)p; } /* allocate storage on the thread's private heap */ void* heap_alloc(dcontext_t *dcontext, size_t size HEAPACCT(which_heap_t which)) { thread_units_t *tu; void *ret_val; if (dcontext == GLOBAL_DCONTEXT) return global_heap_alloc(size HEAPACCT(which)); tu = ((thread_heap_t *) dcontext->heap_field)->local_heap; ret_val = common_heap_alloc(tu, size HEAPACCT(which)); ASSERT(ret_val != NULL); return ret_val; } /* free heap storage * returns false if caller needs to grab dynamo_vm_areas_lock() and retry */ static bool common_heap_free(thread_units_t *tu, void *p_void, size_t size HEAPACCT(which_heap_t which)) { int bucket = 0; heap_pc p = (heap_pc) p_void; #if defined(DEBUG) && (defined(DEBUG_MEMORY) || defined(HEAP_ACCOUNTING)) dcontext_t *dcontext = tu->dcontext; /* DrMem i#999: private libs can be heap-intensive and our checks here * can have a prohibitive perf cost! * XXX: b/c of re-use we have to memset on free. Perhaps we should * have a separate heap pool for private libs. But, the overhead * from that final memset is small compared to what we've already * saved, so maybe not worth it. */ uint chklvl = CHKLVL_MEMFILL + (IF_HEAPACCT_ELSE(which == ACCT_LIBDUP ? 1 : 0, 0)); #endif size_t alloc_size, aligned_size = ALIGN_FORWARD(size, HEAP_ALIGNMENT); ASSERT(size > 0); /* we don't want to pay check cost in release */ ASSERT(p != NULL); #ifdef DEBUG_MEMORY /* FIXME i#417: This curiosity assertion is trying to make sure we don't * perform a double free, but it can fire if we ever free a data structure * that has the 0xcdcdcdcd bitpattern in the first or last 4 bytes. This * has happened a few times: * * - case 8802: App's eax is 0xcdcdcdcd (from an app dbg memset) and we have * dcontext->allocated_start==dcontext. * - i#417: On Linux x64 we get rax == 0xcdcdcdcd from a memset, and * opnd_create_reg() only updates part of the register before returning by * value in RAX:RDX. We initialize to zero in debug mode to work around * this. * - i#540: On Win7 x64 we see this assert when running the TSan tests in * NegativeTests.WindowsRegisterWaitForSingleObjectTest. * * For now, we've downgraded this to a curiosity, but if it fires too much * in the future we should maintain a separate data structure in debug mode * to perform this check. We accept objects that start with 0xcdcdcdcd so * long as the second four bytes are not also 0xcdcdcdcd. 
*/ DOCHECK(chklvl, { ASSERT_CURIOSITY( (*(uint *)p != HEAP_UNALLOCATED_UINT || (size >= 2*sizeof(uint) && *(((uint *)p)+1) != HEAP_UNALLOCATED_UINT)) && *(uint *)(p+size-sizeof(int)) != HEAP_UNALLOCATED_UINT && "attempting to free memory containing HEAP_UNALLOCATED pattern, " "possible double free!"); }); #endif while (aligned_size > BLOCK_SIZES[bucket]) bucket++; if (bucket == BLOCK_TYPES-1) alloc_size = aligned_size + HEADER_SIZE; else alloc_size = BLOCK_SIZES[bucket]; if (alloc_size > MAXROOM) { /* we must have used a special unit just for this allocation */ heap_unit_t *u = tu->top_unit, *prev = NULL; #ifdef DEBUG_MEMORY /* ensure we are freeing memory in a proper unit */ DOCHECK(CHKLVL_DEFAULT, { /* expensive check */ ASSERT(find_heap_unit(tu, p, size) != NULL); }); #endif if (!safe_to_allocate_or_free_heap_units()) { /* circular dependence solution: we need to hold DR lock before * global alloc lock -- so we back out, grab it, and then come back */ return false; } while (u != NULL && u->start_pc != p) { prev = u; u = u->next_local; } ASSERT(u != NULL); /* remove this unit from this thread's list, move to dead list * for future use -- no problems will be caused by it being * larger than normal */ if (prev == NULL) tu->top_unit = u->next_local; else prev->next_local = u->next_local; /* just retire the unit # */ #ifdef DEBUG_MEMORY LOG(THREAD, LOG_HEAP, 3, "\tFreeing oversized heap unit %d (%d KB)\n", u->id, size/1024); /* go ahead and set unallocated, even though we are just going to free * the unit, is needed for an assert in heap_free_unit anyways */ DOCHECK(CHKLVL_MEMFILL, memset(p, HEAP_UNALLOCATED_BYTE, size);); #endif ASSERT(size <= UNITROOM(u)); heap_free_unit(u, tu->dcontext); ACCOUNT_FOR_FREE(tu, which, size); return true; } else if (bucket == BLOCK_TYPES-1) { ASSERT(GET_VARIABLE_ALLOCATION_SIZE(p) >= alloc_size); alloc_size = GET_VARIABLE_ALLOCATION_SIZE(p); ASSERT(alloc_size - HEADER_SIZE >= aligned_size); } #if defined(DEBUG) || defined(DEBUG_MEMORY) || defined(HEAP_ACCOUNTING) if (bucket == BLOCK_TYPES-1) { # ifdef DEBUG_MEMORY LOG(THREAD, LOG_HEAP, 6, "\nfree var "PFX"-"PFX" %d bytes, asked "PFX"-"PFX" %d bytes\n", p-HEADER_SIZE, p-HEADER_SIZE+alloc_size, alloc_size, p, p+size, size); ASSERT_MESSAGE(chklvl, "heap overflow", is_region_memset_to_char(p+size, (alloc_size-HEADER_SIZE)-size, HEAP_PAD_BYTE)); /* ensure we are freeing memory in a proper unit */ DOCHECK(CHKLVL_DEFAULT, { /* expensive check */ ASSERT(find_heap_unit(tu, p, alloc_size - HEADER_SIZE) != NULL); }); /* set used and padding memory back to unallocated */ DOCHECK(CHKLVL_MEMFILL, memset(p, HEAP_UNALLOCATED_BYTE, alloc_size-HEADER_SIZE);); # endif STATS_SUB(heap_headers, HEADER_SIZE); } else { # ifdef DEBUG_MEMORY LOG(THREAD, LOG_HEAP, 6, "\nfree fix "PFX"-"PFX" %d bytes, asked "PFX"-"PFX" %d bytes\n", p, p+alloc_size, alloc_size, p, p+size, size); ASSERT_MESSAGE(chklvl, "heap overflow", is_region_memset_to_char(p+size, alloc_size-size, HEAP_PAD_BYTE)); /* ensure we are freeing memory in a proper unit */ DOCHECK(CHKLVL_DEFAULT, { /* expensive check */ ASSERT(find_heap_unit(tu, p, alloc_size) != NULL); }); /* set used and padding memory back to unallocated */ DOCHECK(CHKLVL_MEMFILL, memset(p, HEAP_UNALLOCATED_BYTE, alloc_size);); # endif STATS_SUB(heap_bucket_pad, (alloc_size - aligned_size)); } STATS_SUB(heap_align, (aligned_size - size)); DOSTATS({ ATOMIC_ADD(int, block_count[bucket], -1); ATOMIC_ADD(int, block_wasted[bucket], -(int)(alloc_size - aligned_size)); ATOMIC_ADD(int, block_align_pad[bucket], 
-(int)(aligned_size - size)); }); # ifdef HEAP_ACCOUNTING LOG(THREAD, LOG_HEAP, 6, "\t%s\n", whichheap_name[which]); ACCOUNT_FOR_FREE(tu, which, alloc_size); # endif #endif /* write next pointer */ *((heap_pc*)p) = tu->free_list[bucket]; ASSERT(ALIGNED(tu->free_list[bucket], HEAP_ALIGNMENT)); tu->free_list[bucket] = p; ASSERT(ALIGNED(tu->free_list[bucket], HEAP_ALIGNMENT)); return true; } /* free heap storage */ void heap_free(dcontext_t *dcontext, void *p, size_t size HEAPACCT(which_heap_t which)) { thread_units_t *tu; DEBUG_DECLARE(bool ok;) if (dcontext == GLOBAL_DCONTEXT) { global_heap_free(p, size HEAPACCT(which)); return; } tu = ((thread_heap_t *) dcontext->heap_field)->local_heap; DEBUG_DECLARE(ok = ) common_heap_free(tu, p, size HEAPACCT(which)); ASSERT(ok); } bool local_heap_protected(dcontext_t *dcontext) { thread_heap_t *th = (thread_heap_t *) dcontext->heap_field; return (!th->local_heap->writable || (th->nonpersistent_heap != NULL && !th->nonpersistent_heap->writable)); } static inline void protect_local_units_helper(heap_unit_t *u, bool writable) { /* win32 does not allow single protection change call on units that * were allocated with separate calls so we don't try to combine * adjacent units here */ while (u != NULL) { change_protection(UNIT_ALLOC_START(u), UNIT_COMMIT_SIZE(u), writable); u = u->next_local; } } static void protect_threadunits(thread_units_t *tu, bool writable) { ASSERT(TEST(SELFPROT_LOCAL, dynamo_options.protect_mask)); if (tu->writable == writable) return; protect_local_units_helper(tu->top_unit, writable); tu->writable = writable; } void protect_local_heap(dcontext_t *dcontext, bool writable) { thread_heap_t *th = (thread_heap_t *) dcontext->heap_field; protect_threadunits(th->local_heap, writable); if (SEPARATE_NONPERSISTENT_HEAP()) protect_threadunits(th->nonpersistent_heap, writable); } /* assumption: vmm_heap_alloc only gets called for HeapUnits themselves, which * are protected by us here, so ignore os heap */ void protect_global_heap(bool writable) { ASSERT(TEST(SELFPROT_GLOBAL, dynamo_options.protect_mask)); acquire_recursive_lock(&global_alloc_lock); if (heapmgt->global_heap_writable == writable) { release_recursive_lock(&global_alloc_lock); return; } /* win32 does not allow single protection change call on units that * were allocated with separate calls so we don't try to combine * adjacent units here * FIXME: That may no longer be true for our virtual memory manager that * will in fact be allocated as a single unit. It is only in case * we have run out of that initial allocation that we may have to * keep a separate list of allocations. 
*/ if (!writable) { ASSERT(heapmgt->global_heap_writable); heapmgt->global_heap_writable = writable; } protect_local_units_helper(heapmgt->global_units.top_unit, writable); if (SEPARATE_NONPERSISTENT_HEAP()) { protect_local_units_helper(heapmgt->global_nonpersistent_units.top_unit, writable); } if (writable) { ASSERT(!heapmgt->global_heap_writable); heapmgt->global_heap_writable = writable; } release_recursive_lock(&global_alloc_lock); } /* FIXME: share some code...right now these are identical to protected * versions except the unit used */ void * global_unprotected_heap_alloc(size_t size HEAPACCT(which_heap_t which)) { void *p = common_global_heap_alloc(&heapmgt->global_unprotected_units, size HEAPACCT(which)); ASSERT(p != NULL); LOG(GLOBAL, LOG_HEAP, 6, "\nglobal unprotected alloc: "PFX" (%d bytes)\n", p, size); return p; } void global_unprotected_heap_free(void *p, size_t size HEAPACCT(which_heap_t which)) { common_global_heap_free(&heapmgt->global_unprotected_units, p, size HEAPACCT(which)); LOG(GLOBAL, LOG_HEAP, 6, "\nglobal unprotected free: "PFX" (%d bytes)\n", p, size); } void * nonpersistent_heap_alloc(dcontext_t *dcontext, size_t size HEAPACCT(which_heap_t which)) { void *p; if (SEPARATE_NONPERSISTENT_HEAP()) { if (dcontext == GLOBAL_DCONTEXT) { p = common_global_heap_alloc(&heapmgt->global_nonpersistent_units, size HEAPACCT(which)); LOG(GLOBAL, LOG_HEAP, 6, "\nglobal nonpersistent alloc: "PFX" (%d bytes)\n", p, size); } else { thread_units_t *nph = ((thread_heap_t *) dcontext->heap_field)->nonpersistent_heap; p = common_heap_alloc(nph, size HEAPACCT(which)); } } else { p = heap_alloc(dcontext, size HEAPACCT(which)); } ASSERT(p != NULL); return p; } void nonpersistent_heap_free(dcontext_t *dcontext, void *p, size_t size HEAPACCT(which_heap_t which)) { if (SEPARATE_NONPERSISTENT_HEAP()) { if (dcontext == GLOBAL_DCONTEXT) { common_global_heap_free(&heapmgt->global_nonpersistent_units, p, size HEAPACCT(which)); LOG(GLOBAL, LOG_HEAP, 6, "\nglobal nonpersistent free: "PFX" (%d bytes)\n", p, size); } else { thread_units_t *nph = ((thread_heap_t *) dcontext->heap_field)->nonpersistent_heap; DEBUG_DECLARE(bool ok =) common_heap_free(nph, p, size HEAPACCT(which)); ASSERT(ok); } } else { heap_free(dcontext, p, size HEAPACCT(which)); } } /**************************************************************************** * SPECIAL SINGLE-ALLOC-SIZE HEAP SERVICE */ /* Assumptions: * All allocations are of a single block size * If use_lock is false, no synchronization is needed or even safe */ /* We use our own unit struct to give us flexibility. * 1) We don't always allocate the header inline. * 2) We are sometimes executed from and so need pc prof support. * 3) We don't need all the fields of heap_unit_t. 
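 *
 * A typical caller sequence looks like the following sketch (illustrative
 * only: the name sh and the 64-byte block size are hypothetical):
 *   void *sh = special_heap_init(64, true, false, true);
 *             (args: block_size, use_lock, executable, persistent)
 *   void *slot = special_heap_alloc(sh);   (hands out one 64-byte block)
 *   ...
 *   special_heap_free(sh, slot);
 *   special_heap_exit(sh);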
*/ typedef struct _special_heap_unit_t { heap_pc alloc_pc; /* start of allocation region */ heap_pc start_pc; /* first address we'll give out for storage */ heap_pc end_pc; /* open-ended address of heap storage */ heap_pc cur_pc; /* current end (open) of allocated storage */ heap_pc reserved_end_pc; /* (open) end of reserved (not nec committed) memory */ #ifdef WINDOWS_PC_SAMPLE profile_t *profile; #endif #ifdef DEBUG int id; /* # of this unit */ #endif struct _special_heap_unit_t *next; } special_heap_unit_t; #define SPECIAL_UNIT_COMMIT_SIZE(u) ((u)->end_pc - (u)->alloc_pc) #define SPECIAL_UNIT_RESERVED_SIZE(u) ((u)->reserved_end_pc - (u)->alloc_pc) #define SPECIAL_UNIT_HEADER_INLINE(u) ((u)->alloc_pc != (u)->start_pc) /* the cfree list stores a next ptr and a count */ typedef struct _cfree_header { struct _cfree_header *next_cfree; uint count; } cfree_header_t; typedef struct _special_units_t { special_heap_unit_t *top_unit; /* start of linked list of heap units */ special_heap_unit_t *cur_unit; /* current unit in heap list */ uint block_size; /* all blocks are this size */ uint block_alignment; heap_pc free_list; cfree_header_t *cfree_list; #ifdef DEBUG int num_units; /* total # of heap units */ #endif bool writable:1; /* remember state of heap protection */ bool executable:1; /* if use_lock is false, grabbing _any_ lock may be hazardous! * (this isn't just an optimization, it's for correctness) */ bool use_lock:1; bool in_iterator:1; bool persistent:1; mutex_t lock; /* Yet another feature added: pclookup, but across multiple heaps, * so it's via a passed-in vector and passed-in data */ vm_area_vector_t *heap_areas; void *lookup_retval; #ifdef WINDOWS_PC_SAMPLE struct _special_units_t *next; #endif #ifdef HEAP_ACCOUNTING /* we only need one bucket for SpecialHeap but to re-use code we waste space */ heap_acct_t acct; #endif } special_units_t; #if defined(WINDOWS_PC_SAMPLE) && !defined(DEBUG) /* For fast exit path we need a quick way to walk all the units */ DECLARE_CXTSWPROT_VAR(static mutex_t special_units_list_lock, INIT_LOCK_FREE(special_units_list_lock)); /* This is only used for profiling so we don't bother to protect it */ DECLARE_CXTSWPROT_VAR(static special_units_t *special_units_list, NULL); #endif #if defined(DEBUG) && defined(HEAP_ACCOUNTING) && defined(HOT_PATCHING_INTERFACE) /* To get around the problem of the special_units_t "module" being defined after * the heap module in the same file. Part of fix for case 9593 that required * leaking memory. */ static int get_special_heap_header_size(void) { return sizeof(special_units_t); } #endif #ifdef WINDOWS_PC_SAMPLE static inline bool special_heap_profile_enabled() { return (dynamo_options.profile_pcs && dynamo_options.prof_pcs_stubs >= 2 && dynamo_options.prof_pcs_stubs <= 32); } #endif static inline uint get_prot(special_units_t *su) { return (su->executable ? MEMPROT_READ|MEMPROT_WRITE|MEMPROT_EXEC : MEMPROT_READ|MEMPROT_WRITE); } static void special_unit_extend_commitment(special_heap_unit_t *u, size_t size_need, uint prot) { u->end_pc += common_heap_extend_commitment(u->cur_pc, u->end_pc, u->reserved_end_pc, size_need, prot); } /* If pc is NULL, allocates memory and stores the header inside it; * If pc is non-NULL, allocates separate memory for the header, and * uses pc for the heap region (assuming size is fully committed). * unit_full only applies to the non-NULL case, indicating whether * to continue to allocate from this unit. 
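 * (For instance, the heap_region, heap_size, and unit_full arguments of
 * special_heap_pclookup_init() are what end up here, as pc, size, and
 * unit_full, when the initial unit is created.)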
*/ static special_heap_unit_t * special_heap_create_unit(special_units_t *su, byte *pc, size_t size, bool unit_full) { special_heap_unit_t *u; size_t commit_size; uint prot = get_prot(su); ASSERT_OWN_MUTEX(su->use_lock, &su->lock); if (pc != NULL) { u = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, special_heap_unit_t, ACCT_MEM_MGT, PROTECTED); ASSERT(u != NULL); u->start_pc = pc; u->alloc_pc = pc; commit_size = size; /* caller should arrange alignment */ ASSERT(su->block_alignment == 0 || ALIGNED(u->start_pc, su->block_alignment)); } else { commit_size = DYNAMO_OPTION(heap_commit_increment); ASSERT(commit_size <= size); /* create new unit */ /* Since vmm lock, dynamo_vm_areas lock, all_memory_areas lock (on * linux), etc. will be acquired, and presumably !su->use_lock means * user can't handle ANY lock being acquired, we warn here: xref PR * 596768. In release build, we try to acquire the memory anyway. I'm * worried about pcprofile: can only fit ~1K in one unit and so will * easily run out...should it allocate additional units up front? * => PR 596808. */ DODEBUG({ if (su->top_unit != NULL/*init*/ && !su->use_lock) { SYSLOG_INTERNAL_WARNING_ONCE("potentially unsafe: allocating a new " "fragile special heap unit!"); } }); u = (special_heap_unit_t *) get_guarded_real_memory(size, commit_size, prot, true, true, NULL, VMM_SPECIAL_HEAP _IF_DEBUG("special_heap")); ASSERT(u != NULL); u->alloc_pc = (heap_pc) u; /* u is kept at top of unit itself, so displace start pc */ u->start_pc = (heap_pc) (((ptr_uint_t)u) + sizeof(special_heap_unit_t)); if (su->block_alignment != 0) { STATS_ADD(heap_special_align, ALIGN_FORWARD(u->start_pc, su->block_alignment) - (ptr_uint_t)u->start_pc); u->start_pc = (heap_pc) ALIGN_FORWARD(u->start_pc, su->block_alignment); } } u->end_pc = u->alloc_pc + commit_size; u->reserved_end_pc = u->alloc_pc + size; if (pc != NULL && unit_full) { ASSERT(u->reserved_end_pc == u->end_pc); u->cur_pc = u->end_pc; } else u->cur_pc = u->start_pc; u->next = NULL; DODEBUG({ u->id = su->num_units; su->num_units++; }); #ifdef WINDOWS_PC_SAMPLE if (special_heap_profile_enabled()) { u->profile = create_profile((app_pc)PAGE_START(u->start_pc), u->reserved_end_pc, dynamo_options.prof_pcs_stubs, NULL); start_profile(u->profile); } else u->profile = NULL; #endif /* N.B.: if STATS macros ever change to grab a mutex, we could deadlock * if !su->use_lock! */ RSTATS_ADD_PEAK(heap_capacity, commit_size); RSTATS_ADD_PEAK(heap_special_capacity, commit_size); STATS_ADD_PEAK(heap_special_units, 1); STATS_ADD_PEAK(heap_reserved_only, (u->reserved_end_pc - u->end_pc)); if (su->heap_areas != NULL) { vmvector_add(su->heap_areas, u->alloc_pc, u->reserved_end_pc, su->lookup_retval); } #ifdef DEBUG_MEMORY /* Don't clobber already-allocated memory */ DOCHECK(CHKLVL_MEMFILL, { if (pc == NULL) memset(u->start_pc, HEAP_UNALLOCATED_BYTE, u->end_pc - u->start_pc); }); #endif return u; } /* caller must store the special_units_t *, which is opaque */ static void * special_heap_init_internal(uint block_size, uint block_alignment, bool use_lock, bool executable, bool persistent, vm_area_vector_t *vector, void *vector_data, byte *heap_region, size_t heap_size, bool unit_full) { special_units_t *su; size_t unit_size = heap_size; if (unit_size == 0) { unit_size = (block_size * 16 > HEAP_UNIT_MIN_SIZE) ?
(block_size * 16) : HEAP_UNIT_MIN_SIZE; /* Whether using 16K or 64K vmm blocks, HEAP_UNIT_MIN_SIZE of 32K wastes * space, and our main uses (stubs, whether global or coarse, and signal * pending queue) don't need a lot of space, so shrinking. * This tuning is a little fragile (just like for regular heap units and * fcache units) so be careful when changing default parameters. */ if (unit_size == HEAP_UNIT_MIN_SIZE) { ASSERT(unit_size > (size_t) GUARD_PAGE_ADJUSTMENT); unit_size -= GUARD_PAGE_ADJUSTMENT; } } if (heap_region == NULL) { unit_size = (size_t) ALIGN_FORWARD(unit_size, PAGE_SIZE); } su = (special_units_t *) (persistent ? global_heap_alloc(sizeof(special_units_t) HEAPACCT(ACCT_MEM_MGT)) : nonpersistent_heap_alloc(GLOBAL_DCONTEXT, sizeof(special_units_t) HEAPACCT(ACCT_MEM_MGT))); memset(su, 0, sizeof(*su)); ASSERT(block_size >= sizeof(heap_pc *) && "need room for free list ptrs"); ASSERT(block_size >= sizeof(heap_pc *) + sizeof(uint) && "need room for cfree list ptrs"); su->block_size = block_size; su->block_alignment = block_alignment; su->executable = executable; su->persistent = persistent; su->writable = true; su->free_list = NULL; su->cfree_list = NULL; DODEBUG({ su->num_units = 0; }); ASSERT((vector == NULL) == (vector_data == NULL)); su->heap_areas = vector; su->lookup_retval = vector_data; su->in_iterator = false; if (use_lock) ASSIGN_INIT_LOCK_FREE(su->lock, special_heap_lock); /* For persistent cache loading we hold executable_areas lock and so * cannot acquire special_heap_lock -- so we do not acquire * for the initial unit creation, which is safe since su is still * private to this routine. */ su->use_lock = false; /* we set to real value below */ su->top_unit = special_heap_create_unit(su, heap_region, unit_size, unit_full); su->use_lock = use_lock; #ifdef HEAP_ACCOUNTING memset(&su->acct, 0, sizeof(su->acct)); #endif su->cur_unit = su->top_unit; #if defined(WINDOWS_PC_SAMPLE) && !defined(DEBUG) if (special_heap_profile_enabled()) { /* Add to the global master list, which requires a lock */ mutex_lock(&special_units_list_lock); su->next = special_units_list; special_units_list = su; mutex_unlock(&special_units_list_lock); } #endif return su; } /* Typical usage */ void * special_heap_init(uint block_size, bool use_lock, bool executable, bool persistent) { uint alignment = 0; /* Some users expect alignment; not much of a space loss for those who don't. * XXX: find those users and have them call special_heap_init_aligned() * and remove this. */ if (IS_POWER_OF_2(block_size)) alignment = block_size; return special_heap_init_internal(block_size, alignment, use_lock, executable, persistent, NULL, NULL, NULL, 0, false); } void * special_heap_init_aligned(uint block_size, uint alignment, bool use_lock, bool executable, bool persistent, size_t initial_unit_size) { return special_heap_init_internal(block_size, alignment, use_lock, executable, persistent, NULL, NULL, NULL, initial_unit_size, false); } /* Special heap w/ a vector for lookups. Also supports a pre-created heap region * (heap_region, heap_region+heap_size) whose fullness is unit_full.
*/ void * special_heap_pclookup_init(uint block_size, bool use_lock, bool executable, bool persistent, vm_area_vector_t *vector, void *vector_data, byte *heap_region, size_t heap_size, bool unit_full) { uint alignment = 0; /* XXX: see comment in special_heap_init() */ if (IS_POWER_OF_2(block_size)) alignment = block_size; return special_heap_init_internal(block_size, alignment, use_lock, executable, persistent, vector, vector_data, heap_region, heap_size, unit_full); } /* Sets the vector data for the lookup vector used by the special heap */ void special_heap_set_vector_data(void *special, void *vector_data) { special_units_t *su = (special_units_t *) special; special_heap_unit_t *u; ASSERT(su->heap_areas != NULL); /* FIXME: more efficient to walk the vector, but no interface * to set the data: we'd need to expose the iterator index or * the vmarea struct rather than the clean copy we have now */ for (u = su->top_unit; u != NULL; u = u->next) { vmvector_modify_data(su->heap_areas, u->alloc_pc, u->reserved_end_pc, vector_data); } } /* Returns false if the special heap has more than one unit or has a * non-externally-allocated unit. * Sets the cur pc for the only unit to end_pc. */ bool special_heap_set_unit_end(void *special, byte *end_pc) { special_units_t *su = (special_units_t *) special; if (su->top_unit->next != NULL || SPECIAL_UNIT_HEADER_INLINE(su->top_unit) || end_pc < su->top_unit->start_pc || end_pc > su->top_unit->end_pc) return false; su->top_unit->cur_pc = end_pc; return true; } #ifdef WINDOWS_PC_SAMPLE static void special_heap_profile_stop(special_heap_unit_t *u) { int sum; ASSERT(special_heap_profile_enabled()); stop_profile(u->profile); sum = sum_profile(u->profile); if (sum > 0) { mutex_lock(&profile_dump_lock); print_file(profile_file, "\nDumping special heap unit profile\n%d hits\n", sum); dump_profile(profile_file, u->profile); mutex_unlock(&profile_dump_lock); } } #endif #if defined(WINDOWS_PC_SAMPLE) && !defined(DEBUG) /* for fast exit path only, normal path taken care of */ void special_heap_profile_exit() { special_heap_unit_t *u; special_units_t *su; ASSERT(special_heap_profile_enabled()); /* will never be compiled in I guess :) */ mutex_lock(&special_units_list_lock); for (su = special_units_list; su != NULL; su = su->next) { if (su->use_lock) mutex_lock(&su->lock); for (u = su->top_unit; u != NULL; u = u->next) { if (u->profile != NULL) special_heap_profile_stop(u); /* fast exit path: do not bother to free */ } if (su->use_lock) mutex_unlock(&su->lock); } mutex_unlock(&special_units_list_lock); } #endif void special_heap_exit(void *special) { special_units_t *su = (special_units_t *) special; special_heap_unit_t *u, *next_u; #ifdef DEBUG size_t total_heap_used = 0; #endif u = su->top_unit; while (u != NULL) { /* Assumption: it's ok to use print_lock even if !su->use_lock */ DOLOG(1, LOG_HEAP|LOG_STATS, { size_t num_used = u->cur_pc - u->start_pc; total_heap_used += num_used; LOG(THREAD_GET, LOG_HEAP|LOG_STATS, 1, "Heap unit "SZFMT" (size "SZFMT" [/"SZFMT"] KB): used "SZFMT" KB\n", u->id, (SPECIAL_UNIT_COMMIT_SIZE(u))/1024, SPECIAL_UNIT_RESERVED_SIZE(u)/1024, num_used/1024); }); next_u = u->next; #ifdef WINDOWS_PC_SAMPLE if (u->profile != NULL) { ASSERT(special_heap_profile_enabled()); special_heap_profile_stop(u); free_profile(u->profile); u->profile = NULL; } #endif STATS_ADD(heap_special_units, -1); RSTATS_SUB(heap_special_capacity, SPECIAL_UNIT_COMMIT_SIZE(u)); if (su->heap_areas != NULL) { vmvector_remove(su->heap_areas, u->alloc_pc, u->reserved_end_pc);
} if (!SPECIAL_UNIT_HEADER_INLINE(u)) { HEAP_TYPE_FREE(GLOBAL_DCONTEXT, u, special_heap_unit_t, ACCT_MEM_MGT, PROTECTED); /* up to creator to free the heap region */ } else { release_guarded_real_memory((vm_addr_t)u, SPECIAL_UNIT_RESERVED_SIZE(u), true/*update DR areas immediately*/, true, VMM_SPECIAL_HEAP); } u = next_u; } #ifdef HEAP_ACCOUNTING add_heapacct_to_global_stats(&su->acct); #endif LOG(THREAD_GET, LOG_HEAP|LOG_STATS, 1, "\tTotal heap used: "SZFMT" KB\n", total_heap_used/1024); #if defined(WINDOWS_PC_SAMPLE) && !defined(DEBUG) if (special_heap_profile_enabled()) { /* Removed this special_units_t from the master list */ mutex_lock(&special_units_list_lock); if (special_units_list == su) special_units_list = su->next; else { special_units_t *prev = special_units_list; ASSERT(prev != NULL); for (; prev->next != NULL && prev->next != su; prev = prev->next) ;/*nothing*/ ASSERT(prev->next == su); prev->next = su->next; } mutex_unlock(&special_units_list_lock); } #endif if (su->use_lock) DELETE_LOCK(su->lock); /* up to caller to free the vector, which is typically multi-heap */ if (su->persistent) { global_heap_free(su, sizeof(special_units_t) HEAPACCT(ACCT_MEM_MGT)); } else { nonpersistent_heap_free(GLOBAL_DCONTEXT, su, sizeof(special_units_t) HEAPACCT(ACCT_MEM_MGT)); } } void * special_heap_calloc(void *special, uint num) { #ifdef DEBUG dcontext_t *dcontext = get_thread_private_dcontext(); #endif special_units_t *su = (special_units_t *) special; special_heap_unit_t *u; void *p = NULL; bool took_free = false; ASSERT(num > 0); if (su->use_lock) mutex_lock(&su->lock); u = su->cur_unit; if (su->free_list != NULL && num == 1) { p = (void *) su->free_list; su->free_list = *((heap_pc *)p); took_free = true; } else if (su->cfree_list != NULL && num > 1) { /* FIXME: take a piece of cfree if num == 1? 
* seems better to save the bigger pieces */ cfree_header_t *cfree = su->cfree_list, *prev = NULL; while (cfree != NULL && cfree->count < num) { prev = cfree; cfree = cfree->next_cfree; } if (cfree != NULL) { ASSERT(cfree->count >= num); took_free = true; if (cfree->count == num) { /* take it out of list altogether */ if (prev == NULL) su->cfree_list = cfree->next_cfree; else prev->next_cfree = cfree->next_cfree; p = (void *) cfree; } else if (cfree->count == num+1) { /* add single-size piece to normal free list */ heap_pc tail = ((heap_pc) cfree) + num * su->block_size; *((heap_pc *)tail) = su->free_list; su->free_list = tail; p = (void *) cfree; } else { /* if take tail don't have to change free list ptrs at all */ cfree->count -= num; p = (void *) (((heap_pc) cfree) + (cfree->count - num) * su->block_size); } } } if (!took_free) { /* no free blocks, grab a new one */ if (u->cur_pc + su->block_size*num > u->end_pc || POINTER_OVERFLOW_ON_ADD(u->cur_pc, su->block_size*num)) { /* simply extend commitment, if possible */ size_t pre_commit_size = SPECIAL_UNIT_COMMIT_SIZE(u); special_unit_extend_commitment(u, su->block_size*num, get_prot(su)); RSTATS_ADD_PEAK(heap_special_capacity, SPECIAL_UNIT_COMMIT_SIZE(u) - pre_commit_size); /* check again after extending commit */ if (u->cur_pc + su->block_size*num > u->end_pc || POINTER_OVERFLOW_ON_ADD(u->cur_pc, su->block_size*num)) { /* no room, need new unit */ special_heap_unit_t *new_unit; special_heap_unit_t *prev = su->top_unit; size_t size = UNITALLOC(u); while (prev->next != NULL) prev = prev->next; /* create new unit double size of old unit (until hit max size) */ if (size*2 <= HEAP_UNIT_MAX_SIZE) size *= 2; /* we don't support arbitrarily long sequences */ ASSERT(su->block_size * num < size); new_unit = special_heap_create_unit(su, NULL, size, false/*empty*/); prev->next = new_unit; if (su->use_lock) { /* if synch bad so is printing */ LOG(THREAD, LOG_HEAP, 3, "\tCreating new heap unit %d\n", new_unit->id); } su->cur_unit = new_unit; u = new_unit; ASSERT(u->cur_pc + su->block_size*num <= u->end_pc && !POINTER_OVERFLOW_ON_ADD(u->cur_pc, su->block_size*num)); } } p = (void *) u->cur_pc; u->cur_pc += su->block_size*num; ACCOUNT_FOR_ALLOC(alloc_new, su, ACCT_SPECIAL, su->block_size*num, su->block_size*num); } else { ACCOUNT_FOR_ALLOC(alloc_reuse, su, ACCT_SPECIAL, su->block_size*num, su->block_size*num); } if (su->use_lock) mutex_unlock(&su->lock); #ifdef DEBUG_MEMORY DOCHECK(CHKLVL_MEMFILL, memset(p, HEAP_ALLOCATED_BYTE, su->block_size*num);); #endif ASSERT(p != NULL); return (void*)p; } void * special_heap_alloc(void *special) { return special_heap_calloc(special, 1); } void special_heap_cfree(void *special, void *p, uint num) { special_units_t *su = (special_units_t *) special; ASSERT(num > 0); ASSERT(p != NULL); /* Allow freeing while iterating w/o deadlock (iterator holds lock) */ ASSERT(!su->in_iterator || OWN_MUTEX(&su->lock)); if (su->use_lock && !su->in_iterator) mutex_lock(&su->lock); #ifdef DEBUG_MEMORY /* FIXME: ensure that p is in allocated state */ DOCHECK(CHKLVL_MEMFILL, memset(p, HEAP_UNALLOCATED_BYTE, su->block_size*num);); #endif if (num == 1) { /* write next pointer */ *((heap_pc *)p) = su->free_list; su->free_list = (heap_pc)p; } else { cfree_header_t *cfree = (cfree_header_t *) p; cfree->next_cfree = su->cfree_list; cfree->count = num; su->cfree_list = cfree; } #ifdef HEAP_ACCOUNTING ACCOUNT_FOR_FREE(su, ACCT_SPECIAL, su->block_size*num); #endif if (su->use_lock && !su->in_iterator) mutex_unlock(&su->lock); } void 
special_heap_free(void *special, void *p) { special_heap_cfree(special, p, 1); } bool special_heap_can_calloc(void *special, uint num) { special_units_t *su = (special_units_t *) special; bool can_calloc = false; ASSERT(num > 0); if (su->use_lock) mutex_lock(&su->lock); if (su->free_list != NULL && num == 1) { can_calloc = true; } else if (su->cfree_list != NULL && num > 1) { cfree_header_t *cfree = su->cfree_list; while (cfree != NULL) { if (cfree->count >= num) { can_calloc = true; break; } cfree = cfree->next_cfree; } } if (!can_calloc) { special_heap_unit_t *u = su->cur_unit; /* what if more units are available? */ can_calloc = (u->cur_pc + su->block_size*num <= u->reserved_end_pc && !POINTER_OVERFLOW_ON_ADD(u->cur_pc, su->block_size*num)); } if (su->use_lock) mutex_unlock(&su->lock); return can_calloc; } /* Special heap iterator. Initialized with special_heap_iterator_start(), which * grabs the heap lock (regardless of whether synch is used for allocs), and * destroyed with special_heap_iterator_stop() to release the lock. * If the special heap uses no lock for alloc, it is up to the caller * to prevent race conditions causing problems. * Accessor special_heap_iterator_next() should be called only when * predicate special_heap_iterator_hasnext() is true. * Any mutation of the heap while iterating will result in a deadlock * for heaps that use locks for alloc, except for individual freeing, * which will proceed w/o trying to grab the lock a second time. * FIXME: could generalize to regular heaps if a use arises. */ void special_heap_iterator_start(void *heap, special_heap_iterator_t *shi) { special_units_t *su = (special_units_t *) heap; ASSERT(heap != NULL); ASSERT(shi != NULL); mutex_lock(&su->lock); shi->heap = heap; shi->next_unit = (void *) su->top_unit; su->in_iterator = true; } bool special_heap_iterator_hasnext(special_heap_iterator_t *shi) { ASSERT(shi != NULL); DOCHECK(1, { special_units_t *su = (special_units_t *) shi->heap; ASSERT(su != NULL); ASSERT_OWN_MUTEX(true, &su->lock); }); return (shi->next_unit != NULL); } /* Iterator accessor: * Has to be initialized with special_heap_iterator_start, and should be * called only when special_heap_iterator_hasnext() is true. * Sets the area boundaries in area_start and area_end. */ void special_heap_iterator_next(special_heap_iterator_t *shi /* IN/OUT */, app_pc *heap_start /* OUT */, app_pc *heap_end /* OUT */) { special_units_t *su; special_heap_unit_t *u; ASSERT(shi != NULL); su = (special_units_t *) shi->heap; ASSERT(su != NULL); ASSERT_OWN_MUTEX(true, &su->lock); u = (special_heap_unit_t *) shi->next_unit; ASSERT(u != NULL); if (u != NULL) { /* caller error, but paranoid */ if (heap_start != NULL) *heap_start = u->start_pc; ASSERT(u->cur_pc <= u->end_pc); if (heap_end != NULL) *heap_end = u->cur_pc; shi->next_unit = (void *) u->next; } } void special_heap_iterator_stop(special_heap_iterator_t *shi) { special_units_t *su; ASSERT(shi != NULL); su = (special_units_t *) shi->heap; ASSERT(su != NULL); ASSERT_OWN_MUTEX(true, &su->lock); su->in_iterator = false; mutex_unlock(&su->lock); DODEBUG({ shi->heap = NULL; shi->next_unit = NULL; }); } #if defined(DEBUG) && defined(HOT_PATCHING_INTERFACE) /* We leak hotp trampolines as part of fix for case 9593; so, during a detach * we can't delete the trampoline heap. However if that heap's lock isn't * deleted, we'll assert. This routine is used only for that. Normally, we * should call special_heap_exit() which deletes the lock. 
*/ void special_heap_delete_lock(void *special) { special_units_t *su = (special_units_t *)special; /* No one calls this routine unless they have a lock to delete. */ ASSERT(su != NULL); if (su == NULL) return; ASSERT(su->use_lock); if (su->use_lock) DELETE_LOCK(su->lock); } #endif /*----------------------------------------------------------------------------*/ #ifdef WINDOWS /* currently not used on linux */ /* Landing pads (introduced as part of work for PR 250294). */ /* landing_pad_areas is a vmvector made up of regions of memory called * landing pad areas, each of which contains multiple landing pads. Landing * pads are small trampolines used to jump from the hook point to the main * trampoline. This is used in both 32-bit and 64-bit DR. In both cases it * will handle the problem of hook chaining by 3rd party software and us having * to release our hooks (we'll nop the landing pad and free the trampoline). * In 64-bit it also solves the problem of reachability of the 5-byte rel jmp * we use for hooking, i.e., that 5-byte rel jmp may not reach the main * trampoline in DR heap. We have to maintain the hook a 5-byte jmp because * hotp_only assumes it (see PR 250294). * * A landing pad will have nothing more than a jump (5-byte rel for 32-bit DR * and 64-bit abs ind jmp for 64-bit DR) to the trampoline and a 5-byte rel jmp * back to the next instruction after the hook, plus the displaced app instrs. * * To handle hook chaining landing pads won't be released till process exit * (not on a detach), their first jump will just be nop'ed. As landing pads * aren't released till exit, all landing pads are just incrementally allocated * in a landing pad area. * * Note: Landing pad areas don't necessarily have to fall within the vm_reserve * region or capacity, so aren't accounted by our vmm. * * Note: If in future other needs for such region specific allocation should * arise, then we should convert this into special_heap_alloc_in_region(). For * now, landing pads are the only consumers, so was decided to be acceptable. * * See win32/callback.c for emit_landing_pad_code() and landing pad usage. */ typedef struct { byte *start; /* start of reserved region */ byte *end; /* end of reserved region */ byte *commit_end; /* end of committed memory in the reserved region */ byte *cur_ptr; /* pointer to next allocatable landing pad memory */ bool allocated; /* allocated, or stolen from an app dll? */ } landing_pad_area_t; /* Allocates a landing pad so that a hook inserted at addr_to_hook can reach * its trampoline via the landing pad. The landing pad will reachable by a * 32-bit relative jmp from addr_to_hook. * Note: we may want to generalize this at some point such that the size of the * landing pad is passed as an argument. * * For Windows we assume that landing_pads_to_executable_areas(true) will be * called once landing pads are finished being created. */ byte * alloc_landing_pad(app_pc addr_to_hook) { app_pc hook_region_start, hook_region_end; app_pc alloc_region_start, alloc_region_end; app_pc lpad_area_start = NULL, lpad_area_end; app_pc lpad = NULL; landing_pad_area_t *lpad_area = NULL; /* Allocate the landing pad area such that any hook from within the module * or memory region containing addr_to_hook can use the same area for a * landing pad. Makes it more efficient. 
*/ hook_region_start = get_allocation_base(addr_to_hook); if (hook_region_start == NULL) { /* to support raw virtual address hooks */ ASSERT_CURIOSITY("trying to hook raw or unallocated memory?"); hook_region_start = addr_to_hook; hook_region_end = addr_to_hook; } else { hook_region_end = hook_region_start + get_allocation_size(hook_region_start, NULL); ASSERT(hook_region_end > hook_region_start); /* check overflow */ /* If region size is > 2 GB, then it isn't an image; PE32{,+} restrict * images to 2 GB. Also, if region is > 2 GB the reachability macros * called below will return a region smaller (and with start and end * inverted) than the region from which the reachability is desired, * i.e., some of the areas in [hook_region_start, hook_region_end) * won't be able to reach the region computed. * * A better choice is to pick something smaller (100 MB) because if the * region is close to 2 GB in size then we might not be able to * allocate memory for a landing pad that is reachable. */ if (hook_region_end - hook_region_start > 100 * 1024 * 1024) { /* Try a smaller region of 100 MB around the address to hook. */ ASSERT_CURIOSITY(false && "seeing patch region > 100 MB - DGC?"); hook_region_start = MIN(addr_to_hook, MAX(hook_region_start, addr_to_hook - 50 * 1024 * 1024)); hook_region_end = MAX(addr_to_hook, MIN(hook_region_end, addr_to_hook + 50 * 1024 * 1024)); } } /* Define the region that can be reached from anywhere within the * hook region with a 32-bit rel jmp. */ alloc_region_start = REACHABLE_32BIT_START(hook_region_start, hook_region_end); alloc_region_end = REACHABLE_32BIT_END(hook_region_start, hook_region_end); ASSERT(alloc_region_start < alloc_region_end); /* Check if there is an existing landing pad area within the reachable * region for the hook location. If so use it, else allocate one. */ write_lock(&landing_pad_areas->lock); if (vmvector_overlap(landing_pad_areas, alloc_region_start, alloc_region_end)) { /* Now we have to get that landing pad area that is FULLY contained * within alloc_region_start and alloc_region_end. If a landing pad * area is only partially within the alloc region, then a landing pad * created there won't be able to reach the addr_to_hook. BTW, that * landing pad area should have enough space to allocate a landing pad! * If these conditions are met allocate a landing pad. */ vmvector_iterator_t lpad_area_iter; vmvector_iterator_start(landing_pad_areas, &lpad_area_iter); while (vmvector_iterator_hasnext(&lpad_area_iter)) { lpad_area = vmvector_iterator_next(&lpad_area_iter, &lpad_area_start, &lpad_area_end); if (lpad_area_start < alloc_region_end && lpad_area_end > alloc_region_start && (lpad_area->cur_ptr + LANDING_PAD_SIZE) < lpad_area_end) { /* See if enough memory in this landing pad area has been * committed, if not commit more memory. */ if ((lpad_area->cur_ptr + LANDING_PAD_SIZE) >= lpad_area->commit_end) { ASSERT(lpad_area->allocated); extend_commitment(lpad_area->commit_end, PAGE_SIZE, MEMPROT_READ|MEMPROT_EXEC, false /* not initial commit */, VMM_SPECIAL_MMAP); lpad_area->commit_end += PAGE_SIZE; } /* Update the current pointer for the landing pad area, i.e., * allocate the landing pad. */ lpad = lpad_area->cur_ptr; lpad_area->cur_ptr += LANDING_PAD_SIZE; break; } } vmvector_iterator_stop(&lpad_area_iter); } /* If a landing pad area wasn't found because there wasn't any in the * allocation region or none fully contained within the allocation region, * then create a new one within the allocation region. Then allocate a * landing pad in it. 
*/ if (lpad == NULL) { bool allocated = true; heap_error_code_t heap_error; lpad_area_end = NULL; lpad_area_start = os_heap_reserve_in_region ((void *)ALIGN_FORWARD(alloc_region_start, PAGE_SIZE), (void *)ALIGN_BACKWARD(alloc_region_end, PAGE_SIZE), LANDING_PAD_AREA_SIZE, &heap_error, true/*+x*/); if (lpad_area_start == NULL || heap_error == HEAP_ERROR_CANT_RESERVE_IN_REGION) { /* Should retry with using just the aligned target address - we may * have made the region so large that there's nothing nearby to * reserve. */ lpad_area_start = os_heap_reserve( (void *)ALIGN_FORWARD(addr_to_hook, LANDING_PAD_AREA_SIZE), LANDING_PAD_AREA_SIZE, &heap_error, true/*+x*/); # ifdef WINDOWS if (lpad_area_start == NULL && /* We can only do this once w/ current interface. * XXX: support multiple "allocs" inside libs. */ vmvector_empty(landing_pad_areas) && os_find_free_code_space_in_libs(&lpad_area_start, &lpad_area_end)) { if (lpad_area_end - lpad_area_start >= LANDING_PAD_SIZE && /* Mark writable until we're done creating landing pads */ make_hookable(lpad_area_start, lpad_area_end - lpad_area_start, NULL)) { /* Let's take it */ allocated = false; /* We assume that landing_pads_to_executable_areas(true) will be * called once landing pads are finished being created and we * can restore to +rx there. */ lpad_temp_writable_start = lpad_area_start; lpad_temp_writable_size = lpad_area_end - lpad_area_start; } else lpad_area_start = NULL; /* not big enough */ } # endif if (lpad_area_start == NULL) { /* Even at startup when there will be enough memory, * theoretically 2 GB of dlls might get packed together before * we get control (very unlikely), so we can fail. If it does, * then say 'oom' and exit. */ SYSLOG_INTERNAL_WARNING("unable to reserve memory for landing pads"); report_low_on_memory(OOM_RESERVE, heap_error); } } /* Allocate the landing pad area as rx, allocate a landing pad in it * and add it to landing_pad_areas vector. Note, we only commit 4k * initially even though we reserve 64k (LANDING_PAD_AREA_SIZE), to * avoid wastage. */ if (allocated) { extend_commitment(lpad_area_start, PAGE_SIZE, MEMPROT_READ|MEMPROT_EXEC, true /* initial commit */, VMM_SPECIAL_MMAP); } lpad_area = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, landing_pad_area_t, ACCT_VMAREAS, PROTECTED); lpad_area->start = lpad_area_start; lpad_area->end = (lpad_area_end == NULL ? lpad_area_start + LANDING_PAD_AREA_SIZE : lpad_area_end); lpad_area->commit_end = lpad_area_start + PAGE_SIZE; lpad_area->cur_ptr = lpad_area_start; lpad_area->allocated = allocated; lpad = lpad_area->cur_ptr; lpad_area->cur_ptr += LANDING_PAD_SIZE; vmvector_add(landing_pad_areas, lpad_area->start, lpad_area->end, lpad_area); STATS_INC(num_landing_pad_areas); } /* Landing pads aren't added to executable_areas here because not all * landing pads should be added. Only the ones used for DR hooks should be * added to executable areas (which is done using * landing_pads_to_executable_areas() at the end of inserting DR hooks). * hotp_only related landing pads shouldn't be added to executable areas as * their trampolines aren't added to executable_areas. This is why landing * pads aren't added to executable_areas here, at the point of allocation. */ LOG(GLOBAL, LOG_ALL, 3, "%s: used "PIFX" bytes in "PFX"-"PFX"\n", __FUNCTION__, lpad_area->cur_ptr - lpad_area->start, lpad_area->start, lpad_area->end); /* Boundary check to make sure the allocation is within the landing pad area. 
*/ ASSERT(lpad_area->cur_ptr <= lpad_area->end); write_unlock(&landing_pad_areas->lock); return lpad; } /* Attempts to save space in the landing pad region by trimming the most * recently allocated landing pad to the actual space used. * Will fail if another landing pad was allocated between lpad_start * being allocated and this routine being called. */ bool trim_landing_pad(byte *lpad_start, size_t space_used) { landing_pad_area_t *lpad_area = NULL; bool res = false; write_lock(&landing_pad_areas->lock); if (vmvector_lookup_data(landing_pad_areas, lpad_start, NULL, NULL, &lpad_area)) { if (lpad_start == lpad_area->cur_ptr - LANDING_PAD_SIZE) { lpad_area->cur_ptr -= (LANDING_PAD_SIZE - space_used); res = true; } } write_unlock(&landing_pad_areas->lock); return res; } /* Adds or removes all landing pads from executable_areas by adding whole * landing pad areas. This is done to prevent bb building from considering * landing pads to be selfmod code; as such, these don't have to be * {add,remov}ed from executable_areas for hotp_only or for thin_client mode. */ void landing_pads_to_executable_areas(bool add) { vmvector_iterator_t lpad_area_iter; app_pc lpad_area_start, lpad_area_end; DEBUG_DECLARE(landing_pad_area_t *lpad_area;) uint lpad_area_size; if (RUNNING_WITHOUT_CODE_CACHE()) return; # ifdef WINDOWS if (add && lpad_temp_writable_start != NULL) { make_unhookable(lpad_temp_writable_start, lpad_temp_writable_size, true); lpad_temp_writable_start = NULL; } # endif /* With code cache, there should be only one landing pad area, just for * dr hooks in ntdll. For 64-bit, the image entry hook will result in a * new landing pad. */ IF_X64_ELSE(,ASSERT(landing_pad_areas->length == 1);) /* Just to be safe, walk through all areas in release build. */ vmvector_iterator_start(landing_pad_areas, &lpad_area_iter); while (vmvector_iterator_hasnext(&lpad_area_iter)) { DEBUG_DECLARE(lpad_area = ) vmvector_iterator_next(&lpad_area_iter, &lpad_area_start, &lpad_area_end); lpad_area_size = (uint)(lpad_area_end - lpad_area_start); ASSERT(lpad_area_size <= LANDING_PAD_AREA_SIZE); /* Current ptr should be within area. */ ASSERT(lpad_area->cur_ptr < lpad_area_end); if (add) { add_executable_region(lpad_area_start, lpad_area_size _IF_DEBUG("add landing pad areas after inserting dr hooks")); } else { remove_executable_region(lpad_area_start, lpad_area_size, false /* no lock */); } } vmvector_iterator_stop(&lpad_area_iter); } /* Delete landing_pad_areas and the landing_pad_area_t allocated for each * landing pad area. However, release all landing pads only on process exit; * for detach leave the landing pads in (in case some one hooks after us they * shouldn't crash if they chain correctly). */ static void release_landing_pad_mem(void) { vmvector_iterator_t lpad_area_iter; app_pc lpad_area_start, lpad_area_end; landing_pad_area_t *lpad_area; heap_error_code_t heap_error; vmvector_iterator_start(landing_pad_areas, &lpad_area_iter); while (vmvector_iterator_hasnext(&lpad_area_iter)) { bool allocated; lpad_area = vmvector_iterator_next(&lpad_area_iter, &lpad_area_start, &lpad_area_end); allocated = lpad_area->allocated; HEAP_TYPE_FREE(GLOBAL_DCONTEXT, lpad_area, landing_pad_area_t, ACCT_VMAREAS, PROTECTED); if (!doing_detach && /* On normal exit release the landing pads. 
*/ allocated) os_heap_free(lpad_area_start, LANDING_PAD_AREA_SIZE, &heap_error); } vmvector_iterator_stop(&lpad_area_iter); vmvector_delete_vector(GLOBAL_DCONTEXT, landing_pad_areas); } #endif /* WINDOWS */ /*----------------------------------------------------------------------------*/
1
12,525
I guess it's not no-access in order to match Windows, where guard pages are readable.
DynamoRIO-dynamorio
c
@@ -103,7 +103,9 @@ module Ncr self.proposal.update(status: 'approved') else approvers = emails.map{|e| User.for_email(e)} + removed_approvers_to_notify = self.proposal.approvals.non_pending.map(&:user) - approvers self.proposal.approvers = approvers + Dispatcher.on_approver_removal(self.proposal, removed_approvers_to_notify) end end
1
require 'csv' module Ncr # Make sure all table names use 'ncr_XXX' def self.table_name_prefix 'ncr_' end EXPENSE_TYPES = %w(BA60 BA61 BA80) BUILDING_NUMBERS = YAML.load_file("#{Rails.root}/config/data/ncr/building_numbers.yml") class WorkOrder < ActiveRecord::Base include ValueHelper include ProposalDelegate # This is a hack to be able to attribute changes to the correct user. This attribute needs to be set explicitly, then the update comment will use them as the "commenter". Defaults to the requester. attr_accessor :modifier after_initialize :set_defaults before_validation :normalize_values before_update :record_changes # @TODO: use integer number of cents to avoid floating point issues validates :amount, numericality: { less_than_or_equal_to: 3000, message: "must be less than or equal to $3,000" } validates :amount, numericality: { greater_than_or_equal_to: 0, message: "must be greater than or equal to $0" } validates :cl_number, format: { with: /\ACL\d{7}\z/, message: "must start with 'CL', followed by seven numbers" }, allow_blank: true validates :expense_type, inclusion: {in: EXPENSE_TYPES}, presence: true validates :function_code, format: { with: /\APG[A-Z0-9]{3}\z/, message: "must start with 'PG', followed by three letters or numbers" }, allow_blank: true validates :project_title, presence: true validates :vendor, presence: true validates :building_number, presence: true validates :rwa_number, presence: true, if: :ba80? validates :rwa_number, format: { with: /\A[a-zA-Z][0-9]{7}\z/, message: "must be one letter followed by 7 numbers" }, allow_blank: true validates :soc_code, format: { with: /\A[A-Z0-9]{3}\z/, message: "must be three letters or numbers" }, allow_blank: true def set_defaults self.direct_pay ||= false self.not_to_exceed ||= false self.emergency ||= false end # For budget attributes, converts empty strings to `nil`, so that the request isn't shown as being modified when the fields appear in the edit form. def normalize_values if self.cl_number.present? self.cl_number = self.cl_number.upcase self.cl_number.prepend('CL') unless self.cl_number.start_with?('CL') else self.cl_number = nil end if self.function_code.present? self.function_code.upcase! self.function_code.prepend('PG') unless self.function_code.start_with?('PG') else self.function_code = nil end if self.soc_code.present? self.soc_code.upcase! else self.soc_code = nil end end def approver_email_frozen? approval = self.individual_approvals.first approval && !approval.actionable? end def approver_changed?(approval_email) self.approving_official && self.approving_official.email_address != approval_email end def setup_approvals_and_observers(approving_official_email) emails = self.system_approver_emails if self.approver_email_frozen? emails.unshift(self.approving_official.email_address) else emails.unshift(approving_official_email) end if self.emergency emails.each{|e| self.add_observer(e)} # skip state machine self.proposal.update(status: 'approved') else approvers = emails.map{|e| User.for_email(e)} self.proposal.approvers = approvers end end def approving_official self.approvers.first end def email_approvers Dispatcher.on_proposal_update(self.proposal) end # Ignore values in certain fields if they aren't relevant. 
May want to # split these into different models def self.relevant_fields(expense_type) fields = [:description, :amount, :expense_type, :vendor, :not_to_exceed, :building_number, :org_code, :direct_pay, :cl_number, :function_code, :soc_code] case expense_type when 'BA61' fields << :emergency when 'BA80' fields.concat([:rwa_number, :code]) end fields end def relevant_fields Ncr::WorkOrder.relevant_fields(self.expense_type) end # Methods for Client Data interface def fields_for_display attributes = self.relevant_fields attributes.map{|key| [WorkOrder.human_attribute_name(key), self[key]]} end def client "ncr" end # will return nil if the `org_code` is blank or not present in Organization list def organization # TODO reference by `code` rather than storing the whole thing code = (self.org_code || '').split(' ', 2)[0] Ncr::Organization.find(code) end def ba80? self.expense_type == 'BA80' end def public_identifier "FY" + self.fiscal_year.to_s.rjust(2, "0") + "-#{self.proposal.id}" end def total_price self.amount || 0.0 end # may be replaced with paper-trail or similar at some point def version self.updated_at.to_i end def name self.project_title end def system_approver_emails results = [] if %w(BA60 BA61).include?(self.expense_type) unless self.organization.try(:whsc?) results << self.class.ba61_tier1_budget_mailbox end results << self.class.ba61_tier2_budget_mailbox else results << self.class.ba80_budget_mailbox end results end def self.ba61_tier1_budget_mailbox ENV['NCR_BA61_TIER1_BUDGET_MAILBOX'] || '[email protected]' end def self.ba61_tier2_budget_mailbox ENV['NCR_BA61_TIER2_BUDGET_MAILBOX'] || '[email protected]' end def self.ba80_budget_mailbox ENV['NCR_BA80_BUDGET_MAILBOX'] || '[email protected]' end def org_id self.organization.try(:code) end def building_id regex = /\A(\w{8}) .*\z/ if self.building_number && regex.match(self.building_number) regex.match(self.building_number)[1] else self.building_number end end def as_json super.merge(org_id: self.org_id, building_id: self.building_id) end protected # TODO move to Proposal model def record_changes changed_attributes = self.changed_attributes.except(:updated_at) comment_texts = [] bullet = changed_attributes.length > 1 ? '- ' : '' changed_attributes.each do |key, value| former = property_to_s(self.send(key + "_was")) value = property_to_s(self[key]) property_name = WorkOrder.human_attribute_name(key) comment_texts << WorkOrder.update_comment_format(property_name, value, bullet, former) end if !comment_texts.empty? if self.approved? comment_texts << "_Modified post-approval_" end self.proposal.comments.create( comment_text: comment_texts.join("\n"), update_comment: true, user: self.modifier || self.requester ) end end def self.update_comment_format key, value, bullet, former=nil from = former ? "from #{former} " : '' "#{bullet}*#{key}* was changed " + from + "to #{value}" end def fiscal_year year = self.created_at.nil? ? Time.now.year : self.created_at.year month = self.created_at.nil? ? Time.now.month : self.created_at.month if month >= 10 year += 1 end year % 100 # convert to two-digit end end end
1
13,670
Not a blocker, but it would probably be useful to have a `scope :non_pending_approvers` on the `Proposal` model.
18F-C2
rb
@@ -94,6 +94,16 @@ class TaskException(Exception): pass +GetWorkResponse = collections.namedtuple('GetWorkResponse', ( + 'task_id', + 'running_tasks', + 'n_pending_tasks', + 'n_unique_pending', + 'n_pending_last_scheduled', + 'worker_state' +)) + + class TaskProcess(multiprocessing.Process): """ Wrap all task execution in this class.
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ The worker communicates with the scheduler and does two things: 1. Sends all tasks that has to be run 2. Gets tasks from the scheduler that should be run When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.Scheduler` instance. When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance. Everything in this module is private to luigi and may change in incompatible ways between versions. The exception is the exception types and the :py:class:`worker` config class. """ import collections import getpass import logging import multiprocessing import os import signal import subprocess import sys try: import Queue except ImportError: import queue as Queue import random import socket import threading import time import traceback import types from luigi import six from luigi import notifications from luigi.event import Event from luigi.task_register import load_task from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, UNKNOWN, Scheduler, RetryPolicy from luigi.scheduler import WORKER_STATE_ACTIVE, WORKER_STATE_DISABLED from luigi.target import Target from luigi.task import Task, flatten, getpaths, Config from luigi.task_register import TaskClassException from luigi.task_status import RUNNING from luigi.parameter import FloatParameter, IntParameter, BoolParameter try: import simplejson as json except ImportError: import json logger = logging.getLogger('luigi-interface') # Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex, # that may not be unlocked in child process, resulting in the process being locked indefinitely. fork_lock = threading.Lock() # Why we assert on _WAIT_INTERVAL_EPS: # multiprocessing.Queue.get() is undefined for timeout=0 it seems: # https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get. # I also tried with really low epsilon, but then ran into the same issue where # the test case "test_external_dependency_worker_is_patient" got stuck. So I # unscientifically just set the final value to a floating point number that # "worked for me". _WAIT_INTERVAL_EPS = 0.00001 def _is_external(task): return task.run is None or task.run == NotImplemented def _get_retry_policy_dict(task): return RetryPolicy(task.retry_count, task.disable_hard_timeout, task.disable_window_seconds)._asdict() class TaskException(Exception): pass class TaskProcess(multiprocessing.Process): """ Wrap all task execution in this class. Mainly for convenience since this is run in a separate process. 
""" def __init__(self, task, worker_id, result_queue, tracking_url_callback, status_message_callback, random_seed=False, worker_timeout=0): super(TaskProcess, self).__init__() self.task = task self.worker_id = worker_id self.result_queue = result_queue self.tracking_url_callback = tracking_url_callback self.status_message_callback = status_message_callback self.random_seed = random_seed if task.worker_timeout is not None: worker_timeout = task.worker_timeout self.timeout_time = time.time() + worker_timeout if worker_timeout else None def _run_get_new_deps(self): self.task.set_tracking_url = self.tracking_url_callback self.task.set_status_message = self.status_message_callback task_gen = self.task.run() self.task.set_tracking_url = None self.task.set_status_message = None if not isinstance(task_gen, types.GeneratorType): return None next_send = None while True: try: if next_send is None: requires = six.next(task_gen) else: requires = task_gen.send(next_send) except StopIteration: return None new_req = flatten(requires) if all(t.complete() for t in new_req): next_send = getpaths(requires) else: new_deps = [(t.task_module, t.task_family, t.to_str_params()) for t in new_req] return new_deps def run(self): logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task) if self.random_seed: # Need to have different random seeds if running in separate processes random.seed((os.getpid(), time.time())) status = FAILED expl = '' missing = [] new_deps = [] try: # Verify that all the tasks are fulfilled! For external tasks we # don't care about unfulfilled dependencies, because we are just # checking completeness of self.task so outputs of dependencies are # irrelevant. if not _is_external(self.task): missing = [dep.task_id for dep in self.task.deps() if not dep.complete()] if missing: deps = 'dependency' if len(missing) == 1 else 'dependencies' raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing))) self.task.trigger_event(Event.START, self.task) t0 = time.time() status = None if _is_external(self.task): # External task # TODO(erikbern): We should check for task completeness after non-external tasks too! # This will resolve #814 and make things a lot more consistent if self.task.complete(): status = DONE else: status = FAILED expl = 'Task is an external data dependency ' \ 'and data does not exist (yet?).' else: new_deps = self._run_get_new_deps() status = DONE if not new_deps else PENDING if new_deps: logger.info( '[pid %s] Worker %s new requirements %s', os.getpid(), self.worker_id, self.task) elif status == DONE: self.task.trigger_event( Event.PROCESSING_TIME, self.task, time.time() - t0) expl = self.task.on_success() logger.info('[pid %s] Worker %s done %s', os.getpid(), self.worker_id, self.task) self.task.trigger_event(Event.SUCCESS, self.task) except KeyboardInterrupt: raise except BaseException as ex: status = FAILED logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task) self.task.trigger_event(Event.FAILURE, self.task, ex) raw_error_message = self.task.on_failure(ex) expl = raw_error_message finally: self.result_queue.put( (self.task.task_id, status, expl, missing, new_deps)) def _recursive_terminate(self): import psutil try: parent = psutil.Process(self.pid) children = parent.children(recursive=True) # terminate parent. 
Give it a chance to clean up super(TaskProcess, self).terminate() parent.wait() # terminate children for child in children: try: child.terminate() except psutil.NoSuchProcess: continue except psutil.NoSuchProcess: return def terminate(self): """Terminate this process and its subprocesses.""" # default terminate() doesn't cleanup child processes, it orphans them. try: return self._recursive_terminate() except ImportError: return super(TaskProcess, self).terminate() class SingleProcessPool(object): """ Dummy process pool for using a single processor. Imitates the api of multiprocessing.Pool using single-processor equivalents. """ def apply_async(self, function, args): return function(*args) def close(self): pass def join(self): pass class DequeQueue(collections.deque): """ deque wrapper implementing the Queue interface. """ def put(self, obj, block=None, timeout=None): return self.append(obj) def get(self, block=None, timeout=None): try: return self.pop() except IndexError: raise Queue.Empty class AsyncCompletionException(Exception): """ Exception indicating that something went wrong with checking complete. """ def __init__(self, trace): self.trace = trace class TracebackWrapper(object): """ Class to wrap tracebacks so we can know they're not just strings. """ def __init__(self, trace): self.trace = trace def check_complete(task, out_queue): """ Checks if task is complete, puts the result to out_queue. """ logger.debug("Checking if %s is complete", task) try: is_complete = task.complete() except Exception: is_complete = TracebackWrapper(traceback.format_exc()) out_queue.put((task, is_complete)) class worker(Config): # NOTE: `section.config-variable` in the config_path argument is deprecated in favor of `worker.config_variable` ping_interval = FloatParameter(default=1.0, config_path=dict(section='core', name='worker-ping-interval')) keep_alive = BoolParameter(default=False, config_path=dict(section='core', name='worker-keep-alive')) count_uniques = BoolParameter(default=False, config_path=dict(section='core', name='worker-count-uniques'), description='worker-count-uniques means that we will keep a ' 'worker alive only if it has a unique pending task, as ' 'well as having keep-alive true') wait_interval = FloatParameter(default=1.0, config_path=dict(section='core', name='worker-wait-interval')) wait_jitter = FloatParameter(default=5.0) max_reschedules = IntParameter(default=1, config_path=dict(section='core', name='worker-max-reschedules')) timeout = IntParameter(default=0, config_path=dict(section='core', name='worker-timeout')) task_limit = IntParameter(default=None, config_path=dict(section='core', name='worker-task-limit')) retry_external_tasks = BoolParameter(default=False, config_path=dict(section='core', name='retry-external-tasks'), description='If true, incomplete external tasks will be ' 'retested for completion while Luigi is running.') no_install_shutdown_handler = BoolParameter(default=False, description='If true, the SIGUSR1 shutdown handler will' 'NOT be install on the worker') class KeepAliveThread(threading.Thread): """ Periodically tell the scheduler that the worker still lives. 
""" def __init__(self, scheduler, worker_id, ping_interval): super(KeepAliveThread, self).__init__() self._should_stop = threading.Event() self._scheduler = scheduler self._worker_id = worker_id self._ping_interval = ping_interval def stop(self): self._should_stop.set() def run(self): while True: self._should_stop.wait(self._ping_interval) if self._should_stop.is_set(): logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id) break with fork_lock: try: self._scheduler.ping(worker=self._worker_id) except: # httplib.BadStatusLine: logger.warning('Failed pinging scheduler') class Worker(object): """ Worker object communicates with a scheduler. Simple class that talks to a scheduler and: * tells the scheduler what it has to do + its dependencies * asks for stuff to do (pulls it in a loop and runs it) """ def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs): if scheduler is None: scheduler = Scheduler() self.worker_processes = int(worker_processes) self._worker_info = self._generate_worker_info() if not worker_id: worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info]) self._config = worker(**kwargs) assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive" assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero" self._id = worker_id self._scheduler = scheduler self._assistant = assistant self._stop_requesting_work = False self.host = socket.gethostname() self._scheduled_tasks = {} self._suspended_tasks = {} self._batch_running_tasks = {} self._batch_families_sent = set() self._first_task = None self.add_succeeded = True self.run_succeeded = True self.unfulfilled_counts = collections.defaultdict(int) # note that ``signal.signal(signal.SIGUSR1, fn)`` only works inside the main execution thread, which is why we # provide the ability to conditionally install the hook. if not self._config.no_install_shutdown_handler: try: signal.signal(signal.SIGUSR1, self.handle_interrupt) signal.siginterrupt(signal.SIGUSR1, False) except AttributeError: pass # Keep info about what tasks are running (could be in other processes) if worker_processes == 1: self._task_result_queue = DequeQueue() else: self._task_result_queue = multiprocessing.Queue() self._running_tasks = {} # Stuff for execution_summary self._add_task_history = [] self._get_work_response_history = [] def _add_task(self, *args, **kwargs): """ Call ``self._scheduler.add_task``, but store the values too so we can implement :py:func:`luigi.execution_summary.summary`. """ task_id = kwargs['task_id'] status = kwargs['status'] runnable = kwargs['runnable'] task = self._scheduled_tasks.get(task_id) if task: msg = (task, status, runnable) self._add_task_history.append(msg) if task_id in self._batch_running_tasks: for batch_task in self._batch_running_tasks.pop(task_id): self._add_task_history.append((batch_task, status, True)) self._scheduler.add_task(*args, **kwargs) logger.info('Informed scheduler that task %s has status %s', task_id, status) def __enter__(self): """ Start the KeepAliveThread. """ self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval) self._keep_alive_thread.daemon = True self._keep_alive_thread.start() return self def __exit__(self, type, value, traceback): """ Stop the KeepAliveThread and kill still running tasks. 
""" self._keep_alive_thread.stop() self._keep_alive_thread.join() for task in self._running_tasks.values(): if task.is_alive(): task.terminate() return False # Don't suppress exception def _generate_worker_info(self): # Generate as much info as possible about the worker # Some of these calls might not be available on all OS's args = [('salt', '%09d' % random.randrange(0, 999999999)), ('workers', self.worker_processes)] try: args += [('host', socket.gethostname())] except BaseException: pass try: args += [('username', getpass.getuser())] except BaseException: pass try: args += [('pid', os.getpid())] except BaseException: pass try: sudo_user = os.getenv("SUDO_USER") if sudo_user: args.append(('sudo_user', sudo_user)) except BaseException: pass return args def _validate_task(self, task): if not isinstance(task, Task): raise TaskException('Can not schedule non-task %s' % task) if not task.initialized(): # we can't get the repr of it since it's not initialized... raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__) def _log_complete_error(self, task, tb): log_msg = "Will not run {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb) logger.warning(log_msg) def _log_dependency_error(self, task, tb): log_msg = "Will not run {task} or any dependencies due to error in deps() method:\n{tb}".format(task=task, tb=tb) logger.warning(log_msg) def _log_unexpected_error(self, task): logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause def _email_complete_error(self, task, formatted_traceback): self._email_error(task, formatted_traceback, subject="Luigi: {task} failed scheduling. Host: {host}", headline="Will not run {task} or any dependencies due to error in complete() method", ) def _email_dependency_error(self, task, formatted_traceback): self._email_error(task, formatted_traceback, subject="Luigi: {task} failed scheduling. Host: {host}", headline="Will not run {task} or any dependencies due to error in deps() method", ) def _email_unexpected_error(self, task, formatted_traceback): self._email_error(task, formatted_traceback, subject="Luigi: Framework error while scheduling {task}. Host: {host}", headline="Luigi framework error", ) def _email_task_failure(self, task, formatted_traceback): self._email_error(task, formatted_traceback, subject="Luigi: {task} FAILED. Host: {host}", headline="A task failed when running. Most likely run() raised an exception.", ) def _email_error(self, task, formatted_traceback, subject, headline): formatted_subject = subject.format(task=task, host=self.host) command = subprocess.list2cmdline(sys.argv) message = notifications.format_task_error(headline, task, command, formatted_traceback) notifications.send_error_email(formatted_subject, message, task.owner_email) def add(self, task, multiprocess=False): """ Add a Task for the worker to check and possibly schedule and run. Returns True if task and its dependencies were successfully scheduled or completed before. 
""" if self._first_task is None and hasattr(task, 'task_id'): self._first_task = task.task_id self.add_succeeded = True if multiprocess: queue = multiprocessing.Manager().Queue() pool = multiprocessing.Pool() else: queue = DequeQueue() pool = SingleProcessPool() self._validate_task(task) pool.apply_async(check_complete, [task, queue]) # we track queue size ourselves because len(queue) won't work for multiprocessing queue_size = 1 try: seen = set([task.task_id]) while queue_size: current = queue.get() queue_size -= 1 item, is_complete = current for next in self._add(item, is_complete): if next.task_id not in seen: self._validate_task(next) seen.add(next.task_id) pool.apply_async(check_complete, [next, queue]) queue_size += 1 except (KeyboardInterrupt, TaskException): raise except Exception as ex: self.add_succeeded = False formatted_traceback = traceback.format_exc() self._log_unexpected_error(task) task.trigger_event(Event.BROKEN_TASK, task, ex) self._email_unexpected_error(task, formatted_traceback) raise finally: pool.close() pool.join() return self.add_succeeded def _add_task_batcher(self, task): family = task.task_family if family not in self._batch_families_sent: task_class = type(task) batch_param_names = task_class.batch_param_names() if batch_param_names: self._scheduler.add_task_batcher( worker=self._id, task_family=family, batched_args=batch_param_names, max_batch_size=task.max_batch_size, ) self._batch_families_sent.add(family) def _add(self, task, is_complete): if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit: logger.warning('Will not run %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit) deps = None status = UNKNOWN runnable = False else: formatted_traceback = None try: self._check_complete_value(is_complete) except KeyboardInterrupt: raise except AsyncCompletionException as ex: formatted_traceback = ex.trace except BaseException: formatted_traceback = traceback.format_exc() if formatted_traceback is not None: self.add_succeeded = False self._log_complete_error(task, formatted_traceback) task.trigger_event(Event.DEPENDENCY_MISSING, task) self._email_complete_error(task, formatted_traceback) deps = None status = UNKNOWN runnable = False elif is_complete: deps = None status = DONE runnable = False task.trigger_event(Event.DEPENDENCY_PRESENT, task) elif _is_external(task): deps = None status = PENDING runnable = worker().retry_external_tasks task.trigger_event(Event.DEPENDENCY_MISSING, task) logger.warning('Data for %s does not exist (yet?). 
The task is an ' 'external data depedency, so it can not be run from' ' this luigi process.', task) else: try: deps = task.deps() self._add_task_batcher(task) except Exception as ex: formatted_traceback = traceback.format_exc() self.add_succeeded = False self._log_dependency_error(task, formatted_traceback) task.trigger_event(Event.BROKEN_TASK, task, ex) self._email_dependency_error(task, formatted_traceback) deps = None status = UNKNOWN runnable = False else: status = PENDING runnable = True if task.disabled: status = DISABLED if deps: for d in deps: self._validate_dependency(d) task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d) yield d # return additional tasks to add deps = [d.task_id for d in deps] self._scheduled_tasks[task.task_id] = task self._add_task( worker=self._id, task_id=task.task_id, status=status, deps=deps, runnable=runnable, priority=task.priority, resources=task.process_resources(), params=task.to_str_params(), family=task.task_family, module=task.task_module, batchable=task.batchable, retry_policy_dict=_get_retry_policy_dict(task), ) def _validate_dependency(self, dependency): if isinstance(dependency, Target): raise Exception('requires() can not return Target objects. Wrap it in an ExternalTask class') elif not isinstance(dependency, Task): raise Exception('requires() must return Task objects') def _check_complete_value(self, is_complete): if is_complete not in (True, False): if isinstance(is_complete, TracebackWrapper): raise AsyncCompletionException(is_complete.trace) raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete) def _add_worker(self): self._worker_info.append(('first_task', self._first_task)) self._scheduler.add_worker(self._id, self._worker_info) def _log_remote_tasks(self, running_tasks, n_pending_tasks, n_unique_pending): logger.debug("Done") logger.debug("There are no more tasks to run at this time") if running_tasks: for r in running_tasks: logger.debug('%s is currently run by worker %s', r['task_id'], r['worker']) elif n_pending_tasks: logger.debug("There are %s pending tasks possibly being run by other workers", n_pending_tasks) if n_unique_pending: logger.debug("There are %i pending tasks unique to this worker", n_unique_pending) def _get_work_task_id(self, get_work_response): if get_work_response['task_id'] is not None: return get_work_response['task_id'] elif 'batch_id' in get_work_response: task = load_task( module=get_work_response.get('task_module'), task_name=get_work_response['task_family'], params_str=get_work_response['task_params'], ) self._scheduler.add_task( worker=self._id, task_id=task.task_id, module=get_work_response.get('task_module'), family=get_work_response['task_family'], params=task.to_str_params(), status=RUNNING, batch_id=get_work_response['batch_id'], ) return task.task_id def _get_work(self): if self._stop_requesting_work: return None, 0, 0, 0, WORKER_STATE_DISABLED logger.debug("Asking scheduler for work...") r = self._scheduler.get_work( worker=self._id, host=self.host, assistant=self._assistant, current_tasks=list(self._running_tasks.keys()), ) n_pending_tasks = r['n_pending_tasks'] running_tasks = r['running_tasks'] n_unique_pending = r['n_unique_pending'] # TODO: For a tiny amount of time (a month?) we'll keep forwards compatibility # That is you can user a newer client than server (Sep 2016) worker_state = r.get('worker_state', WORKER_STATE_ACTIVE) # state according to server! 
task_id = self._get_work_task_id(r) self._get_work_response_history.append({ 'task_id': task_id, 'running_tasks': running_tasks, }) if task_id is not None and task_id not in self._scheduled_tasks: logger.info('Did not schedule %s, will load it dynamically', task_id) try: # TODO: we should obtain the module name from the server! self._scheduled_tasks[task_id] = \ load_task(module=r.get('task_module'), task_name=r['task_family'], params_str=r['task_params']) except TaskClassException as ex: msg = 'Cannot find task for %s' % task_id logger.exception(msg) subject = 'Luigi: %s' % msg error_message = notifications.wrap_traceback(ex) notifications.send_error_email(subject, error_message) self._add_task(worker=self._id, task_id=task_id, status=FAILED, runnable=False, assistant=self._assistant) task_id = None self.run_succeeded = False if task_id is not None and 'batch_task_ids' in r: batch_tasks = filter(None, [ self._scheduled_tasks.get(batch_id) for batch_id in r['batch_task_ids']]) self._batch_running_tasks[task_id] = batch_tasks return task_id, running_tasks, n_pending_tasks, n_unique_pending, worker_state def _run_task(self, task_id): task = self._scheduled_tasks[task_id] p = self._create_task_process(task) self._running_tasks[task_id] = p if self.worker_processes > 1: with fork_lock: p.start() else: # Run in the same process p.run() def _create_task_process(self, task): def update_tracking_url(tracking_url): self._scheduler.add_task( task_id=task.task_id, worker=self._id, status=RUNNING, tracking_url=tracking_url, ) def update_status_message(message): self._scheduler.set_task_status_message(task.task_id, message) return TaskProcess( task, self._id, self._task_result_queue, update_tracking_url, update_status_message, random_seed=bool(self.worker_processes > 1), worker_timeout=self._config.timeout ) def _purge_children(self): """ Find dead children and put a response on the result queue. :return: """ for task_id, p in six.iteritems(self._running_tasks): if not p.is_alive() and p.exitcode: error_msg = 'Task {} died unexpectedly with exit code {}'.format(task_id, p.exitcode) p.task.trigger_event(Event.PROCESS_FAILURE, p.task, error_msg) elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive(): p.terminate() error_msg = 'Task {} timed out after {} seconds and was terminated.'.format(task_id, p.task.worker_timeout) p.task.trigger_event(Event.TIMEOUT, p.task, error_msg) else: continue logger.info(error_msg) self._task_result_queue.put((task_id, FAILED, error_msg, [], [])) def _handle_next_task(self): """ We have to catch three ways a task can be "done": 1. normal execution: the task runs/fails and puts a result back on the queue, 2. new dependencies: the task yielded new deps that were not complete and will be rescheduled and dependencies added, 3. child process dies: we need to catch this separately. """ while True: self._purge_children() # Deal with subprocess failures try: task_id, status, expl, missing, new_requirements = ( self._task_result_queue.get( timeout=self._config.wait_interval)) except Queue.Empty: return task = self._scheduled_tasks[task_id] if not task or task_id not in self._running_tasks: continue # Not a running task. Probably already removed. # Maybe it yielded something? # external task if run not implemented, retry-able if config option is enabled. 
external_task_retryable = _is_external(task) and self._config.retry_external_tasks if status == FAILED and not external_task_retryable: self._email_task_failure(task, expl) new_deps = [] if new_requirements: new_req = [load_task(module, name, params) for module, name, params in new_requirements] for t in new_req: self.add(t) new_deps = [t.task_id for t in new_req] self._add_task(worker=self._id, task_id=task_id, status=status, expl=json.dumps(expl), resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant) self._running_tasks.pop(task_id) # re-add task to reschedule missing dependencies if missing: reschedule = True # keep out of infinite loops by not rescheduling too many times for task_id in missing: self.unfulfilled_counts[task_id] += 1 if (self.unfulfilled_counts[task_id] > self._config.max_reschedules): reschedule = False if reschedule: self.add(task) self.run_succeeded &= (status == DONE) or (len(new_deps) > 0) return def _sleeper(self): # TODO is exponential backoff necessary? while True: jitter = self._config.wait_jitter wait_interval = self._config.wait_interval + random.uniform(0, jitter) logger.debug('Sleeping for %f seconds', wait_interval) time.sleep(wait_interval) yield def _keep_alive(self, n_pending_tasks, n_unique_pending): """ Returns true if a worker should stay alive given. If worker-keep-alive is not set, this will always return false. For an assistant, it will always return the value of worker-keep-alive. Otherwise, it will return true for nonzero n_pending_tasks. If worker-count-uniques is true, it will also require that one of the tasks is unique to this worker. """ if not self._config.keep_alive: return False elif self._assistant: return True else: return n_pending_tasks and (n_unique_pending or not self._config.count_uniques) def handle_interrupt(self, signum, _): """ Stops the assistant from asking for more work on SIGUSR1 """ if signum == signal.SIGUSR1: self._start_phasing_out() def _start_phasing_out(self): """ Go into a mode where we dont ask for more work and quit once existing tasks are done. """ self._config.keep_alive = False self._stop_requesting_work = True def run(self): """ Returns True if all scheduled tasks were executed successfully. """ logger.info('Running Worker with %d processes', self.worker_processes) sleeper = self._sleeper() self.run_succeeded = True self._add_worker() while True: while len(self._running_tasks) >= self.worker_processes: logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks)) self._handle_next_task() task_id, running_tasks, n_pending_tasks, n_unique_pending, worker_state = self._get_work() if worker_state == WORKER_STATE_DISABLED: self._start_phasing_out() if task_id is None: if not self._stop_requesting_work: self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending) if len(self._running_tasks) == 0: if self._keep_alive(n_pending_tasks, n_unique_pending): six.next(sleeper) continue else: break else: self._handle_next_task() continue # task_id is not None: logger.debug("Pending tasks: %s", n_pending_tasks) self._run_task(task_id) while len(self._running_tasks): logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks)) self._handle_next_task() return self.run_succeeded
1
15,864
Add trailing comma
spotify-luigi
py
@@ -28,7 +28,7 @@ func StartModules() { // GracefulShutdown is if it gets the special signals it does modules cleanup func GracefulShutdown() { c := make(chan os.Signal) - signal.Notify(c, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGKILL, + signal.Notify(c, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGILL, syscall.SIGTRAP, syscall.SIGABRT) select { case s := <-c:
1
package core import ( "os" "os/signal" "syscall" "k8s.io/klog" beehiveContext "github.com/kubeedge/beehive/pkg/core/context" ) // StartModules starts modules that are registered func StartModules() { beehiveContext.InitContext(beehiveContext.MsgCtxTypeChannel) modules := GetModules() for name, module := range modules { //Init the module beehiveContext.AddModule(name) //Assemble typeChannels for sendToGroup beehiveContext.AddModuleGroup(name, module.Group()) go module.Start() klog.Infof("Starting module %v", name) } } // GracefulShutdown is if it gets the special signals it does modules cleanup func GracefulShutdown() { c := make(chan os.Signal) signal.Notify(c, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGKILL, syscall.SIGQUIT, syscall.SIGILL, syscall.SIGTRAP, syscall.SIGABRT) select { case s := <-c: klog.Infof("Get os signal %v", s.String()) //Cleanup each modules beehiveContext.Cancel() modules := GetModules() for name, _ := range modules { klog.Infof("Cleanup module %v", name) beehiveContext.Cleanup(name) } } } // Run starts the modules and in the end does module cleanup func Run() { // Address the module registration and start the core StartModules() // monitor system signal and shutdown gracefully GracefulShutdown() }
1
18,160
I've seen it said that SIGKILL cannot be caught by a process. Doesn't that make the original code's use of `SIGKILL` here useless? (A minimal sketch illustrating this follows this entry.)
kubeedge-kubeedge
go
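On the review question above: Go's os/signal documentation states that SIGKILL and SIGSTOP may not be caught, blocked, or ignored, so passing syscall.SIGKILL to signal.Notify has no effect, which is presumably why the patch removes it from the Notify call. Below is a minimal, self-contained sketch of the same graceful-shutdown pattern with the un-catchable signal omitted; the package name, signal set, and logging are illustrative only and are not taken from the kubeedge sources.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Use a buffered channel: os/signal does not block when delivering a
	// signal, so an unbuffered channel risks dropping one.
	c := make(chan os.Signal, 1)

	// SIGKILL and SIGSTOP cannot be trapped, so they are intentionally
	// absent; the catchable termination signals are enough for cleanup.
	signal.Notify(c, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM,
		syscall.SIGQUIT, syscall.SIGABRT)

	s := <-c
	fmt.Printf("got signal %v, running module cleanup\n", s)
	// ... perform module cleanup here, then exit ...
}

Under the same assumption, note that the original snippet's unbuffered make(chan os.Signal) is also something the os/signal documentation warns about, since Notify will not block when sending to the channel.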
@@ -241,6 +241,7 @@ class Setting extends BaseModel { let output = {}; output[Setting.THEME_LIGHT] = _('Light'); output[Setting.THEME_DARK] = _('Dark'); + output[Setting.THEME_OLED_DARK] = _('OLED dark'); if (platform !== 'mobile') { output[Setting.THEME_DRACULA] = _('Dracula'); output[Setting.THEME_SOLARIZED_LIGHT] = _('Solarised Light');
1
const BaseModel = require('lib/BaseModel.js'); const { Database } = require('lib/database.js'); const SyncTargetRegistry = require('lib/SyncTargetRegistry.js'); const { time } = require('lib/time-utils.js'); const { sprintf } = require('sprintf-js'); const ObjectUtils = require('lib/ObjectUtils'); const { toTitleCase } = require('lib/string-utils.js'); const { rtrimSlashes } = require('lib/path-utils.js'); const { _, supportedLocalesToLanguages, defaultLocale } = require('lib/locale.js'); const { shim } = require('lib/shim'); class Setting extends BaseModel { static tableName() { return 'settings'; } static modelType() { return BaseModel.TYPE_SETTING; } static metadata() { if (this.metadata_) return this.metadata_; const platform = shim.platformName(); const mobilePlatform = shim.mobilePlatform(); const emptyDirWarning = _('Attention: If you change this location, make sure you copy all your content to it before syncing, otherwise all files will be removed! See the FAQ for more details: %s', 'https://joplinapp.org/faq/'); // A "public" setting means that it will show up in the various config screens (or config command for the CLI tool), however // if if private a setting might still be handled and modified by the app. For instance, the settings related to sorting notes are not // public for the mobile and desktop apps because they are handled separately in menus. this.metadata_ = { 'clientId': { value: '', type: Setting.TYPE_STRING, public: false, }, 'editor.keyboardMode': { value: 'default', type: Setting.TYPE_STRING, public: true, appTypes: ['desktop'], isEnum: true, label: () => _('Keyboard Mode'), options: () => { let output = {}; output['default'] = _('Default'); output['emacs'] = _('Emacs'); output['vim'] = _('Vim'); return output; }, }, 'sync.target': { value: SyncTargetRegistry.nameToId('dropbox'), type: Setting.TYPE_INT, isEnum: true, public: true, section: 'sync', label: () => _('Synchronisation target'), description: appType => { return appType !== 'cli' ? null : _('The target to synchonise to. Each sync target may have additional parameters which are named as `sync.NUM.NAME` (all documented below).'); }, options: () => { return SyncTargetRegistry.idAndLabelPlainObject(); }, }, 'sync.2.path': { value: '', type: Setting.TYPE_STRING, section: 'sync', show: settings => { try { return settings['sync.target'] == SyncTargetRegistry.nameToId('filesystem'); } catch (error) { return false; } }, filter: value => { return value ? 
rtrimSlashes(value) : ''; }, public: true, label: () => _('Directory to synchronise with (absolute path)'), description: () => emptyDirWarning, }, 'sync.5.path': { value: '', type: Setting.TYPE_STRING, section: 'sync', show: settings => { return settings['sync.target'] == SyncTargetRegistry.nameToId('nextcloud'); }, public: true, label: () => _('Nextcloud WebDAV URL'), description: () => emptyDirWarning, }, 'sync.5.username': { value: '', type: Setting.TYPE_STRING, section: 'sync', show: settings => { return settings['sync.target'] == SyncTargetRegistry.nameToId('nextcloud'); }, public: true, label: () => _('Nextcloud username'), }, 'sync.5.password': { value: '', type: Setting.TYPE_STRING, section: 'sync', show: settings => { return settings['sync.target'] == SyncTargetRegistry.nameToId('nextcloud'); }, public: true, label: () => _('Nextcloud password'), secure: true, }, 'sync.6.path': { value: '', type: Setting.TYPE_STRING, section: 'sync', show: settings => { return settings['sync.target'] == SyncTargetRegistry.nameToId('webdav'); }, public: true, label: () => _('WebDAV URL'), description: () => emptyDirWarning, }, 'sync.6.username': { value: '', type: Setting.TYPE_STRING, section: 'sync', show: settings => { return settings['sync.target'] == SyncTargetRegistry.nameToId('webdav'); }, public: true, label: () => _('WebDAV username'), }, 'sync.6.password': { value: '', type: Setting.TYPE_STRING, section: 'sync', show: settings => { return settings['sync.target'] == SyncTargetRegistry.nameToId('webdav'); }, public: true, label: () => _('WebDAV password'), secure: true, }, 'sync.3.auth': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.4.auth': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.7.auth': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.1.context': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.2.context': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.3.context': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.4.context': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.5.context': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.6.context': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.7.context': { value: '', type: Setting.TYPE_STRING, public: false }, 'sync.5.syncTargets': { value: {}, type: Setting.TYPE_OBJECT, public: false }, 'sync.resourceDownloadMode': { value: 'always', type: Setting.TYPE_STRING, section: 'sync', public: true, advanced: true, isEnum: true, appTypes: ['mobile', 'desktop'], label: () => _('Attachment download behaviour'), description: () => _('In "Manual" mode, attachments are downloaded only when you click on them. In "Auto", they are downloaded when you open the note. 
In "Always", all the attachments are downloaded whether you open the note or not.'), options: () => { return { always: _('Always'), manual: _('Manual'), auto: _('Auto'), }; }, }, 'sync.maxConcurrentConnections': { value: 5, type: Setting.TYPE_INT, public: true, advanced: true, section: 'sync', label: () => _('Max concurrent connections'), minimum: 1, maximum: 20, step: 1 }, activeFolderId: { value: '', type: Setting.TYPE_STRING, public: false }, firstStart: { value: true, type: Setting.TYPE_BOOL, public: false }, locale: { value: defaultLocale(), type: Setting.TYPE_STRING, isEnum: true, public: true, label: () => _('Language'), options: () => { return ObjectUtils.sortByValue(supportedLocalesToLanguages({ includeStats: true })); }, }, dateFormat: { value: Setting.DATE_FORMAT_1, type: Setting.TYPE_STRING, isEnum: true, public: true, label: () => _('Date format'), options: () => { let options = {}; const now = new Date('2017-01-30T12:00:00').getTime(); options[Setting.DATE_FORMAT_1] = time.formatMsToLocal(now, Setting.DATE_FORMAT_1); options[Setting.DATE_FORMAT_2] = time.formatMsToLocal(now, Setting.DATE_FORMAT_2); options[Setting.DATE_FORMAT_3] = time.formatMsToLocal(now, Setting.DATE_FORMAT_3); options[Setting.DATE_FORMAT_4] = time.formatMsToLocal(now, Setting.DATE_FORMAT_4); options[Setting.DATE_FORMAT_5] = time.formatMsToLocal(now, Setting.DATE_FORMAT_5); options[Setting.DATE_FORMAT_6] = time.formatMsToLocal(now, Setting.DATE_FORMAT_6); return options; }, }, timeFormat: { value: Setting.TIME_FORMAT_1, type: Setting.TYPE_STRING, isEnum: true, public: true, label: () => _('Time format'), options: () => { let options = {}; const now = new Date('2017-01-30T20:30:00').getTime(); options[Setting.TIME_FORMAT_1] = time.formatMsToLocal(now, Setting.TIME_FORMAT_1); options[Setting.TIME_FORMAT_2] = time.formatMsToLocal(now, Setting.TIME_FORMAT_2); return options; }, }, theme: { value: Setting.THEME_LIGHT, type: Setting.TYPE_INT, public: true, appTypes: ['mobile', 'desktop'], isEnum: true, label: () => _('Theme'), section: 'appearance', options: () => { let output = {}; output[Setting.THEME_LIGHT] = _('Light'); output[Setting.THEME_DARK] = _('Dark'); if (platform !== 'mobile') { output[Setting.THEME_DRACULA] = _('Dracula'); output[Setting.THEME_SOLARIZED_LIGHT] = _('Solarised Light'); output[Setting.THEME_SOLARIZED_DARK] = _('Solarised Dark'); output[Setting.THEME_NORD] = _('Nord'); } return output; }, }, showNoteCounts: { value: true, type: Setting.TYPE_BOOL, public: true, appTypes: ['desktop'], label: () => _('Show note counts') }, layoutButtonSequence: { value: Setting.LAYOUT_ALL, type: Setting.TYPE_INT, public: false, appTypes: ['desktop'], isEnum: true, options: () => ({ [Setting.LAYOUT_ALL]: _('%s / %s / %s', _('Editor'), _('Viewer'), _('Split View')), [Setting.LAYOUT_EDITOR_VIEWER]: _('%s / %s', _('Editor'), _('Viewer')), [Setting.LAYOUT_EDITOR_SPLIT]: _('%s / %s', _('Editor'), _('Split View')), [Setting.LAYOUT_VIEWER_SPLIT]: _('%s / %s', _('Viewer'), _('Split View')), }), }, uncompletedTodosOnTop: { value: true, type: Setting.TYPE_BOOL, section: 'note', public: true, appTypes: ['cli'], label: () => _('Uncompleted to-dos on top') }, showCompletedTodos: { value: true, type: Setting.TYPE_BOOL, section: 'note', public: true, appTypes: ['cli'], label: () => _('Show completed to-dos') }, 'notes.sortOrder.field': { value: 'user_updated_time', type: Setting.TYPE_STRING, section: 'note', isEnum: true, public: true, appTypes: ['cli'], label: () => _('Sort notes by'), options: () => { const Note = 
require('lib/models/Note'); const noteSortFields = ['user_updated_time', 'user_created_time', 'title']; const options = {}; for (let i = 0; i < noteSortFields.length; i++) { options[noteSortFields[i]] = toTitleCase(Note.fieldToLabel(noteSortFields[i])); } return options; }, }, 'editor.autoMatchingBraces': { value: true, type: Setting.TYPE_BOOL, public: true, section: 'note', appTypes: ['desktop'], label: () => _('Auto-pair braces, parenthesis, quotations, etc.'), }, 'notes.sortOrder.reverse': { value: true, type: Setting.TYPE_BOOL, section: 'note', public: true, label: () => _('Reverse sort order'), appTypes: ['cli'] }, 'folders.sortOrder.field': { value: 'title', type: Setting.TYPE_STRING, isEnum: true, public: true, appTypes: ['cli'], label: () => _('Sort notebooks by'), options: () => { const Folder = require('lib/models/Folder'); const folderSortFields = ['title', 'last_note_user_updated_time']; const options = {}; for (let i = 0; i < folderSortFields.length; i++) { options[folderSortFields[i]] = toTitleCase(Folder.fieldToLabel(folderSortFields[i])); } return options; }, }, 'folders.sortOrder.reverse': { value: false, type: Setting.TYPE_BOOL, public: true, label: () => _('Reverse sort order'), appTypes: ['cli'] }, trackLocation: { value: true, type: Setting.TYPE_BOOL, section: 'note', public: true, label: () => _('Save geo-location with notes') }, newTodoFocus: { value: 'title', type: Setting.TYPE_STRING, section: 'note', isEnum: true, public: true, appTypes: ['desktop'], label: () => _('When creating a new to-do:'), options: () => { return { title: _('Focus title'), body: _('Focus body'), }; }, }, newNoteFocus: { value: 'body', type: Setting.TYPE_STRING, section: 'note', isEnum: true, public: true, appTypes: ['desktop'], label: () => _('When creating a new note:'), options: () => { return { title: _('Focus title'), body: _('Focus body'), }; }, }, // Deprecated - use markdown.plugin.* 'markdown.softbreaks': { value: false, type: Setting.TYPE_BOOL, public: false, appTypes: ['mobile', 'desktop']}, 'markdown.typographer': { value: false, type: Setting.TYPE_BOOL, public: false, appTypes: ['mobile', 'desktop']}, // Deprecated 'markdown.plugin.softbreaks': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable soft breaks') }, 'markdown.plugin.typographer': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable typographer support') }, 'markdown.plugin.katex': { value: true, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable math expressions') }, 'markdown.plugin.mark': { value: true, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable ==mark== syntax') }, 'markdown.plugin.footnote': { value: true, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable footnotes') }, 'markdown.plugin.toc': { value: true, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable table of contents extension') }, 'markdown.plugin.sub': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable ~sub~ syntax') }, 'markdown.plugin.sup': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => 
_('Enable ^sup^ syntax') }, 'markdown.plugin.deflist': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable deflist syntax') }, 'markdown.plugin.abbr': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable abbreviation syntax') }, 'markdown.plugin.emoji': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable markdown emoji') }, 'markdown.plugin.insert': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable ++insert++ syntax') }, 'markdown.plugin.multitable': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable multimarkdown table extension') }, 'markdown.plugin.fountain': { value: false, type: Setting.TYPE_BOOL, section: 'plugins', public: true, appTypes: ['mobile', 'desktop'], label: () => _('Enable Fountain syntax support') }, // Tray icon (called AppIndicator) doesn't work in Ubuntu // http://www.webupd8.org/2017/04/fix-appindicator-not-working-for.html // Might be fixed in Electron 18.x but no non-beta release yet. So for now // by default we disable it on Linux. showTrayIcon: { value: platform !== 'linux', type: Setting.TYPE_BOOL, section: 'application', public: true, appTypes: ['desktop'], label: () => _('Show tray icon'), description: () => { return platform === 'linux' ? _('Note: Does not work in all desktop environments.') : _('This will allow Joplin to run in the background. It is recommended to enable this setting so that your notes are constantly being synchronised, thus reducing the number of conflicts.'); }, }, startMinimized: { value: false, type: Setting.TYPE_BOOL, section: 'application', public: true, appTypes: ['desktop'], label: () => _('Start application minimised in the tray icon') }, collapsedFolderIds: { value: [], type: Setting.TYPE_ARRAY, public: false }, 'db.ftsEnabled': { value: -1, type: Setting.TYPE_INT, public: false }, 'encryption.enabled': { value: false, type: Setting.TYPE_BOOL, public: false }, 'encryption.activeMasterKeyId': { value: '', type: Setting.TYPE_STRING, public: false }, 'encryption.passwordCache': { value: {}, type: Setting.TYPE_OBJECT, public: false, secure: true }, 'style.zoom': { value: 100, type: Setting.TYPE_INT, public: true, appTypes: ['desktop'], section: 'appearance', label: () => _('Global zoom percentage'), minimum: 50, maximum: 500, step: 10 }, 'style.editor.fontSize': { value: 13, type: Setting.TYPE_INT, public: true, appTypes: ['desktop'], section: 'appearance', label: () => _('Editor font size'), minimum: 4, maximum: 50, step: 1 }, 'style.editor.fontFamily': (mobilePlatform) ? 
({ value: Setting.FONT_DEFAULT, type: Setting.TYPE_STRING, isEnum: true, public: true, label: () => _('Editor font'), appTypes: ['mobile'], section: 'appearance', options: () => { // IMPORTANT: The font mapping must match the one in global-styles.js::editorFont() if (mobilePlatform === 'ios') { return { [Setting.FONT_DEFAULT]: 'Default', [Setting.FONT_MENLO]: 'Menlo', [Setting.FONT_COURIER_NEW]: 'Courier New', [Setting.FONT_AVENIR]: 'Avenir', }; } return { [Setting.FONT_DEFAULT]: 'Default', [Setting.FONT_MONOSPACE]: 'Monospace', }; }, }) : { value: '', type: Setting.TYPE_STRING, public: true, appTypes: ['desktop'], section: 'appearance', label: () => _('Editor font family'), description: () => _('This must be *monospace* font or it will not work properly. If the font ' + 'is incorrect or empty, it will default to a generic monospace font.'), }, 'style.sidebar.width': { value: 150, minimum: 80, maximum: 400, type: Setting.TYPE_INT, public: false, appTypes: ['desktop'] }, 'style.noteList.width': { value: 150, minimum: 80, maximum: 400, type: Setting.TYPE_INT, public: false, appTypes: ['desktop'] }, // TODO: Is there a better way to do this? The goal here is to simply have // a way to display a link to the customizable stylesheets, not for it to // serve as a customizable Setting. But because the Setting page is auto- // generated from this list of settings, there wasn't a really elegant way // to do that directly in the React markup. 'style.customCss.renderedMarkdown': { onClick: () => { const dir = Setting.value('profileDir'); const filename = Setting.custom_css_files.RENDERED_MARKDOWN; const filepath = `${dir}/${filename}`; const defaultContents = '/* For styling the rendered Markdown */'; shim.openOrCreateFile(filepath, defaultContents); }, type: Setting.TYPE_BUTTON, public: true, appTypes: ['desktop'], label: () => _('Custom stylesheet for rendered Markdown'), section: 'appearance', }, 'style.customCss.joplinApp': { onClick: () => { const dir = Setting.value('profileDir'); const filename = Setting.custom_css_files.JOPLIN_APP; const filepath = `${dir}/${filename}`; const defaultContents = `/* For styling the entire Joplin app (except the rendered Markdown, which is defined in \`${Setting.custom_css_files.RENDERED_MARKDOWN}\`) */`; shim.openOrCreateFile(filepath, defaultContents); }, type: Setting.TYPE_BUTTON, public: true, appTypes: ['desktop'], label: () => _('Custom stylesheet for Joplin-wide app styles'), section: 'appearance', }, autoUpdateEnabled: { value: true, type: Setting.TYPE_BOOL, section: 'application', public: true, appTypes: ['desktop'], label: () => _('Automatically update the application') }, 'autoUpdate.includePreReleases': { value: false, type: Setting.TYPE_BOOL, section: 'application', public: true, appTypes: ['desktop'], label: () => _('Get pre-releases when checking for updates'), description: () => _('See the pre-release page for more details: %s', 'https://joplinapp.org/prereleases') }, 'clipperServer.autoStart': { value: false, type: Setting.TYPE_BOOL, public: false }, 'sync.interval': { value: 300, type: Setting.TYPE_INT, section: 'sync', isEnum: true, public: true, label: () => _('Synchronisation interval'), options: () => { return { 0: _('Disabled'), 300: _('%d minutes', 5), 600: _('%d minutes', 10), 1800: _('%d minutes', 30), 3600: _('%d hour', 1), 43200: _('%d hours', 12), 86400: _('%d hours', 24), }; }, }, noteVisiblePanes: { value: ['editor', 'viewer'], type: Setting.TYPE_ARRAY, public: false, appTypes: ['desktop'] }, sidebarVisibility: { value: true, type: 
Setting.TYPE_BOOL, public: false, appTypes: ['desktop'] }, noteListVisibility: { value: true, type: Setting.TYPE_BOOL, public: false, appTypes: ['desktop'] }, tagHeaderIsExpanded: { value: true, type: Setting.TYPE_BOOL, public: false, appTypes: ['desktop'] }, folderHeaderIsExpanded: { value: true, type: Setting.TYPE_BOOL, public: false, appTypes: ['desktop'] }, editor: { value: '', type: Setting.TYPE_STRING, subType: 'file_path_and_args', public: true, appTypes: ['cli', 'desktop'], label: () => _('Text editor command'), description: () => _('The editor command (may include arguments) that will be used to open a note. If none is provided it will try to auto-detect the default editor.') }, 'export.pdfPageSize': { value: 'A4', type: Setting.TYPE_STRING, isEnum: true, public: true, appTypes: ['desktop'], label: () => _('Page size for PDF export'), options: () => { return { 'A4': _('A4'), 'Letter': _('Letter'), 'A3': _('A3'), 'A5': _('A5'), 'Tabloid': _('Tabloid'), 'Legal': _('Legal'), }; }}, 'export.pdfPageOrientation': { value: 'portrait', type: Setting.TYPE_STRING, isEnum: true, public: true, appTypes: ['desktop'], label: () => _('Page orientation for PDF export'), options: () => { return { 'portrait': _('Portrait'), 'landscape': _('Landscape'), }; }}, 'net.customCertificates': { value: '', type: Setting.TYPE_STRING, section: 'sync', advanced: true, show: settings => { return [SyncTargetRegistry.nameToId('nextcloud'), SyncTargetRegistry.nameToId('webdav')].indexOf(settings['sync.target']) >= 0; }, public: true, appTypes: ['desktop', 'cli'], label: () => _('Custom TLS certificates'), description: () => _('Comma-separated list of paths to directories to load the certificates from, or path to individual cert files. For example: /my/cert_dir, /other/custom.pem. Note that if you make changes to the TLS settings, you must save your changes before clicking on "Check synchronisation configuration".'), }, 'net.ignoreTlsErrors': { value: false, type: Setting.TYPE_BOOL, advanced: true, section: 'sync', show: settings => { return [SyncTargetRegistry.nameToId('nextcloud'), SyncTargetRegistry.nameToId('webdav')].indexOf(settings['sync.target']) >= 0; }, public: true, appTypes: ['desktop', 'cli'], label: () => _('Ignore TLS certificate errors'), }, 'sync.wipeOutFailSafe': { value: true, type: Setting.TYPE_BOOL, advanced: true, public: true, section: 'sync', label: () => _('Fail-safe: Do not wipe out local data when sync target is empty (often the result of a misconfiguration or bug)') }, 'api.token': { value: null, type: Setting.TYPE_STRING, public: false }, 'api.port': { value: null, type: Setting.TYPE_INT, public: true, appTypes: ['cli'], description: () => _('Specify the port that should be used by the API server. If not set, a default will be used.') }, 'resourceService.lastProcessedChangeId': { value: 0, type: Setting.TYPE_INT, public: false }, 'searchEngine.lastProcessedChangeId': { value: 0, type: Setting.TYPE_INT, public: false }, 'revisionService.lastProcessedChangeId': { value: 0, type: Setting.TYPE_INT, public: false }, 'searchEngine.initialIndexingDone': { value: false, type: Setting.TYPE_BOOL, public: false }, 'revisionService.enabled': { section: 'revisionService', value: true, type: Setting.TYPE_BOOL, public: true, label: () => _('Enable note history') }, 'revisionService.ttlDays': { section: 'revisionService', value: 90, type: Setting.TYPE_INT, public: true, minimum: 1, maximum: 365 * 2, step: 1, unitLabel: (value = null) => { return value === null ? 
_('days') : _('%d days', value); }, label: () => _('Keep note history for'), }, 'revisionService.intervalBetweenRevisions': { section: 'revisionService', value: 1000 * 60 * 10, type: Setting.TYPE_INT, public: false }, 'revisionService.oldNoteInterval': { section: 'revisionService', value: 1000 * 60 * 60 * 24 * 7, type: Setting.TYPE_INT, public: false }, 'welcome.wasBuilt': { value: false, type: Setting.TYPE_BOOL, public: false }, 'welcome.enabled': { value: true, type: Setting.TYPE_BOOL, public: false }, 'camera.type': { value: 0, type: Setting.TYPE_INT, public: false, appTypes: ['mobile'] }, 'camera.ratio': { value: '4:3', type: Setting.TYPE_STRING, public: false, appTypes: ['mobile'] }, }; return this.metadata_; } static settingMetadata(key) { const metadata = this.metadata(); if (!(key in metadata)) throw new Error(`Unknown key: ${key}`); let output = Object.assign({}, metadata[key]); output.key = key; return output; } static keyExists(key) { return key in this.metadata(); } static keyDescription(key, appType = null) { const md = this.settingMetadata(key); if (!md.description) return null; return md.description(appType); } static keys(publicOnly = false, appType = null) { if (!this.keys_) { const metadata = this.metadata(); this.keys_ = []; for (let n in metadata) { if (!metadata.hasOwnProperty(n)) continue; this.keys_.push(n); } } if (appType || publicOnly) { let output = []; for (let i = 0; i < this.keys_.length; i++) { const md = this.settingMetadata(this.keys_[i]); if (publicOnly && !md.public) continue; if (appType && md.appTypes && md.appTypes.indexOf(appType) < 0) continue; output.push(md.key); } return output; } else { return this.keys_; } } static isPublic(key) { return this.keys(true).indexOf(key) >= 0; } static load() { this.cancelScheduleSave(); this.cache_ = []; return this.modelSelectAll('SELECT * FROM settings').then(rows => { this.cache_ = []; for (let i = 0; i < rows.length; i++) { let c = rows[i]; if (!this.keyExists(c.key)) continue; c.value = this.formatValue(c.key, c.value); c.value = this.filterValue(c.key, c.value); this.cache_.push(c); } this.dispatchUpdateAll(); }); } static toPlainObject() { const keys = this.keys(); let keyToValues = {}; for (let i = 0; i < keys.length; i++) { keyToValues[keys[i]] = this.value(keys[i]); } return keyToValues; } static dispatchUpdateAll() { this.dispatch({ type: 'SETTING_UPDATE_ALL', settings: this.toPlainObject(), }); } static setConstant(key, value) { if (!(key in this.constants_)) throw new Error(`Unknown constant key: ${key}`); this.constants_[key] = value; } static setValue(key, value) { if (!this.cache_) throw new Error('Settings have not been initialized!'); value = this.formatValue(key, value); value = this.filterValue(key, value); for (let i = 0; i < this.cache_.length; i++) { let c = this.cache_[i]; if (c.key == key) { const md = this.settingMetadata(key); if (md.isEnum === true) { if (!this.isAllowedEnumOption(key, value)) { throw new Error(_('Invalid option value: "%s". Possible values are: %s.', value, this.enumOptionsDoc(key))); } } if (c.value === value) return; // Don't log this to prevent sensitive info (passwords, auth tokens...) 
to end up in logs // this.logger().info('Setting: ' + key + ' = ' + c.value + ' => ' + value); if ('minimum' in md && value < md.minimum) value = md.minimum; if ('maximum' in md && value > md.maximum) value = md.maximum; c.value = value; this.dispatch({ type: 'SETTING_UPDATE_ONE', key: key, value: c.value, }); this.scheduleSave(); return; } } this.cache_.push({ key: key, value: this.formatValue(key, value), }); this.dispatch({ type: 'SETTING_UPDATE_ONE', key: key, value: this.formatValue(key, value), }); this.scheduleSave(); } static setObjectKey(settingKey, objectKey, value) { let o = this.value(settingKey); if (typeof o !== 'object') o = {}; o[objectKey] = value; this.setValue(settingKey, o); } static deleteObjectKey(settingKey, objectKey) { const o = this.value(settingKey); if (typeof o !== 'object') return; delete o[objectKey]; this.setValue(settingKey, o); } static valueToString(key, value) { const md = this.settingMetadata(key); value = this.formatValue(key, value); if (md.type == Setting.TYPE_INT) return value.toFixed(0); if (md.type == Setting.TYPE_BOOL) return value ? '1' : '0'; if (md.type == Setting.TYPE_ARRAY) return value ? JSON.stringify(value) : '[]'; if (md.type == Setting.TYPE_OBJECT) return value ? JSON.stringify(value) : '{}'; if (md.type == Setting.TYPE_STRING) return value ? `${value}` : ''; throw new Error(`Unhandled value type: ${md.type}`); } static filterValue(key, value) { const md = this.settingMetadata(key); return md.filter ? md.filter(value) : value; } static formatValue(key, value) { const md = this.settingMetadata(key); if (md.type == Setting.TYPE_INT) return !value ? 0 : Math.floor(Number(value)); if (md.type == Setting.TYPE_BOOL) { if (typeof value === 'string') { value = value.toLowerCase(); if (value === 'true') return true; if (value === 'false') return false; value = Number(value); } return !!value; } if (md.type === Setting.TYPE_ARRAY) { if (!value) return []; if (Array.isArray(value)) return value; if (typeof value === 'string') return JSON.parse(value); return []; } if (md.type === Setting.TYPE_OBJECT) { if (!value) return {}; if (typeof value === 'object') return value; if (typeof value === 'string') return JSON.parse(value); return {}; } if (md.type === Setting.TYPE_STRING) { if (!value) return ''; return `${value}`; } throw new Error(`Unhandled value type: ${md.type}`); } static value(key) { // Need to copy arrays and objects since in setValue(), the old value and new one is compared // with strict equality and the value is updated only if changed. However if the caller acquire // and object and change a key, the objects will be detected as equal. By returning a copy // we avoid this problem. function copyIfNeeded(value) { if (value === null || value === undefined) return value; if (Array.isArray(value)) return value.slice(); if (typeof value === 'object') return Object.assign({}, value); return value; } if (key in this.constants_) { const v = this.constants_[key]; const output = typeof v === 'function' ? 
v() : v; if (output == 'SET_ME') throw new Error(`Setting constant has not been set: ${key}`); return output; } if (!this.cache_) throw new Error('Settings have not been initialized!'); for (let i = 0; i < this.cache_.length; i++) { if (this.cache_[i].key == key) { return copyIfNeeded(this.cache_[i].value); } } const md = this.settingMetadata(key); return copyIfNeeded(md.value); } static isEnum(key) { const md = this.settingMetadata(key); return md.isEnum === true; } static enumOptionValues(key) { const options = this.enumOptions(key); let output = []; for (let n in options) { if (!options.hasOwnProperty(n)) continue; output.push(n); } return output; } static enumOptionLabel(key, value) { const options = this.enumOptions(key); for (let n in options) { if (n == value) return options[n]; } return ''; } static enumOptions(key) { const metadata = this.metadata(); if (!metadata[key]) throw new Error(`Unknown key: ${key}`); if (!metadata[key].options) throw new Error(`No options for: ${key}`); return metadata[key].options(); } static enumOptionsDoc(key, templateString = null) { if (templateString === null) templateString = '%s: %s'; const options = this.enumOptions(key); let output = []; for (let n in options) { if (!options.hasOwnProperty(n)) continue; output.push(sprintf(templateString, n, options[n])); } return output.join(', '); } static isAllowedEnumOption(key, value) { const options = this.enumOptions(key); return !!options[value]; } // For example, if settings is: // { sync.5.path: 'http://example', sync.5.username: 'testing' } // and baseKey is 'sync.5', the function will return // { path: 'http://example', username: 'testing' } static subValues(baseKey, settings, options = null) { const includeBaseKeyInName = !!options && !!options.includeBaseKeyInName; let output = {}; for (let key in settings) { if (!settings.hasOwnProperty(key)) continue; if (key.indexOf(baseKey) === 0) { const subKey = includeBaseKeyInName ? 
key : key.substr(baseKey.length + 1); output[subKey] = settings[key]; } } return output; } static async saveAll() { if (!this.saveTimeoutId_) return Promise.resolve(); this.logger().info('Saving settings...'); clearTimeout(this.saveTimeoutId_); this.saveTimeoutId_ = null; let queries = []; queries.push('DELETE FROM settings'); for (let i = 0; i < this.cache_.length; i++) { let s = Object.assign({}, this.cache_[i]); s.value = this.valueToString(s.key, s.value); queries.push(Database.insertQuery(this.tableName(), s)); } await BaseModel.db().transactionExecBatch(queries); this.logger().info('Settings have been saved.'); } static scheduleSave() { if (!Setting.autoSaveEnabled) return; if (this.saveTimeoutId_) clearTimeout(this.saveTimeoutId_); this.saveTimeoutId_ = setTimeout(() => { this.saveAll(); }, 500); } static cancelScheduleSave() { if (this.saveTimeoutId_) clearTimeout(this.saveTimeoutId_); this.saveTimeoutId_ = null; } static publicSettings(appType) { if (!appType) throw new Error('appType is required'); const metadata = this.metadata(); let output = {}; for (let key in metadata) { if (!metadata.hasOwnProperty(key)) continue; let s = Object.assign({}, metadata[key]); if (!s.public) continue; if (s.appTypes && s.appTypes.indexOf(appType) < 0) continue; s.value = this.value(key); output[key] = s; } return output; } static typeToString(typeId) { if (typeId === Setting.TYPE_INT) return 'int'; if (typeId === Setting.TYPE_STRING) return 'string'; if (typeId === Setting.TYPE_BOOL) return 'bool'; if (typeId === Setting.TYPE_ARRAY) return 'array'; if (typeId === Setting.TYPE_OBJECT) return 'object'; } static groupMetadatasBySections(metadatas) { let sections = []; const generalSection = { name: 'general', metadatas: [] }; const nameToSections = {}; nameToSections['general'] = generalSection; sections.push(generalSection); for (let i = 0; i < metadatas.length; i++) { const md = metadatas[i]; if (!md.section) { generalSection.metadatas.push(md); } else { if (!nameToSections[md.section]) { nameToSections[md.section] = { name: md.section, metadatas: [] }; sections.push(nameToSections[md.section]); } nameToSections[md.section].metadatas.push(md); } } return sections; } static sectionNameToLabel(name) { if (name === 'general') return _('General'); if (name === 'sync') return _('Synchronisation'); if (name === 'appearance') return _('Appearance'); if (name === 'note') return _('Note'); if (name === 'plugins') return _('Plugins'); if (name === 'application') return _('Application'); if (name === 'revisionService') return _('Note History'); if (name === 'encryption') return _('Encryption'); if (name === 'server') return _('Web Clipper'); return name; } static sectionNameToIcon(name) { if (name === 'general') return 'fa-sliders'; if (name === 'sync') return 'fa-refresh'; if (name === 'appearance') return 'fa-pencil'; if (name === 'note') return 'fa-file-text-o'; if (name === 'plugins') return 'fa-puzzle-piece'; if (name === 'application') return 'fa-cog'; if (name === 'revisionService') return 'fa-archive-org'; if (name === 'encryption') return 'fa-key-modern'; if (name === 'server') return 'fa-hand-scissors-o'; return name; } static appTypeToLabel(name) { // Not translated for now because only used on Welcome notes (which are not translated) if (name === 'cli') return 'CLI'; return name[0].toUpperCase() + name.substr(1).toLowerCase(); } } Setting.TYPE_INT = 1; Setting.TYPE_STRING = 2; Setting.TYPE_BOOL = 3; Setting.TYPE_ARRAY = 4; Setting.TYPE_OBJECT = 5; Setting.TYPE_BUTTON = 6; Setting.THEME_LIGHT = 
1; Setting.THEME_DARK = 2; Setting.THEME_SOLARIZED_LIGHT = 3; Setting.THEME_SOLARIZED_DARK = 4; Setting.THEME_DRACULA = 5; Setting.THEME_NORD = 6; Setting.FONT_DEFAULT = 0; Setting.FONT_MENLO = 1; Setting.FONT_COURIER_NEW = 2; Setting.FONT_AVENIR = 3; Setting.FONT_MONOSPACE = 4; Setting.LAYOUT_ALL = 0; Setting.LAYOUT_EDITOR_VIEWER = 1; Setting.LAYOUT_EDITOR_SPLIT = 2; Setting.LAYOUT_VIEWER_SPLIT = 3; Setting.DATE_FORMAT_1 = 'DD/MM/YYYY'; Setting.DATE_FORMAT_2 = 'DD/MM/YY'; Setting.DATE_FORMAT_3 = 'MM/DD/YYYY'; Setting.DATE_FORMAT_4 = 'MM/DD/YY'; Setting.DATE_FORMAT_5 = 'YYYY-MM-DD'; Setting.DATE_FORMAT_6 = 'DD.MM.YYYY'; Setting.TIME_FORMAT_1 = 'HH:mm'; Setting.TIME_FORMAT_2 = 'h:mm A'; Setting.custom_css_files = { JOPLIN_APP: 'userchrome.css', RENDERED_MARKDOWN: 'userstyle.css', }; // Contains constants that are set by the application and // cannot be modified by the user: Setting.constants_ = { env: 'SET_ME', isDemo: false, appName: 'joplin', appId: 'SET_ME', // Each app should set this identifier appType: 'SET_ME', // 'cli' or 'mobile' resourceDirName: '', resourceDir: '', profileDir: '', templateDir: '', tempDir: '', flagOpenDevTools: false, syncVersion: 1, }; Setting.autoSaveEnabled = true; module.exports = Setting;
1
11,377
As it is a mobile-only theme, please make sure the option appears only on mobile (one way to gate the option is sketched after this row).
laurent22-joplin
js
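The review comment for the row above asks that a mobile-only theme be offered only in the mobile app. The theme setting's options() in the file above already gates desktop-only entries behind a `platform !== 'mobile'` check, so the same pattern can be applied in reverse. A minimal sketch, assuming a hypothetical mobile-only constant Setting.THEME_OLED_DARK (the actual theme under review is not shown in this row):

// Sketch only: THEME_OLED_DARK stands in for whatever mobile-only theme the patch adds.
options: () => {
	let output = {};
	output[Setting.THEME_LIGHT] = _('Light');
	output[Setting.THEME_DARK] = _('Dark');
	if (platform === 'mobile') {
		// Mirrors the existing platform gate, but in reverse: this entry is
		// only added when running in the mobile app.
		output[Setting.THEME_OLED_DARK] = _('OLED Dark');
	}
	if (platform !== 'mobile') {
		output[Setting.THEME_DRACULA] = _('Dracula');
		output[Setting.THEME_SOLARIZED_LIGHT] = _('Solarised Light');
		output[Setting.THEME_SOLARIZED_DARK] = _('Solarised Dark');
		output[Setting.THEME_NORD] = _('Nord');
	}
	return output;
},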
@@ -364,3 +364,14 @@ def _get_non_negative_param(param, default=None):
         if value < 0:
             raise APIBadRequest("'{}' should be a non-negative integer".format(param))
     return value
+
+
+def parse_param_list(params):
+    param_list = []
+    for param in params.split(","):
+        param = param.strip()
+        if not param:
+            continue
+        param_list.append(param)
+
+    return param_list
1
import listenbrainz.webserver.rabbitmq_connection as rabbitmq_connection import listenbrainz.webserver.redis_connection as redis_connection import pika import pika.exceptions import sys import time import ujson import uuid from flask import current_app, request from listenbrainz.listen import Listen from listenbrainz.webserver import API_LISTENED_AT_ALLOWED_SKEW from listenbrainz.webserver.external import messybrainz from listenbrainz.webserver.errors import APIInternalServerError, APIServiceUnavailable, APIBadRequest #: Maximum overall listen size in bytes, to prevent egregious spamming. MAX_LISTEN_SIZE = 10240 #: The maximum number of tags per listen. MAX_TAGS_PER_LISTEN = 50 #: The maximum length of a tag MAX_TAG_SIZE = 64 #: The maximum number of listens returned in a single GET request. MAX_ITEMS_PER_GET = 100 #: The default number of listens returned in a single GET request. DEFAULT_ITEMS_PER_GET = 25 MAX_ITEMS_PER_MESSYBRAINZ_LOOKUP = 10 # Define the values for types of listens LISTEN_TYPE_SINGLE = 1 LISTEN_TYPE_IMPORT = 2 LISTEN_TYPE_PLAYING_NOW = 3 def insert_payload(payload, user, listen_type=LISTEN_TYPE_IMPORT): """ Convert the payload into augmented listens then submit them. Returns: augmented_listens """ try: augmented_listens = _get_augmented_listens(payload, user, listen_type) _send_listens_to_queue(listen_type, augmented_listens) except (APIInternalServerError, APIServiceUnavailable) as e: raise except Exception as e: current_app.logger.error("Error while inserting payload: %s", str(e), exc_info=True) raise APIInternalServerError("Something went wrong. Please try again.") return augmented_listens def handle_playing_now(listen): """ Check that the listen doesn't already exist in redis and put it in there if it isn't. Returns: listen if new playing now listen, None otherwise """ old_playing_now = redis_connection._redis.get_playing_now(listen['user_id']) if old_playing_now and listen['recording_msid'] == old_playing_now.recording_msid: return None if 'duration' in listen['track_metadata']['additional_info']: listen_timeout = listen['track_metadata']['additional_info']['duration'] elif 'duration_ms' in listen['track_metadata']['additional_info']: listen_timeout = listen['track_metadata']['additional_info']['duration_ms'] // 1000 else: listen_timeout = current_app.config['PLAYING_NOW_MAX_DURATION'] redis_connection._redis.put_playing_now(listen['user_id'], listen, listen_timeout) return listen def _send_listens_to_queue(listen_type, listens): submit = [] for listen in listens: if listen_type == LISTEN_TYPE_PLAYING_NOW: try: listen = handle_playing_now(listen) if listen: submit.append(listen) except Exception as e: current_app.logger.error("Redis rpush playing_now write error: " + str(e)) raise APIServiceUnavailable("Cannot record playing_now at this time.") else: submit.append(listen) if submit: # check if rabbitmq connection exists or not # and if not then try to connect try: rabbitmq_connection.init_rabbitmq_connection(current_app) except ConnectionError as e: current_app.logger.error('Cannot connect to RabbitMQ: %s' % str(e)) raise APIServiceUnavailable('Cannot submit listens to queue, please try again later.') if listen_type == LISTEN_TYPE_PLAYING_NOW: exchange = current_app.config['PLAYING_NOW_EXCHANGE'] queue = current_app.config['PLAYING_NOW_QUEUE'] else: exchange = current_app.config['INCOMING_EXCHANGE'] queue = current_app.config['INCOMING_QUEUE'] publish_data_to_queue( data=submit, exchange=exchange, queue=queue, error_msg='Cannot submit listens to queue, please try 
again later.', ) def validate_listen(listen, listen_type): """Make sure that required keys are present, filled out and not too large.""" if listen_type in (LISTEN_TYPE_SINGLE, LISTEN_TYPE_IMPORT): if 'listened_at' not in listen: log_raise_400("JSON document must contain the key listened_at at the top level.", listen) try: listen['listened_at'] = int(listen['listened_at']) except ValueError: log_raise_400("JSON document must contain an int value for listened_at.", listen) if 'listened_at' in listen and 'track_metadata' in listen and len(listen) > 2: log_raise_400("JSON document may only contain listened_at and " "track_metadata top level keys", listen) # if timestamp is too high, raise BadRequest # in order to make up for possible clock skew, we allow # timestamps to be one hour ahead of server time if not is_valid_timestamp(listen['listened_at']): log_raise_400("Value for key listened_at is too high.", listen) elif listen_type == LISTEN_TYPE_PLAYING_NOW: if 'listened_at' in listen: log_raise_400("JSON document must not contain listened_at while submitting " "playing_now.", listen) if 'track_metadata' in listen and len(listen) > 1: log_raise_400("JSON document may only contain track_metadata as top level " "key when submitting now_playing.", listen) # Basic metadata try: if not listen['track_metadata']['track_name']: log_raise_400("JSON document does not contain required " "track_metadata.track_name.", listen) if not listen['track_metadata']['artist_name']: log_raise_400("JSON document does not contain required " "track_metadata.artist_name.", listen) if not isinstance(listen['track_metadata']['artist_name'], str): log_raise_400("artist_name must be a single string.", listen) except KeyError: log_raise_400("JSON document does not contain a valid metadata.track_name " "and/or track_metadata.artist_name.", listen) if 'additional_info' in listen['track_metadata']: # Tags if 'tags' in listen['track_metadata']['additional_info']: tags = listen['track_metadata']['additional_info']['tags'] if len(tags) > MAX_TAGS_PER_LISTEN: log_raise_400("JSON document may not contain more than %d items in " "track_metadata.additional_info.tags." % MAX_TAGS_PER_LISTEN, listen) for tag in tags: if len(tag) > MAX_TAG_SIZE: log_raise_400("JSON document may not contain track_metadata.additional_info.tags " "longer than %d characters." 
% MAX_TAG_SIZE, listen) # MBIDs single_mbid_keys = ['release_mbid', 'recording_mbid', 'release_group_mbid', 'track_mbid'] for key in single_mbid_keys: verify_mbid_validity(listen, key, multi = False) multiple_mbid_keys = ['artist_mbids', 'work_mbids'] for key in multiple_mbid_keys: verify_mbid_validity(listen, key, multi = True) # lifted from AcousticBrainz def is_valid_uuid(u): try: u = uuid.UUID(u) return True except (AttributeError, ValueError): return False def _get_augmented_listens(payload, user, listen_type): """ Converts the payload to augmented list after lookup in the MessyBrainz database """ augmented_listens = [] msb_listens = [] for l in payload: listen = l.copy() # Create a local object to prevent the mutation of the passed object listen['user_id'] = user['id'] listen['user_name'] = user['musicbrainz_id'] msb_listens.append(listen) if len(msb_listens) >= MAX_ITEMS_PER_MESSYBRAINZ_LOOKUP: augmented_listens.extend(_messybrainz_lookup(msb_listens)) msb_listens = [] if msb_listens: augmented_listens.extend(_messybrainz_lookup(msb_listens)) return augmented_listens def _messybrainz_lookup(listens): msb_listens = [] for listen in listens: messy_dict = { 'artist': listen['track_metadata']['artist_name'], 'title': listen['track_metadata']['track_name'], } if 'release_name' in listen['track_metadata']: messy_dict['release'] = listen['track_metadata']['release_name'] if 'additional_info' in listen['track_metadata']: ai = listen['track_metadata']['additional_info'] if 'artist_mbids' in ai and isinstance(ai['artist_mbids'], list): messy_dict['artist_mbids'] = ai['artist_mbids'] if 'release_mbid' in ai: messy_dict['release_mbid'] = ai['release_mbid'] if 'recording_mbid' in ai: messy_dict['recording_mbid'] = ai['recording_mbid'] if 'track_number' in ai: messy_dict['track_number'] = ai['track_number'] if 'spotify_id' in ai: messy_dict['spotify_id'] = ai['spotify_id'] msb_listens.append(messy_dict) try: msb_responses = messybrainz.submit_listens(msb_listens) except messybrainz.exceptions.BadDataException as e: log_raise_400(str(e)) except messybrainz.exceptions.NoDataFoundException: return [] except messybrainz.exceptions.ErrorAddingException as e: raise APIServiceUnavailable(str(e)) augmented_listens = [] for listen, messybrainz_resp in zip(listens, msb_responses['payload']): messybrainz_resp = messybrainz_resp['ids'] if 'additional_info' not in listen['track_metadata']: listen['track_metadata']['additional_info'] = {} try: listen['recording_msid'] = messybrainz_resp['recording_msid'] listen['track_metadata']['additional_info']['artist_msid'] = messybrainz_resp['artist_msid'] except KeyError: current_app.logger.error("MessyBrainz did not return a proper set of ids") raise APIInternalServerError try: listen['track_metadata']['additional_info']['release_msid'] = messybrainz_resp['release_msid'] except KeyError: pass artist_mbids = messybrainz_resp.get('artist_mbids', []) release_mbid = messybrainz_resp.get('release_mbid', None) recording_mbid = messybrainz_resp.get('recording_mbid', None) if 'artist_mbids' not in listen['track_metadata']['additional_info'] and \ 'release_mbid' not in listen['track_metadata']['additional_info'] and \ 'recording_mbid' not in listen['track_metadata']['additional_info']: if len(artist_mbids) > 0 and release_mbid and recording_mbid: listen['track_metadata']['additional_info']['artist_mbids'] = artist_mbids listen['track_metadata']['additional_info']['release_mbid'] = release_mbid listen['track_metadata']['additional_info']['recording_mbid'] = recording_mbid 
augmented_listens.append(listen) return augmented_listens def log_raise_400(msg, data=""): """ Helper function for logging issues with request data and showing error page. Logs the message and data, raises BadRequest exception which shows 400 Bad Request to the user. """ if isinstance(data, dict): data = ujson.dumps(data) current_app.logger.debug("BadRequest: %s\nJSON: %s" % (msg, data)) raise APIBadRequest(msg) def verify_mbid_validity(listen, key, multi): """ Verify that mbid(s) present in listen with key `key` is valid. Args: listen: listen data key: the key whose mbids is to be validated multi: boolean value signifying if the key contains multiple mbids """ if not multi: items = listen['track_metadata']['additional_info'].get(key) items = [items] if items else [] else: items = listen['track_metadata']['additional_info'].get(key, []) for item in items: if not is_valid_uuid(item): log_raise_400("%s MBID format invalid." % (key, ), listen) def is_valid_timestamp(ts): """ Returns True if the timestamp passed is in the API's allowed range of timestamps, False otherwise Args: ts (int): the timestamp to be checked for validity Returns: bool: True if timestamp is valid, False otherwise """ return ts <= int(time.time()) + API_LISTENED_AT_ALLOWED_SKEW def publish_data_to_queue(data, exchange, queue, error_msg): """ Publish specified data to the specified queue. Args: data: the data to be published exchange (str): the name of the exchange queue (str): the name of the queue error_msg (str): the error message to be returned in case of an error """ try: with rabbitmq_connection._rabbitmq.get() as connection: channel = connection.channel channel.exchange_declare(exchange=exchange, exchange_type='fanout') channel.queue_declare(queue, durable=True) channel.basic_publish( exchange=exchange, routing_key='', body=ujson.dumps(data), properties=pika.BasicProperties(delivery_mode=2, ), ) except pika.exceptions.ConnectionClosed as e: current_app.logger.error("Connection to rabbitmq closed while trying to publish: %s" % str(e), exc_info=True) raise APIServiceUnavailable(error_msg) except Exception as e: current_app.logger.error("Cannot publish to rabbitmq channel: %s / %s" % (type(e).__name__, str(e)), exc_info=True) raise APIServiceUnavailable(error_msg) def _get_non_negative_param(param, default=None): """ Gets the value of a request parameter, validating that it is non-negative Args: param (str): the parameter to get default: the value to return if the parameter doesn't exist in the request """ value = request.args.get(param, default) if value is not None: try: value = int(value) except ValueError: raise APIBadRequest("'{}' should be a non-negative integer".format(param)) if value < 0: raise APIBadRequest("'{}' should be a non-negative integer".format(param)) return value
1
17,015
Would like a docstring and type annotations here (a sketch follows after this row).
metabrainz-listenbrainz-server
py
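The review comment for the row above asks for a docstring and type annotations on the parse_param_list helper introduced by this row's patch. A minimal sketch of one way to satisfy that request; the behaviour is unchanged and the docstring wording is ours, not from the source:

from typing import List


def parse_param_list(params: str) -> List[str]:
    """ Split a comma-separated request parameter into a list of values.

    Args:
        params: the raw comma-separated string, e.g. "a, b, c"

    Returns:
        the non-empty, whitespace-stripped values, in their original order
    """
    param_list = []
    for param in params.split(","):
        param = param.strip()
        if not param:
            continue
        param_list.append(param)

    return param_list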
@@ -11,7 +11,7 @@ module Mongoid
 
     included do
       cattr_accessor :shard_key_fields
-      self.shard_key_fields = []
+      self.shard_key_fields = {}
     end
 
     # Get the shard key fields.
1
# frozen_string_literal: true # encoding: utf-8 module Mongoid # This module contains behavior for adding shard key fields to updates. # # @since 4.0.0 module Shardable extend ActiveSupport::Concern included do cattr_accessor :shard_key_fields self.shard_key_fields = [] end # Get the shard key fields. # # @note Refactored from using delegate for class load performance. # # @example Get the shard key fields. # model.shard_key_fields # # @return [ Array<String> ] The shard key field names. # # @since 1.0.0 def shard_key_fields self.class.shard_key_fields end # Get the document selector with the defined shard keys. # # @example Get the selector for the shard keys. # person.shard_key_selector # # @return [ Hash ] The shard key selector. # # @since 2.0.0 def shard_key_selector selector = {} shard_key_fields.each do |field| selector[field.to_s] = new_record? ? send(field) : attribute_was(field) end selector end module ClassMethods # Specifies a shard key with the field(s) specified. # # @example Specify the shard key. # # class Person # include Mongoid::Document # field :first_name, :type => String # field :last_name, :type => String # # shard_key :first_name, :last_name # end # # @since 2.0.0 def shard_key(*names) names.each do |name| self.shard_key_fields << self.database_field_name(name).to_sym end end end end end
1
12,292
This is an API change. Why was it made?
mongodb-mongoid
rb
@@ -1,5 +1,7 @@
 // Copyright (c) Microsoft. All rights reserved.
 
+using Microsoft.VisualStudio.TestPlatform.CoreUtilities.Tracing;
+
 namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Discovery
 {
     using System;
1
// Copyright (c) Microsoft. All rights reserved. namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Discovery { using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.IO; using System.Linq; using Microsoft.VisualStudio.TestPlatform.Common.ExtensionFramework; using Microsoft.VisualStudio.TestPlatform.Common.ExtensionFramework.Utilities; using Microsoft.VisualStudio.TestPlatform.Common.Interfaces; using Microsoft.VisualStudio.TestPlatform.ObjectModel; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Adapter; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Logging; using Microsoft.VisualStudio.TestPlatform.Common.Logging; /// <summary> /// Enumerates through all the discoverers. /// </summary> internal class DiscovererEnumerator { private DiscoveryResultCache discoveryResultCache; /// <summary> /// Initializes a new instance of the <see cref="DiscovererEnumerator"/> class. /// </summary> /// <param name="discoveryResultCache"> The discovery result cache. </param> public DiscovererEnumerator(DiscoveryResultCache discoveryResultCache) { this.discoveryResultCache = discoveryResultCache; } /// <summary> /// Discovers tests from the sources. /// </summary> /// <param name="testExtensionSourceMap"> The test extension source map. </param> /// <param name="settings"> The settings. </param> /// <param name="logger"> The logger. </param> internal void LoadTests(IDictionary<string, IEnumerable<string>> testExtensionSourceMap, IRunSettings settings, IMessageLogger logger) { foreach (var kvp in testExtensionSourceMap) { this.LoadTestsFromAnExtension(kvp.Key, kvp.Value, settings, logger); } } /// <summary> /// Loads test cases from individual source. /// Discovery extensions update progress through ITestCaseDiscoverySink. /// Discovery extensions sends discovery messages through TestRunMessageLoggerProxy /// </summary> /// <param name="extensionAssembly"> The extension Assembly. </param> /// <param name="sources"> The sources. </param> /// <param name="settings"> The settings. </param> /// <param name="logger"> The logger. </param> [SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes", Justification = "This methods must invoke all possible discoverers and not fail or crash in any one.")] private void LoadTestsFromAnExtension(string extensionAssembly, IEnumerable<string> sources, IRunSettings settings, IMessageLogger logger) { var discovererToSourcesMap = GetDiscovererToSourcesMap(extensionAssembly, sources, logger); // Warning is logged for in the inner function if (discovererToSourcesMap == null || discovererToSourcesMap.Count() == 0) { return; } var context = new DiscoveryContext { RunSettings = settings }; // Set on the logger the TreatAdapterErrorAsWarning setting from runsettings. this.SetAdapterLoggingSettings(logger, settings); var discoverySink = new TestCaseDiscoverySink(this.discoveryResultCache); foreach (var discoverer in discovererToSourcesMap.Keys) { Type discovererType = null; // See if discoverer can be instantiated successfully else move next. 
try { discovererType = discoverer.Value.GetType(); } catch (Exception e) { var mesage = string.Format( CultureInfo.CurrentUICulture, CrossPlatEngine.Resources.DiscovererInstantiationException, e.Message); logger.SendMessage(TestMessageLevel.Warning, mesage); EqtTrace.Error(e); continue; } // if instantiated successfully, get tests try { if (EqtTrace.IsVerboseEnabled) { EqtTrace.Verbose( "DiscoveryContext.LoadTests: Loading tests for {0}", discoverer.Value.GetType().FullName); } discoverer.Value.DiscoverTests(discovererToSourcesMap[discoverer], context, logger, discoverySink); if (EqtTrace.IsVerboseEnabled) { EqtTrace.Verbose( "DiscoveryContext.LoadTests: Done loading tests for {0}", discoverer.Value.GetType().FullName); } } catch (Exception e) { var message = string.Format( CultureInfo.CurrentUICulture, CrossPlatEngine.Resources.ExceptionFromLoadTests, discovererType.Name, e.Message); logger.SendMessage(TestMessageLevel.Error, message); EqtTrace.Error(e); } } } private void SetAdapterLoggingSettings(IMessageLogger messageLogger, IRunSettings runSettings) { var discoveryMessageLogger = messageLogger as TestSessionMessageLogger; if (discoveryMessageLogger != null && runSettings != null) { #if Todo // Todo: Enable this when RunSettings is enabled. IRunConfigurationSettingsProvider runConfigurationSettingsProvider = (IRunConfigurationSettingsProvider)runSettings.GetSettings(ObjectModel.Constants.RunConfigurationSettingsName); if (runConfigurationSettingsProvider != null && runConfigurationSettingsProvider.Settings != null) { discoveryMessageLogger.TreatTestAdapterErrorsAsWarnings = runConfigurationSettingsProvider.Settings.TreatTestAdapterErrorsAsWarnings; } #endif } } /// <summary> /// Get the discoverers matching with the parameter sources /// </summary> /// <param name="extensionAssembly"> The extension assembly. </param> /// <param name="sources"> The sources. </param> /// <param name="logger"> The logger instance. </param> /// <returns> The map between an extension type and a source. </returns> internal static Dictionary<LazyExtension<ITestDiscoverer, ITestDiscovererCapabilities>, IEnumerable<string>> GetDiscovererToSourcesMap(string extensionAssembly, IEnumerable<string> sources, IMessageLogger logger) { var allDiscoverers = GetDiscoverers(extensionAssembly, throwOnError: true); if (allDiscoverers == null || !allDiscoverers.Any()) { // No discoverer available, log a warning logger.SendMessage( TestMessageLevel.Warning, String.Format(CultureInfo.CurrentCulture, CrossPlatEngine.Resources.NoDiscovererRegistered)); return null; } var result = new Dictionary<LazyExtension<ITestDiscoverer, ITestDiscovererCapabilities>, IEnumerable<string>>(); var sourcesForWhichNoDiscovererIsAvailable = new List<string>(sources); foreach (var discoverer in allDiscoverers) { // Find the sources which this discoverer can look at. // Based on whether it is registered for a matching file extension or no file extensions at all. var matchingSources = (from source in sources where (discoverer.Metadata.FileExtension == null || discoverer.Metadata.FileExtension.Contains( Path.GetExtension(source), StringComparer.OrdinalIgnoreCase)) select source).ToList(); // ToList is required to actually execute the query // Update the source list for which no matching source is available. 
if (matchingSources.Any()) { sourcesForWhichNoDiscovererIsAvailable = sourcesForWhichNoDiscovererIsAvailable.Except(matchingSources, StringComparer.OrdinalIgnoreCase) .ToList(); result.Add(discoverer, matchingSources); } } if (EqtTrace.IsWarningEnabled && sourcesForWhichNoDiscovererIsAvailable != null) { foreach (var source in sourcesForWhichNoDiscovererIsAvailable) { // Log a warning to logfile, not to the "default logger for discovery time messages". EqtTrace.Warning( "No test discoverer is registered to perform discovery for the type of test source '{0}'. Register a test discoverer for this source type and try again.", source); } } return result; } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")] [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1006:DoNotNestGenericTypesInMemberSignatures")] private static IEnumerable<LazyExtension<ITestDiscoverer, ITestDiscovererCapabilities>> GetDiscoverers( string extensionAssembly, bool throwOnError) { try { if (string.IsNullOrEmpty(extensionAssembly) || string.Equals(extensionAssembly, ObjectModel.Constants.UnspecifiedAdapterPath)) { // full discovery. return TestDiscoveryExtensionManager.Create().Discoverers; } else { return TestDiscoveryExtensionManager.GetDiscoveryExtensionManager(extensionAssembly).Discoverers; } } catch (Exception ex) { EqtTrace.Error( "TestDiscoveryManager: LoadExtensions: Exception occured while loading extensions {0}", ex); if (throwOnError) { throw; } return null; } } } }
1
11,094
Please move the using directive inside the namespace (the requested placement is sketched after this row).
microsoft-vstest
.cs
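The review comment for the row above asks that the using directive added by this row's patch be moved inside the namespace; the full file shown above already keeps all of its other using directives inside the namespace block. A minimal sketch of the requested placement, with the unchanged members elided:

// Copyright (c) Microsoft. All rights reserved.

namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Discovery
{
    using System;
    using System.Collections.Generic;
    // ... remaining using directives from the original file ...

    // Moved inside the namespace, next to the existing using directives,
    // instead of being added above the namespace as in the patch.
    using Microsoft.VisualStudio.TestPlatform.CoreUtilities.Tracing;

    internal class DiscovererEnumerator
    {
        // ... unchanged ...
    }
}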
@@ -511,7 +511,7 @@ namespace Nethermind.Blockchain
             }
         }
 
-        private AddBlockResult Suggest(Block? block, BlockHeader header, bool shouldProcess = true, bool? setAsMain = null)
+        private AddBlockResult Suggest(Block? block, BlockHeader header, bool shouldProcess = true, bool? setAsMain = null, bool poSEnabled = false)
         {
 #if DEBUG
             /* this is just to make sure that we do not fall into this trap when creating tests */
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Collections; using System.Collections.Generic; using System.IO; using System.Linq; using System.Threading; using Nethermind.Blockchain.Find; using Nethermind.Blockchain.Synchronization; using Nethermind.Core; using Nethermind.Core.Attributes; using Nethermind.Core.Caching; using Nethermind.Core.Crypto; using Nethermind.Core.Extensions; using Nethermind.Core.Specs; using Nethermind.Db; using Nethermind.Int256; using Nethermind.Logging; using Nethermind.Serialization.Rlp; using Nethermind.State.Repositories; using Nethermind.Db.Blooms; namespace Nethermind.Blockchain { [Todo(Improve.Refactor, "After the fast sync work there are some duplicated code parts for the 'by header' and 'by block' approaches.")] public partial class BlockTree : IBlockTree { // there is not much logic in the addressing here private const long LowestInsertedBodyNumberDbEntryAddress = 0; private static byte[] StateHeadHashDbEntryAddress = new byte[16]; internal static Keccak DeletePointerAddressInDb = new(new BitArray(32 * 8, true).ToBytes()); internal static Keccak HeadAddressInDb = Keccak.Zero; private const int CacheSize = 64; private readonly ICache<Keccak, Block> _blockCache = new LruCache<Keccak, Block>(CacheSize, CacheSize, "blocks"); private readonly ICache<Keccak, BlockHeader> _headerCache = new LruCache<Keccak, BlockHeader>(CacheSize, CacheSize, "headers"); private const int BestKnownSearchLimit = 256_000_000; private readonly object _batchInsertLock = new(); private readonly IDb _blockDb; private readonly IDb _headerDb; private readonly IDb _blockInfoDb; private ICache<long, HashSet<Keccak>> _invalidBlocks = new LruCache<long, HashSet<Keccak>>(128, 128, "invalid blocks"); private readonly BlockDecoder _blockDecoder = new(); private readonly HeaderDecoder _headerDecoder = new(); private readonly ILogger _logger; private readonly ISpecProvider _specProvider; private readonly IBloomStorage _bloomStorage; private readonly ISyncConfig _syncConfig; private readonly IChainLevelInfoRepository _chainLevelInfoRepository; private bool _tryToRecoverFromHeaderBelowBodyCorruption = false; public BlockHeader? Genesis { get; private set; } public Block? Head { get; private set; } public BlockHeader? BestSuggestedHeader { get; private set; } public Block? BestSuggestedBody { get; private set; } public BlockHeader? LowestInsertedHeader { get; private set; } private long? _lowestInsertedReceiptBlock; public long? 
LowestInsertedBodyNumber { get => _lowestInsertedReceiptBlock; set { _lowestInsertedReceiptBlock = value; if (value.HasValue) { _blockDb.Set(LowestInsertedBodyNumberDbEntryAddress, Rlp.Encode(value.Value).Bytes); } } } public long BestKnownNumber { get; private set; } public ulong ChainId => _specProvider.ChainId; private int _canAcceptNewBlocksCounter; public bool CanAcceptNewBlocks => _canAcceptNewBlocksCounter == 0; public BlockTree( IDbProvider? dbProvider, IChainLevelInfoRepository? chainLevelInfoRepository, ISpecProvider? specProvider, IBloomStorage? bloomStorage, ILogManager? logManager) : this(dbProvider?.BlocksDb, dbProvider?.HeadersDb, dbProvider?.BlockInfosDb, chainLevelInfoRepository, specProvider, bloomStorage, new SyncConfig(), logManager) { } public BlockTree( IDbProvider? dbProvider, IChainLevelInfoRepository? chainLevelInfoRepository, ISpecProvider? specProvider, IBloomStorage? bloomStorage, ISyncConfig? syncConfig, ILogManager? logManager) : this(dbProvider?.BlocksDb, dbProvider?.HeadersDb, dbProvider?.BlockInfosDb, chainLevelInfoRepository, specProvider, bloomStorage, syncConfig, logManager) { } public BlockTree( IDb? blockDb, IDb? headerDb, IDb? blockInfoDb, IChainLevelInfoRepository? chainLevelInfoRepository, ISpecProvider? specProvider, IBloomStorage? bloomStorage, ILogManager? logManager) : this(blockDb, headerDb, blockInfoDb, chainLevelInfoRepository, specProvider, bloomStorage, new SyncConfig(), logManager) { } public BlockTree( IDb? blockDb, IDb? headerDb, IDb? blockInfoDb, IChainLevelInfoRepository? chainLevelInfoRepository, ISpecProvider? specProvider, IBloomStorage? bloomStorage, ISyncConfig? syncConfig, ILogManager? logManager) { _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); _blockDb = blockDb ?? throw new ArgumentNullException(nameof(blockDb)); _headerDb = headerDb ?? throw new ArgumentNullException(nameof(headerDb)); _blockInfoDb = blockInfoDb ?? throw new ArgumentNullException(nameof(blockInfoDb)); _specProvider = specProvider ?? throw new ArgumentNullException(nameof(specProvider)); _bloomStorage = bloomStorage ?? throw new ArgumentNullException(nameof(bloomStorage)); _syncConfig = syncConfig ?? throw new ArgumentNullException(nameof(syncConfig)); _chainLevelInfoRepository = chainLevelInfoRepository ?? throw new ArgumentNullException(nameof(chainLevelInfoRepository)); byte[]? deletePointer = _blockInfoDb.Get(DeletePointerAddressInDb); if (deletePointer is not null) { DeleteBlocks(new Keccak(deletePointer)); } ChainLevelInfo? genesisLevel = LoadLevel(0); if (genesisLevel is not null) { if (genesisLevel.BlockInfos.Length != 1) { // just for corrupted test bases genesisLevel.BlockInfos = new[] {genesisLevel.BlockInfos[0]}; _chainLevelInfoRepository.PersistLevel(0, genesisLevel); //throw new InvalidOperationException($"Genesis level in DB has {genesisLevel.BlockInfos.Length} blocks"); } if (genesisLevel.BlockInfos[0].WasProcessed) { BlockHeader genesisHeader = FindHeader(genesisLevel.BlockInfos[0].BlockHash, BlockTreeLookupOptions.None); Genesis = genesisHeader; LoadStartBlock(); } RecalculateTreeLevels(); AttemptToFixCorruptionByMovingHeadBackwards(); } if (_logger.IsInfo) _logger.Info($"Block tree initialized, " + $"last processed is {Head?.Header.ToString(BlockHeader.Format.Short) ?? "0"}, " + $"best queued is {BestSuggestedHeader?.Number.ToString() ?? 
"0"}, " + $"best known is {BestKnownNumber}, " + $"lowest inserted header {LowestInsertedHeader?.Number}, " + $"body {LowestInsertedBodyNumber}"); ThisNodeInfo.AddInfo("Chain ID :", $"{Nethermind.Core.ChainId.GetChainName(ChainId)}"); ThisNodeInfo.AddInfo("Chain head :", $"{Head?.Header.ToString(BlockHeader.Format.Short) ?? "0"}"); } private void AttemptToFixCorruptionByMovingHeadBackwards() { if (_tryToRecoverFromHeaderBelowBodyCorruption && BestSuggestedHeader != null) { ChainLevelInfo chainLevelInfo = LoadLevel(BestSuggestedHeader.Number); BlockInfo? canonicalBlock = chainLevelInfo?.MainChainBlock; if (canonicalBlock is not null) { SetHeadBlock(canonicalBlock.BlockHash!); } else { _logger.Error("Failed attempt to fix 'header < body' corruption caused by an unexpected shutdown."); } } } private void RecalculateTreeLevels() { if (_syncConfig.BeamSyncFixMode) { if (Head is null) { throw new InvalidOperationException( $"Head is null when entering {nameof(_syncConfig.BeamSyncFixMode)}"); } BestKnownNumber = Head.Number; BestSuggestedBody = Head; BestSuggestedHeader = Head.Header; return; } LoadLowestInsertedBodyNumber(); LoadLowestInsertedHeader(); LoadBestKnown(); } private void LoadLowestInsertedBodyNumber() { LowestInsertedBodyNumber = _blockDb.Get(LowestInsertedBodyNumberDbEntryAddress)? .AsRlpValueContext().DecodeLong(); } private void LoadLowestInsertedHeader() { long left = 1L; long right = _syncConfig.PivotNumberParsed; bool HasLevel(long blockNumber) { ChainLevelInfo level = LoadLevel(blockNumber); return level is not null; } long? lowestInsertedHeader = BinarySearchBlockNumber(left, right, HasLevel, BinarySearchDirection.Down); if (lowestInsertedHeader.HasValue) { ChainLevelInfo? level = LoadLevel(lowestInsertedHeader.Value); if (level is null) { throw new InvalidDataException( $"Missing chain level at number {lowestInsertedHeader.Value} when calling {nameof(LoadLowestInsertedHeader)}"); } BlockInfo blockInfo = level.BlockInfos[0]; LowestInsertedHeader = FindHeader(blockInfo.BlockHash, BlockTreeLookupOptions.None); } } private void LoadBestKnown() { long left = (Head?.Number ?? 0) == 0 ? Math.Max(_syncConfig.PivotNumberParsed, LowestInsertedHeader?.Number ?? 0) - 1 : Head.Number; long right = Math.Max(0, left) + BestKnownSearchLimit; bool LevelExists(long blockNumber) { return LoadLevel(blockNumber) is not null; } bool HeaderExists(long blockNumber) { ChainLevelInfo level = LoadLevel(blockNumber); if (level is null) { return false; } foreach (BlockInfo blockInfo in level.BlockInfos) { if (FindHeader(blockInfo.BlockHash, BlockTreeLookupOptions.None) is not null) { return true; } } return false; } bool BodyExists(long blockNumber) { ChainLevelInfo level = LoadLevel(blockNumber); if (level is null) { return false; } foreach (BlockInfo blockInfo in level.BlockInfos) { if (FindBlock(blockInfo.BlockHash, BlockTreeLookupOptions.None) is not null) { return true; } } return false; } long bestKnownNumberFound = BinarySearchBlockNumber(1, left, LevelExists) ?? 0; long bestKnownNumberAlternative = BinarySearchBlockNumber(left, right, LevelExists) ?? 0; long bestSuggestedHeaderNumber = BinarySearchBlockNumber(1, left, HeaderExists) ?? 0; long bestSuggestedHeaderNumberAlternative = BinarySearchBlockNumber(left, right, HeaderExists) ?? 0; long bestSuggestedBodyNumber = BinarySearchBlockNumber(1, left, BodyExists) ?? 0; long bestSuggestedBodyNumberAlternative = BinarySearchBlockNumber(left, right, BodyExists) ?? 
0; if (_logger.IsInfo) _logger.Info("Numbers resolved, " + $"level = Max({bestKnownNumberFound}, {bestKnownNumberAlternative}), " + $"header = Max({bestSuggestedHeaderNumber}, {bestSuggestedHeaderNumberAlternative}), " + $"body = Max({bestSuggestedBodyNumber}, {bestSuggestedBodyNumberAlternative})"); BestKnownNumber = Math.Max(bestKnownNumberFound, bestKnownNumberAlternative); bestSuggestedHeaderNumber = Math.Max(bestSuggestedHeaderNumber, bestSuggestedHeaderNumberAlternative); bestSuggestedBodyNumber = Math.Max(bestSuggestedBodyNumber, bestSuggestedBodyNumberAlternative); if (BestKnownNumber < 0 || bestSuggestedHeaderNumber < 0 || bestSuggestedBodyNumber < 0 || bestSuggestedHeaderNumber < bestSuggestedBodyNumber) { if (_logger.IsWarn) _logger.Warn( $"Detected corrupted block tree data ({bestSuggestedHeaderNumber} < {bestSuggestedBodyNumber}) (possibly due to an unexpected shutdown). Attempting to fix by moving head backwards. This may fail and you may need to resync the node."); if (bestSuggestedHeaderNumber < bestSuggestedBodyNumber) { bestSuggestedBodyNumber = bestSuggestedHeaderNumber; _tryToRecoverFromHeaderBelowBodyCorruption = true; } else { throw new InvalidDataException("Invalid initial block tree state loaded - " + $"best known: {BestKnownNumber}|" + $"best header: {bestSuggestedHeaderNumber}|" + $"best body: {bestSuggestedBodyNumber}|"); } } BestSuggestedHeader = FindHeader(bestSuggestedHeaderNumber, BlockTreeLookupOptions.None); var bestSuggestedBodyHeader = FindHeader(bestSuggestedBodyNumber, BlockTreeLookupOptions.None); BestSuggestedBody = bestSuggestedBodyHeader is null ? null : FindBlock(bestSuggestedBodyHeader.Hash, BlockTreeLookupOptions.None); } private enum BinarySearchDirection { Up, Down } private static long? BinarySearchBlockNumber(long left, long right, Func<long, bool> isBlockFound, BinarySearchDirection direction = BinarySearchDirection.Up) { if (left > right) { return null; } long? result = null; while (left != right) { long index = direction == BinarySearchDirection.Up ? left + (right - left) / 2 : right - (right - left) / 2; if (isBlockFound(index)) { result = index; if (direction == BinarySearchDirection.Up) { left = index + 1; } else { right = index - 1; } } else { if (direction == BinarySearchDirection.Up) { right = index; } else { left = index; } } } if (isBlockFound(left)) { result = direction == BinarySearchDirection.Up ? left : right; } return result; } public AddBlockResult Insert(BlockHeader header) { if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } if (header.Hash is null) { throw new InvalidOperationException("An attempt to insert a block header without a known hash."); } if (header.Bloom is null) { throw new InvalidOperationException("An attempt to insert a block header without a known bloom."); } if (header.Number == 0) { throw new InvalidOperationException("Genesis block should not be inserted."); } if (header.TotalDifficulty is null) { SetTotalDifficulty(header); } // validate hash here // using previously received header RLPs would allows us to save 2GB allocations on a sample // 3M Goerli blocks fast sync Rlp newRlp = _headerDecoder.Encode(header); _headerDb.Set(header.Hash, newRlp.Bytes); BlockInfo blockInfo = new(header.Hash, header.TotalDifficulty ?? 0); ChainLevelInfo chainLevel = new(true, blockInfo); _chainLevelInfoRepository.PersistLevel(header.Number, chainLevel); _bloomStorage.Store(header.Number, header.Bloom); if (header.Number < (LowestInsertedHeader?.Number ?? 
long.MaxValue)) { LowestInsertedHeader = header; } if (header.Number > BestKnownNumber) { BestKnownNumber = header.Number; } if (header.Number > (BestSuggestedHeader?.Number ?? 0)) { BestSuggestedHeader = header; } return AddBlockResult.Added; } public AddBlockResult Insert(Block block) { if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } if (block.Hash is null) { throw new InvalidOperationException("An attempt to store a block with a null hash."); } if (block.Number == 0) { throw new InvalidOperationException("Genesis block should not be inserted."); } // if we carry Rlp from the network message all the way here then we could solve 4GB of allocations and some processing // by avoiding encoding back to RLP here (allocations measured on a sample 3M blocks Goerli fast sync Rlp newRlp = _blockDecoder.Encode(block); _blockDb.Set(block.Hash, newRlp.Bytes); return AddBlockResult.Added; } public void Insert(IEnumerable<Block> blocks) { lock (_batchInsertLock) { // TODO: why is this commented out? why was it here in the first place? (2021-03-27) // try // { // _blockDb.StartBatch(); foreach (Block block in blocks) { Insert(block); } // } // finally // { // _blockDb.CommitBatch(); // } } } private AddBlockResult Suggest(Block? block, BlockHeader header, bool shouldProcess = true, bool? setAsMain = null) { #if DEBUG /* this is just to make sure that we do not fall into this trap when creating tests */ if (header.StateRoot is null && !header.IsGenesis) { throw new InvalidDataException($"State root is null in {header.ToString(BlockHeader.Format.Short)}"); } #endif if (header.Hash is null) { throw new InvalidOperationException("An attempt to suggest a header with a null hash."); } if (!CanAcceptNewBlocks) { return AddBlockResult.CannotAccept; } HashSet<Keccak> invalidBlocksWithThisNumber = _invalidBlocks.Get(header.Number); if (invalidBlocksWithThisNumber?.Contains(header.Hash) ?? false) { return AddBlockResult.InvalidBlock; } bool isKnown = IsKnownBlock(header.Number, header.Hash); if (isKnown && (BestSuggestedHeader?.Number ?? 0) >= header.Number) { if (_logger.IsTrace) _logger.Trace($"Block {header.Hash} already known."); return AddBlockResult.AlreadyKnown; } if (!header.IsGenesis && !IsKnownBlock(header.Number - 1, header.ParentHash!)) { if (_logger.IsTrace) _logger.Trace($"Could not find parent ({header.ParentHash}) of block {header.Hash}"); return AddBlockResult.UnknownParent; } SetTotalDifficulty(header); if (block is not null && !isKnown) { if (block.Hash is null) { throw new InvalidOperationException("An attempt to suggest block with a null hash."); } Rlp newRlp = _blockDecoder.Encode(block); _blockDb.Set(block.Hash, newRlp.Bytes); } if (!isKnown) { Rlp newRlp = _headerDecoder.Encode(header); _headerDb.Set(header.Hash, newRlp.Bytes); BlockInfo blockInfo = new(header.Hash, header.TotalDifficulty ?? 0); UpdateOrCreateLevel(header.Number, blockInfo, setAsMain is null ? !shouldProcess : setAsMain.Value); NewSuggestedBlock?.Invoke(this, new BlockEventArgs(block)); } if (header.IsGenesis || header.TotalDifficulty > (BestSuggestedHeader?.TotalDifficulty ?? 0)) { if (header.IsGenesis) { Genesis = header; } BestSuggestedHeader = header; if (block is not null && shouldProcess) { BestSuggestedBody = block; NewBestSuggestedBlock?.Invoke(this, new BlockEventArgs(block)); } } return AddBlockResult.Added; } public AddBlockResult SuggestHeader(BlockHeader header) { return Suggest(null, header); } public AddBlockResult SuggestBlock(Block block, bool shouldProcess = true, bool? 
setAsMain = null) { if (Genesis is null && !block.IsGenesis) { throw new InvalidOperationException("Block tree should be initialized with genesis before suggesting other blocks."); } return Suggest(block, block.Header, shouldProcess, setAsMain); } public BlockHeader? FindHeader(long number, BlockTreeLookupOptions options) { Keccak blockHash = GetBlockHashOnMainOrBestDifficultyHash(number); return blockHash is null ? null : FindHeader(blockHash, options); } public Keccak? FindBlockHash(long blockNumber) => GetBlockHashOnMainOrBestDifficultyHash(blockNumber); public BlockHeader? FindHeader(Keccak? blockHash, BlockTreeLookupOptions options) { if (blockHash is null || blockHash == Keccak.Zero) { // TODO: would be great to check why this is still needed (maybe it is something archaic) return null; } BlockHeader? header = _headerDb.Get(blockHash, _headerDecoder, _headerCache, false); if (header is null) { return null; } header.Hash ??= blockHash; bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None; bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical; if ((totalDifficultyNeeded && header.TotalDifficulty is null) || requiresCanonical) { (BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(header.Number, header.Hash, true); if (level is null || blockInfo is null) { // TODO: this is here because storing block data is not transactional // TODO: would be great to remove it, he? if (_logger.IsTrace) _logger.Trace($"Entering missing block info in {nameof(FindHeader)} scope when head is {Head?.ToString(Block.Format.Short)}"); SetTotalDifficulty(header); blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty!.Value); level = UpdateOrCreateLevel(header.Number, blockInfo); } else { header.TotalDifficulty = blockInfo.TotalDifficulty; } if (requiresCanonical) { bool isMain = level.MainChainBlock?.BlockHash?.Equals(blockHash) == true; header = isMain ? header : null; } } if (header is not null && ShouldCache(header.Number)) { _headerCache.Set(blockHash, header); } return header; } /// <returns> /// If level has a block on the main chain then returns the block info,otherwise <value>null</value> /// </returns> public BlockInfo? FindCanonicalBlockInfo(long blockNumber) { ChainLevelInfo level = LoadLevel(blockNumber); if (level is null) { return null; } if (level.HasBlockOnMainChain) { BlockInfo blockInfo = level.BlockInfos[0]; blockInfo.BlockNumber = blockNumber; return blockInfo; } return null; } public Keccak? FindHash(long number) { return GetBlockHashOnMainOrBestDifficultyHash(number); } public BlockHeader[] FindHeaders(Keccak? 
blockHash, int numberOfBlocks, int skip, bool reverse) { if (numberOfBlocks == 0) { return Array.Empty<BlockHeader>(); } if (blockHash is null) { return new BlockHeader[numberOfBlocks]; } BlockHeader startHeader = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (startHeader is null) { return new BlockHeader[numberOfBlocks]; } if (numberOfBlocks == 1) { return new[] {startHeader}; } if (skip == 0) { /* if we do not skip and we have the last block then we can assume that all the blocks are there and we can use the fact that we can use parent hash and that searching by hash is much faster as it does not require the step of resolving number -> hash */ BlockHeader endHeader = FindHeader(startHeader.Number + numberOfBlocks - 1, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (endHeader is not null) { return FindHeadersReversedFull(endHeader, numberOfBlocks); } } BlockHeader[] result = new BlockHeader[numberOfBlocks]; BlockHeader current = startHeader; int directionMultiplier = reverse ? -1 : 1; int responseIndex = 0; do { result[responseIndex] = current; responseIndex++; long nextNumber = startHeader.Number + directionMultiplier * (responseIndex * skip + responseIndex); if (nextNumber < 0) { break; } current = FindHeader(nextNumber, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } while (current is not null && responseIndex < numberOfBlocks); return result; } private BlockHeader[] FindHeadersReversedFull(BlockHeader startHeader, int numberOfBlocks) { if (startHeader is null) throw new ArgumentNullException(nameof(startHeader)); if (numberOfBlocks == 1) { return new[] {startHeader}; } BlockHeader[] result = new BlockHeader[numberOfBlocks]; BlockHeader current = startHeader; int responseIndex = numberOfBlocks - 1; do { result[responseIndex] = current; responseIndex--; if (responseIndex < 0) { break; } current = this.FindParentHeader(current, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } while (current is not null && responseIndex < numberOfBlocks); return result; } public BlockHeader? FindLowestCommonAncestor(BlockHeader firstDescendant, BlockHeader secondDescendant, long maxSearchDepth) { if (firstDescendant.Number > secondDescendant.Number) { firstDescendant = GetAncestorAtNumber(firstDescendant, secondDescendant.Number); } else if (secondDescendant.Number > firstDescendant.Number) { secondDescendant = GetAncestorAtNumber(secondDescendant, firstDescendant.Number); } long currentSearchDepth = 0; while ( firstDescendant is not null && secondDescendant is not null && firstDescendant.Hash != secondDescendant.Hash) { if (currentSearchDepth++ >= maxSearchDepth) return null; firstDescendant = this.FindParentHeader(firstDescendant, BlockTreeLookupOptions.TotalDifficultyNotNeeded); secondDescendant = this.FindParentHeader(secondDescendant, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } return firstDescendant; } private BlockHeader? GetAncestorAtNumber(BlockHeader header, long number) { BlockHeader? result = header; while (result is not null && result.Number < number) { result = this.FindParentHeader(result, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } return header; } private Keccak? 
GetBlockHashOnMainOrBestDifficultyHash(long blockNumber) { if (blockNumber < 0) { throw new ArgumentException($"{nameof(blockNumber)} must be greater or equal zero and is {blockNumber}", nameof(blockNumber)); } ChainLevelInfo level = LoadLevel(blockNumber); if (level is null) { return null; } if (level.HasBlockOnMainChain) { return level.BlockInfos[0].BlockHash; } UInt256 bestDifficultySoFar = UInt256.Zero; Keccak bestHash = null; for (int i = 0; i < level.BlockInfos.Length; i++) { BlockInfo current = level.BlockInfos[i]; if (level.BlockInfos[i].TotalDifficulty > bestDifficultySoFar) { bestDifficultySoFar = current.TotalDifficulty; bestHash = current.BlockHash; } } return bestHash; } public Block? FindBlock(long blockNumber, BlockTreeLookupOptions options) { Keccak hash = GetBlockHashOnMainOrBestDifficultyHash(blockNumber); return FindBlock(hash, options); } public void DeleteInvalidBlock(Block invalidBlock) { if (invalidBlock.Hash is null) { if (_logger.IsWarn) _logger.Warn($"{nameof(DeleteInvalidBlock)} call has been made for a block without a null hash."); return; } if (_logger.IsDebug) _logger.Debug($"Deleting invalid block {invalidBlock.ToString(Block.Format.FullHashAndNumber)}"); HashSet<Keccak>? invalidBlocksWithThisNumber = _invalidBlocks.Get(invalidBlock.Number) ?? new HashSet<Keccak>(); invalidBlocksWithThisNumber.Add(invalidBlock.Hash); _invalidBlocks.Set(invalidBlock.Number, invalidBlocksWithThisNumber); BestSuggestedHeader = Head?.Header; BestSuggestedBody = Head; BlockAcceptingNewBlocks(); try { DeleteBlocks(invalidBlock.Hash!); } finally { ReleaseAcceptingNewBlocks(); } } private void DeleteBlocks(Keccak deletePointer) { BlockHeader? deleteHeader = FindHeader(deletePointer, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (deleteHeader is null) { if (_logger.IsWarn) _logger.Warn( $"Cannot delete invalid block {deletePointer} - block has not been added to the database or has already been deleted."); return; } if (deleteHeader.Hash is null) { if (_logger.IsWarn) _logger.Warn( $"Cannot delete invalid block {deletePointer} - black has a null hash."); return; } long currentNumber = deleteHeader.Number; Keccak currentHash = deleteHeader.Hash; Keccak? nextHash = null; ChainLevelInfo? nextLevel = null; using BatchWrite batch = _chainLevelInfoRepository.StartBatch(); while (true) { ChainLevelInfo? currentLevel = nextLevel ?? LoadLevel(currentNumber); nextLevel = LoadLevel(currentNumber + 1); bool shouldRemoveLevel = false; if (currentLevel is not null) // preparing update of the level (removal of the invalid branch block) { if (currentLevel.BlockInfos.Length == 1) { shouldRemoveLevel = true; } else { currentLevel.BlockInfos = currentLevel.BlockInfos.Where(bi => bi.BlockHash != currentHash).ToArray(); } } // just finding what the next descendant will be if (nextLevel is not null) { nextHash = FindChild(nextLevel, currentHash); } UpdateDeletePointer(nextHash); if (shouldRemoveLevel) { BestKnownNumber = Math.Min(BestKnownNumber, currentNumber - 1); _chainLevelInfoRepository.Delete(currentNumber, batch); } else if(currentLevel is not null) { _chainLevelInfoRepository.PersistLevel(currentNumber, currentLevel, batch); } if (_logger.IsInfo) _logger.Info($"Deleting invalid block {currentHash} at level {currentNumber}"); _blockCache.Delete(currentHash); _blockDb.Delete(currentHash); _headerCache.Delete(currentHash); _headerDb.Delete(currentHash); if (nextHash is null) { break; } currentNumber++; currentHash = nextHash; nextHash = null; } } private Keccak? 
FindChild(ChainLevelInfo level, Keccak parentHash) { Keccak childHash = null; for (int i = 0; i < level.BlockInfos.Length; i++) { Keccak potentialChildHash = level.BlockInfos[i].BlockHash; BlockHeader? potentialChild = FindHeader(potentialChildHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (potentialChild is null) { if(_logger.IsWarn) _logger.Warn( $"Block with hash {potentialChildHash} has been found on chain level but its header is missing from the DB."); return null; } if (potentialChild.ParentHash == parentHash) { childHash = potentialChildHash; break; } } return childHash; } public bool IsMainChain(BlockHeader blockHeader) { ChainLevelInfo? chainLevelInfo = LoadLevel(blockHeader.Number); bool isMain = chainLevelInfo is not null && chainLevelInfo.MainChainBlock?.BlockHash?.Equals(blockHeader.Hash) == true; return isMain; } public bool IsMainChain(Keccak blockHash) { BlockHeader? header = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (header is null) { throw new InvalidOperationException($"Not able to retrieve block number for an unknown block {blockHash}"); } return IsMainChain(header); } public BlockHeader? FindBestSuggestedHeader() => BestSuggestedHeader; public bool WasProcessed(long number, Keccak blockHash) { ChainLevelInfo? levelInfo = LoadLevel(number); if (levelInfo is null) { throw new InvalidOperationException($"Not able to find block {blockHash} from an unknown level {number}"); } int? index = FindIndex(blockHash, levelInfo); if (index is null) { throw new InvalidOperationException($"Not able to find block {blockHash} index on the chain level"); } return levelInfo.BlockInfos[index.Value].WasProcessed; } public void UpdateMainChain(Block[] blocks, bool wereProcessed, bool forceUpdateHeadBlock = false) { if (blocks.Length == 0) { return; } bool ascendingOrder = true; if (blocks.Length > 1) { if (blocks[^1].Number < blocks[0].Number) { ascendingOrder = false; } } #if DEBUG for (int i = 0; i < blocks.Length; i++) { if (i != 0) { if (ascendingOrder && blocks[i].Number != blocks[i - 1].Number + 1) { throw new InvalidOperationException("Update main chain invoked with gaps"); } if (!ascendingOrder && blocks[i - 1].Number != blocks[i].Number + 1) { throw new InvalidOperationException("Update main chain invoked with gaps"); } } } #endif long lastNumber = ascendingOrder ? blocks[^1].Number : blocks[0].Number; long previousHeadNumber = Head?.Number ?? 0L; using BatchWrite batch = _chainLevelInfoRepository.StartBatch(); if (previousHeadNumber > lastNumber) { for (long i = 0; i < previousHeadNumber - lastNumber; i++) { long levelNumber = previousHeadNumber - i; ChainLevelInfo? level = LoadLevel(levelNumber); if (level is not null) { level.HasBlockOnMainChain = false; _chainLevelInfoRepository.PersistLevel(levelNumber, level, batch); } } } for (int i = 0; i < blocks.Length; i++) { Block block = blocks[i]; if (ShouldCache(block.Number)) { _blockCache.Set(block.Hash, blocks[i]); _headerCache.Set(block.Hash, block.Header); } // we only force update head block for last block in processed blocks bool lastProcessedBlock = i == blocks.Length - 1; MoveToMain(blocks[i], batch, wereProcessed, forceUpdateHeadBlock && lastProcessedBlock); } } /// <summary> /// Moves block to main chain. 
/// </summary> /// <param name="block">Block to move</param> /// <param name="batch">Db batch</param> /// <param name="wasProcessed">Was block processed (full sync), or not (fast sync)</param> /// <param name="forceUpdateHeadBlock">Force updating <see cref="Head"/> to this block, even when <see cref="Block.TotalDifficulty"/> is not higher than previous head.</param> /// <exception cref="InvalidOperationException">Invalid block</exception> [Todo(Improve.MissingFunctionality, "Recalculate bloom storage on reorg.")] private void MoveToMain(Block block, BatchWrite batch, bool wasProcessed, bool forceUpdateHeadBlock) { if (block.Hash is null) { throw new InvalidOperationException("An attempt to move to main a block with hash not set."); } if (block.Bloom is null) { throw new InvalidOperationException("An attempt to move to main a block with bloom not set."); } ChainLevelInfo? level = LoadLevel(block.Number); int? index = level is null ? null : FindIndex(block.Hash, level); if (index is null) { throw new InvalidOperationException($"Cannot move unknown block {block.ToString(Block.Format.FullHashAndNumber)} to main"); } Keccak hashOfThePreviousMainBlock = level.MainChainBlock?.BlockHash; BlockInfo info = level.BlockInfos[index.Value]; info.WasProcessed = wasProcessed; if (index.Value != 0) { (level.BlockInfos[index.Value], level.BlockInfos[0]) = (level.BlockInfos[0], level.BlockInfos[index.Value]); } level.HasBlockOnMainChain = true; _chainLevelInfoRepository.PersistLevel(block.Number, level, batch); _bloomStorage.Store(block.Number, block.Bloom); Block previous = hashOfThePreviousMainBlock is not null && hashOfThePreviousMainBlock != block.Hash ? FindBlock(hashOfThePreviousMainBlock, BlockTreeLookupOptions.TotalDifficultyNotNeeded) : null; BlockAddedToMain?.Invoke(this, new BlockReplacementEventArgs(block, previous)); if (forceUpdateHeadBlock || block.IsGenesis || block.TotalDifficulty > (Head?.TotalDifficulty ?? 0)) { if (block.Number == 0) { Genesis = block.Header; } if (block.TotalDifficulty is null) { throw new InvalidOperationException("Head block with null total difficulty"); } if (wasProcessed) { UpdateHeadBlock(block); } } if (_logger.IsTrace) _logger.Trace($"Block {block.ToString(Block.Format.Short)} added to main chain"); } private void LoadStartBlock() { Block? startBlock = null; byte[] persistedNumberData = _blockInfoDb.Get(StateHeadHashDbEntryAddress); long? persistedNumber = persistedNumberData is null ? (long?) null : new RlpStream(persistedNumberData).DecodeLong(); if (persistedNumber is not null) { startBlock = FindBlock(persistedNumber.Value, BlockTreeLookupOptions.None); _logger.Warn($"Start block loaded from reorg boundary - {persistedNumber} - {startBlock?.ToString(Block.Format.Short)}"); } else { byte[] data = _blockInfoDb.Get(HeadAddressInDb); if (data is not null) { startBlock = FindBlock(new Keccak(data), BlockTreeLookupOptions.None); _logger.Warn($"Start block loaded from HEAD - {startBlock?.ToString(Block.Format.Short)}"); } } if (startBlock is not null) { if (startBlock.Hash is null) { throw new InvalidDataException("The start block hash is null."); } SetHeadBlock(startBlock.Hash); } } private void SetHeadBlock(Keccak headHash) { Block? headBlock = FindBlock(headHash, BlockTreeLookupOptions.None); if (headBlock is null) { throw new InvalidOperationException("An attempt to set a head block that has not been stored in the DB."); } ChainLevelInfo? level = LoadLevel(headBlock.Number); int? index = level is null ? 
null : FindIndex(headHash, level); if (!index.HasValue) { throw new InvalidDataException("Head block data missing from chain info"); } headBlock.Header.TotalDifficulty = level.BlockInfos[index.Value].TotalDifficulty; Head = headBlock; } public bool IsKnownBlock(long number, Keccak blockHash) { if (number > BestKnownNumber) { return false; } // IsKnownBlock will be mainly called when new blocks are incoming // and these are very likely to be all at the head of the chain if (blockHash == Head?.Hash) { return true; } if (_headerCache.Get(blockHash) is not null) { return true; } ChainLevelInfo level = LoadLevel(number); return level is not null && FindIndex(blockHash, level).HasValue; } private void UpdateDeletePointer(Keccak? hash) { if (hash is null) { _blockInfoDb.Delete(DeletePointerAddressInDb); } else { if (_logger.IsInfo) _logger.Info($"Deleting an invalid block or its descendant {hash}"); _blockInfoDb.Set(DeletePointerAddressInDb, hash.Bytes); } } public void UpdateHeadBlock(Keccak blockHash) { if(_logger.IsError) _logger.Error($"Block tree override detected - updating head block to {blockHash}."); _blockInfoDb.Set(HeadAddressInDb, blockHash.Bytes); } private void UpdateHeadBlock(Block block) { if (block.Hash is null) { throw new InvalidOperationException("Block suggested as the new head block has no hash set."); } if (block.IsGenesis) { Genesis = block.Header; } Head = block; _blockInfoDb.Set(HeadAddressInDb, block.Hash.Bytes); NewHeadBlock?.Invoke(this, new BlockEventArgs(block)); } private ChainLevelInfo UpdateOrCreateLevel(long number, BlockInfo blockInfo, bool setAsMain = false) { using (var batch = _chainLevelInfoRepository.StartBatch()) { ChainLevelInfo level = LoadLevel(number, false); if (level is not null) { BlockInfo[] blockInfos = level.BlockInfos; Array.Resize(ref blockInfos, blockInfos.Length + 1); if (setAsMain) { blockInfos[^1] = blockInfos[0]; blockInfos[0] = blockInfo; } else { blockInfos[^1] = blockInfo; } level.BlockInfos = blockInfos; } else { if (number > BestKnownNumber) { BestKnownNumber = number; } level = new ChainLevelInfo(false, new[] {blockInfo}); } if (setAsMain) { level.HasBlockOnMainChain = true; } _chainLevelInfoRepository.PersistLevel(number, level, batch); return level; } } private (BlockInfo Info, ChainLevelInfo Level) LoadInfo(long number, Keccak blockHash, bool forceLoad) { ChainLevelInfo chainLevelInfo = LoadLevel(number, forceLoad); if (chainLevelInfo is null) { return (null, null); } int? index = FindIndex(blockHash, chainLevelInfo); return index.HasValue ? (chainLevelInfo.BlockInfos[index.Value], chainLevelInfo) : (null, chainLevelInfo); } private static int? FindIndex(Keccak blockHash, ChainLevelInfo level) { for (int i = 0; i < level.BlockInfos.Length; i++) { Keccak hashAtIndex = level.BlockInfos[i].BlockHash; if (hashAtIndex.Equals(blockHash)) { return i; } } return null; } private ChainLevelInfo? LoadLevel(long number, bool forceLoad = true) { if (number > BestKnownNumber && !forceLoad) { return null; } return _chainLevelInfoRepository.LoadLevel(number); } /// <summary> /// To make cache useful even when we handle sync requests /// </summary> /// <param name="number"></param> /// <returns></returns> private bool ShouldCache(long number) { return number == 0L || Head is null || number > Head.Number - CacheSize && number <= Head.Number + 1; } public ChainLevelInfo? FindLevel(long number) { return _chainLevelInfoRepository.LoadLevel(number); } public Keccak? HeadHash => Head?.Hash; public Keccak? GenesisHash => Genesis?.Hash; public Keccak? 
PendingHash => Head?.Hash; public Block? FindBlock(Keccak? blockHash, BlockTreeLookupOptions options) { if (blockHash is null || blockHash == Keccak.Zero) { return null; } Block block = _blockDb.Get(blockHash, _blockDecoder, _blockCache, false); if (block is null) { return null; } bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None; bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical; if ((totalDifficultyNeeded && block.TotalDifficulty is null) || requiresCanonical) { (BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(block.Number, block.Hash, true); if (level is null || blockInfo is null) { // TODO: this is here because storing block data is not transactional // TODO: would be great to remove it, he? if (_logger.IsTrace) _logger.Trace($"Entering missing block info in {nameof(FindBlock)} scope when head is {Head?.ToString(Block.Format.Short)}"); SetTotalDifficulty(block.Header); blockInfo = new BlockInfo(block.Hash, block.TotalDifficulty!.Value); level = UpdateOrCreateLevel(block.Number, blockInfo); } else { block.Header.TotalDifficulty = blockInfo.TotalDifficulty; } if (requiresCanonical) { bool isMain = level.MainChainBlock?.BlockHash.Equals(blockHash) == true; block = isMain ? block : null; } } if (block is not null && ShouldCache(block.Number)) { _blockCache.Set(blockHash, block); _headerCache.Set(blockHash, block.Header); } return block; } private void SetTotalDifficulty(BlockHeader header) { BlockHeader GetParentHeader(BlockHeader current) => // TotalDifficultyNotNeeded is by design here, // if it was absent this would result in recursion, as if parent doesn't already have total difficulty // then it would call back to SetTotalDifficulty for it // This was original code but it could result in stack overflow this.FindParentHeader(current, BlockTreeLookupOptions.TotalDifficultyNotNeeded) ?? throw new InvalidOperationException($"An orphaned block on the chain {current}"); void SetTotalDifficultyDeep(BlockHeader current) { Stack<BlockHeader> stack = new(); while (current.TotalDifficulty is null) { (BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(current.Number, current.Hash, true); if (level is null || blockInfo is null) { stack.Push(current); if (_logger.IsTrace) _logger.Trace($"Calculating total difficulty for {current.ToString(BlockHeader.Format.Short)}"); current = GetParentHeader(current); } else { current.TotalDifficulty = blockInfo.TotalDifficulty; } } while (stack.TryPop(out BlockHeader child)) { child.TotalDifficulty = current.TotalDifficulty + child.Difficulty; BlockInfo blockInfo = new(child.Hash, child.TotalDifficulty.Value); UpdateOrCreateLevel(child.Number, blockInfo); if (_logger.IsTrace) _logger.Trace($"Calculated total difficulty for {child} is {child.TotalDifficulty}"); current = child; } } if (header.TotalDifficulty is not null) { return; } if (_logger.IsTrace) _logger.Trace($"Calculating total difficulty for {header.ToString(BlockHeader.Format.Short)}"); if (header.IsGenesis) { header.TotalDifficulty = header.Difficulty; } else { BlockHeader parentHeader = GetParentHeader(header); if (parentHeader.TotalDifficulty is null) { SetTotalDifficultyDeep(parentHeader); } header.TotalDifficulty = parentHeader.TotalDifficulty + header.Difficulty; } if (_logger.IsTrace) _logger.Trace($"Calculated total difficulty for {header} is {header.TotalDifficulty}"); } public event EventHandler<BlockReplacementEventArgs>? 
BlockAddedToMain; public event EventHandler<BlockEventArgs>? NewBestSuggestedBlock; public event EventHandler<BlockEventArgs>? NewSuggestedBlock; public event EventHandler<BlockEventArgs>? NewHeadBlock; /// <summary> /// Can delete a slice of the chain (usually invoked when the chain is corrupted in the DB). /// This will only allow to delete a slice starting somewhere before the head of the chain /// and ending somewhere after the head (in case there are some hanging levels later). /// </summary> /// <param name="startNumber">Start level of the slice to delete</param> /// <param name="endNumber">End level of the slice to delete</param> /// <exception cref="ArgumentException">Thrown when <paramref name="startNumber"/> ot <paramref name="endNumber"/> do not satisfy the slice position rules</exception> public int DeleteChainSlice(in long startNumber, long? endNumber) { int deleted = 0; endNumber ??= BestKnownNumber; if (endNumber - startNumber < 0) { throw new ArgumentException("Start number must be equal or greater end number.", nameof(startNumber)); } if (endNumber - startNumber > 50000) { throw new ArgumentException($"Cannot delete that many blocks at once (start: {startNumber}, end {endNumber}).", nameof(startNumber)); } if (startNumber < 1) { throw new ArgumentException("Start number must be strictly greater than 0", nameof(startNumber)); } Block? newHeadBlock = null; // we are running these checks before all the deletes if (Head.Number >= startNumber) { // greater than zero so will not fail ChainLevelInfo? chainLevelInfo = _chainLevelInfoRepository.LoadLevel(startNumber - 1); if (chainLevelInfo is null) { throw new InvalidDataException($"Chain level {startNumber - 1} does not exist when {startNumber} level exists."); } // there may be no canonical block marked on this level - then we just hack to genesis Keccak? newHeadHash = chainLevelInfo.HasBlockOnMainChain ? chainLevelInfo.BlockInfos[0].BlockHash : Genesis?.Hash; newHeadBlock = newHeadHash is null ? null : FindBlock(newHeadHash, BlockTreeLookupOptions.None); } using (_chainLevelInfoRepository.StartBatch()) { for (long i = endNumber.Value; i >= startNumber; i--) { ChainLevelInfo? chainLevelInfo = _chainLevelInfoRepository.LoadLevel(i); if (chainLevelInfo is null) { continue; } _chainLevelInfoRepository.Delete(i); deleted++; foreach (BlockInfo blockInfo in chainLevelInfo.BlockInfos) { Keccak blockHash = blockInfo.BlockHash; _blockInfoDb.Delete(blockHash); _blockDb.Delete(blockHash); _headerDb.Delete(blockHash); } } } if (newHeadBlock is not null) { UpdateHeadBlock(newHeadBlock); } return deleted; } internal void BlockAcceptingNewBlocks() { Interlocked.Increment(ref _canAcceptNewBlocksCounter); } internal void ReleaseAcceptingNewBlocks() { Interlocked.Decrement(ref _canAcceptNewBlocksCounter); } public void SavePruningReorganizationBoundary(long blockNumber) { _blockInfoDb.Set(StateHeadHashDbEntryAddress, Rlp.Encode(blockNumber).Bytes); } } }
1
26,227
Shall we create some enum flags for setAsMain, shouldProcess and PoS?
NethermindEth-nethermind
.cs
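The review comment above suggests folding the `setAsMain`/`shouldProcess` booleans of `Suggest`/`SuggestBlock` into flags. A minimal C# sketch of that idea, using hypothetical names (`BlockTreeSuggestOptions`, `ShouldProcess`, `ForceSetAsMain`, `ForceDontSetAsMain`) that are not taken from the Nethermind codebase:

```csharp
using System;

// Hypothetical flags enum combining the boolean knobs of Suggest/SuggestBlock.
// Names and members are illustrative only; the actual API may differ.
[Flags]
public enum BlockTreeSuggestOptions
{
    None = 0,
    ShouldProcess = 1,      // corresponds to shouldProcess: true
    ForceSetAsMain = 2,     // corresponds to setAsMain: true
    ForceDontSetAsMain = 4  // corresponds to setAsMain: false
}

public static class BlockTreeSuggestOptionsExtensions
{
    // Reproduces the current default: when setAsMain is not forced either way,
    // the block is marked as main only if it is not going to be processed.
    public static bool ShouldSetAsMain(this BlockTreeSuggestOptions options) =>
        (options & BlockTreeSuggestOptions.ForceSetAsMain) != 0
        || ((options & BlockTreeSuggestOptions.ForceDontSetAsMain) == 0
            && (options & BlockTreeSuggestOptions.ShouldProcess) == 0);
}
```

With something like this, call sites could pass a single options value instead of a `bool` plus a nullable `bool`, and new flags (e.g. PoS-related behaviour) could be added without widening the signature again.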
@@ -141,6 +141,16 @@ exit /B %errorlevel% # therefore, this command will always exit 0 if either service is installed on host, Command.new("sc query puppet || sc query pe-puppet", [], { :cmdexe => true }) + # (PA-514) value for PUPPET_AGENT_STARTUP_MODE should be present in + # registry and honored after install/upgrade. + reg_query_command = %Q(reg query "HKLM\\SOFTWARE\\Wow6432Node\\Puppet Labs\\PuppetInstaller" /v "RememberedPuppetAgentStartupMode" | findstr #{msi_opts['PUPPET_AGENT_STARTUP_MODE']}) + on host, Command.new(reg_query_command, [], { :cmdexe => true }) + + start_mode = msi_opts['PUPPET_AGENT_STARTUP_MODE'] == "Automatic" ? "Auto" : msi_opts['PUPPET_AGENT_STARTUP_MODE'] + service_query_command = %Q('WMIC SERVICE where (Name like "%Puppet" AND StartMode="#{start_mode}") | findstr Puppet') + + on host, Command.new(service_query_command, [], { :cmdexe => true }) + # emit the misc/versions.txt file which contains component versions for # puppet, facter, hiera, pxp-agent, packaging and vendored Ruby [
1
module Beaker module DSL module InstallUtils # # This module contains methods useful for Windows installs # module WindowsUtils # Given a host, returns it's system TEMP path # # @param [Host] host An object implementing {Beaker::Hosts}'s interface. # # @return [String] system temp path def get_system_temp_path(host) host.system_temp_path end alias_method :get_temp_path, :get_system_temp_path # Generates commands to be inserted into a Windows batch file to launch an MSI install # @param [String] msi_path The path of the MSI - can be a local Windows style file path like # c:\temp\puppet.msi OR a url like https://download.com/puppet.msi or file://c:\temp\puppet.msi # @param [Hash{String=>String}] msi_opts MSI installer options # See https://docs.puppetlabs.com/guides/install_puppet/install_windows.html#msi-properties # @param [String] log_path The path to write the MSI log - must be a local Windows style file path # # @api private def msi_install_script(msi_path, msi_opts, log_path) # msiexec requires backslashes in file paths launched under cmd.exe start /w url_pattern = /^(https?|file):\/\// msi_path = msi_path.gsub(/\//, "\\") if msi_path !~ url_pattern msi_params = msi_opts.map{|k, v| "#{k}=#{v}"}.join(' ') # msiexec requires quotes around paths with backslashes - c:\ or file://c:\ # not strictly needed for http:// but it simplifies this code batch_contents = <<-BATCH start /w msiexec.exe /i \"#{msi_path}\" /qn /L*V #{log_path} #{msi_params} exit /B %errorlevel% BATCH end # Given a host, path to MSI and MSI options, will create a batch file # on the host, returning the path to the randomized batch file and # the randomized log file # # @param [Host] host An object implementing {Beaker::Hosts}'s interface. # @param [String] msi_path The path of the MSI - can be a local Windows # style file path like c:\temp\puppet.msi OR a url like # https://download.com/puppet.msi or file://c:\temp\puppet.msi # @param [Hash{String=>String}] msi_opts MSI installer options # See https://docs.puppetlabs.com/guides/install_puppet/install_windows.html#msi-properties # # @api private # @return [String, String] path to the batch file, patch to the log file def create_install_msi_batch_on(host, msi_path, msi_opts) timestamp = Time.new.strftime('%Y-%m-%d_%H.%M.%S') tmp_path = host.system_temp_path tmp_path.gsub!('/', '\\') batch_name = "install-puppet-msi-#{timestamp}.bat" batch_path = "#{tmp_path}#{host.scp_separator}#{batch_name}" log_path = "#{tmp_path}\\install-puppet-#{timestamp}.log" Tempfile.open(batch_name) do |tmp_file| batch_contents = msi_install_script(msi_path, msi_opts, log_path) File.open(tmp_file.path, 'w') { |file| file.puts(batch_contents) } host.do_scp_to(tmp_file.path, batch_path, {}) end return batch_path, log_path end # Given hosts construct a PATH that includes puppetbindir, facterbindir and hierabindir # @param [Host, Array<Host>, String, Symbol] hosts One or more hosts to act upon, # or a role (String or Symbol) that identifies one or more hosts. # @param [String] msi_path The path of the MSI - can be a local Windows style file path like # c:\temp\puppet.msi OR a url like https://download.com/puppet.msi or file://c:\temp\puppet.msi # @param [Hash{String=>String}] msi_opts MSI installer options # See https://docs.puppetlabs.com/guides/install_puppet/install_windows.html#msi-properties # @option msi_opts [String] INSTALLIDIR Where Puppet and its dependencies should be installed. 
# (Defaults vary based on operating system and intaller architecture) # Requires Puppet 2.7.12 / PE 2.5.0 # @option msi_opts [String] PUPPET_MASTER_SERVER The hostname where the puppet master server can be reached. # (Defaults to puppet) # Requires Puppet 2.7.12 / PE 2.5.0 # @option msi_opts [String] PUPPET_CA_SERVER The hostname where the CA puppet master server can be reached, if you are using multiple masters and only one of them is acting as the CA. # (Defaults the value of PUPPET_MASTER_SERVER) # Requires Puppet 2.7.12 / PE 2.5.0 # @option msi_opts [String] PUPPET_AGENT_CERTNAME The node’s certificate name, and the name it uses when requesting catalogs. This will set a value for # (Defaults to the node's fqdn as discovered by facter fqdn) # Requires Puppet 2.7.12 / PE 2.5.0 # @option msi_opts [String] PUPPET_AGENT_ENVIRONMENT The node’s environment. # (Defaults to production) # Requires Puppet 3.3.1 / PE 3.1.0 # @option msi_opts [String] PUPPET_AGENT_STARTUP_MODE Whether the puppet agent service should run (or be allowed to run) # (Defaults to Manual - valid values are Automatic, Manual or Disabled) # Requires Puppet 3.4.0 / PE 3.2.0 # @option msi_opts [String] PUPPET_AGENT_ACCOUNT_USER Whether the puppet agent service should run (or be allowed to run) # (Defaults to LocalSystem) # Requires Puppet 3.4.0 / PE 3.2.0 # @option msi_opts [String] PUPPET_AGENT_ACCOUNT_PASSWORD The password to use for puppet agent’s user account # (No default) # Requires Puppet 3.4.0 / PE 3.2.0 # @option msi_opts [String] PUPPET_AGENT_ACCOUNT_DOMAIN The domain of puppet agent’s user account. # (Defaults to .) # Requires Puppet 3.4.0 / PE 3.2.0 # @option opts [Boolean] :debug output the MSI installation log when set to true # otherwise do not output log (false; default behavior) # # @example # install_msi_on(hosts, 'c:\puppet.msi', {:debug => true}) # # @api private def install_msi_on(hosts, msi_path, msi_opts = {}, opts = {}) block_on hosts do | host | msi_opts['PUPPET_AGENT_STARTUP_MODE'] ||= 'Manual' batch_path, log_file = create_install_msi_batch_on(host, msi_path, msi_opts) # begin / rescue here so that we can reuse existing error msg propagation begin # 1641 = ERROR_SUCCESS_REBOOT_INITIATED # 3010 = ERROR_SUCCESS_REBOOT_REQUIRED on host, Command.new("\"#{batch_path}\"", [], { :cmdexe => true }), :acceptable_exit_codes => [0, 1641, 3010] rescue on host, Command.new("type \"#{log_file}\"", [], { :cmdexe => true }) raise end if opts[:debug] on host, Command.new("type \"#{log_file}\"", [], { :cmdexe => true }) end if !host.is_cygwin? 
# HACK: for some reason, post install we need to refresh the connection to make puppet available for execution host.close end # verify service status post install # if puppet service exists, then pe-puppet is not queried # if puppet service does not exist, pe-puppet is queried and that exit code is used # therefore, this command will always exit 0 if either service is installed on host, Command.new("sc query puppet || sc query pe-puppet", [], { :cmdexe => true }) # emit the misc/versions.txt file which contains component versions for # puppet, facter, hiera, pxp-agent, packaging and vendored Ruby [ "\\\"%ProgramFiles%\\Puppet Labs\\puppet\\misc\\versions.txt\\\"", "\\\"%ProgramFiles(x86)%\\Puppet Labs\\puppet\\misc\\versions.txt\\\"" ].each do |path| on host, Command.new("\"if exist #{path} type #{path}\"", [], { :cmdexe => true }) end end end # Installs a specified msi path on given hosts # @param [Host, Array<Host>, String, Symbol] hosts One or more hosts to act upon, # or a role (String or Symbol) that identifies one or more hosts. # @param [String] msi_path The path of the MSI - can be a local Windows style file path like # c:\temp\foo.msi OR a url like https://download.com/foo.msi or file://c:\temp\foo.msi # @param [Hash{String=>String}] msi_opts MSI installer options # @option opts [Boolean] :debug output the MSI installation log when set to true # otherwise do not output log (false; default behavior) # # @example # generic_install_msi_on(hosts, 'https://releases.hashicorp.com/vagrant/1.8.4/vagrant_1.8.4.msi', {}, {:debug => true}) # # @api private def generic_install_msi_on(hosts, msi_path, msi_opts = {}, opts = {}) block_on hosts do | host | batch_path, log_file = create_install_msi_batch_on(host, msi_path, msi_opts) # begin / rescue here so that we can reuse existing error msg propagation begin # 1641 = ERROR_SUCCESS_REBOOT_INITIATED # 3010 = ERROR_SUCCESS_REBOOT_REQUIRED on host, Command.new("\"#{batch_path}\"", [], { :cmdexe => true }), :acceptable_exit_codes => [0, 1641, 3010] rescue on host, Command.new("type \"#{log_file}\"", [], { :cmdexe => true }) raise end if opts[:debug] on host, Command.new("type \"#{log_file}\"", [], { :cmdexe => true }) end if !host.is_cygwin? # HACK: for some reason, post install we need to refresh the connection to make puppet available for execution host.close end end end end end end end
1
13,677
Should this `PUPPET_AGENT_STARTUP_MODE` have a corresponding yardoc change?
voxpupuli-beaker
rb
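The question above is about documentation only. A sketch of what a matching yardoc note on `install_msi_on` could look like; the wording is illustrative, not the project's actual text:

```ruby
# @option msi_opts [String] PUPPET_AGENT_STARTUP_MODE Whether the puppet agent service should run
#   (Defaults to Manual - valid values are Automatic, Manual or Disabled)
#   After install/upgrade the chosen mode is verified against the
#   RememberedPuppetAgentStartupMode registry value and the Windows service StartMode.
#   Requires Puppet 3.4.0 / PE 3.2.0
def install_msi_on(hosts, msi_path, msi_opts = {}, opts = {})
  # ... body unchanged ...
end
```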
@@ -17,15 +17,10 @@ # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. +# pylint: disable=unused-import import pytest import pytest_bdd as bdd -# pylint: disable=unused-import from end2end.features.test_yankpaste_bdd import init_fake_clipboard - -pytestmark = pytest.mark.qtwebengine_todo("Caret mode is not implemented", - run=False) - - bdd.scenarios('caret.feature')
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2015-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. import pytest import pytest_bdd as bdd # pylint: disable=unused-import from end2end.features.test_yankpaste_bdd import init_fake_clipboard pytestmark = pytest.mark.qtwebengine_todo("Caret mode is not implemented", run=False) bdd.scenarios('caret.feature')
1
19,868
Why not simply remove the `pytest` import now that it's not needed anymore? :wink:
qutebrowser-qutebrowser
py
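Taking the reviewer's suggestion literally, the module would keep only the `pytest_bdd` import and the re-exported clipboard fixture. A minimal sketch of the resulting file (license header omitted):

```python
import pytest_bdd as bdd

# pylint: disable=unused-import
from end2end.features.test_yankpaste_bdd import init_fake_clipboard

bdd.scenarios('caret.feature')
```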
@@ -0,0 +1,12 @@ +// Copyright (c) Microsoft. All Rights Reserved. +// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. + +using Microsoft.VisualStudio.TestTools.UnitTesting; + +namespace Microsoft.CodeAnalysis.Sarif.Core +{ + [TestClass] + public class TagsTests + { + } +}
1
1
10,811
Yeah, not filled in yet.
microsoft-sarif-sdk
.cs
@@ -28,6 +28,13 @@ </div> </div> + <div class="row"> + <div class="form-group col-xs-12"> + <%= f.check_box :active, style: 'width: auto' %> + <%= f.label :active, _('Active'), class: 'control-label' %> + </div> + </div> + <div class="row"> <div class="form-group col-xs-6"> <%= f.label :starts_at, _('Start'), class: 'control-label' %>
1
<% url = @notification.new_record? ? super_admin_notifications_path : super_admin_notification_path(@notification) %> <%= form_for @notification, url: url, html: { class: 'notification' } do |f| %> <div class="row"> <div class="form-group col-xs-10"> <%= f.label :title, _('Title'), class: 'control-label' %> <%= f.text_field :title, class: 'form-control', value: @notification.title, spellcheck: true, "aria-required": true %> </div> <div class="form-group col-xs-2"> <%= f.label :level, _('Level'), class: 'control-label' %> <%= f.select :level, Notification.levels.keys.map { |l| [l.humanize, l] }, { value: @notification.level }, { class: 'form-control', data: { toggle: 'tooltip', html: true }, title: _('<strong>Info:</strong> Simple information message, displayed in blue.<br/><strong>Warning:</strong> warning message, for signaling something unusual, displayed in orange.<br/><strong>Danger:</strong> error message, for anything critical, displayed in red') } %> </div> </div> <div class="row"> <div class="form-group col-xs-12"> <%= f.check_box :dismissable, style: 'width: auto' %> <%= f.label :dismissable, _('Dismissable'), class: 'control-label' %> </div> </div> <div class="row"> <div class="form-group col-xs-6"> <%= f.label :starts_at, _('Start'), class: 'control-label' %> <%= f.date_field :starts_at, class: 'form-control', value: (@notification.starts_at || Date.today), min: Date.today %> </div> <div class="form-group col-xs-6"> <%= f.label :expires_at, _('Expiration'), class: 'control-label' %> <%= f.date_field :expires_at, class: 'form-control', value: (@notification.expires_at || Date.tomorrow), min: Date.tomorrow %> </div> </div> <div class="form-group"> <%= f.label :body, _('Body'), class: 'control-label' %> <%= f.text_area :body, class: 'form-control notification-text', value: @notification.body, "aria-required": true %> </div> <div class="pull-right"> <%= f.button _('Save'), class: 'btn btn-default', type: 'submit' %> <%= link_to( _('Delete'), super_admin_notification_path(@notification), class: 'btn btn-default', method: :delete, data: { confirm: _('Are you sure you want to delete the notification "%{title}"') % { title: @notification.title }}) unless @notification.new_record? %> <%= link_to _('Cancel'), super_admin_notifications_path, class: 'btn btn-default', role: 'button' %> </div> <% end %>
1
18,932
Don't use an inline style; use a class instead. Reducing `col-xs-12` down to 8 or 6 (or whatever fits) should do the trick.
DMPRoadmap-roadmap
rb
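One way to apply the suggestion above is to drop the inline `style` attribute, narrow the grid column, and move the width override into a CSS class. A sketch, assuming a hypothetical `checkbox-auto-width` class defined in the project's stylesheets:

```erb
<div class="row">
  <div class="form-group col-xs-6">
    <%# .checkbox-auto-width would carry `width: auto` in the stylesheet %>
    <%= f.check_box :active, class: 'checkbox-auto-width' %>
    <%= f.label :active, _('Active'), class: 'control-label' %>
  </div>
</div>
```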
@@ -218,6 +218,13 @@ public class Catalog implements AutoCloseable { Objects.requireNonNull(database, "database is null"); Objects.requireNonNull(tableName, "tableName is null"); TiTableInfo table = metaCache.getTable(database, tableName); + + if (table == null) { + // reload cache if table not exists + reloadCache(); + table = metaCache.getTable(database, tableName); + } + if (showRowId && table != null) { return table.copyTableWithRowId(); } else {
1
/* * Copyright 2017 PingCAP, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * See the License for the specific language governing permissions and * limitations under the License. */ package com.pingcap.tikv.catalog; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.pingcap.tikv.Snapshot; import com.pingcap.tikv.meta.TiDBInfo; import com.pingcap.tikv.meta.TiTableInfo; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.log4j.Logger; public class Catalog implements AutoCloseable { private Supplier<Snapshot> snapshotProvider; private ScheduledExecutorService service; private CatalogCache metaCache; private final boolean showRowId; private final String dbPrefix; private final Logger logger = Logger.getLogger(this.getClass()); @Override public void close() throws Exception { if (service != null) { service.shutdownNow(); service.awaitTermination(1, TimeUnit.SECONDS); } } private static class CatalogCache { private CatalogCache(CatalogTransaction transaction, String dbPrefix, boolean loadTables) { this.transaction = transaction; this.dbPrefix = dbPrefix; this.tableCache = new ConcurrentHashMap<>(); this.dbCache = loadDatabases(loadTables); this.currentVersion = transaction.getLatestSchemaVersion(); } private final Map<String, TiDBInfo> dbCache; private final ConcurrentHashMap<TiDBInfo, Map<String, TiTableInfo>> tableCache; private CatalogTransaction transaction; private long currentVersion; private final String dbPrefix; public CatalogTransaction getTransaction() { return transaction; } public long getVersion() { return currentVersion; } public TiDBInfo getDatabase(String name) { Objects.requireNonNull(name, "name is null"); return dbCache.get(name.toLowerCase()); } public List<TiDBInfo> listDatabases() { return ImmutableList.copyOf(dbCache.values()); } public List<TiTableInfo> listTables(TiDBInfo db) { Map<String, TiTableInfo> tableMap = tableCache.get(db); if (tableMap == null) { tableMap = loadTables(db); } return ImmutableList.copyOf(tableMap.values()); } public TiTableInfo getTable(TiDBInfo db, String tableName) { Map<String, TiTableInfo> tableMap = tableCache.get(db); if (tableMap == null) { tableMap = loadTables(db); } return tableMap.get(tableName.toLowerCase()); } private Map<String, TiTableInfo> loadTables(TiDBInfo db) { List<TiTableInfo> tables = transaction.getTables(db.getId()); ImmutableMap.Builder<String, TiTableInfo> builder = ImmutableMap.builder(); for (TiTableInfo table : tables) { builder.put(table.getName().toLowerCase(), table); } Map<String, TiTableInfo> tableMap = builder.build(); tableCache.put(db, tableMap); return tableMap; } private Map<String, TiDBInfo> loadDatabases(boolean loadTables) { HashMap<String, TiDBInfo> newDBCache = new HashMap<>(); 
List<TiDBInfo> databases = transaction.getDatabases(); databases.forEach( db -> { TiDBInfo newDBInfo = db.rename(dbPrefix + db.getName()); newDBCache.put(newDBInfo.getName().toLowerCase(), newDBInfo); if (loadTables) { loadTables(newDBInfo); } }); return newDBCache; } } public Catalog( Supplier<Snapshot> snapshotProvider, int refreshPeriod, TimeUnit periodUnit, boolean showRowId, String dbPrefix) { this.snapshotProvider = Objects.requireNonNull(snapshotProvider, "Snapshot Provider is null"); this.showRowId = showRowId; this.dbPrefix = dbPrefix; metaCache = new CatalogCache(new CatalogTransaction(snapshotProvider.get()), dbPrefix, false); service = Executors.newSingleThreadScheduledExecutor( new ThreadFactoryBuilder().setDaemon(true).build()); service.scheduleAtFixedRate( () -> { // Wrap this with a try catch block in case schedule update fails try { reloadCache(); } catch (Exception e) { logger.warn("Reload Cache failed", e); } }, refreshPeriod, refreshPeriod, periodUnit); } /** * read current row id from TiKV and write the calculated value back to TiKV. The calculation rule * is start(read from TiKV) + step. */ public synchronized long getAutoTableId(long dbId, long tableId, long step) { Snapshot snapshot = snapshotProvider.get(); CatalogTransaction newTrx = new CatalogTransaction(snapshot); return newTrx.getAutoTableId(dbId, tableId, step); } /** read current row id from TiKV according to database id and table id. */ public synchronized long getAutoTableId(long dbId, long tableId) { Snapshot snapshot = snapshotProvider.get(); CatalogTransaction newTrx = new CatalogTransaction(snapshot); return newTrx.getAutoTableId(dbId, tableId); } public synchronized void reloadCache(boolean loadTables) { Snapshot snapshot = snapshotProvider.get(); CatalogTransaction newTrx = new CatalogTransaction(snapshot); long latestVersion = newTrx.getLatestSchemaVersion(); if (latestVersion > metaCache.getVersion()) { metaCache = new CatalogCache(newTrx, dbPrefix, loadTables); } } public void reloadCache() { reloadCache(false); } public List<TiDBInfo> listDatabases() { return metaCache.listDatabases(); } public List<TiTableInfo> listTables(TiDBInfo database) { Objects.requireNonNull(database, "database is null"); if (showRowId) { return metaCache .listTables(database) .stream() .map(TiTableInfo::copyTableWithRowId) .collect(Collectors.toList()); } else { return metaCache.listTables(database); } } public TiDBInfo getDatabase(String dbName) { Objects.requireNonNull(dbName, "dbName is null"); return metaCache.getDatabase(dbName); } public TiTableInfo getTable(String dbName, String tableName) { TiDBInfo database = getDatabase(dbName); if (database == null) { return null; } return getTable(database, tableName); } public TiTableInfo getTable(TiDBInfo database, String tableName) { Objects.requireNonNull(database, "database is null"); Objects.requireNonNull(tableName, "tableName is null"); TiTableInfo table = metaCache.getTable(database, tableName); if (showRowId && table != null) { return table.copyTableWithRowId(); } else { return table; } } @VisibleForTesting public TiTableInfo getTable(TiDBInfo database, long tableId) { Objects.requireNonNull(database, "database is null"); Collection<TiTableInfo> tables = listTables(database); for (TiTableInfo table : tables) { if (table.getId() == tableId) { if (showRowId) { return table.copyTableWithRowId(); } else { return table; } } } return null; } }
1
9,818
should we also do `reloadCache` when database is null?
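A minimal sketch of what that could look like, reusing the `getDatabase`/`reloadCache`/`getTable` methods from the file above (a hypothetical drop-in replacement for `Catalog.getTable(String, String)`, not part of the original source):

```java
public TiTableInfo getTable(String dbName, String tableName) {
  TiDBInfo database = getDatabase(dbName);
  if (database == null) {
    // The schema may simply be newer than the cached snapshot, so force a
    // reload (including tables) and retry the lookup once before giving up.
    reloadCache(true);
    database = getDatabase(dbName);
    if (database == null) {
      return null;
    }
  }
  return getTable(database, tableName);
}
```

The trade-off is an extra round trip to TiKV for every lookup of a genuinely non-existent database, so callers that probe many missing names might prefer the current behaviour.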
pingcap-tispark
java
@@ -47,6 +47,15 @@ void SetMCSBondTyper(MCSParameters &p, BondComparator bondComp) { } } +ROMol *getQueryMol(MCSResult &mcsRes) { + ROMol *res; + { + NOGIL gil; + res = new ROMol(*mcsRes.QueryMol); + } + return res; +} + MCSResult *FindMCSWrapper(python::object mols, bool maximizeBonds, double threshold, unsigned timeout, bool verbose, bool matchValences, bool ringMatchesRingOnly,
1
// // Copyright (C) 2014 Novartis Institutes for BioMedical Research // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include <RDBoost/python.h> #include <GraphMol/ROMol.h> #include <RDBoost/Wrap.h> #include <GraphMol/FMCS/FMCS.h> namespace python = boost::python; namespace RDKit { void SetMCSAtomTyper(MCSParameters &p, AtomComparator atomComp) { switch (atomComp) { case AtomCompareAny: p.AtomTyper = MCSAtomCompareAny; break; case AtomCompareElements: p.AtomTyper = MCSAtomCompareElements; break; case AtomCompareIsotopes: p.AtomTyper = MCSAtomCompareIsotopes; break; case AtomCompareAnyHeavyAtom: p.AtomTyper = MCSAtomCompareAnyHeavyAtom; break; } } void SetMCSBondTyper(MCSParameters &p, BondComparator bondComp) { switch (bondComp) { case BondCompareAny: p.BondTyper = MCSBondCompareAny; break; case BondCompareOrder: p.BondTyper = MCSBondCompareOrder; break; case BondCompareOrderExact: p.BondTyper = MCSBondCompareOrderExact; break; } } MCSResult *FindMCSWrapper(python::object mols, bool maximizeBonds, double threshold, unsigned timeout, bool verbose, bool matchValences, bool ringMatchesRingOnly, bool completeRingsOnly, bool matchChiralTag, AtomComparator atomComp, BondComparator bondComp, RingComparator ringComp, std::string seedSmarts) { std::vector<ROMOL_SPTR> ms; unsigned int nElems = python::extract<unsigned int>(mols.attr("__len__")()); ms.resize(nElems); for (unsigned int i = 0; i < nElems; ++i) { if (!mols[i]) throw_value_error("molecule is None"); ms[i] = python::extract<ROMOL_SPTR>(mols[i]); } MCSParameters p; p.Threshold = threshold; p.MaximizeBonds = maximizeBonds; p.Timeout = timeout; p.Verbose = verbose; p.InitialSeed = seedSmarts; p.AtomCompareParameters.MatchValences = matchValences; p.AtomCompareParameters.MatchChiralTag = matchChiralTag; p.AtomCompareParameters.RingMatchesRingOnly = ringMatchesRingOnly; SetMCSAtomTyper(p, atomComp); SetMCSBondTyper(p, bondComp); p.BondCompareParameters.RingMatchesRingOnly = ringMatchesRingOnly; p.BondCompareParameters.CompleteRingsOnly = completeRingsOnly; p.BondCompareParameters.MatchFusedRings = (ringComp != IgnoreRingFusion); p.BondCompareParameters.MatchFusedRingsStrict = (ringComp == StrictRingFusion); MCSResult *res = nullptr; { NOGIL gil; res = new MCSResult(findMCS(ms, &p)); } return res; } MCSResult *FindMCSWrapper2(python::object mols, const MCSParameters &params) { std::vector<ROMOL_SPTR> ms; unsigned int nElems = python::extract<unsigned int>(mols.attr("__len__")()); ms.resize(nElems); for (unsigned int i = 0; i < nElems; ++i) { if (!mols[i]) throw_value_error("molecule is None"); ms[i] = python::extract<ROMOL_SPTR>(mols[i]); } MCSResult *res = nullptr; { NOGIL gil; res = new MCSResult(findMCS(ms, &params)); } return res; } } // namespace RDKit namespace { struct mcsresult_wrapper { static void wrap() { python::class_<RDKit::MCSResult>("MCSResult", "used to return MCS results", python::no_init) .def_readonly("numAtoms", &RDKit::MCSResult::NumAtoms, "number of atoms in MCS") .def_readonly("numBonds", &RDKit::MCSResult::NumBonds, "number of bonds in MCS") .def_readonly("smartsString", &RDKit::MCSResult::SmartsString, "SMARTS string for the MCS") .def_readonly("canceled", &RDKit::MCSResult::Canceled, "if True, the MCS calculation did not finish"); } }; } // namespace BOOST_PYTHON_MODULE(rdFMCS) { python::scope().attr("__doc__") = "Module containing a C++ 
implementation of the FMCS algorithm"; mcsresult_wrapper::wrap(); python::enum_<RDKit::AtomComparator>("AtomCompare") .value("CompareAny", RDKit::AtomCompareAny) .value("CompareElements", RDKit::AtomCompareElements) .value("CompareIsotopes", RDKit::AtomCompareIsotopes) .value("CompareAnyHeavyAtom", RDKit::AtomCompareAnyHeavyAtom); python::enum_<RDKit::BondComparator>("BondCompare") .value("CompareAny", RDKit::BondCompareAny) .value("CompareOrder", RDKit::BondCompareOrder) .value("CompareOrderExact", RDKit::BondCompareOrderExact); python::enum_<RDKit::RingComparator>("RingCompare") .value("IgnoreRingFusion", RDKit::IgnoreRingFusion) .value("PermissiveRingFusion", RDKit::PermissiveRingFusion) .value("StrictRingFusion", RDKit::StrictRingFusion); std::string docString = "Find the MCS for a set of molecules"; python::def( "FindMCS", RDKit::FindMCSWrapper, (python::arg("mols"), python::arg("maximizeBonds") = true, python::arg("threshold") = 1.0, python::arg("timeout") = 3600, python::arg("verbose") = false, python::arg("matchValences") = false, python::arg("ringMatchesRingOnly") = false, python::arg("completeRingsOnly") = false, python::arg("matchChiralTag") = false, python::arg("atomCompare") = RDKit::AtomCompareElements, python::arg("bondCompare") = RDKit::BondCompareOrder, python::arg("ringCompare") = RDKit::IgnoreRingFusion, python::arg("seedSmarts") = ""), python::return_value_policy<python::manage_new_object>(), docString.c_str()); python::class_<RDKit::MCSParameters, boost::noncopyable>( "MCSParameters", "Parameters controlling how the MCS is constructed") .def_readwrite("MaximizeBonds", &RDKit::MCSParameters::MaximizeBonds, "toggles maximizing the number of bonds (instead of the " "number of atoms)") .def_readwrite("Threshold", &RDKit::MCSParameters::Threshold, "fraction of the dataset that must contain the MCS") .def_readwrite("Timeout", &RDKit::MCSParameters::Timeout, "timeout (in seconds) for the calculation") .def_readwrite("Verbose", &RDKit::MCSParameters::Verbose, "toggles verbose mode") .def_readwrite("AtomCompareParameters", &RDKit::MCSParameters::AtomCompareParameters, "parameters for comparing atoms") .def_readwrite("BondCompareParameters", &RDKit::MCSParameters::BondCompareParameters, "parameters for comparing bonds") // haven't been able to get these properly working // .def_readwrite("AtomTyper", &RDKit::MCSParameters::AtomTyper, // "function for comparing atoms") // .def_readwrite("BondTyper", &RDKit::MCSParameters::BondTyper, // "function for comparing bonds") .def_readwrite("InitialSeed", &RDKit::MCSParameters::InitialSeed, "SMILES string to be used as the seed of the MCS") .def("SetAtomTyper", RDKit::SetMCSAtomTyper, (python::arg("self"), python::arg("comparator")), "sets the atom typer to be used. The argument should be one of the " "members of the rdFMCS.AtomCompare class.") .def("SetBondTyper", RDKit::SetMCSBondTyper, (python::arg("self"), python::arg("comparator")), "sets the bond typer to be used. 
The argument should be one of the " "members of the rdFMCS.BondCompare class."); ; python::class_<RDKit::MCSAtomCompareParameters, boost::noncopyable>( "MCSAtomCompareParameters", "Parameters controlling how atom-atom matching is done") .def_readwrite("MatchValences", &RDKit::MCSAtomCompareParameters::MatchValences, "include atom valences in the match") .def_readwrite("MatchChiralTag", &RDKit::MCSAtomCompareParameters::MatchChiralTag, "include atom chirality in the match") .def_readwrite("MatchFormalCharge", &RDKit::MCSAtomCompareParameters::MatchFormalCharge, "include formal charge in the match") .def_readwrite("RingMatchesRingOnly", &RDKit::MCSAtomCompareParameters::RingMatchesRingOnly, "ring atoms are only allowed to match other ring atoms"); python::class_<RDKit::MCSBondCompareParameters, boost::noncopyable>( "MCSBondCompareParameters", "Parameters controlling how bond-bond matching is done") .def_readwrite("RingMatchesRingOnly", &RDKit::MCSBondCompareParameters::RingMatchesRingOnly, "ring bonds are only allowed to match other ring bonds") .def_readwrite("CompleteRingsOnly", &RDKit::MCSBondCompareParameters::CompleteRingsOnly, "results cannot include partial rings") .def_readwrite("MatchFusedRings", &RDKit::MCSBondCompareParameters::MatchFusedRings, "enforce check on ring fusion, i.e. alpha-methylnaphthalene " "won't match beta-methylnaphtalene, but decalin " "will match cyclodecane unless MatchFusedRingsStrict is True") .def_readwrite("MatchFusedRingsStrict", &RDKit::MCSBondCompareParameters::MatchFusedRingsStrict, "only enforced if MatchFusedRings is True; the ring fusion " "must be the same in both query and target, i.e. decalin " "won't match cyclodecane") .def_readwrite("MatchStereo", &RDKit::MCSBondCompareParameters::MatchStereo, "include bond stereo in the comparison"); docString = "Find the MCS for a set of molecules"; python::def("FindMCS", RDKit::FindMCSWrapper2, (python::arg("mols"), python::arg("parameters")), python::return_value_policy<python::manage_new_object>(), docString.c_str()); }
1
20,046
Why not just return QueryMol directly? Why require the copy?
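A plausible reason for the copy is ownership: the file's wrappers hand results to Python with `return_value_policy<manage_new_object>`, so Python deletes whatever raw pointer it receives; returning the molecule that the `MCSResult` itself owns would then get it freed twice. The sketch below illustrates that hazard with stand-in types only — it is not RDKit code, and it assumes `QueryMol` is held through a smart pointer, as the `*mcsRes.QueryMol` dereference in the patch suggests.

```cpp
#include <iostream>
#include <memory>
#include <string>

struct Mol {                       // stand-in for ROMol
  std::string smiles;
};

struct Result {                    // stand-in for MCSResult
  std::shared_ptr<Mol> queryMol;   // owned by the result object
};

// Exposing the internally owned object: if the caller assumes ownership and
// deletes it (as manage_new_object does), the shared_ptr inside Result will
// delete it a second time when the Result is destroyed.
Mol *getQueryMolUnsafe(Result &r) { return r.queryMol.get(); }

// What the patch does instead: give the caller its own heap copy, so the two
// lifetimes are fully decoupled.
Mol *getQueryMolCopy(Result &r) { return new Mol(*r.queryMol); }

int main() {
  Result res{std::make_shared<Mol>(Mol{"c1ccccc1"})};

  Mol *copy = getQueryMolCopy(res);  // caller owns this one
  std::cout << copy->smiles << "\n";
  delete copy;                       // fine: independent object

  // delete getQueryMolUnsafe(res);  // would end in a double free
  return 0;
}
```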
rdkit-rdkit
cpp
@@ -84,6 +84,15 @@ def retry_test(func): assert success return result + +def scapy_path(fname): + """Resolves a path relative to scapy's root folder""" + if fname.startswith('/'): + fname = fname[1:] + return os.path.abspath(os.path.join( + os.path.dirname(__file__), '../../', fname + )) + # Import tool #
1
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) Philippe Biondi <[email protected]> # This program is published under a GPLv2 license """ Unit testing infrastructure for Scapy """ from __future__ import print_function import bz2 import copy import code import getopt import glob import hashlib import importlib import json import logging import os import os.path import sys import time import traceback import warnings import zlib from scapy.consts import WINDOWS import scapy.modules.six as six from scapy.modules.six.moves import range from scapy.config import conf from scapy.compat import base64_bytes, bytes_hex, plain_str from scapy.themes import DefaultTheme, BlackAndWhite # Check UTF-8 support # def _utf8_support(): """ Check UTF-8 support for the output """ try: if six.PY2: return False if WINDOWS: return (sys.stdout.encoding == "utf-8") return True except AttributeError: return False if _utf8_support(): arrow = "\u2514" dash = "\u2501" checkmark = "\u2713" else: arrow = "->" dash = "--" checkmark = "OK" # Util class # class Bunch: __init__ = lambda self, **kw: setattr(self, '__dict__', kw) def retry_test(func): """Retries the passed function 3 times before failing""" success = False ex = Exception("Unknown") for _ in six.moves.range(3): try: result = func() except Exception as e: time.sleep(1) ex = e else: success = True break if not success: raise ex assert success return result # Import tool # def import_module(name): if name.endswith(".py"): name = name[:-3] try: return importlib.import_module(name, package="scapy") except Exception: return importlib.import_module(name) # INTERNAL/EXTERNAL FILE EMBEDDING # class File: def __init__(self, name, URL, local): self.name = name self.local = local.encode("utf8") self.URL = URL def get_local(self): return bz2.decompress(base64_bytes(self.local)) def get_URL(self): return self.URL def write(self, dir): if dir: dir += "/" with open(dir + self.name, "wb") as fdesc: fdesc.write(self.get_local()) # Embed a base64 encoded bziped version of js and css files # to work if you can't reach Internet. 
class External_Files: UTscapy_js = File("UTscapy.js", "https://scapy.net/files/UTscapy/UTscapy.js", # noqa: E501 """QlpoOTFBWSZTWWVijKQAAXxfgERUYOvAChIhBAC /79+qQAH8AFA0poANAMjQAAAGABo0NGEZNBo0\n0BhgAaNDRhGTQaNNAYFURJinp lGaKbRkJiekzSenqmpA0Gm1LFMpRUklVQlK9WUTZYpNFI1IiEWE\nFT09Sfj5uO+ qO6S5DQwKIxM92+Zku94wL6V/1KTKan2c66Ug6SmVKy1ZIrgauxMVLF5xLH0lJRQ u\nKlqLF10iatlTzqvw7S9eS3+h4lu3GZyMgoOude3NJ1pQy8eo+X96IYZw+yneh siPj73m0rnvQ3QX\nZ9BJQiZQYQ5/uNcl2WOlC5vyQqV/BWsnr2NZYLYXQLDs/Bf fk4ZfR4/SH6GfA5Xlek4xHNHqbSsR\nbREOgueXo3kcYi94K6hSO3ldD2O/qJXOF qJ8o3TE2aQahxtQpCVUKQMvODHwu2YkaORYZC6gihEa\nllcHDIAtRPScBACAJnU ggYhLDX6DEko7nC9GvAw5OcEkiyDUbLdiGCzDaXWMC2DuQ2Y6sGf6NcRu\nON7QS bhHsPc4KKmZ/xdyRThQkGVijKQ=\n""") UTscapy_css = File("UTscapy.css", "https://scapy.net/files/UTscapy/UTscapy.css", # noqa: E501 """QlpoOTFBWSZTWbpATIwAAFpfgHwQSB//+Cpj2Q C//9/6UAS5t7qcLut3NNDp0gxKMmpqaep6n6iP\n1J+pPU0yAAaeoaDI0BJCTJqa j1BoaGhoAAPSAAAJNSRqmmk8TQmj1DT1Hom1HkQABoNDmmJgATAB\nMAAJgACYJI hDQUzCR5Q0niRoaAGgGmZS+faw7LNbkliDG1Q52WJCd85cxRVVKegld8qCRISoto GD\nEGREFEYRW0CxAgTb13lodjuN7E1aCFgRFVhiEmZAZ/ek+XR0c8DWiAKpBgY2 LNpQ1rOvlnoUI1Al\n0ySaP1w2MyFxoQqRicScCm6WnQOxDnufxk8s2deLLKlN+r fvxyTTCGRAWZONkVGIxVQRZGZLeAwH\nbpQXZcYj467i85knEOYWmLcokaqEGYGS xMCpD+cOIaL7GCxEU/aNSlWFNCvQBvzb915huAgdIdD2\nya9ZQGoqrmtommfAxu 7FGTDBNBfir9UkAMmT1KRzxasJ0n2OE+mlgTZzJnhydbJaMtAk8DJzUuvv\nZpc3 CJLVyr8F3NmIQO5E3SJSY3SQnk1CQwlELqFutXjeWWzmiywo7xJk5rUcVOV9+Ro4 96WmXsUr\nkKhNocbnFztqPhesccW5kja+KuNFmzdw4DVOBJ2JPhGOYSwCUiwUe2 kOshYBdULUmwYwToAGdgA9\n5n3bSpG85LUFIE0Cw78EYVgY0ESnYW5UdfgBhj1w PiiXDEG2vAtr38O9kdwg3tFU/0okilEjDYDa\nEfkomkLUSokmE8g1fMYBqQyyaP RWmySO3EtAuMVhQqIuMldOzLqWubl7k1MnhuBaELOgtB2TChcS\n0k7jvgdBKIef UkdAf3t2GO/LVSrDvkcb4l4TrwrI7JeCo8pBvXqZBqZJSqbsAziG7QDQVNqdtFGz \nEvMKOvKvUQ6mJFigLxBnziGQGQDEMQPSGhlV2BwAN6rZEmLwgED0OrEiSxXDcB MDskp36AV7IbKa\nCila/Wm1BKhBF+ZIqtiFyYpUhI1Q5+JK0zK7aVyLS9y7GaSr NCRpr7uaa1UgapVKs6wKKQzYCWsV\n8iCGrAkgWZEnDMJWCGUZOIpcmMle1UXSAl d5OoUYXNo0L7WSOcxEkSGjCcRhjvMRP1pAUuBPRCRA\n2lhC0ZgLYDAf5V2agMUa ki1ZgOQDXQ7aIDTdjGRTgnzPML0V1X+tIoSSZmZhrxZbluMWGEkwwky6\n0ObWIM cEbX4cawPPBVc6m5UUPbEmBANyjtNvTKE2ri7oOmBVKIMLqQKm+4rlmisu2uGSxW zTov5w\nqQDp61FkHk40wzQUKk4YcBlbQT1l8VXeZJYAVFjSJIcC8JykBYZJ1yka I4LDm5WP7s2NaRkhhV7A\nFVSD5zA8V/DJzfTk0QHmCT2wRgwPKjP60EqqlDUaST /i7kinChIXSAmRgA==\n""") def get_local_dict(cls): return {x: y.name for (x, y) in six.iteritems(cls.__dict__) if isinstance(y, File)} get_local_dict = classmethod(get_local_dict) def get_URL_dict(cls): return {x: y.URL for (x, y) in six.iteritems(cls.__dict__) if isinstance(y, File)} get_URL_dict = classmethod(get_URL_dict) # HELPER CLASSES FOR PARAMETRING OUTPUT FORMAT # class EnumClass: def from_string(cls, x): return cls.__dict__[x.upper()] from_string = classmethod(from_string) class Format(EnumClass): TEXT = 1 ANSI = 2 HTML = 3 LATEX = 4 XUNIT = 5 LIVE = 6 # TEST CLASSES # class TestClass: def __getitem__(self, item): return getattr(self, item) def add_keywords(self, kws): if isinstance(kws, six.string_types): kws = [kws.lower()] for kwd in kws: kwd = kwd.lower() if kwd.startswith('-'): try: self.keywords.remove(kwd[1:]) except KeyError: pass else: self.keywords.add(kwd) class TestCampaign(TestClass): def __init__(self, title): self.title = title self.filename = None self.headcomments = "" self.campaign = [] self.keywords = set() self.crc = None self.sha = None self.preexec = None self.preexec_output = None self.end_pos = 0 self.interrupted = False self.duration = 0.0 def add_testset(self, testset): self.campaign.append(testset) 
testset.keywords.update(self.keywords) def trunc(self, index): self.campaign = self.campaign[:index] def startNum(self, beginpos): for ts in self: for t in ts: t.num = beginpos beginpos += 1 self.end_pos = beginpos def __iter__(self): return self.campaign.__iter__() def all_tests(self): for ts in self: for t in ts: yield t class TestSet(TestClass): def __init__(self, name): self.name = name self.tests = [] self.comments = "" self.keywords = set() self.crc = None self.expand = 1 def add_test(self, test): self.tests.append(test) test.keywords.update(self.keywords) def trunc(self, index): self.tests = self.tests[:index] def __iter__(self): return self.tests.__iter__() class UnitTest(TestClass): def __init__(self, name): self.name = name self.test = "" self.comments = "" self.result = "passed" self.fresult = "" # make instance True at init to have a different truth value than None self.duration = 0 self.output = "" self.num = -1 self.keywords = set() self.crc = None self.expand = 1 def prepare(self, theme): if six.PY2: self.test = self.test.decode("utf8", "ignore") self.output = self.output.decode("utf8", "ignore") self.comments = self.comments.decode("utf8", "ignore") self.result = self.result.decode("utf8", "ignore") if self.result == "passed": self.fresult = theme.success(self.result) else: self.fresult = theme.fail(self.result) def __nonzero__(self): return self.result == "passed" __bool__ = __nonzero__ # Careful note: all data not included will be set by default. # Use -c as first argument !! def parse_config_file(config_path, verb=3): """Parse provided json to get configuration Empty default json: { "testfiles": [], "breakfailed": true, "onlyfailed": false, "verb": 3, "dump": 0, "docs": 0, "crc": true, "preexec": {}, "global_preexec": "", "outputfile": null, "local": true, "format": "ansi", "num": null, "modules": [], "kw_ok": [], "kw_ko": [] } """ with open(config_path) as config_file: data = json.load(config_file) if verb > 2: print(" %s Loaded config file" % arrow, config_path) def get_if_exist(key, default): return data[key] if key in data else default return Bunch(testfiles=get_if_exist("testfiles", []), breakfailed=get_if_exist("breakfailed", True), remove_testfiles=get_if_exist("remove_testfiles", []), onlyfailed=get_if_exist("onlyfailed", False), verb=get_if_exist("verb", 3), dump=get_if_exist("dump", 0), crc=get_if_exist("crc", 1), docs=get_if_exist("docs", 0), preexec=get_if_exist("preexec", {}), global_preexec=get_if_exist("global_preexec", ""), outfile=get_if_exist("outputfile", sys.stdout), local=get_if_exist("local", False), num=get_if_exist("num", None), modules=get_if_exist("modules", []), kw_ok=get_if_exist("kw_ok", []), kw_ko=get_if_exist("kw_ko", []), format=get_if_exist("format", "ansi")) # PARSE CAMPAIGN # def parse_campaign_file(campaign_file): test_campaign = TestCampaign("Test campaign") test_campaign.filename = campaign_file.name testset = None test = None testnb = 0 for line in campaign_file.readlines(): if line[0] == '#': continue if line[0] == "~": (test or testset or test_campaign).add_keywords(line[1:].split()) elif line[0] == "%": test_campaign.title = line[1:].strip() elif line[0] == "+": testset = TestSet(line[1:].strip()) test_campaign.add_testset(testset) test = None elif line[0] == "=": test = UnitTest(line[1:].strip()) test.num = testnb testnb += 1 if testset is None: error_m = "Please create a test set (i.e. '+' section)." 
raise getopt.GetoptError(error_m) testset.add_test(test) elif line[0] == "*": if test is not None: test.comments += line[1:] elif testset is not None: testset.comments += line[1:] else: test_campaign.headcomments += line[1:] else: if test is None: if line.strip(): raise ValueError("Unknown content [%s]" % line.strip()) else: test.test += line return test_campaign def dump_campaign(test_campaign): print("#" * (len(test_campaign.title) + 6)) print("## %(title)s ##" % test_campaign) print("#" * (len(test_campaign.title) + 6)) if test_campaign.sha and test_campaign.crc: print("CRC=[%(crc)s] SHA=[%(sha)s]" % test_campaign) print("from file %(filename)s" % test_campaign) print() for ts in test_campaign: if ts.crc: print("+--[%s]%s(%s)--" % (ts.name, "-" * max(2, 80 - len(ts.name) - 18), ts.crc)) # noqa: E501 else: print("+--[%s]%s" % (ts.name, "-" * max(2, 80 - len(ts.name) - 6))) if ts.keywords: print(" kw=%s" % ",".join(ts.keywords)) for t in ts: print("%(num)03i %(name)s" % t) c = k = "" if t.keywords: k = "kw=%s" % ",".join(t.keywords) if t.crc: c = "[%(crc)s] " % t if c or k: print(" %s%s" % (c, k)) def docs_campaign(test_campaign): print("%(title)s" % test_campaign) print("=" * (len(test_campaign.title))) print() if len(test_campaign.headcomments): print("%s" % test_campaign.headcomments.strip().replace("\n", "")) print() for ts in test_campaign: print("%s" % ts.name) print("-" * len(ts.name)) print() if len(ts.comments): print("%s" % ts.comments.strip().replace("\n", "")) print() for t in ts: print("%s" % t.name) print("^" * len(t.name)) print() if len(t.comments): print("%s" % t.comments.strip().replace("\n", "")) print() print("Usage example::") for line in t.test.split('\n'): if not line.rstrip().endswith('# no_docs'): print("\t%s" % line) # COMPUTE CAMPAIGN DIGESTS # if six.PY2: def crc32(x): return "%08X" % (0xffffffff & zlib.crc32(x)) def sha1(x): return hashlib.sha1(x).hexdigest().upper() else: def crc32(x): return "%08X" % (0xffffffff & zlib.crc32(bytearray(x, "utf8"))) def sha1(x): return hashlib.sha1(x.encode("utf8")).hexdigest().upper() def compute_campaign_digests(test_campaign): dc = "" for ts in test_campaign: dts = "" for t in ts: dt = t.test.strip() t.crc = crc32(dt) dts += "\0" + dt ts.crc = crc32(dts) dc += "\0\x01" + dts test_campaign.crc = crc32(dc) with open(test_campaign.filename) as fdesc: test_campaign.sha = sha1(fdesc.read()) # FILTER CAMPAIGN # def filter_tests_on_numbers(test_campaign, num): if num: for ts in test_campaign: ts.tests = [t for t in ts.tests if t.num in num] test_campaign.campaign = [ts for ts in test_campaign.campaign if ts.tests] def _filter_tests_kw(test_campaign, kw, keep): def kw_match(lst, kw): return any(k for k in lst if kw == k) if kw: kw = kw.lower() if keep: cond = lambda x: x else: cond = lambda x: not x for ts in test_campaign: ts.tests = [t for t in ts.tests if cond(kw_match(t.keywords, kw))] def filter_tests_keep_on_keywords(test_campaign, kw): return _filter_tests_kw(test_campaign, kw, True) def filter_tests_remove_on_keywords(test_campaign, kw): return _filter_tests_kw(test_campaign, kw, False) def remove_empty_testsets(test_campaign): test_campaign.campaign = [ts for ts in test_campaign.campaign if ts.tests] # RUN TEST # def run_test(test, get_interactive_session, theme, verb=3, ignore_globals=None, my_globals=None): """An internal UTScapy function to run a single test""" start_time = time.time() test.output, res = get_interactive_session(test.test.strip(), ignore_globals=ignore_globals, verb=verb, my_globals=my_globals) 
test.result = "failed" try: if res is None or res: test.result = "passed" if test.output.endswith('KeyboardInterrupt\n'): test.result = "interrupted" raise KeyboardInterrupt except Exception: test.output += "UTscapy: Error during result interpretation:\n" test.output += "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2],)) finally: test.duration = time.time() - start_time if test.result == "failed": from scapy.sendrecv import debug # Add optional debugging data to log if debug.crashed_on: cls, val = debug.crashed_on test.output += "\n\nPACKET DISSECTION FAILED ON:\n %s(hex_bytes('%s'))" % (cls.__name__, plain_str(bytes_hex(val))) debug.crashed_on = None test.prepare(theme) if verb > 2: print("%(fresult)6s %(crc)s %(duration)06.2fs %(name)s" % test) elif verb > 1: print("%(fresult)6s %(crc)s %(name)s" % test) return bool(test) # RUN CAMPAIGN # def import_UTscapy_tools(ses): """Adds UTScapy tools directly to a session""" ses["retry_test"] = retry_test ses["Bunch"] = Bunch if WINDOWS: from scapy.arch.windows import _route_add_loopback _route_add_loopback() ses["conf"].ifaces = conf.ifaces ses["conf"].route.routes = conf.route.routes ses["conf"].route6.routes = conf.route6.routes def run_campaign(test_campaign, get_interactive_session, theme, drop_to_interpreter=False, verb=3, ignore_globals=None, scapy_ses=None): passed = failed = 0 if test_campaign.preexec: test_campaign.preexec_output = get_interactive_session( test_campaign.preexec.strip(), ignore_globals=ignore_globals, my_globals=scapy_ses)[0] # Drop def drop(scapy_ses): code.interact(banner="Test '%s' failed. " "exit() to stop, Ctrl-D to leave " "this interpreter and continue " "with the current test campaign" % t.name, local=scapy_ses) try: for i, testset in enumerate(test_campaign): for j, t in enumerate(testset): if run_test(t, get_interactive_session, theme, verb=verb, my_globals=scapy_ses): passed += 1 else: failed += 1 if drop_to_interpreter: drop(scapy_ses) test_campaign.duration += t.duration except KeyboardInterrupt: failed += 1 testset.trunc(j + 1) test_campaign.trunc(i + 1) test_campaign.interrupted = True if verb: print("Campaign interrupted!") if drop_to_interpreter: drop(scapy_ses) test_campaign.passed = passed test_campaign.failed = failed style = [theme.success, theme.fail][bool(failed)] if verb > 2: print("Campaign CRC=%(crc)s in %(duration)06.2fs SHA=%(sha)s" % test_campaign) print(style("PASSED=%i FAILED=%i" % (passed, failed))) elif verb: print("Campaign CRC=%(crc)s SHA=%(sha)s" % test_campaign) print(style("PASSED=%i FAILED=%i" % (passed, failed))) return failed # INFO LINES # def info_line(test_campaign): filename = test_campaign.filename if filename is None: return "Run %s by UTscapy" % time.ctime() else: return "Run %s from [%s] by UTscapy" % (time.ctime(), filename) def html_info_line(test_campaign): filename = test_campaign.filename if filename is None: return """Run %s by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % time.ctime() # noqa: E501 else: return """Run %s from [%s] by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % (time.ctime(), filename) # noqa: E501 # CAMPAIGN TO something # def campaign_to_TEXT(test_campaign, theme): ptheme = [lambda x: x, theme.success][bool(test_campaign.passed)] ftheme = [lambda x: x, theme.fail][bool(test_campaign.failed)] output = theme.green("\n%(title)s\n" % test_campaign) output += dash + " " + info_line(test_campaign) + "\n" output += ptheme(" " + arrow + " Passed=%(passed)i\n" % 
test_campaign) output += ftheme(" " + arrow + " Failed=%(failed)i\n" % test_campaign) output += "%(headcomments)s\n" % test_campaign for testset in test_campaign: if any(t.expand for t in testset): output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset for t in testset: if t.expand: output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t # noqa: E501 return output def campaign_to_ANSI(test_campaign, theme): return campaign_to_TEXT(test_campaign, theme) def campaign_to_xUNIT(test_campaign): output = '<?xml version="1.0" encoding="UTF-8" ?>\n<testsuite>\n' for testset in test_campaign: for t in testset: output += ' <testcase classname="%s"\n' % testset.name.encode("string_escape").replace('"', ' ') # noqa: E501 output += ' name="%s"\n' % t.name.encode("string_escape").replace('"', ' ') # noqa: E501 output += ' duration="0">\n' % t if not t: output += '<error><![CDATA[%(output)s]]></error>\n' % t output += "</testcase>\n" output += '</testsuite>' return output def campaign_to_HTML(test_campaign): output = """ <h1>%(title)s</h1> <p> """ % test_campaign if test_campaign.crc is not None and test_campaign.sha is not None: output += "CRC=<span class=crc>%(crc)s</span> SHA=<span class=crc>%(sha)s</span><br>" % test_campaign output += "<small><em>" + html_info_line(test_campaign) + "</em></small>" output += "".join([ test_campaign.headcomments, "\n<p>", "PASSED=%(passed)i FAILED=%(failed)i" % test_campaign, " <span class=warn_interrupted>INTERRUPTED!</span>" if test_campaign.interrupted else "", "<p>\n\n", ]) for testset in test_campaign: output += "<h2>" % testset if testset.crc is not None: output += "<span class=crc>%(crc)s</span> " % testset output += "%(name)s</h2>\n%(comments)s\n<ul>\n" % testset for t in testset: output += """<li class=%(result)s id="tst%(num)il">\n""" % t if t.expand == 2: output += """ <span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">+%(num)03i+</span> <span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')">-%(num)03i-</span> """ % t else: output += """ <span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')">+%(num)03i+</span> <span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">-%(num)03i-</span> """ % t if t.crc is not None: output += "<span class=crc>%(crc)s</span>\n" % t output += """%(name)s\n<span class="comment %(result)s" id="tst%(num)i" """ % t # noqa: E501 if t.expand < 2: output += """ style="POSITION: absolute; VISIBILITY: hidden;" """ # noqa: E501 output += """><br>%(comments)s <pre> %(output)s</pre></span> """ % t output += "\n</ul>\n\n" return output def pack_html_campaigns(runned_campaigns, data, local=False, title=None): output = """ <html> <head> <title>%(title)s</title> <h1>UTScapy tests</h1> <span class=control_button onClick="hide_all('tst')">Shrink All</span> <span class=control_button onClick="show_all('tst')">Expand All</span> <span class=control_button onClick="show_passed('tst')">Expand Passed</span> <span class=control_button onClick="show_failed('tst')">Expand Failed</span> <p> """ for test_campaign in runned_campaigns: for ts in test_campaign: for t in ts: output += """<span class=button%(result)s onClick="goto_id('tst%(num)il')">%(num)03i</span>\n""" % t output += """</p>\n\n <link rel="stylesheet" href="%(UTscapy_css)s" type="text/css"> <script language="JavaScript" src="%(UTscapy_js)s" 
type="text/javascript"></script> </head> <body> %(data)s </body></html> """ out_dict = {'data': data, 'title': title if title else "UTScapy tests"} if local: dirname = os.path.dirname(test_campaign.output_file) External_Files.UTscapy_js.write(dirname) External_Files.UTscapy_css.write(dirname) out_dict.update(External_Files.get_local_dict()) else: out_dict.update(External_Files.get_URL_dict()) output %= out_dict return output def campaign_to_LATEX(test_campaign): output = r"""\documentclass{report} \usepackage{alltt} \usepackage{xcolor} \usepackage{a4wide} \usepackage{hyperref} \title{%(title)s} \date{%%s} \begin{document} \maketitle \tableofcontents \begin{description} \item[Passed:] %(passed)i \item[Failed:] %(failed)i \end{description} %(headcomments)s """ % test_campaign output %= info_line(test_campaign) for testset in test_campaign: output += "\\chapter{%(name)s}\n\n%(comments)s\n\n" % testset for t in testset: if t.expand: output += r"""\section{%(name)s} [%(num)03i] [%(result)s] %(comments)s \begin{alltt} %(output)s \end{alltt} """ % t output += "\\end{document}\n" return output # USAGE # def usage(): print("""Usage: UTscapy [-m module] [-f {text|ansi|HTML|LaTeX|live}] [-o output_file] [-t testfile] [-T testfile] [-k keywords [-k ...]] [-K keywords [-K ...]] [-l] [-b] [-d|-D] [-F] [-q[q]] [-i] [-P preexecute_python_code] [-c configfile] -t\t\t: provide test files (can be used many times) -T\t\t: if -t is used with *, remove a specific file (can be used many times) -l\t\t: generate local .js and .css files -F\t\t: expand only failed tests -b\t\t: don't stop at the first failed campaign -d\t\t: dump campaign -D\t\t: dump campaign and stop -R\t\t: dump campaign as reStructuredText -C\t\t: don't calculate CRC and SHA -c\t\t: load a .utsc config file -i\t\t: drop into Python interpreter if test failed -q\t\t: quiet mode -qq\t\t: [silent mode] -x\t\t: use pyannotate -n <testnum>\t: only tests whose numbers are given (eg. 
1,3-7,12) -N\t\t: force non root -m <module>\t: additional module to put in the namespace -k <kw1>,<kw2>,...\t: include only tests with one of those keywords (can be used many times) -K <kw1>,<kw2>,...\t: remove tests with one of those keywords (can be used many times) -P <preexecute_python_code> """) raise SystemExit # MAIN # def execute_campaign(TESTFILE, OUTPUTFILE, PREEXEC, NUM, KW_OK, KW_KO, DUMP, DOCS, FORMAT, VERB, ONLYFAILED, CRC, INTERPRETER, autorun_func, theme, pos_begin=0, ignore_globals=None, scapy_ses=None): # noqa: E501 # Parse test file try: test_campaign = parse_campaign_file(TESTFILE) except ValueError as ex: print( theme.red("Error while parsing '%s': '%s'" % (TESTFILE.name, ex)) ) sys.exit(1) # Report parameters if PREEXEC: test_campaign.preexec = PREEXEC # Compute campaign CRC and SHA if CRC: compute_campaign_digests(test_campaign) # Filter out unwanted tests filter_tests_on_numbers(test_campaign, NUM) for k in KW_OK: filter_tests_keep_on_keywords(test_campaign, k) for k in KW_KO: filter_tests_remove_on_keywords(test_campaign, k) remove_empty_testsets(test_campaign) # Dump campaign if DUMP: dump_campaign(test_campaign) if DUMP > 1: sys.exit() # Dump campaign as reStructuredText if DOCS: docs_campaign(test_campaign) sys.exit() # Run tests test_campaign.output_file = OUTPUTFILE result = run_campaign( test_campaign, autorun_func[FORMAT], theme, drop_to_interpreter=INTERPRETER, verb=VERB, ignore_globals=None, scapy_ses=scapy_ses ) # Shrink passed if ONLYFAILED: for t in test_campaign.all_tests(): if t: t.expand = 0 else: t.expand = 2 # Generate report if FORMAT == Format.TEXT: output = campaign_to_TEXT(test_campaign, theme) elif FORMAT == Format.ANSI: output = campaign_to_ANSI(test_campaign, theme) elif FORMAT == Format.HTML: test_campaign.startNum(pos_begin) output = campaign_to_HTML(test_campaign) elif FORMAT == Format.LATEX: output = campaign_to_LATEX(test_campaign) elif FORMAT == Format.XUNIT: output = campaign_to_xUNIT(test_campaign) elif FORMAT == Format.LIVE: output = "" return output, (result == 0), test_campaign def resolve_testfiles(TESTFILES): for tfile in TESTFILES[:]: if "*" in tfile: TESTFILES.remove(tfile) TESTFILES.extend(glob.glob(tfile)) return TESTFILES def main(): argv = sys.argv[1:] logger = logging.getLogger("scapy") logger.addHandler(logging.StreamHandler()) ignore_globals = list(six.moves.builtins.__dict__) import scapy print(dash + " UTScapy - Scapy %s - %s" % ( scapy.__version__, sys.version.split(" ")[0] )) # Parse arguments FORMAT = Format.ANSI OUTPUTFILE = sys.stdout LOCAL = 0 NUM = None NON_ROOT = False KW_OK = [] KW_KO = [] DUMP = 0 DOCS = 0 CRC = True BREAKFAILED = True ONLYFAILED = False VERB = 3 GLOB_PREEXEC = "" PREEXEC_DICT = {} MODULES = [] TESTFILES = [] ANNOTATIONS_MODE = False INTERPRETER = False try: opts = getopt.getopt(argv, "o:t:T:c:f:hbln:m:k:K:DRdCiFqNP:s:x") for opt, optarg in opts[0]: if opt == "-h": usage() elif opt == "-b": BREAKFAILED = False elif opt == "-F": ONLYFAILED = True elif opt == "-q": VERB -= 1 elif opt == "-D": DUMP = 2 elif opt == "-R": DOCS = 1 elif opt == "-d": DUMP = 1 elif opt == "-C": CRC = False elif opt == "-i": INTERPRETER = True elif opt == "-x": ANNOTATIONS_MODE = True elif opt == "-P": GLOB_PREEXEC += "\n" + optarg elif opt == "-f": try: FORMAT = Format.from_string(optarg) except KeyError as msg: raise getopt.GetoptError("Unknown output format %s" % msg) elif opt == "-t": TESTFILES.append(optarg) TESTFILES = resolve_testfiles(TESTFILES) elif opt == "-T": TESTFILES.remove(optarg) elif opt == "-c": 
data = parse_config_file(optarg, VERB) BREAKFAILED = data.breakfailed ONLYFAILED = data.onlyfailed VERB = data.verb DUMP = data.dump CRC = data.crc PREEXEC_DICT = data.preexec GLOB_PREEXEC = data.global_preexec OUTPUTFILE = data.outfile TESTFILES = data.testfiles LOCAL = 1 if data.local else 0 NUM = data.num MODULES = data.modules KW_OK.extend(data.kw_ok) KW_KO.extend(data.kw_ko) try: FORMAT = Format.from_string(data.format) except KeyError as msg: raise getopt.GetoptError("Unknown output format %s" % msg) TESTFILES = resolve_testfiles(TESTFILES) for testfile in resolve_testfiles(data.remove_testfiles): try: TESTFILES.remove(testfile) except ValueError: error_m = "Cannot remove %s from test files" % testfile raise getopt.GetoptError(error_m) elif opt == "-o": OUTPUTFILE = optarg if not os.access(os.path.dirname(os.path.abspath(OUTPUTFILE)), os.W_OK): raise getopt.GetoptError("Cannot write to file %s" % OUTPUTFILE) elif opt == "-l": LOCAL = 1 elif opt == "-n": NUM = [] for v in (x.strip() for x in optarg.split(",")): try: NUM.append(int(v)) except ValueError: v1, v2 = [int(e) for e in v.split('-', 1)] NUM.extend(range(v1, v2 + 1)) elif opt == "-N": NON_ROOT = True elif opt == "-m": MODULES.append(optarg) elif opt == "-k": KW_OK.extend(optarg.split(",")) elif opt == "-K": KW_KO.extend(optarg.split(",")) except getopt.GetoptError as msg: print("ERROR:", msg) raise SystemExit if FORMAT in [Format.LIVE, Format.ANSI]: theme = DefaultTheme() else: theme = BlackAndWhite() # Disable tests if needed # Discard Python3 tests when using Python2 if six.PY2: KW_KO.append("python3_only") if VERB > 2: print(" " + arrow + " Python 2 mode") try: if NON_ROOT or os.getuid() != 0: # Non root # Discard root tests KW_KO.append("netaccess") KW_KO.append("needs_root") if VERB > 2: print(" " + arrow + " Non-root mode") except AttributeError: pass if conf.use_pcap: KW_KO.append("not_pcapdnet") if VERB > 2: print(" " + arrow + " libpcap mode") KW_KO.append("disabled") # Process extras if six.PY3: KW_KO.append("FIXME_py3") if ANNOTATIONS_MODE: try: from pyannotate_runtime import collect_types except ImportError: raise ImportError("Please install pyannotate !") collect_types.init_types_collection() collect_types.start() if VERB > 2: print(" " + arrow + " Booting scapy...") try: from scapy import all as scapy except Exception as e: print("[CRITICAL]: Cannot import Scapy: %s" % e) traceback.print_exc() sys.exit(1) # Abort the tests for m in MODULES: try: mod = import_module(m) six.moves.builtins.__dict__.update(mod.__dict__) except ImportError as e: raise getopt.GetoptError("cannot import [%s]: %s" % (m, e)) # Add SCAPY_ROOT_DIR environment variable, used for tests os.environ['SCAPY_ROOT_DIR'] = os.environ.get("PWD", os.getcwd()) autorun_func = { Format.TEXT: scapy.autorun_get_text_interactive_session, Format.ANSI: scapy.autorun_get_ansi_interactive_session, Format.HTML: scapy.autorun_get_html_interactive_session, Format.LATEX: scapy.autorun_get_latex_interactive_session, Format.XUNIT: scapy.autorun_get_text_interactive_session, Format.LIVE: scapy.autorun_get_live_interactive_session, } if VERB > 2: print(" " + arrow + " Discovering tests files...") glob_output = "" glob_result = 0 glob_title = None UNIQUE = len(TESTFILES) == 1 # Resolve tags and asterix for prex in six.iterkeys(copy.copy(PREEXEC_DICT)): if "*" in prex: pycode = PREEXEC_DICT[prex] del PREEXEC_DICT[prex] for gl in glob.iglob(prex): _pycode = pycode.replace("%name%", os.path.splitext(os.path.split(gl)[1])[0]) # noqa: E501 PREEXEC_DICT[gl] = _pycode 
pos_begin = 0 runned_campaigns = [] scapy_ses = importlib.import_module(".all", "scapy").__dict__ import_UTscapy_tools(scapy_ses) # Execute all files for TESTFILE in TESTFILES: if VERB > 2: print(theme.green(dash + " Loading: %s" % TESTFILE)) PREEXEC = PREEXEC_DICT[TESTFILE] if TESTFILE in PREEXEC_DICT else GLOB_PREEXEC with open(TESTFILE) as testfile: output, result, campaign = execute_campaign( testfile, OUTPUTFILE, PREEXEC, NUM, KW_OK, KW_KO, DUMP, DOCS, FORMAT, VERB, ONLYFAILED, CRC, INTERPRETER, autorun_func, theme, pos_begin=pos_begin, ignore_globals=ignore_globals, scapy_ses=copy.copy(scapy_ses) ) runned_campaigns.append(campaign) pos_begin = campaign.end_pos if UNIQUE: glob_title = campaign.title glob_output += output if not result: glob_result = 1 if BREAKFAILED: break if VERB > 2: print( checkmark + " All campaigns executed. Writing output..." ) if ANNOTATIONS_MODE: collect_types.stop() collect_types.dump_stats("pyannotate_results") # Concenate outputs if FORMAT == Format.HTML: glob_output = pack_html_campaigns(runned_campaigns, glob_output, LOCAL, glob_title) # Write the final output # Note: on Python 2, we force-encode to ignore ascii errors # on Python 3, we need to detect the type of stream if OUTPUTFILE == sys.stdout: print(glob_output, file=OUTPUTFILE) else: with open(OUTPUTFILE, "wb") as f: f.write(glob_output.encode("utf8", "ignore") if 'b' in f.mode or six.PY2 else glob_output) # Delete scapy's test environment vars del os.environ['SCAPY_ROOT_DIR'] # Print end message if VERB > 2: if glob_result == 0: print(theme.green("UTscapy ended successfully")) else: print(theme.red("UTscapy ended with error code %s" % glob_result)) # Check active threads if VERB > 2: import threading if threading.active_count() > 1: print("\nWARNING: UNFINISHED THREADS") print(threading.enumerate()) # Return state return glob_result if __name__ == "__main__": if sys.warnoptions: with warnings.catch_warnings(record=True) as cw: warnings.resetwarnings() # Let's discover the garbage waste warnings.simplefilter('error') print("### Warning mode enabled ###") res = main() if cw: res = 1 sys.exit(res) else: sys.exit(main())
1
18,511
Could you add a docstring?
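One way the request could be satisfied for the `scapy_path` helper introduced in the patch above; the body is copied from the patch, and only the docstring wording here is illustrative:

```python
import os


def scapy_path(fname):
    """Resolve a path relative to Scapy's root folder.

    :param fname: path relative to the repository root; a leading '/'
        is tolerated and stripped.
    :return: the corresponding absolute path, anchored two directories
        above this file.
    """
    if fname.startswith('/'):
        fname = fname[1:]
    return os.path.abspath(os.path.join(
        os.path.dirname(__file__), '../../', fname
    ))
```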
secdev-scapy
py
@@ -23,4 +23,5 @@ module ApplicationHelper def display_search_ui? current_user && current_user.client_model && !client_disabled? end + end
1
module ApplicationHelper def controller_name params[:controller].gsub(/\W/, "-") end def display_return_to_proposal controller.is_a?(ProposalsController) && params[:action] == "history" end def display_return_to_proposals controller.is_a?(ClientDataController) || (controller.is_a?(ProposalsController) && params[:action] != "index") end def auth_path "/auth/myusa" end def display_profile_warning? !current_page?(profile_path) && current_user && current_user.requires_profile_attention? end def display_search_ui? current_user && current_user.client_model && !client_disabled? end end
1
16,698
looks like you added newlines after blocks in a few files - I generally like newlines before/after multi-line blocks _except_ when the end of the block is directly nested inside another block (e.g. two `end`s next to each other). What do you think?
18F-C2
rb
@@ -46,7 +46,7 @@ public class EqualsVisitor implements GenericVisitor<Boolean, Visitable> { } private EqualsVisitor() { - // hide constructor + // hide constructor } /**
1
/* * Copyright (C) 2007-2010 Júlio Vilmar Gesser. * Copyright (C) 2011, 2013-2020 The JavaParser Team. * * This file is part of JavaParser. * * JavaParser can be used either under the terms of * a) the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * b) the terms of the Apache License * * You should have received a copy of both licenses in LICENCE.LGPL and * LICENCE.APACHE. Please refer to those files for details. * * JavaParser is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. */ package com.github.javaparser.ast.visitor; import com.github.javaparser.ast.*; import com.github.javaparser.ast.body.*; import com.github.javaparser.ast.comments.BlockComment; import com.github.javaparser.ast.comments.JavadocComment; import com.github.javaparser.ast.comments.LineComment; import com.github.javaparser.ast.expr.*; import com.github.javaparser.ast.modules.*; import com.github.javaparser.ast.stmt.*; import com.github.javaparser.ast.type.*; import java.util.List; import java.util.Optional; /** * A visitor that calculates deep node equality by comparing all properties and child nodes of the node. * * @author Julio Vilmar Gesser */ public class EqualsVisitor implements GenericVisitor<Boolean, Visitable> { private static final EqualsVisitor SINGLETON = new EqualsVisitor(); public static boolean equals(final Node n, final Node n2) { return SINGLETON.nodeEquals(n, n2); } private EqualsVisitor() { // hide constructor } /** * Check for equality that can be applied to each kind of node, * to not repeat it in every method we store that here. 
*/ private boolean commonNodeEquality(Node n, Node n2) { if (!nodeEquals(n.getComment(), n2.getComment())) { return false; } return nodesEquals(n.getOrphanComments(), n2.getOrphanComments()); } private <T extends Node> boolean nodesEquals(final List<T> nodes1, final List<T> nodes2) { if (nodes1 == null) { return nodes2 == null; } else if (nodes2 == null) { return false; } if (nodes1.size() != nodes2.size()) { return false; } for (int i = 0; i < nodes1.size(); i++) { if (!nodeEquals(nodes1.get(i), nodes2.get(i))) { return false; } } return true; } private <N extends Node> boolean nodesEquals(NodeList<N> n, NodeList<N> n2) { if (n == n2) { return true; } if (n == null || n2 == null) { return false; } if (n.size() != n2.size()) { return false; } for (int i = 0; i < n.size(); i++) { if (!nodeEquals(n.get(i), n2.get(i))) { return false; } } return true; } private <T extends Node> boolean nodeEquals(final T n, final T n2) { if (n == n2) { return true; } if (n == null || n2 == null) { return false; } if (n.getClass() != n2.getClass()) { return false; } if (!commonNodeEquality(n, n2)) { return false; } return n.accept(this, n2); } private <T extends Node> boolean nodeEquals(final Optional<T> n, final Optional<T> n2) { return nodeEquals(n.orElse(null), n2.orElse(null)); } private <T extends Node> boolean nodesEquals(final Optional<NodeList<T>> n, final Optional<NodeList<T>> n2) { return nodesEquals(n.orElse(null), n2.orElse(null)); } private boolean objEquals(final Object n, final Object n2) { if (n == n2) { return true; } if (n == null || n2 == null) { return false; } return n.equals(n2); } @Override public Boolean visit(final CompilationUnit n, final Visitable arg) { final CompilationUnit n2 = (CompilationUnit) arg; if (!nodesEquals(n.getImports(), n2.getImports())) return false; if (!nodeEquals(n.getModule(), n2.getModule())) return false; if (!nodeEquals(n.getPackageDeclaration(), n2.getPackageDeclaration())) return false; if (!nodesEquals(n.getTypes(), n2.getTypes())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final PackageDeclaration n, final Visitable arg) { final PackageDeclaration n2 = (PackageDeclaration) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final TypeParameter n, final Visitable arg) { final TypeParameter n2 = (TypeParameter) arg; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodesEquals(n.getTypeBound(), n2.getTypeBound())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final LineComment n, final Visitable arg) { final LineComment n2 = (LineComment) arg; if (!objEquals(n.getContent(), n2.getContent())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final BlockComment n, final Visitable arg) { final BlockComment n2 = (BlockComment) arg; if (!objEquals(n.getContent(), n2.getContent())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ClassOrInterfaceDeclaration n, final Visitable arg) { final ClassOrInterfaceDeclaration n2 = (ClassOrInterfaceDeclaration) arg; if 
(!nodesEquals(n.getExtendedTypes(), n2.getExtendedTypes())) return false; if (!nodesEquals(n.getImplementedTypes(), n2.getImplementedTypes())) return false; if (!objEquals(n.isInterface(), n2.isInterface())) return false; if (!nodesEquals(n.getTypeParameters(), n2.getTypeParameters())) return false; if (!nodesEquals(n.getMembers(), n2.getMembers())) return false; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final EnumDeclaration n, final Visitable arg) { final EnumDeclaration n2 = (EnumDeclaration) arg; if (!nodesEquals(n.getEntries(), n2.getEntries())) return false; if (!nodesEquals(n.getImplementedTypes(), n2.getImplementedTypes())) return false; if (!nodesEquals(n.getMembers(), n2.getMembers())) return false; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final EnumConstantDeclaration n, final Visitable arg) { final EnumConstantDeclaration n2 = (EnumConstantDeclaration) arg; if (!nodesEquals(n.getArguments(), n2.getArguments())) return false; if (!nodesEquals(n.getClassBody(), n2.getClassBody())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final AnnotationDeclaration n, final Visitable arg) { final AnnotationDeclaration n2 = (AnnotationDeclaration) arg; if (!nodesEquals(n.getMembers(), n2.getMembers())) return false; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final AnnotationMemberDeclaration n, final Visitable arg) { final AnnotationMemberDeclaration n2 = (AnnotationMemberDeclaration) arg; if (!nodeEquals(n.getDefaultValue(), n2.getDefaultValue())) return false; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final FieldDeclaration n, final Visitable arg) { final FieldDeclaration n2 = (FieldDeclaration) arg; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodesEquals(n.getVariables(), n2.getVariables())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final VariableDeclarator n, final Visitable arg) { final VariableDeclarator n2 = (VariableDeclarator) arg; if (!nodeEquals(n.getInitializer(), n2.getInitializer())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getType(), 
n2.getType())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ConstructorDeclaration n, final Visitable arg) { final ConstructorDeclaration n2 = (ConstructorDeclaration) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodesEquals(n.getParameters(), n2.getParameters())) return false; if (!nodeEquals(n.getReceiverParameter(), n2.getReceiverParameter())) return false; if (!nodesEquals(n.getThrownExceptions(), n2.getThrownExceptions())) return false; if (!nodesEquals(n.getTypeParameters(), n2.getTypeParameters())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final MethodDeclaration n, final Visitable arg) { final MethodDeclaration n2 = (MethodDeclaration) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodesEquals(n.getParameters(), n2.getParameters())) return false; if (!nodeEquals(n.getReceiverParameter(), n2.getReceiverParameter())) return false; if (!nodesEquals(n.getThrownExceptions(), n2.getThrownExceptions())) return false; if (!nodesEquals(n.getTypeParameters(), n2.getTypeParameters())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final Parameter n, final Visitable arg) { final Parameter n2 = (Parameter) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!objEquals(n.isVarArgs(), n2.isVarArgs())) return false; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodesEquals(n.getVarArgsAnnotations(), n2.getVarArgsAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final InitializerDeclaration n, final Visitable arg) { final InitializerDeclaration n2 = (InitializerDeclaration) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!objEquals(n.isStatic(), n2.isStatic())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final JavadocComment n, final Visitable arg) { final JavadocComment n2 = (JavadocComment) arg; if (!objEquals(n.getContent(), n2.getContent())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ClassOrInterfaceType n, final Visitable arg) { final ClassOrInterfaceType n2 = (ClassOrInterfaceType) arg; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getScope(), n2.getScope())) return false; if (!nodesEquals(n.getTypeArguments(), n2.getTypeArguments())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public 
Boolean visit(final PrimitiveType n, final Visitable arg) { final PrimitiveType n2 = (PrimitiveType) arg; if (!objEquals(n.getType(), n2.getType())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ArrayType n, final Visitable arg) { final ArrayType n2 = (ArrayType) arg; if (!nodeEquals(n.getComponentType(), n2.getComponentType())) return false; if (!objEquals(n.getOrigin(), n2.getOrigin())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ArrayCreationLevel n, final Visitable arg) { final ArrayCreationLevel n2 = (ArrayCreationLevel) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getDimension(), n2.getDimension())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final IntersectionType n, final Visitable arg) { final IntersectionType n2 = (IntersectionType) arg; if (!nodesEquals(n.getElements(), n2.getElements())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final UnionType n, final Visitable arg) { final UnionType n2 = (UnionType) arg; if (!nodesEquals(n.getElements(), n2.getElements())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final VoidType n, final Visitable arg) { final VoidType n2 = (VoidType) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final WildcardType n, final Visitable arg) { final WildcardType n2 = (WildcardType) arg; if (!nodeEquals(n.getExtendedType(), n2.getExtendedType())) return false; if (!nodeEquals(n.getSuperType(), n2.getSuperType())) return false; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final UnknownType n, final Visitable arg) { final UnknownType n2 = (UnknownType) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ArrayAccessExpr n, final Visitable arg) { final ArrayAccessExpr n2 = (ArrayAccessExpr) arg; if (!nodeEquals(n.getIndex(), n2.getIndex())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ArrayCreationExpr n, final Visitable arg) { final ArrayCreationExpr n2 = (ArrayCreationExpr) arg; if (!nodeEquals(n.getElementType(), n2.getElementType())) return false; if (!nodeEquals(n.getInitializer(), n2.getInitializer())) return false; if (!nodesEquals(n.getLevels(), n2.getLevels())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ArrayInitializerExpr n, final Visitable arg) { final ArrayInitializerExpr 
n2 = (ArrayInitializerExpr) arg; if (!nodesEquals(n.getValues(), n2.getValues())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final AssignExpr n, final Visitable arg) { final AssignExpr n2 = (AssignExpr) arg; if (!objEquals(n.getOperator(), n2.getOperator())) return false; if (!nodeEquals(n.getTarget(), n2.getTarget())) return false; if (!nodeEquals(n.getValue(), n2.getValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final BinaryExpr n, final Visitable arg) { final BinaryExpr n2 = (BinaryExpr) arg; if (!nodeEquals(n.getLeft(), n2.getLeft())) return false; if (!objEquals(n.getOperator(), n2.getOperator())) return false; if (!nodeEquals(n.getRight(), n2.getRight())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final CastExpr n, final Visitable arg) { final CastExpr n2 = (CastExpr) arg; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ClassExpr n, final Visitable arg) { final ClassExpr n2 = (ClassExpr) arg; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ConditionalExpr n, final Visitable arg) { final ConditionalExpr n2 = (ConditionalExpr) arg; if (!nodeEquals(n.getCondition(), n2.getCondition())) return false; if (!nodeEquals(n.getElseExpr(), n2.getElseExpr())) return false; if (!nodeEquals(n.getThenExpr(), n2.getThenExpr())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final EnclosedExpr n, final Visitable arg) { final EnclosedExpr n2 = (EnclosedExpr) arg; if (!nodeEquals(n.getInner(), n2.getInner())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final FieldAccessExpr n, final Visitable arg) { final FieldAccessExpr n2 = (FieldAccessExpr) arg; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getScope(), n2.getScope())) return false; if (!nodesEquals(n.getTypeArguments(), n2.getTypeArguments())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final InstanceOfExpr n, final Visitable arg) { final InstanceOfExpr n2 = (InstanceOfExpr) arg; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final StringLiteralExpr n, final Visitable arg) { final StringLiteralExpr n2 = (StringLiteralExpr) arg; if (!objEquals(n.getValue(), n2.getValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final IntegerLiteralExpr n, final Visitable arg) { final IntegerLiteralExpr n2 = (IntegerLiteralExpr) arg; if (!objEquals(n.getValue(), n2.getValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final LongLiteralExpr n, final Visitable arg) { final LongLiteralExpr n2 = 
(LongLiteralExpr) arg; if (!objEquals(n.getValue(), n2.getValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final CharLiteralExpr n, final Visitable arg) { final CharLiteralExpr n2 = (CharLiteralExpr) arg; if (!objEquals(n.getValue(), n2.getValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final DoubleLiteralExpr n, final Visitable arg) { final DoubleLiteralExpr n2 = (DoubleLiteralExpr) arg; if (!objEquals(n.getValue(), n2.getValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final BooleanLiteralExpr n, final Visitable arg) { final BooleanLiteralExpr n2 = (BooleanLiteralExpr) arg; if (!objEquals(n.isValue(), n2.isValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final NullLiteralExpr n, final Visitable arg) { final NullLiteralExpr n2 = (NullLiteralExpr) arg; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final MethodCallExpr n, final Visitable arg) { final MethodCallExpr n2 = (MethodCallExpr) arg; if (!nodesEquals(n.getArguments(), n2.getArguments())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getScope(), n2.getScope())) return false; if (!nodesEquals(n.getTypeArguments(), n2.getTypeArguments())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final NameExpr n, final Visitable arg) { final NameExpr n2 = (NameExpr) arg; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ObjectCreationExpr n, final Visitable arg) { final ObjectCreationExpr n2 = (ObjectCreationExpr) arg; if (!nodesEquals(n.getAnonymousClassBody(), n2.getAnonymousClassBody())) return false; if (!nodesEquals(n.getArguments(), n2.getArguments())) return false; if (!nodeEquals(n.getScope(), n2.getScope())) return false; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodesEquals(n.getTypeArguments(), n2.getTypeArguments())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final Name n, final Visitable arg) { final Name n2 = (Name) arg; if (!objEquals(n.getIdentifier(), n2.getIdentifier())) return false; if (!nodeEquals(n.getQualifier(), n2.getQualifier())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final SimpleName n, final Visitable arg) { final SimpleName n2 = (SimpleName) arg; if (!objEquals(n.getIdentifier(), n2.getIdentifier())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ThisExpr n, final Visitable arg) { final ThisExpr n2 = (ThisExpr) arg; if (!nodeEquals(n.getTypeName(), n2.getTypeName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final SuperExpr n, final Visitable arg) { final SuperExpr n2 = (SuperExpr) arg; if (!nodeEquals(n.getTypeName(), n2.getTypeName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } 
@Override public Boolean visit(final UnaryExpr n, final Visitable arg) { final UnaryExpr n2 = (UnaryExpr) arg; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!objEquals(n.getOperator(), n2.getOperator())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final VariableDeclarationExpr n, final Visitable arg) { final VariableDeclarationExpr n2 = (VariableDeclarationExpr) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodesEquals(n.getVariables(), n2.getVariables())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final MarkerAnnotationExpr n, final Visitable arg) { final MarkerAnnotationExpr n2 = (MarkerAnnotationExpr) arg; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final SingleMemberAnnotationExpr n, final Visitable arg) { final SingleMemberAnnotationExpr n2 = (SingleMemberAnnotationExpr) arg; if (!nodeEquals(n.getMemberValue(), n2.getMemberValue())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final NormalAnnotationExpr n, final Visitable arg) { final NormalAnnotationExpr n2 = (NormalAnnotationExpr) arg; if (!nodesEquals(n.getPairs(), n2.getPairs())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final MemberValuePair n, final Visitable arg) { final MemberValuePair n2 = (MemberValuePair) arg; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getValue(), n2.getValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ExplicitConstructorInvocationStmt n, final Visitable arg) { final ExplicitConstructorInvocationStmt n2 = (ExplicitConstructorInvocationStmt) arg; if (!nodesEquals(n.getArguments(), n2.getArguments())) return false; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!objEquals(n.isThis(), n2.isThis())) return false; if (!nodesEquals(n.getTypeArguments(), n2.getTypeArguments())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final LocalClassDeclarationStmt n, final Visitable arg) { final LocalClassDeclarationStmt n2 = (LocalClassDeclarationStmt) arg; if (!nodeEquals(n.getClassDeclaration(), n2.getClassDeclaration())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final AssertStmt n, final Visitable arg) { final AssertStmt n2 = (AssertStmt) arg; if (!nodeEquals(n.getCheck(), n2.getCheck())) return false; if (!nodeEquals(n.getMessage(), n2.getMessage())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final BlockStmt n, final Visitable arg) { final BlockStmt n2 = (BlockStmt) arg; if (!nodesEquals(n.getStatements(), n2.getStatements())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean 
visit(final LabeledStmt n, final Visitable arg) { final LabeledStmt n2 = (LabeledStmt) arg; if (!nodeEquals(n.getLabel(), n2.getLabel())) return false; if (!nodeEquals(n.getStatement(), n2.getStatement())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final EmptyStmt n, final Visitable arg) { final EmptyStmt n2 = (EmptyStmt) arg; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ExpressionStmt n, final Visitable arg) { final ExpressionStmt n2 = (ExpressionStmt) arg; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final SwitchStmt n, final Visitable arg) { final SwitchStmt n2 = (SwitchStmt) arg; if (!nodesEquals(n.getEntries(), n2.getEntries())) return false; if (!nodeEquals(n.getSelector(), n2.getSelector())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final SwitchEntry n, final Visitable arg) { final SwitchEntry n2 = (SwitchEntry) arg; if (!nodesEquals(n.getLabels(), n2.getLabels())) return false; if (!nodesEquals(n.getStatements(), n2.getStatements())) return false; if (!objEquals(n.getType(), n2.getType())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final BreakStmt n, final Visitable arg) { final BreakStmt n2 = (BreakStmt) arg; if (!nodeEquals(n.getLabel(), n2.getLabel())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ReturnStmt n, final Visitable arg) { final ReturnStmt n2 = (ReturnStmt) arg; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final IfStmt n, final Visitable arg) { final IfStmt n2 = (IfStmt) arg; if (!nodeEquals(n.getCondition(), n2.getCondition())) return false; if (!nodeEquals(n.getElseStmt(), n2.getElseStmt())) return false; if (!nodeEquals(n.getThenStmt(), n2.getThenStmt())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final WhileStmt n, final Visitable arg) { final WhileStmt n2 = (WhileStmt) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!nodeEquals(n.getCondition(), n2.getCondition())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ContinueStmt n, final Visitable arg) { final ContinueStmt n2 = (ContinueStmt) arg; if (!nodeEquals(n.getLabel(), n2.getLabel())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final DoStmt n, final Visitable arg) { final DoStmt n2 = (DoStmt) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!nodeEquals(n.getCondition(), n2.getCondition())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ForEachStmt n, final Visitable arg) { final ForEachStmt n2 = (ForEachStmt) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!nodeEquals(n.getIterable(), n2.getIterable())) return false; if (!nodeEquals(n.getVariable(), n2.getVariable())) return 
false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ForStmt n, final Visitable arg) { final ForStmt n2 = (ForStmt) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!nodeEquals(n.getCompare(), n2.getCompare())) return false; if (!nodesEquals(n.getInitialization(), n2.getInitialization())) return false; if (!nodesEquals(n.getUpdate(), n2.getUpdate())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ThrowStmt n, final Visitable arg) { final ThrowStmt n2 = (ThrowStmt) arg; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final SynchronizedStmt n, final Visitable arg) { final SynchronizedStmt n2 = (SynchronizedStmt) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final TryStmt n, final Visitable arg) { final TryStmt n2 = (TryStmt) arg; if (!nodesEquals(n.getCatchClauses(), n2.getCatchClauses())) return false; if (!nodeEquals(n.getFinallyBlock(), n2.getFinallyBlock())) return false; if (!nodesEquals(n.getResources(), n2.getResources())) return false; if (!nodeEquals(n.getTryBlock(), n2.getTryBlock())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final CatchClause n, final Visitable arg) { final CatchClause n2 = (CatchClause) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!nodeEquals(n.getParameter(), n2.getParameter())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final LambdaExpr n, final Visitable arg) { final LambdaExpr n2 = (LambdaExpr) arg; if (!nodeEquals(n.getBody(), n2.getBody())) return false; if (!objEquals(n.isEnclosingParameters(), n2.isEnclosingParameters())) return false; if (!nodesEquals(n.getParameters(), n2.getParameters())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final MethodReferenceExpr n, final Visitable arg) { final MethodReferenceExpr n2 = (MethodReferenceExpr) arg; if (!objEquals(n.getIdentifier(), n2.getIdentifier())) return false; if (!nodeEquals(n.getScope(), n2.getScope())) return false; if (!nodesEquals(n.getTypeArguments(), n2.getTypeArguments())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final TypeExpr n, final Visitable arg) { final TypeExpr n2 = (TypeExpr) arg; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ImportDeclaration n, final Visitable arg) { final ImportDeclaration n2 = (ImportDeclaration) arg; if (!objEquals(n.isAsterisk(), n2.isAsterisk())) return false; if (!objEquals(n.isStatic(), n2.isStatic())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(NodeList n, Visitable arg) { return nodesEquals((NodeList<Node>) n, (NodeList<Node>) arg); } @Override public Boolean 
visit(final ModuleDeclaration n, final Visitable arg) { final ModuleDeclaration n2 = (ModuleDeclaration) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodesEquals(n.getDirectives(), n2.getDirectives())) return false; if (!objEquals(n.isOpen(), n2.isOpen())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ModuleRequiresDirective n, final Visitable arg) { final ModuleRequiresDirective n2 = (ModuleRequiresDirective) arg; if (!nodesEquals(n.getModifiers(), n2.getModifiers())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override() public Boolean visit(final ModuleExportsDirective n, final Visitable arg) { final ModuleExportsDirective n2 = (ModuleExportsDirective) arg; if (!nodesEquals(n.getModuleNames(), n2.getModuleNames())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override() public Boolean visit(final ModuleProvidesDirective n, final Visitable arg) { final ModuleProvidesDirective n2 = (ModuleProvidesDirective) arg; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodesEquals(n.getWith(), n2.getWith())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override() public Boolean visit(final ModuleUsesDirective n, final Visitable arg) { final ModuleUsesDirective n2 = (ModuleUsesDirective) arg; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ModuleOpensDirective n, final Visitable arg) { final ModuleOpensDirective n2 = (ModuleOpensDirective) arg; if (!nodesEquals(n.getModuleNames(), n2.getModuleNames())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final UnparsableStmt n, final Visitable arg) { final UnparsableStmt n2 = (UnparsableStmt) arg; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final ReceiverParameter n, final Visitable arg) { final ReceiverParameter n2 = (ReceiverParameter) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getName(), n2.getName())) return false; if (!nodeEquals(n.getType(), n2.getType())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final VarType n, final Visitable arg) { final VarType n2 = (VarType) arg; if (!nodesEquals(n.getAnnotations(), n2.getAnnotations())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final Modifier n, final Visitable arg) { final Modifier n2 = (Modifier) arg; if (!objEquals(n.getKeyword(), n2.getKeyword())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final SwitchExpr n, final Visitable arg) { final SwitchExpr n2 = (SwitchExpr) arg; if (!nodesEquals(n.getEntries(), n2.getEntries())) return false; if (!nodeEquals(n.getSelector(), n2.getSelector())) return false; if (!nodeEquals(n.getComment(), 
n2.getComment())) return false; return true; } @Override public Boolean visit(final YieldStmt n, final Visitable arg) { final YieldStmt n2 = (YieldStmt) arg; if (!nodeEquals(n.getExpression(), n2.getExpression())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } @Override public Boolean visit(final TextBlockLiteralExpr n, final Visitable arg) { final TextBlockLiteralExpr n2 = (TextBlockLiteralExpr) arg; if (!objEquals(n.getValue(), n2.getValue())) return false; if (!nodeEquals(n.getComment(), n2.getComment())) return false; return true; } }
1
14,037
@jlerbsc's fix in #2918 in action - thanks! :smiling_face_with_three_hearts:
javaparser-javaparser
java
@@ -33,12 +33,15 @@ class ArgInfo: """Information about an argument.""" - def __init__(self, win_id=False, count=False, flag=None, hide=False, - metavar=None, completion=None, choices=None): + def __init__(self, win_id=False, count=False, hide=False, metavar=None, + zero_count=False, flag=None, completion=None, choices=None): if win_id and count: raise TypeError("Argument marked as both count/win_id!") + if zero_count and not count: + raise TypeError("Zero_count Argument cannot exist without count!") self.win_id = win_id self.count = count + self.zero_count = zero_count self.flag = flag self.hide = hide self.metavar = metavar
1
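For readers skimming this row, here is a minimal sketch of the behaviour the patch above introduces. The class below is a hypothetical, stripped-down stand-in for qutebrowser's ArgInfo that keeps only the two arguments involved in the change; it is not the real module, just the new guard in isolation.

# Hypothetical, stripped-down reproduction of the patched ArgInfo.__init__
# from the diff above; only the count/zero_count handling is kept.
class ArgInfo:

    def __init__(self, count=False, zero_count=False):
        if zero_count and not count:
            raise TypeError("Zero_count Argument cannot exist without count!")
        self.count = count
        self.zero_count = zero_count


ArgInfo(count=True, zero_count=True)   # accepted: zero_count refines count
try:
    ArgInfo(zero_count=True)           # rejected: zero_count needs count
except TypeError as exc:
    print(exc)

The guard is there presumably because zero_count only changes how a count argument is interpreted, so it has nothing to attach to when count is absent.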
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Contains the Command class, a skeleton for a command.""" import inspect import collections import traceback from qutebrowser.commands import cmdexc, argparser from qutebrowser.utils import (log, utils, message, docutils, objreg, usertypes, typing) from qutebrowser.utils import debug as debug_utils class ArgInfo: """Information about an argument.""" def __init__(self, win_id=False, count=False, flag=None, hide=False, metavar=None, completion=None, choices=None): if win_id and count: raise TypeError("Argument marked as both count/win_id!") self.win_id = win_id self.count = count self.flag = flag self.hide = hide self.metavar = metavar self.completion = completion self.choices = choices def __eq__(self, other): return (self.win_id == other.win_id and self.count == other.count and self.flag == other.flag and self.hide == other.hide and self.metavar == other.metavar and self.completion == other.completion and self.choices == other.choices) def __repr__(self): return utils.get_repr(self, win_id=self.win_id, count=self.count, flag=self.flag, hide=self.hide, metavar=self.metavar, completion=self.completion, choices=self.choices, constructor=True) class Command: """Base skeleton for a command. Attributes: name: The main name of the command. maxsplit: The maximum amount of splits to do for the commandline, or None. hide: Whether to hide the arguments or not. deprecated: False, or a string to describe why a command is deprecated. desc: The description of the command. handler: The handler function to call. debug: Whether this is a debugging command (only shown with --debug). parser: The ArgumentParser to use to parse this command. flags_with_args: A list of flags which take an argument. no_cmd_split: If true, ';;' to split sub-commands is ignored. backend: Which backend the command works with (or None if it works with both) no_replace_variables: Don't replace variables like {url} _qute_args: The saved data from @cmdutils.argument _modes: The modes the command can be executed in. _not_modes: The modes the command can not be executed in. _count: The count set for the command. _instance: The object to bind 'self' to. _scope: The scope to get _instance for in the object registry. """ def __init__(self, *, handler, name, instance=None, maxsplit=None, hide=False, modes=None, not_modes=None, debug=False, ignore_args=False, deprecated=False, no_cmd_split=False, star_args_optional=False, scope='global', backend=None, no_replace_variables=False): # I really don't know how to solve this in a better way, I tried. 
# pylint: disable=too-many-locals if modes is not None and not_modes is not None: raise ValueError("Only modes or not_modes can be given!") if modes is not None: for m in modes: if not isinstance(m, usertypes.KeyMode): raise TypeError("Mode {} is no KeyMode member!".format(m)) if not_modes is not None: for m in not_modes: if not isinstance(m, usertypes.KeyMode): raise TypeError("Mode {} is no KeyMode member!".format(m)) if scope != 'global' and instance is None: raise ValueError("Setting scope without setting instance makes " "no sense!") self.name = name self.maxsplit = maxsplit self.hide = hide self.deprecated = deprecated self._instance = instance self._modes = modes self._not_modes = not_modes self._scope = scope self._star_args_optional = star_args_optional self.debug = debug self.ignore_args = ignore_args self.handler = handler self.no_cmd_split = no_cmd_split self.backend = backend self.no_replace_variables = no_replace_variables self.docparser = docutils.DocstringParser(handler) self.parser = argparser.ArgumentParser( name, description=self.docparser.short_desc, epilog=self.docparser.long_desc) self.parser.add_argument('-h', '--help', action=argparser.HelpAction, default=argparser.SUPPRESS, nargs=0, help=argparser.SUPPRESS) self._check_func() self.opt_args = collections.OrderedDict() self.namespace = None self._count = None self.pos_args = [] self.desc = None self.flags_with_args = [] # This is checked by future @cmdutils.argument calls so they fail # (as they'd be silently ignored otherwise) self._qute_args = getattr(self.handler, 'qute_args', {}) self.handler.qute_args = None self._inspect_func() def _check_prerequisites(self, win_id): """Check if the command is permitted to run currently. Args: win_id: The window ID the command is run in. 
""" mode_manager = objreg.get('mode-manager', scope='window', window=win_id) curmode = mode_manager.mode if self._modes is not None and curmode not in self._modes: mode_names = '/'.join(mode.name for mode in self._modes) raise cmdexc.PrerequisitesError( "{}: This command is only allowed in {} mode.".format( self.name, mode_names)) elif self._not_modes is not None and curmode in self._not_modes: mode_names = '/'.join(mode.name for mode in self._not_modes) raise cmdexc.PrerequisitesError( "{}: This command is not allowed in {} mode.".format( self.name, mode_names)) used_backend = usertypes.arg2backend[objreg.get('args').backend] if self.backend is not None and used_backend != self.backend: raise cmdexc.PrerequisitesError( "{}: Only available with {} " "backend.".format(self.name, self.backend.name)) if self.deprecated: message.warning(win_id, '{} is deprecated - {}'.format( self.name, self.deprecated)) def _check_func(self): """Make sure the function parameters don't violate any rules.""" signature = inspect.signature(self.handler) if 'self' in signature.parameters and self._instance is None: raise TypeError("{} is a class method, but instance was not " "given!".format(self.name[0])) elif 'self' not in signature.parameters and self._instance is not None: raise TypeError("{} is not a class method, but instance was " "given!".format(self.name[0])) elif any(param.kind == inspect.Parameter.VAR_KEYWORD for param in signature.parameters.values()): raise TypeError("{}: functions with varkw arguments are not " "supported!".format(self.name[0])) def get_arg_info(self, param): """Get an ArgInfo tuple for the given inspect.Parameter.""" return self._qute_args.get(param.name, ArgInfo()) def get_pos_arg_info(self, pos): """Get an ArgInfo tuple for the given positional parameter.""" name = self.pos_args[pos][0] return self._qute_args.get(name, ArgInfo()) def _inspect_special_param(self, param): """Check if the given parameter is a special one. Args: param: The inspect.Parameter to handle. Return: True if the parameter is special, False otherwise. """ arg_info = self.get_arg_info(param) if arg_info.count: if param.default is inspect.Parameter.empty: raise TypeError("{}: handler has count parameter " "without default!".format(self.name)) return True elif arg_info.win_id: return True def _inspect_func(self): """Inspect the function to get useful informations from it. Sets instance attributes (desc, type_conv, name_conv) based on the informations. Return: How many user-visible arguments the command has. """ signature = inspect.signature(self.handler) doc = inspect.getdoc(self.handler) if doc is not None: self.desc = doc.splitlines()[0].strip() else: self.desc = "" if not self.ignore_args: for param in signature.parameters.values(): # https://docs.python.org/3/library/inspect.html#inspect.Parameter.kind # "Python has no explicit syntax for defining positional-only # parameters, but many built-in and extension module functions # (especially those that accept only one or two parameters) # accept them." 
assert param.kind != inspect.Parameter.POSITIONAL_ONLY if param.name == 'self': continue if self._inspect_special_param(param): continue typ = self._get_type(param) is_bool = typ is bool kwargs = self._param_to_argparse_kwargs(param, is_bool) args = self._param_to_argparse_args(param, is_bool) callsig = debug_utils.format_call( self.parser.add_argument, args, kwargs, full=False) log.commands.vdebug('Adding arg {} of type {} -> {}'.format( param.name, typ, callsig)) self.parser.add_argument(*args, **kwargs) return signature.parameters.values() def _param_to_argparse_kwargs(self, param, is_bool): """Get argparse keyword arguments for a parameter. Args: param: The inspect.Parameter object to get the args for. is_bool: Whether the parameter is a boolean. Return: A kwargs dict. """ kwargs = {} try: kwargs['help'] = self.docparser.arg_descs[param.name] except KeyError: pass kwargs['dest'] = param.name arg_info = self.get_arg_info(param) if is_bool: kwargs['action'] = 'store_true' else: if arg_info.metavar is not None: kwargs['metavar'] = arg_info.metavar else: kwargs['metavar'] = argparser.arg_name(param.name) if param.kind == inspect.Parameter.VAR_POSITIONAL: kwargs['nargs'] = '*' if self._star_args_optional else '+' elif param.kind == inspect.Parameter.KEYWORD_ONLY: kwargs['default'] = param.default elif not is_bool and param.default is not inspect.Parameter.empty: kwargs['default'] = param.default kwargs['nargs'] = '?' return kwargs def _param_to_argparse_args(self, param, is_bool): """Get argparse positional arguments for a parameter. Args: param: The inspect.Parameter object to get the args for. is_bool: Whether the parameter is a boolean. Return: A list of args. """ args = [] name = argparser.arg_name(param.name) arg_info = self.get_arg_info(param) if arg_info.flag is not None: shortname = arg_info.flag else: shortname = name[0] if len(shortname) != 1: raise ValueError("Flag '{}' of parameter {} (command {}) must be " "exactly 1 char!".format(shortname, name, self.name)) if is_bool or param.kind == inspect.Parameter.KEYWORD_ONLY: long_flag = '--{}'.format(name) short_flag = '-{}'.format(shortname) args.append(long_flag) args.append(short_flag) self.opt_args[param.name] = long_flag, short_flag if not is_bool: self.flags_with_args += [short_flag, long_flag] else: if not arg_info.hide: self.pos_args.append((param.name, name)) return args def _get_type(self, param): """Get the type of an argument from its default value or annotation. Args: param: The inspect.Parameter to look at. """ arginfo = self.get_arg_info(param) if param.annotation is not inspect.Parameter.empty: return param.annotation elif param.default not in [None, inspect.Parameter.empty]: return type(param.default) elif arginfo.count or arginfo.win_id or param.kind in [ inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD]: return None else: return str def _get_self_arg(self, win_id, param, args): """Get the self argument for a function call. Arguments: win_id: The window id this command should be executed in. param: The count parameter. args: The positional argument list. Gets modified directly. 
""" assert param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD if self._scope == 'global': tab_id = None win_id = None elif self._scope == 'tab': tab_id = 'current' elif self._scope == 'window': tab_id = None else: raise ValueError("Invalid scope {}!".format(self._scope)) obj = objreg.get(self._instance, scope=self._scope, window=win_id, tab=tab_id) args.append(obj) def _get_count_arg(self, param, args, kwargs): """Add the count argument to a function call. Arguments: param: The count parameter. args: The positional argument list. Gets modified directly. kwargs: The keyword argument dict. Gets modified directly. """ if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: if self._count is not None: args.append(self._count) else: args.append(param.default) elif param.kind == inspect.Parameter.KEYWORD_ONLY: if self._count is not None: kwargs[param.name] = self._count else: raise TypeError("{}: invalid parameter type {} for argument " "{!r}!".format(self.name, param.kind, param.name)) def _get_win_id_arg(self, win_id, param, args, kwargs): """Add the win_id argument to a function call. Arguments: win_id: The window ID to add. param: The count parameter. args: The positional argument list. Gets modified directly. kwargs: The keyword argument dict. Gets modified directly. """ if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: args.append(win_id) elif param.kind == inspect.Parameter.KEYWORD_ONLY: kwargs[param.name] = win_id else: raise TypeError("{}: invalid parameter type {} for argument " "{!r}!".format(self.name, param.kind, param.name)) def _get_param_value(self, param): """Get the converted value for an inspect.Parameter.""" value = getattr(self.namespace, param.name) typ = self._get_type(param) if isinstance(typ, tuple): raise TypeError("{}: Legacy tuple type annotation!".format( self.name)) elif issubclass(typ, typing.Union): # this is... slightly evil, I know types = list(typ.__union_params__) # pylint: disable=no-member if param.default is not inspect.Parameter.empty: types.append(type(param.default)) choices = self.get_arg_info(param).choices value = argparser.multitype_conv(param, types, value, str_choices=choices) elif typ is str: choices = self.get_arg_info(param).choices value = argparser.type_conv(param, typ, value, str_choices=choices) elif typ is bool: # no type conversion for flags assert isinstance(value, bool) elif typ is None: pass else: value = argparser.type_conv(param, typ, value) return value def _get_call_args(self, win_id): """Get arguments for a function call. Args: win_id: The window id this command should be executed in. Return: An (args, kwargs) tuple. """ args = [] kwargs = {} signature = inspect.signature(self.handler) if self.ignore_args: if self._instance is not None: param = list(signature.parameters.values())[0] self._get_self_arg(win_id, param, args) return args, kwargs for i, param in enumerate(signature.parameters.values()): arg_info = self.get_arg_info(param) if i == 0 and self._instance is not None: # Special case for 'self'. self._get_self_arg(win_id, param, args) continue elif arg_info.count: # Special case for count parameter. self._get_count_arg(param, args, kwargs) continue # elif arg_info.win_id: elif arg_info.win_id: # Special case for win_id parameter. 
self._get_win_id_arg(win_id, param, args, kwargs) continue value = self._get_param_value(param) if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: args.append(value) elif param.kind == inspect.Parameter.VAR_POSITIONAL: if value is not None: args += value elif param.kind == inspect.Parameter.KEYWORD_ONLY: kwargs[param.name] = value else: raise TypeError("{}: Invalid parameter type {} for argument " "'{}'!".format( self.name, param.kind, param.name)) return args, kwargs def run(self, win_id, args=None, count=None): """Run the command. Note we don't catch CommandError here as it might happen async. Args: win_id: The window ID the command is run in. args: Arguments to the command. count: Command repetition count. """ dbgout = ["command called:", self.name] if args: dbgout.append(str(args)) elif args is None: args = [] if count is not None: dbgout.append("(count={})".format(count)) log.commands.debug(' '.join(dbgout)) try: self.namespace = self.parser.parse_args(args) except argparser.ArgumentParserError as e: message.error(win_id, '{}: {}'.format(self.name, e), stack=traceback.format_exc()) return except argparser.ArgumentParserExit as e: log.commands.debug("argparser exited with status {}: {}".format( e.status, e)) return self._count = count self._check_prerequisites(win_id) posargs, kwargs = self._get_call_args(win_id) log.commands.debug('Calling {}'.format( debug_utils.format_call(self.handler, posargs, kwargs))) self.handler(*posargs, **kwargs)
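One detail of Command._param_to_argparse_args in the file above is worth calling out: boolean and keyword-only parameters get a --long/-x flag pair, where the short flag is either the explicit ArgInfo.flag or the first character of the name, and anything that is not exactly one character is rejected. A standalone sketch of that rule follows; the helper name is hypothetical, and the underscore-to-dash conversion is an assumption about what argparser.arg_name does.

def derive_flags(param_name, explicit_flag=None):
    """Hypothetical re-implementation of the flag derivation sketched above."""
    # Assumed behaviour of argparser.arg_name(): trailing underscores dropped,
    # remaining underscores become dashes.
    name = param_name.rstrip('_').replace('_', '-')
    shortname = explicit_flag if explicit_flag is not None else name[0]
    if len(shortname) != 1:
        raise ValueError("Flag '{}' of parameter {} must be "
                         "exactly 1 char!".format(shortname, name))
    return '--{}'.format(name), '-{}'.format(shortname)


print(derive_flags('only_visible'))              # ('--only-visible', '-o')
print(derive_flags('bang', explicit_flag='b'))   # ('--bang', '-b')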
1
16,571
nitpick: Please lower-case `Zero_count` (as it's a literal argument name) and `Argument` here.
qutebrowser-qutebrowser
py
@@ -2693,7 +2693,7 @@ func (a *Account) hasIssuer(issuer string) bool { // hasIssuerNoLock is the unlocked version of hasIssuer func (a *Account) hasIssuerNoLock(issuer string) bool { // same issuer - if a.Issuer == issuer { + if a.Name == issuer { return true } for i := 0; i < len(a.signingKeys); i++ {
1
// Copyright 2018-2020 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "bytes" "encoding/hex" "errors" "fmt" "io/ioutil" "math" "math/rand" "net/http" "net/textproto" "net/url" "reflect" "sort" "strconv" "strings" "sync" "time" "github.com/nats-io/jwt/v2" "github.com/nats-io/nkeys" "github.com/nats-io/nuid" ) // For backwards compatibility with NATS < 2.0, users who are not explicitly defined into an // account will be grouped in the default global account. const globalAccountName = DEFAULT_GLOBAL_ACCOUNT // Account are subject namespace definitions. By default no messages are shared between accounts. // You can share via Exports and Imports of Streams and Services. type Account struct { Name string Nkey string Issuer string claimJWT string updated time.Time mu sync.RWMutex sqmu sync.Mutex sl *Sublist ic *client isid uint64 etmr *time.Timer ctmr *time.Timer strack map[string]sconns nrclients int32 sysclients int32 nleafs int32 nrleafs int32 clients map[*client]struct{} rm map[string]int32 lqws map[string]int32 usersRevoked map[string]int64 actsRevoked map[string]int64 mappings []*mapping lleafs []*client imports importMap exports exportMap js *jsAccount jsLimits *JetStreamAccountLimits limits expired bool incomplete bool signingKeys []string srv *Server // server this account is registered with (possibly nil) lds string // loop detection subject for leaf nodes siReply []byte // service reply prefix, will form wildcard subscription. prand *rand.Rand eventIds *nuid.NUID eventIdsMu sync.Mutex defaultPerms *Permissions } // Account based limits. type limits struct { mpay int32 msubs int32 mconns int32 mleafs int32 } // Used to track remote clients and leafnodes per remote server. type sconns struct { conns int32 leafs int32 } // Import stream mapping struct type streamImport struct { acc *Account from string to string tr *transform rtr *transform claim *jwt.Import usePub bool invalid bool } // Import service mapping struct type serviceImport struct { acc *Account claim *jwt.Import se *serviceExport sid []byte from string to string exsub string tr *transform ts int64 rt ServiceRespType latency *serviceLatency m1 *ServiceLatency rc *client usePub bool response bool invalid bool share bool tracking bool didDeliver bool isSysAcc bool trackingHdr http.Header // header from request } // This is used to record when we create a mapping for implicit service // imports. We use this to clean up entries that are not singletons when // we detect that interest is no longer present. The key to the map will // be the actual interest. We record the mapped subject and the account. type serviceRespEntry struct { acc *Account msub string } // ServiceRespType represents the types of service request response types. type ServiceRespType uint8 // Service response types. Defaults to a singleton. const ( Singleton ServiceRespType = iota Streamed Chunked ) // String helper. 
func (rt ServiceRespType) String() string { switch rt { case Singleton: return "Singleton" case Streamed: return "Streamed" case Chunked: return "Chunked" } return "Unknown ServiceResType" } // exportAuth holds configured approvals or boolean indicating an // auth token is required for import. type exportAuth struct { tokenReq bool approved map[string]*Account } // streamExport type streamExport struct { exportAuth } // serviceExport holds additional information for exported services. type serviceExport struct { exportAuth acc *Account respType ServiceRespType latency *serviceLatency rtmr *time.Timer respThresh time.Duration } // Used to track service latency. type serviceLatency struct { sampling int8 // percentage from 1-100 or 0 to indicate triggered by header subject string } // exportMap tracks the exported streams and services. type exportMap struct { streams map[string]*streamExport services map[string]*serviceExport responses map[string]*serviceImport } // importMap tracks the imported streams and services. // For services we will also track the response mappings as well. type importMap struct { streams []*streamImport services map[string]*serviceImport rrMap map[string][]*serviceRespEntry } // NewAccount creates a new unlimited account with the given name. func NewAccount(name string) *Account { a := &Account{ Name: name, limits: limits{-1, -1, -1, -1}, eventIds: nuid.New(), } return a } // Used to create shallow copies of accounts for transfer // from opts to real accounts in server struct. func (a *Account) shallowCopy() *Account { na := NewAccount(a.Name) na.Nkey = a.Nkey na.Issuer = a.Issuer if a.imports.streams != nil { na.imports.streams = make([]*streamImport, 0, len(a.imports.streams)) for _, v := range a.imports.streams { si := *v na.imports.streams = append(na.imports.streams, &si) } } if a.imports.services != nil { na.imports.services = make(map[string]*serviceImport) for k, v := range a.imports.services { si := *v na.imports.services[k] = &si } } if a.exports.streams != nil { na.exports.streams = make(map[string]*streamExport) for k, v := range a.exports.streams { if v != nil { se := *v na.exports.streams[k] = &se } else { na.exports.streams[k] = nil } } } if a.exports.services != nil { na.exports.services = make(map[string]*serviceExport) for k, v := range a.exports.services { if v != nil { se := *v na.exports.services[k] = &se } else { na.exports.services[k] = nil } } } // JetStream na.jsLimits = a.jsLimits return na } // nextEventID uses its own lock for better concurrency. func (a *Account) nextEventID() string { a.eventIdsMu.Lock() id := a.eventIds.Next() a.eventIdsMu.Unlock() return id } // Called to track a remote server and connections and leafnodes it // has for this account. func (a *Account) updateRemoteServer(m *AccountNumConns) []*client { a.mu.Lock() if a.strack == nil { a.strack = make(map[string]sconns) } // This does not depend on receiving all updates since each one is idempotent. // FIXME(dlc) - We should cleanup when these both go to zero. prev := a.strack[m.Server.ID] a.strack[m.Server.ID] = sconns{conns: int32(m.Conns), leafs: int32(m.LeafNodes)} a.nrclients += int32(m.Conns) - prev.conns a.nrleafs += int32(m.LeafNodes) - prev.leafs mtce := a.mconns != jwt.NoLimit && (len(a.clients)-int(a.sysclients)+int(a.nrclients) > int(a.mconns)) // If we are over here some have snuck in and we need to rebalance. // All others will probably be doing the same thing but better to be // conservative and bit harsh here. 
Clients will reconnect if we over compensate. var clients []*client if mtce { clients = make([]*client, 0, len(a.clients)) for c := range a.clients { clients = append(clients, c) } sort.Slice(clients, func(i, j int) bool { return clients[i].start.After(clients[j].start) }) over := (len(a.clients) - int(a.sysclients) + int(a.nrclients)) - int(a.mconns) if over < len(clients) { clients = clients[:over] } } // Now check leafnodes. mtlce := a.mleafs != jwt.NoLimit && (a.nleafs+a.nrleafs > a.mleafs) if mtlce { // Take ones from the end. leafs := a.lleafs over := int(a.nleafs + a.nrleafs - a.mleafs) if over < len(leafs) { leafs = leafs[len(leafs)-over:] } clients = append(clients, leafs...) } a.mu.Unlock() // If we have exceeded our max clients this will be populated. return clients } // Removes tracking for a remote server that has shutdown. func (a *Account) removeRemoteServer(sid string) { a.mu.Lock() if a.strack != nil { prev := a.strack[sid] delete(a.strack, sid) a.nrclients -= prev.conns a.nrleafs -= prev.leafs } a.mu.Unlock() } // When querying for subject interest this is the number of // expected responses. We need to actually check that the entry // has active connections. func (a *Account) expectedRemoteResponses() (expected int32) { a.mu.RLock() for _, sc := range a.strack { if sc.conns > 0 || sc.leafs > 0 { expected++ } } a.mu.RUnlock() return } // Clears eventing and tracking for this account. func (a *Account) clearEventing() { a.mu.Lock() a.nrclients = 0 // Now clear state clearTimer(&a.etmr) clearTimer(&a.ctmr) a.clients = nil a.strack = nil a.mu.Unlock() } // GetName will return the accounts name. func (a *Account) GetName() string { if a == nil { return "n/a" } a.mu.RLock() name := a.Name a.mu.RUnlock() return name } // NumConnections returns active number of clients for this account for // all known servers. func (a *Account) NumConnections() int { a.mu.RLock() nc := len(a.clients) + int(a.nrclients) a.mu.RUnlock() return nc } // NumRemoteConnections returns the number of client or leaf connections that // are not on this server. func (a *Account) NumRemoteConnections() int { a.mu.RLock() nc := int(a.nrclients + a.nrleafs) a.mu.RUnlock() return nc } // NumLocalConnections returns active number of clients for this account // on this server. func (a *Account) NumLocalConnections() int { a.mu.RLock() nlc := a.numLocalConnections() a.mu.RUnlock() return nlc } // Do not account for the system accounts. func (a *Account) numLocalConnections() int { return len(a.clients) - int(a.sysclients) - int(a.nleafs) } // This is for extended local interest. // Lock should not be held. func (a *Account) numLocalAndLeafConnections() int { a.mu.RLock() nlc := len(a.clients) - int(a.sysclients) a.mu.RUnlock() return nlc } func (a *Account) numLocalLeafNodes() int { return int(a.nleafs) } // MaxTotalConnectionsReached returns if we have reached our limit for number of connections. func (a *Account) MaxTotalConnectionsReached() bool { var mtce bool a.mu.RLock() if a.mconns != jwt.NoLimit { mtce = len(a.clients)-int(a.sysclients)+int(a.nrclients) >= int(a.mconns) } a.mu.RUnlock() return mtce } // MaxActiveConnections return the set limit for the account system // wide for total number of active connections. func (a *Account) MaxActiveConnections() int { a.mu.RLock() mconns := int(a.mconns) a.mu.RUnlock() return mconns } // MaxTotalLeafNodesReached returns if we have reached our limit for number of leafnodes. 
func (a *Account) MaxTotalLeafNodesReached() bool { a.mu.RLock() mtc := a.maxTotalLeafNodesReached() a.mu.RUnlock() return mtc } func (a *Account) maxTotalLeafNodesReached() bool { if a.mleafs != jwt.NoLimit { return a.nleafs+a.nrleafs >= a.mleafs } return false } // NumLeafNodes returns the active number of local and remote // leaf node connections. func (a *Account) NumLeafNodes() int { a.mu.RLock() nln := int(a.nleafs + a.nrleafs) a.mu.RUnlock() return nln } // NumRemoteLeafNodes returns the active number of remote // leaf node connections. func (a *Account) NumRemoteLeafNodes() int { a.mu.RLock() nrn := int(a.nrleafs) a.mu.RUnlock() return nrn } // MaxActiveLeafNodes return the set limit for the account system // wide for total number of leavenode connections. // NOTE: these are tracked separately. func (a *Account) MaxActiveLeafNodes() int { a.mu.RLock() mleafs := int(a.mleafs) a.mu.RUnlock() return mleafs } // RoutedSubs returns how many subjects we would send across a route when first // connected or expressing interest. Local client subs. func (a *Account) RoutedSubs() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.rm) } // TotalSubs returns total number of Subscriptions for this account. func (a *Account) TotalSubs() int { a.mu.RLock() defer a.mu.RUnlock() return int(a.sl.Count()) } // MapDest is for mapping published subjects for clients. type MapDest struct { Subject string `json:"subject"` Weight uint8 `json:"weight"` OptCluster string `json:"cluster,omitempty"` } func NewMapDest(subject string, weight uint8) *MapDest { return &MapDest{subject, weight, ""} } // destination is for internal representation for a weighted mapped destination. type destination struct { tr *transform weight uint8 } // mapping is an internal entry for mapping subjects. type mapping struct { src string wc bool dests []*destination cdests map[string][]*destination } // AddMapping adds in a simple route mapping from src subject to dest subject // for inbound client messages. func (a *Account) AddMapping(src, dest string) error { return a.AddWeightedMappings(src, NewMapDest(dest, 100)) } // AddWeightedMapping will add in a weighted mappings for the destinations. // TODO(dlc) - Allow cluster filtering func (a *Account) AddWeightedMappings(src string, dests ...*MapDest) error { a.mu.Lock() defer a.mu.Unlock() // We use this for selecting between multiple weighted destinations. if a.prand == nil { a.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } if !IsValidSubject(src) { return ErrBadSubject } m := &mapping{src: src, wc: subjectHasWildcard(src), dests: make([]*destination, 0, len(dests)+1)} seen := make(map[string]struct{}) var tw uint8 for _, d := range dests { if _, ok := seen[d.Subject]; ok { return fmt.Errorf("duplicate entry for %q", d.Subject) } seen[d.Subject] = struct{}{} if d.Weight > 100 { return fmt.Errorf("individual weights need to be <= 100") } tw += d.Weight if tw > 100 { return fmt.Errorf("total weight needs to be <= 100") } if !IsValidSubject(d.Subject) { return ErrBadSubject } tr, err := newTransform(src, d.Subject) if err != nil { return err } if d.OptCluster == "" { m.dests = append(m.dests, &destination{tr, d.Weight}) } else { // We have a cluster scoped filter. 
if m.cdests == nil { m.cdests = make(map[string][]*destination) } ad := m.cdests[d.OptCluster] ad = append(ad, &destination{tr, d.Weight}) m.cdests[d.OptCluster] = ad } } processDestinations := func(dests []*destination) ([]*destination, error) { var ltw uint8 for _, d := range dests { ltw += d.weight } // Auto add in original at weight difference if all entries weight does not total to 100. // Iff the src was not already added in explicitly, meaning they want loss. _, haveSrc := seen[src] if ltw != 100 && !haveSrc { dest := src if m.wc { // We need to make the appropriate markers for the wildcards etc. dest = transformTokenize(dest) } tr, err := newTransform(src, dest) if err != nil { return nil, err } aw := 100 - ltw if len(dests) == 0 { aw = 100 } dests = append(dests, &destination{tr, aw}) } sort.Slice(dests, func(i, j int) bool { return dests[i].weight < dests[j].weight }) var lw uint8 for _, d := range dests { d.weight += lw lw = d.weight } return dests, nil } var err error if m.dests, err = processDestinations(m.dests); err != nil { return err } // Option cluster scoped destinations for cluster, dests := range m.cdests { if dests, err = processDestinations(dests); err != nil { return err } m.cdests[cluster] = dests } // Replace an old one if it exists. for i, m := range a.mappings { if m.src == src { a.mappings[i] = m return nil } } // If we did not replace add to the end. a.mappings = append(a.mappings, m) return nil } // Helper function to tokenize subjects with partial wildcards into formal transform destinations. // e.g. foo.*.* -> foo.$1.$2 func transformTokenize(subject string) string { // We need to make the appropriate markers for the wildcards etc. i := 1 var nda []string for _, token := range strings.Split(subject, tsep) { if token == "*" { nda = append(nda, fmt.Sprintf("$%d", i)) i++ } else { nda = append(nda, token) } } return strings.Join(nda, tsep) } func transformUntokenize(subject string) (string, []string) { var phs []string var nda []string for _, token := range strings.Split(subject, tsep) { if len(token) > 1 && token[0] == '$' && token[1] >= '1' && token[1] <= '9' { phs = append(phs, token) nda = append(nda, "*") } else { nda = append(nda, token) } } return strings.Join(nda, tsep), phs } // RemoveMapping will remove an existing mapping. func (a *Account) RemoveMapping(src string) bool { a.mu.Lock() defer a.mu.Unlock() for i, m := range a.mappings { if m.src == src { // Swap last one into this spot. Its ok to change order. a.mappings[i] = a.mappings[len(a.mappings)-1] a.mappings[len(a.mappings)-1] = nil // gc a.mappings = a.mappings[:len(a.mappings)-1] return true } } return false } // Indicates we have mapping entries. func (a *Account) hasMappings() bool { if a == nil { return false } a.mu.RLock() n := len(a.mappings) a.mu.RUnlock() return n > 0 } // This performs the logic to map to a new dest subject based on mappings. // Should only be called from processInboundClientMsg or service import processing. func (a *Account) selectMappedSubject(dest string) (string, bool) { a.mu.RLock() if len(a.mappings) == 0 { a.mu.RUnlock() return dest, false } // In case we have to tokenize for subset matching. tsa := [32]string{} tts := tsa[:0] var m *mapping for _, rm := range a.mappings { if !rm.wc && rm.src == dest { m = rm break } else { // tokenize and reuse for subset matching. 
if len(tts) == 0 { start := 0 subject := dest for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) } if isSubsetMatch(tts, rm.src) { m = rm break } } } if m == nil { a.mu.RUnlock() return dest, false } // The selected destination for the mapping. var d *destination var ndest string dests := m.dests if len(m.cdests) > 0 { cn := a.srv.cachedClusterName() dests = m.cdests[cn] if dests == nil { // Fallback to main if we do not match the cluster. dests = m.dests } } // Optimize for single entry case. if len(dests) == 1 && dests[0].weight == 100 { d = dests[0] } else { w := uint8(a.prand.Int31n(100)) for _, rm := range dests { if w < rm.weight { d = rm break } } } if d != nil { if len(d.tr.dtpi) == 0 { ndest = d.tr.dest } else if nsubj, err := d.tr.transform(tts); err == nil { ndest = nsubj } } a.mu.RUnlock() return ndest, true } // SubscriptionInterest returns true if this account has a matching subscription // for the given `subject`. Works only for literal subjects. // TODO: Add support for wildcards func (a *Account) SubscriptionInterest(subject string) bool { return a.Interest(subject) > 0 } // Interest returns the number of subscriptions for a given subject that match. func (a *Account) Interest(subject string) int { var nms int a.mu.RLock() if a.sl != nil { res := a.sl.Match(subject) nms = len(res.psubs) + len(res.qsubs) } a.mu.RUnlock() return nms } // addClient keeps our accounting of local active clients or leafnodes updated. // Returns previous total. func (a *Account) addClient(c *client) int { a.mu.Lock() n := len(a.clients) if a.clients != nil { a.clients[c] = struct{}{} } added := n != len(a.clients) if added { if c.kind == SYSTEM { a.sysclients++ } else if c.kind == LEAF { a.nleafs++ a.lleafs = append(a.lleafs, c) } } a.mu.Unlock() if c != nil && c.srv != nil && added { c.srv.accConnsUpdate(a) } return n } // Helper function to remove leaf nodes. If number of leafnodes gets large // this may need to be optimized out of linear search but believe number // of active leafnodes per account scope to be small and therefore cache friendly. // Lock should be held on account. func (a *Account) removeLeafNode(c *client) { ll := len(a.lleafs) for i, l := range a.lleafs { if l == c { a.lleafs[i] = a.lleafs[ll-1] if ll == 1 { a.lleafs = nil } else { a.lleafs = a.lleafs[:ll-1] } return } } } // removeClient keeps our accounting of local active clients updated. func (a *Account) removeClient(c *client) int { a.mu.Lock() n := len(a.clients) delete(a.clients, c) removed := n != len(a.clients) if removed { if c.kind == SYSTEM { a.sysclients-- } else if c.kind == LEAF { a.nleafs-- a.removeLeafNode(c) } } a.mu.Unlock() if c != nil && c.srv != nil && removed { c.srv.mu.Lock() doRemove := a != c.srv.gacc c.srv.mu.Unlock() if doRemove { c.srv.accConnsUpdate(a) } } return n } func (a *Account) randomClient() *client { if a.ic != nil { return a.ic } var c *client for c = range a.clients { break } return c } // AddServiceExport will configure the account with the defined export. func (a *Account) AddServiceExport(subject string, accounts []*Account) error { return a.AddServiceExportWithResponse(subject, Singleton, accounts) } // AddServiceExportWithResponse will configure the account with the defined export and response type. 
func (a *Account) AddServiceExportWithResponse(subject string, respType ServiceRespType, accounts []*Account) error { if a == nil { return ErrMissingAccount } a.mu.Lock() defer a.mu.Unlock() if a.exports.services == nil { a.exports.services = make(map[string]*serviceExport) } se := a.exports.services[subject] // Always create a service export if se == nil { se = &serviceExport{} } if respType != Singleton { se.respType = respType } if accounts != nil { // empty means auth required but will be import token. if len(accounts) == 0 { se.tokenReq = true } else { if se.approved == nil { se.approved = make(map[string]*Account, len(accounts)) } for _, acc := range accounts { se.approved[acc.Name] = acc } } } lrt := a.lowestServiceExportResponseTime() se.acc = a se.respThresh = DEFAULT_SERVICE_EXPORT_RESPONSE_THRESHOLD a.exports.services[subject] = se if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt { a.updateAllClientsServiceExportResponseTime(nlrt) } return nil } // TrackServiceExport will enable latency tracking of the named service. // Results will be published in this account to the given results subject. func (a *Account) TrackServiceExport(service, results string) error { return a.TrackServiceExportWithSampling(service, results, DEFAULT_SERVICE_LATENCY_SAMPLING) } // TrackServiceExportWithSampling will enable latency tracking of the named service for the given // sampling rate (1-100). Results will be published in this account to the given results subject. func (a *Account) TrackServiceExportWithSampling(service, results string, sampling int) error { if a == nil { return ErrMissingAccount } if sampling != 0 { // 0 means triggered by header if sampling < 1 || sampling > 100 { return ErrBadSampling } } if !IsValidPublishSubject(results) { return ErrBadPublishSubject } // Don't loop back on outselves. if a.IsExportService(results) { return ErrBadPublishSubject } if a.srv != nil && !a.srv.EventsEnabled() { return ErrNoSysAccount } a.mu.Lock() if a.exports.services == nil { a.mu.Unlock() return ErrMissingService } ea, ok := a.exports.services[service] if !ok { a.mu.Unlock() return ErrMissingService } if ea == nil { ea = &serviceExport{} a.exports.services[service] = ea } else if ea.respType != Singleton { a.mu.Unlock() return ErrBadServiceType } ea.latency = &serviceLatency{ sampling: int8(sampling), subject: results, } s := a.srv a.mu.Unlock() if s == nil { return nil } // Now track down the imports and add in latency as needed to enable. s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.services { if im != nil && im.acc.Name == a.Name && subjectIsSubsetMatch(im.to, service) { im.latency = ea.latency } } acc.mu.Unlock() return true }) return nil } // UnTrackServiceExport will disable latency tracking of the named service. func (a *Account) UnTrackServiceExport(service string) { if a == nil || (a.srv != nil && !a.srv.EventsEnabled()) { return } a.mu.Lock() if a == nil || a.exports.services == nil { a.mu.Unlock() return } ea, ok := a.exports.services[service] if !ok || ea == nil || ea.latency == nil { a.mu.Unlock() return } // We have latency here. ea.latency = nil s := a.srv a.mu.Unlock() if s == nil { return } // Now track down the imports and clean them up. 
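// This walks every registered account and clears both the latency settings
// and any pending m1 sample on service imports that still reference this
// export, so no further measurements are produced for it.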
s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.services { if im != nil && im.acc.Name == a.Name { if subjectIsSubsetMatch(im.to, service) { im.latency, im.m1 = nil, nil } } } acc.mu.Unlock() return true }) } // IsExportService will indicate if this service exists. Will check wildcard scenarios. func (a *Account) IsExportService(service string) bool { a.mu.RLock() defer a.mu.RUnlock() _, ok := a.exports.services[service] if ok { return true } tokens := strings.Split(service, tsep) for subj := range a.exports.services { if isSubsetMatch(tokens, subj) { return true } } return false } // IsExportServiceTracking will indicate if given publish subject is an export service with tracking enabled. func (a *Account) IsExportServiceTracking(service string) bool { a.mu.RLock() ea, ok := a.exports.services[service] if ok && ea == nil { a.mu.RUnlock() return false } if ok && ea != nil && ea.latency != nil { a.mu.RUnlock() return true } // FIXME(dlc) - Might want to cache this is in the hot path checking for latency tracking. tokens := strings.Split(service, tsep) for subj, ea := range a.exports.services { if isSubsetMatch(tokens, subj) && ea != nil && ea.latency != nil { a.mu.RUnlock() return true } } a.mu.RUnlock() return false } // ServiceLatency is the JSON message sent out in response to latency tracking for // an accounts exported services. Additional client info is available in requestor // and responder. Note that for a requestor, the only information shared by default // is the RTT used to calculate the total latency. The requestor's account can // designate to share the additional information in the service import. type ServiceLatency struct { TypedEvent Status int `json:"status"` Error string `json:"description,omitempty"` Requestor LatencyClient `json:"requestor,omitempty"` Responder LatencyClient `json:"responder,omitempty"` RequestHeader http.Header `json:"header,omitempty"` // only contains header(s) triggering the measurement RequestStart time.Time `json:"start"` ServiceLatency time.Duration `json:"service"` SystemLatency time.Duration `json:"system"` TotalLatency time.Duration `json:"total"` } // ServiceLatencyType is the NATS Event Type for ServiceLatency const ServiceLatencyType = "io.nats.server.metric.v1.service_latency" // LatencyClient is the JSON message structure assigned to requestors and responders. // Note that for a requestor, the only information shared by default is the RTT used // to calculate the total latency. The requestor's account can designate to share // the additional information in the service import. type LatencyClient struct { Account string `json:"acc"` RTT time.Duration `json:"rtt"` Start time.Time `json:"start,omitempty"` User string `json:"user,omitempty"` Name string `json:"name,omitempty"` Lang string `json:"lang,omitempty"` Version string `json:"ver,omitempty"` IP string `json:"ip,omitempty"` CID uint64 `json:"cid,omitempty"` Server string `json:"server,omitempty"` } // NATSTotalTime is a helper function that totals the NATS latencies. func (nl *ServiceLatency) NATSTotalTime() time.Duration { return nl.Requestor.RTT + nl.Responder.RTT + nl.SystemLatency } // Merge function to merge m1 and m2 (requestor and responder) measurements // when there are two samples. This happens when the requestor and responder // are on different servers. // // m2 ServiceLatency is correct, so use that. // m1 TotalLatency is correct, so use that. // Will use those to back into NATS latency. 
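// Concretely, the backed-out values are:
//
//	SystemLatency  = m1.ServiceLatency - (m2.ServiceLatency + m2.Responder.RTT)
//	ServiceLatency = m2.ServiceLatency
//
// with sanitizeLatencyMetric clamping any value that comes out negative.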
func (m1 *ServiceLatency) merge(m2 *ServiceLatency) { m1.SystemLatency = m1.ServiceLatency - (m2.ServiceLatency + m2.Responder.RTT) m1.ServiceLatency = m2.ServiceLatency m1.Responder = m2.Responder sanitizeLatencyMetric(m1) } // sanitizeLatencyMetric adjusts latency metric values that could go // negative in some edge conditions since we estimate client RTT // for both requestor and responder. // These numbers are never meant to be negative, it just could be // how we back into the values based on estimated RTT. func sanitizeLatencyMetric(sl *ServiceLatency) { if sl.ServiceLatency < 0 { sl.ServiceLatency = 0 } if sl.SystemLatency < 0 { sl.SystemLatency = 0 } } // Used for transporting remote latency measurements. type remoteLatency struct { Account string `json:"account"` ReqId string `json:"req_id"` M2 ServiceLatency `json:"m2"` respThresh time.Duration } // sendLatencyResult will send a latency result and clear the si of the requestor(rc). func (a *Account) sendLatencyResult(si *serviceImport, sl *ServiceLatency) { sl.Type = ServiceLatencyType sl.ID = a.nextEventID() sl.Time = time.Now().UTC() a.mu.Lock() lsubj := si.latency.subject si.rc = nil a.mu.Unlock() a.srv.sendInternalAccountMsg(a, lsubj, sl) } // Used to send a bad request metric when we do not have a reply subject func (a *Account) sendBadRequestTrackingLatency(si *serviceImport, requestor *client, header http.Header) { sl := &ServiceLatency{ Status: 400, Error: "Bad Request", Requestor: requestor.getClientInfo(si.share), } sl.RequestHeader = header sl.RequestStart = time.Now().Add(-sl.Requestor.RTT).UTC() a.sendLatencyResult(si, sl) } // Used to send a latency result when the requestor interest was lost before the // response could be delivered. func (a *Account) sendReplyInterestLostTrackLatency(si *serviceImport) { sl := &ServiceLatency{ Status: 408, Error: "Request Timeout", } a.mu.RLock() rc := si.rc share := si.share ts := si.ts sl.RequestHeader = si.trackingHdr a.mu.RUnlock() if rc != nil { sl.Requestor = rc.getClientInfo(share) } sl.RequestStart = time.Unix(0, ts-int64(sl.Requestor.RTT)).UTC() a.sendLatencyResult(si, sl) } func (a *Account) sendBackendErrorTrackingLatency(si *serviceImport, reason rsiReason) { sl := &ServiceLatency{} a.mu.RLock() rc := si.rc share := si.share ts := si.ts sl.RequestHeader = si.trackingHdr a.mu.RUnlock() if rc != nil { sl.Requestor = rc.getClientInfo(share) } sl.RequestStart = time.Unix(0, ts-int64(sl.Requestor.RTT)).UTC() if reason == rsiNoDelivery { sl.Status = 503 sl.Error = "Service Unavailable" } else if reason == rsiTimeout { sl.Status = 504 sl.Error = "Service Timeout" } a.sendLatencyResult(si, sl) } // sendTrackingMessage will send out the appropriate tracking information for the // service request/response latency. This is called when the requestor's server has // received the response. // TODO(dlc) - holding locks for RTTs may be too much long term. Should revisit. 
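// The first-pass accounting below reduces to:
//
//	serviceRTT     = now - si.ts
//	ServiceLatency = serviceRTT - responder.RTT
//	TotalLatency   = requestor.RTT + serviceRTT (+ SystemLatency when a responder RTT was sampled)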
func (a *Account) sendTrackingLatency(si *serviceImport, responder *client) bool { if si.rc == nil { return true } ts := time.Now() serviceRTT := time.Duration(ts.UnixNano() - si.ts) requestor := si.rc sl := &ServiceLatency{ Status: 200, Requestor: requestor.getClientInfo(si.share), Responder: responder.getClientInfo(true), } sl.RequestStart = time.Unix(0, si.ts-int64(sl.Requestor.RTT)).UTC() sl.ServiceLatency = serviceRTT - sl.Responder.RTT sl.TotalLatency = sl.Requestor.RTT + serviceRTT if sl.Responder.RTT > 0 { sl.SystemLatency = time.Since(ts) sl.TotalLatency += sl.SystemLatency } sl.RequestHeader = si.trackingHdr sanitizeLatencyMetric(sl) sl.Type = ServiceLatencyType sl.ID = a.nextEventID() sl.Time = time.Now().UTC() // If we are expecting a remote measurement, store our sl here. // We need to account for the race between this and us receiving the // remote measurement. // FIXME(dlc) - We need to clean these up but this should happen // already with the auto-expire logic. if responder != nil && responder.kind != CLIENT { si.acc.mu.Lock() if si.m1 != nil { m1, m2 := sl, si.m1 m1.merge(m2) si.acc.mu.Unlock() a.srv.sendInternalAccountMsg(a, si.latency.subject, m1) a.mu.Lock() si.rc = nil a.mu.Unlock() return true } si.m1 = sl si.acc.mu.Unlock() return false } else { a.srv.sendInternalAccountMsg(a, si.latency.subject, sl) a.mu.Lock() si.rc = nil a.mu.Unlock() } return true } // This will check to make sure our response lower threshold is set // properly in any clients doing rrTracking. // Lock should be held. func (a *Account) updateAllClientsServiceExportResponseTime(lrt time.Duration) { for c := range a.clients { c.mu.Lock() if c.rrTracking != nil && lrt != c.rrTracking.lrt { c.rrTracking.lrt = lrt if c.rrTracking.ptmr.Stop() { c.rrTracking.ptmr.Reset(lrt) } } c.mu.Unlock() } } // Will select the lowest respThresh from all service exports. // Read lock should be held. func (a *Account) lowestServiceExportResponseTime() time.Duration { // Lowest we will allow is 5 minutes. Its an upper bound for this function. lrt := time.Duration(5 * time.Minute) for _, se := range a.exports.services { if se.respThresh < lrt { lrt = se.respThresh } } return lrt } // AddServiceImportWithClaim will add in the service import via the jwt claim. func (a *Account) AddServiceImportWithClaim(destination *Account, from, to string, imClaim *jwt.Import) error { if destination == nil { return ErrMissingAccount } // Empty means use from. if to == "" { to = from } if !IsValidSubject(from) || !IsValidSubject(to) { return ErrInvalidSubject } // First check to see if the account has authorized us to route to the "to" subject. if !destination.checkServiceImportAuthorized(a, to, imClaim) { return ErrServiceImportAuthorization } if a.importFormsCycle(destination, from, to) { return ErrServiceImportFormsCycle } _, err := a.addServiceImport(destination, from, to, imClaim) return err } // Detects if we have a cycle. func (a *Account) importFormsCycle(destination *Account, from, to string) bool { // Check that what we are importing is not something we also export. if a.serviceExportOverlaps(to) { // So at this point if destination account is also importing from us, that forms a cycle. if destination.serviceImportOverlaps(from) { return true } } return false } // SetServiceImportSharing will allow sharing of information about requests with the export account. // Used for service latency tracking at the moment. 
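// Illustrative usage (acc, exporter and the subject are hypothetical):
//
//	// Let the exporting account see requestor details on latency samples.
//	if err := acc.SetServiceImportSharing(exporter, "ngs.usage.*", true); err != nil {
//		// no matching import, or the account is claim based
//	}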
func (a *Account) SetServiceImportSharing(destination *Account, to string, allow bool) error { a.mu.Lock() defer a.mu.Unlock() if a.isClaimAccount() { return fmt.Errorf("claim based accounts can not be updated directly") } for _, si := range a.imports.services { if si.acc == destination && si.to == to { si.share = allow return nil } } return fmt.Errorf("service import not found") } // AddServiceImport will add a route to an account to send published messages / requests // to the destination account. From is the local subject to map, To is the // subject that will appear on the destination account. Destination will need // to have an import rule to allow access via addService. func (a *Account) AddServiceImport(destination *Account, from, to string) error { return a.AddServiceImportWithClaim(destination, from, to, nil) } // NumPendingReverseResponses returns the number of response mappings we have for all outstanding // requests for service imports. func (a *Account) NumPendingReverseResponses() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.rrMap) } // NumPendingAllResponses return the number of all responses outstanding for service exports. func (a *Account) NumPendingAllResponses() int { return a.NumPendingResponses("") } // NumResponsesPending returns the number of responses outstanding for service exports // on this account. An empty filter string returns all responses regardless of which export. // If you specify the filter we will only return ones that are for that export. // NOTE this is only for what this server is tracking. func (a *Account) NumPendingResponses(filter string) int { a.mu.RLock() defer a.mu.RUnlock() if filter == "" { return len(a.exports.responses) } se := a.getServiceExport(filter) if se == nil { return 0 } var nre int for _, si := range a.exports.responses { if si.se == se { nre++ } } return nre } // NumServiceImports returns the number of service imports we have configured. func (a *Account) NumServiceImports() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.services) } // Reason why we are removing this response serviceImport. type rsiReason int const ( rsiOk = rsiReason(iota) rsiNoDelivery rsiTimeout ) // removeRespServiceImport removes a response si mapping and the reverse entries for interest detection. func (a *Account) removeRespServiceImport(si *serviceImport, reason rsiReason) { if si == nil { return } a.mu.Lock() delete(a.exports.responses, si.from) dest := si.acc to := si.to tracking := si.tracking rc := si.rc a.mu.Unlock() if tracking && rc != nil { a.sendBackendErrorTrackingLatency(si, reason) } dest.checkForReverseEntry(to, si, false) } // removeServiceImport will remove the route by subject. func (a *Account) removeServiceImport(subject string) { a.mu.Lock() si, ok := a.imports.services[subject] delete(a.imports.services, subject) var sid []byte c := a.ic if ok && si != nil { if a.ic != nil && si.sid != nil { sid = si.sid } } a.mu.Unlock() if sid != nil { c.processUnsub(sid) } } // This tracks responses to service requests mappings. This is used for cleanup. func (a *Account) addReverseRespMapEntry(acc *Account, reply, from string) { a.mu.Lock() if a.imports.rrMap == nil { a.imports.rrMap = make(map[string][]*serviceRespEntry) } sre := &serviceRespEntry{acc, from} sra := a.imports.rrMap[reply] a.imports.rrMap[reply] = append(sra, sre) a.mu.Unlock() } // checkForReverseEntries is for when we are trying to match reverse entries to a wildcard. 
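// For example, a reply wildcard such as "_R_.abc.>" gathers every literal
// reply currently held in rrMap under that prefix and cleans each one up
// individually.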
// This will be called from checkForReverseEntry when the reply arg is a wildcard subject. // This will usually be called in a go routine since we need to walk all the entries. func (a *Account) checkForReverseEntries(reply string, checkInterest bool) { a.mu.RLock() if len(a.imports.rrMap) == 0 { a.mu.RUnlock() return } if subjectIsLiteral(reply) { a.mu.RUnlock() a.checkForReverseEntry(reply, nil, checkInterest) return } var _rs [32]string rs := _rs[:0] for k := range a.imports.rrMap { if subjectIsSubsetMatch(k, reply) { rs = append(rs, k) } } a.mu.RUnlock() for _, reply := range rs { a.checkForReverseEntry(reply, nil, checkInterest) } } // This checks for any response map entries. If you specify an si we will only match and // clean up for that one, otherwise we remove them all. func (a *Account) checkForReverseEntry(reply string, si *serviceImport, checkInterest bool) { a.mu.RLock() if len(a.imports.rrMap) == 0 { a.mu.RUnlock() return } if subjectHasWildcard(reply) { a.mu.RUnlock() go a.checkForReverseEntries(reply, checkInterest) return } sres := a.imports.rrMap[reply] if sres == nil { a.mu.RUnlock() return } // If we are here we have an entry we should check. // If requested we will first check if there is any // interest for this subject for the entire account. // If there is we can not delete any entries yet. // Note that if we are here reply has to be a literal subject. if checkInterest { // If interest still exists we can not clean these up yet. if rr := a.sl.Match(reply); len(rr.psubs)+len(rr.qsubs) > 0 { a.mu.RUnlock() return } } a.mu.RUnlock() // Delete the appropriate entries here based on optional si. a.mu.Lock() if si == nil { delete(a.imports.rrMap, reply) } else { // Find the one we are looking for.. for i, sre := range sres { if sre.msub == si.from { sres = append(sres[:i], sres[i+1:]...) break } } if len(sres) > 0 { a.imports.rrMap[si.to] = sres } else { delete(a.imports.rrMap, si.to) } } a.mu.Unlock() // If we are here we no longer have interest and we have // response entries that we should clean up. if si == nil { for _, sre := range sres { acc := sre.acc var trackingCleanup bool var rsi *serviceImport acc.mu.Lock() if rsi = acc.exports.responses[sre.msub]; rsi != nil && !rsi.didDeliver { delete(acc.exports.responses, rsi.from) trackingCleanup = rsi.tracking && rsi.rc != nil } acc.mu.Unlock() if trackingCleanup { acc.sendReplyInterestLostTrackLatency(rsi) } } } } // Internal check to see if the to subject overlaps with another export. func (a *Account) serviceExportOverlaps(to string) bool { a.mu.RLock() defer a.mu.RUnlock() for subj := range a.exports.services { if to == subj || SubjectsCollide(to, subj) { return true } } return false } // Internal check to see if the from subject overlaps with another import. func (a *Account) serviceImportOverlaps(from string) bool { a.mu.RLock() defer a.mu.RUnlock() for subj := range a.imports.services { if from == subj || SubjectsCollide(from, subj) { return true } } return false } // Internal check to see if a service import exists. func (a *Account) serviceImportExists(dest *Account, from string) bool { a.mu.RLock() dup := a.imports.services[from] a.mu.RUnlock() return dup != nil } // Add a service import. // This does no checks and should only be called by the msg processing code. Use // AddServiceImport from above if responding to user input or config changes, etc. 
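// For reference, the checked public path is AddServiceImport ->
// AddServiceImportWithClaim (authorization and cycle detection) ->
// addServiceImport (this function).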
func (a *Account) addServiceImport(dest *Account, from, to string, claim *jwt.Import) (*serviceImport, error) { rt := Singleton var lat *serviceLatency dest.mu.RLock() se := dest.getServiceExport(to) if se != nil { rt = se.respType lat = se.latency } s := dest.srv dest.mu.RUnlock() // Track if this maps us to the system account. var isSysAcc bool if s != nil { s.mu.Lock() if s.sys != nil && dest == s.sys.account { isSysAcc = true } s.mu.Unlock() } a.mu.Lock() if a.imports.services == nil { a.imports.services = make(map[string]*serviceImport) } else if dup := a.imports.services[from]; dup != nil { a.mu.Unlock() return nil, fmt.Errorf("duplicate service import subject %q, previously used in import for account %q, subject %q", from, dup.acc.Name, dup.to) } if to == "" { to = from } // Check to see if we have a wildcard var ( usePub bool tr *transform err error ) if subjectHasWildcard(to) { // If to and from match, then we use the published subject. if to == from { usePub = true } else { from, _ = transformUntokenize(from) // Create a transform if tr, err = newTransform(from, transformTokenize(to)); err != nil { a.mu.Unlock() return nil, fmt.Errorf("failed to create mapping transform for service import subject %q to %q: %v", from, to, err) } } } si := &serviceImport{dest, claim, se, nil, from, to, "", tr, 0, rt, lat, nil, nil, usePub, false, false, false, false, false, isSysAcc, nil} a.imports.services[from] = si a.mu.Unlock() if err := a.addServiceImportSub(si); err != nil { a.removeServiceImport(si.from) return nil, err } return si, nil } // Returns the internal client, will create one if not present. // Lock should be held. func (a *Account) internalClient() *client { if a.ic == nil && a.srv != nil { a.ic = a.srv.createInternalAccountClient() a.ic.acc = a } return a.ic } // Internal account scoped subscriptions. func (a *Account) subscribeInternal(subject string, cb msgHandler) (*subscription, error) { a.mu.Lock() c := a.internalClient() a.isid++ sid := strconv.FormatUint(a.isid, 10) a.mu.Unlock() // This will happen in parsing when the account has not been properly setup. if c == nil { return nil, fmt.Errorf("no internal account client") } return c.processSub([]byte(subject), nil, []byte(sid), cb, false) } // This will add an account subscription that matches the "from" from a service import entry. func (a *Account) addServiceImportSub(si *serviceImport) error { a.mu.Lock() c := a.internalClient() // This will happen in parsing when the account has not been properly setup. if c == nil { a.mu.Unlock() return nil } if si.sid != nil { a.mu.Unlock() return fmt.Errorf("duplicate call to create subscription for service import") } a.isid++ sid := strconv.FormatUint(a.isid, 10) si.sid = []byte(sid) subject := si.from a.mu.Unlock() cb := func(sub *subscription, c *client, subject, reply string, msg []byte) { c.processServiceImport(si, a, msg) } _, err := c.processSub([]byte(subject), nil, []byte(sid), cb, true) return err } // Remove all the subscriptions associated with service imports. func (a *Account) removeAllServiceImportSubs() { a.mu.RLock() var sids [][]byte for _, si := range a.imports.services { if si.sid != nil { sids = append(sids, si.sid) si.sid = nil } } c := a.ic a.ic = nil a.mu.RUnlock() if c == nil { return } for _, sid := range sids { c.processUnsub(sid) } c.closeConnection(InternalClient) } // Add in subscriptions for all registered service imports. 
func (a *Account) addAllServiceImportSubs() { for _, si := range a.imports.services { a.addServiceImportSub(si) } } var ( // header where all information is encoded in one value. trcUber = textproto.CanonicalMIMEHeaderKey("Uber-Trace-Id") trcCtx = textproto.CanonicalMIMEHeaderKey("Traceparent") trcB3 = textproto.CanonicalMIMEHeaderKey("B3") // openzipkin header to check trcB3Sm = textproto.CanonicalMIMEHeaderKey("X-B3-Sampled") trcB3Id = textproto.CanonicalMIMEHeaderKey("X-B3-TraceId") // additional header needed to include when present trcB3PSId = textproto.CanonicalMIMEHeaderKey("X-B3-ParentSpanId") trcB3SId = textproto.CanonicalMIMEHeaderKey("X-B3-SpanId") trcCtxSt = textproto.CanonicalMIMEHeaderKey("Tracestate") trcUberCtxPrefix = textproto.CanonicalMIMEHeaderKey("Uberctx-") ) func newB3Header(h http.Header) http.Header { retHdr := http.Header{} if v, ok := h[trcB3Sm]; ok { retHdr[trcB3Sm] = v } if v, ok := h[trcB3Id]; ok { retHdr[trcB3Id] = v } if v, ok := h[trcB3PSId]; ok { retHdr[trcB3PSId] = v } if v, ok := h[trcB3SId]; ok { retHdr[trcB3SId] = v } return retHdr } func newUberHeader(h http.Header, tId []string) http.Header { retHdr := http.Header{trcUber: tId} for k, v := range h { if strings.HasPrefix(k, trcUberCtxPrefix) { retHdr[k] = v } } return retHdr } func newTraceCtxHeader(h http.Header, tId []string) http.Header { retHdr := http.Header{trcCtx: tId} if v, ok := h[trcCtxSt]; ok { retHdr[trcCtxSt] = v } return retHdr } // Helper to determine when to sample. When header has a value, sampling is driven by header func shouldSample(l *serviceLatency, c *client) (bool, http.Header) { if l == nil { return false, nil } if l.sampling < 0 { return false, nil } if l.sampling >= 100 { return true, nil } if l.sampling > 0 && rand.Int31n(100) <= int32(l.sampling) { return true, nil } h := c.parseState.getHeader() if len(h) == 0 { return false, nil } if tId := h[trcUber]; len(tId) != 0 { // sample 479fefe9525eddb:5adb976bfc1f95c1:479fefe9525eddb:1 tk := strings.Split(tId[0], ":") if len(tk) == 4 && len(tk[3]) > 0 && len(tk[3]) <= 2 { dst := [2]byte{} src := [2]byte{'0', tk[3][0]} if len(tk[3]) == 2 { src[1] = tk[3][1] } if _, err := hex.Decode(dst[:], src[:]); err == nil && dst[0]&1 == 1 { return true, newUberHeader(h, tId) } } return false, nil } else if sampled := h[trcB3Sm]; len(sampled) != 0 && sampled[0] == "1" { return true, newB3Header(h) // allowed } else if len(sampled) != 0 && sampled[0] == "0" { return false, nil // denied } else if _, ok := h[trcB3Id]; ok { // sample 80f198ee56343ba864fe8b2a57d3eff7 // presence (with X-B3-Sampled not being 0) means sampling left to recipient return true, newB3Header(h) } else if b3 := h[trcB3]; len(b3) != 0 { // sample 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-1-05e3ac9a4f6e3b90 // sample 0 tk := strings.Split(b3[0], "-") if len(tk) > 2 && tk[2] == "0" { return false, nil // denied } else if len(tk) == 1 && tk[0] == "0" { return false, nil // denied } return true, http.Header{trcB3: b3} // sampling allowed or left to recipient of header } else if tId := h[trcCtx]; len(tId) != 0 { // sample 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01 tk := strings.Split(tId[0], "-") if len(tk) == 4 && len([]byte(tk[3])) == 2 && tk[3] == "01" { return true, newTraceCtxHeader(h, tId) } else { return false, nil } } return false, nil } // Used to mimic client like replies. const ( replyPrefix = "_R_." 
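// Layout note: a generated reply is replyPrefix, then 6 pseudo-random
// characters fixed per account, a '.', and 6 pseudo-random characters per
// request; when latency tracking is on, '.', the server's shash and
// trackSuffix are appended.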
trackSuffix = ".T" replyPrefixLen = len(replyPrefix) baseServerLen = 10 replyLen = 6 minReplyLen = 15 digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" base = 62 ) // This is where all service export responses are handled. func (a *Account) processServiceImportResponse(sub *subscription, c *client, subject, reply string, msg []byte) { a.mu.RLock() if a.expired || len(a.exports.responses) == 0 { a.mu.RUnlock() return } si := a.exports.responses[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() // Send for normal processing. c.processServiceImport(si, a, msg) } // Will create a wildcard subscription to handle interest graph propagation for all // service replies. // Lock should not be held. func (a *Account) createRespWildcard() []byte { a.mu.Lock() if a.prand == nil { a.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } var b = [baseServerLen]byte{'_', 'R', '_', '.'} rn := a.prand.Int63() for i, l := replyPrefixLen, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base } a.siReply = append(b[:], '.') pre := a.siReply wcsub := append(a.siReply, '>') c := a.internalClient() a.isid++ sid := strconv.FormatUint(a.isid, 10) a.mu.Unlock() // Create subscription and internal callback for all the wildcard response subjects. c.processSub(wcsub, nil, []byte(sid), a.processServiceImportResponse, false) return pre } // Test whether this is a tracked reply. func isTrackedReply(reply []byte) bool { lreply := len(reply) - 1 return lreply > 3 && reply[lreply-1] == '.' && reply[lreply] == 'T' } // Generate a new service reply from the wildcard prefix. // FIXME(dlc) - probably do not have to use rand here. about 25ns per. func (a *Account) newServiceReply(tracking bool) []byte { a.mu.RLock() replyPre := a.siReply s := a.srv a.mu.RUnlock() if replyPre == nil { replyPre = a.createRespWildcard() } var b [replyLen]byte rn := a.prand.Int63() for i, l := 0, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base } // Make sure to copy. reply := make([]byte, 0, len(replyPre)+len(b)) reply = append(reply, replyPre...) reply = append(reply, b[:]...) if tracking && s.sys != nil { // Add in our tracking identifier. This allows the metrics to get back to only // this server without needless SUBS/UNSUBS. reply = append(reply, '.') reply = append(reply, s.sys.shash...) reply = append(reply, '.', 'T') } return reply } // Checks if a serviceImport was created to map responses. func (si *serviceImport) isRespServiceImport() bool { return si != nil && si.response } // Sets the response theshold timer for a service export. // Account lock should be held func (se *serviceExport) setResponseThresholdTimer() { if se.rtmr != nil { return // Already set } se.rtmr = time.AfterFunc(se.respThresh, se.checkExpiredResponses) } // Account lock should be held func (se *serviceExport) clearResponseThresholdTimer() bool { if se.rtmr == nil { return true } stopped := se.rtmr.Stop() se.rtmr = nil return stopped } // checkExpiredResponses will check for any pending responses that need to // be cleaned up. func (se *serviceExport) checkExpiredResponses() { acc := se.acc if acc == nil { acc.mu.Lock() se.clearResponseThresholdTimer() acc.mu.Unlock() return } var expired []*serviceImport mints := time.Now().UnixNano() - int64(se.respThresh) // TODO(dlc) - Should we release lock while doing this? Or only do these in batches? // Should we break this up for responses only from this service export? // Responses live on acc directly for fast inbound processsing for the _R_ wildcard. 
// We could do another indirection at this level but just to get to the service export? var totalResponses int acc.mu.RLock() for _, si := range acc.exports.responses { if si.se == se { totalResponses++ if si.ts <= mints { expired = append(expired, si) } } } acc.mu.RUnlock() for _, si := range expired { acc.removeRespServiceImport(si, rsiTimeout) } // Pull out expired to determine if we have any left for timer. totalResponses -= len(expired) // Redo timer as needed. acc.mu.Lock() if totalResponses > 0 && se.rtmr != nil { se.rtmr.Stop() se.rtmr.Reset(se.respThresh) } else { se.clearResponseThresholdTimer() } acc.mu.Unlock() } // ServiceExportResponseThreshold returns the current threshold. func (a *Account) ServiceExportResponseThreshold(export string) (time.Duration, error) { a.mu.Lock() defer a.mu.Unlock() se := a.getServiceExport(export) if se == nil { return 0, fmt.Errorf("no export defined for %q", export) } return se.respThresh, nil } // SetServiceExportResponseThreshold sets the maximum time the system will a response to be delivered // from a service export responder. func (a *Account) SetServiceExportResponseThreshold(export string, maxTime time.Duration) error { a.mu.Lock() defer a.mu.Unlock() if a.isClaimAccount() { return fmt.Errorf("claim based accounts can not be updated directly") } lrt := a.lowestServiceExportResponseTime() se := a.getServiceExport(export) if se == nil { return fmt.Errorf("no export defined for %q", export) } se.respThresh = maxTime if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt { a.updateAllClientsServiceExportResponseTime(nlrt) } return nil } // This is for internal service import responses. func (a *Account) addRespServiceImport(dest *Account, to string, osi *serviceImport, tracking bool, header http.Header) *serviceImport { nrr := string(osi.acc.newServiceReply(tracking)) a.mu.Lock() rt := osi.rt // dest is the requestor's account. a is the service responder with the export. // Marked as internal here, that is how we distinguish. si := &serviceImport{dest, nil, osi.se, nil, nrr, to, osi.to, nil, 0, rt, nil, nil, nil, false, true, false, osi.share, false, false, false, nil} if a.exports.responses == nil { a.exports.responses = make(map[string]*serviceImport) } a.exports.responses[nrr] = si // Always grab time and make sure response threshold timer is running. si.ts = time.Now().UnixNano() osi.se.setResponseThresholdTimer() if rt == Singleton && tracking { si.latency = osi.latency si.tracking = true si.trackingHdr = header } a.mu.Unlock() // We do not do individual subscriptions here like we do on configured imports. // We have an internal callback for all responses inbound to this account and // will process appropriately there. This does not pollute the sublist and the caches. // We do add in the reverse map such that we can detect loss of interest and do proper // cleanup of this si as interest goes away. dest.addReverseRespMapEntry(a, to, nrr) return si } // AddStreamImportWithClaim will add in the stream import from a specific account with optional token. func (a *Account) AddStreamImportWithClaim(account *Account, from, prefix string, imClaim *jwt.Import) error { if account == nil { return ErrMissingAccount } // First check to see if the account has authorized export of the subject. if !account.checkStreamImportAuthorized(a, from, imClaim) { return ErrStreamImportAuthorization } // Check prefix if it exists and make sure its a literal. // Append token separator if not already present. 
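// e.g. a prefix of "synadia" on an import of "updates.>" surfaces the
// stream locally as "synadia.updates.>".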
if prefix != "" { // Make sure there are no wildcards here, this prefix needs to be a literal // since it will be prepended to a publish subject. if !subjectIsLiteral(prefix) { return ErrStreamImportBadPrefix } if prefix[len(prefix)-1] != btsep { prefix = prefix + string(btsep) } } return a.AddMappedStreamImportWithClaim(account, from, prefix+from, imClaim) } // AddMappedStreamImport helper for AddMappedStreamImportWithClaim func (a *Account) AddMappedStreamImport(account *Account, from, to string) error { return a.AddMappedStreamImportWithClaim(account, from, to, nil) } // AddMappedStreamImportWithClaim will add in the stream import from a specific account with optional token. func (a *Account) AddMappedStreamImportWithClaim(account *Account, from, to string, imClaim *jwt.Import) error { if account == nil { return ErrMissingAccount } // First check to see if the account has authorized export of the subject. if !account.checkStreamImportAuthorized(a, from, imClaim) { return ErrStreamImportAuthorization } if to == "" { to = from } var ( usePub bool tr *transform err error ) if subjectHasWildcard(from) { if to == from { usePub = true } else { // Create a transform if tr, err = newTransform(from, transformTokenize(to)); err != nil { return fmt.Errorf("failed to create mapping transform for stream import subject %q to %q: %v", from, to, err) } } } a.mu.Lock() if a.isStreamImportDuplicate(account, from) { a.mu.Unlock() return ErrStreamImportDuplicate } a.imports.streams = append(a.imports.streams, &streamImport{account, from, to, tr, nil, imClaim, usePub, false}) a.mu.Unlock() return nil } // isStreamImportDuplicate checks for duplicate. // Lock should be held. func (a *Account) isStreamImportDuplicate(acc *Account, from string) bool { for _, si := range a.imports.streams { if si.acc == acc && si.from == from { return true } } return false } // AddStreamImport will add in the stream import from a specific account. func (a *Account) AddStreamImport(account *Account, from, prefix string) error { return a.AddStreamImportWithClaim(account, from, prefix, nil) } // IsPublicExport is a placeholder to denote a public export. var IsPublicExport = []*Account(nil) // AddStreamExport will add an export to the account. If accounts is nil // it will signify a public export, meaning anyone can impoort. func (a *Account) AddStreamExport(subject string, accounts []*Account) error { if a == nil { return ErrMissingAccount } a.mu.Lock() defer a.mu.Unlock() if a.exports.streams == nil { a.exports.streams = make(map[string]*streamExport) } ea := a.exports.streams[subject] if accounts != nil { if ea == nil { ea = &streamExport{} } // empty means auth required but will be import token. if len(accounts) == 0 { ea.tokenReq = true } else { if ea.approved == nil { ea.approved = make(map[string]*Account, len(accounts)) } for _, acc := range accounts { ea.approved[acc.Name] = acc } } } a.exports.streams[subject] = ea return nil } // Check if another account is authorized to import from us. func (a *Account) checkStreamImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool { // Find the subject in the exports list. 
a.mu.RLock() auth := a.checkStreamImportAuthorizedNoLock(account, subject, imClaim) a.mu.RUnlock() return auth } func (a *Account) checkStreamImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool { if a.exports.streams == nil || !IsValidSubject(subject) { return false } return a.checkStreamExportApproved(account, subject, imClaim) } func (a *Account) checkAuth(ea *exportAuth, account *Account, imClaim *jwt.Import) bool { // if ea is nil or ea.approved is nil, that denotes a public export if ea == nil || (ea.approved == nil && !ea.tokenReq) { return true } // Check if token required if ea.tokenReq { return a.checkActivation(account, imClaim, true) } // If we have a matching account we are authorized _, ok := ea.approved[account.Name] return ok } func (a *Account) checkStreamExportApproved(account *Account, subject string, imClaim *jwt.Import) bool { // Check direct match of subject first ea, ok := a.exports.streams[subject] if ok { if ea == nil { return true } return a.checkAuth(&ea.exportAuth, account, imClaim) } // ok if we are here we did not match directly so we need to test each one. // The import subject arg has to take precedence, meaning the export // has to be a true subset of the import claim. We already checked for // exact matches above. tokens := strings.Split(subject, tsep) for subj, ea := range a.exports.streams { if isSubsetMatch(tokens, subj) { if ea == nil { return true } return a.checkAuth(&ea.exportAuth, account, imClaim) } } return false } func (a *Account) checkServiceExportApproved(account *Account, subject string, imClaim *jwt.Import) bool { // Check direct match of subject first se, ok := a.exports.services[subject] if ok { // if se is nil or eq.approved is nil, that denotes a public export if se == nil || (se.approved == nil && !se.tokenReq) { return true } // Check if token required if se.tokenReq { return a.checkActivation(account, imClaim, true) } // If we have a matching account we are authorized _, ok := se.approved[account.Name] return ok } // ok if we are here we did not match directly so we need to test each one. // The import subject arg has to take precedence, meaning the export // has to be a true subset of the import claim. We already checked for // exact matches above. tokens := strings.Split(subject, tsep) for subj, se := range a.exports.services { if isSubsetMatch(tokens, subj) { if se == nil || (se.approved == nil && !se.tokenReq) { return true } // Check if token required if se.tokenReq { return a.checkActivation(account, imClaim, true) } _, ok := se.approved[account.Name] return ok } } return false } // Helper function to get a serviceExport. // Lock should be held on entry. func (a *Account) getServiceExport(subj string) *serviceExport { se, ok := a.exports.services[subj] // The export probably has a wildcard, so lookup that up. if !ok { se = a.getWildcardServiceExport(subj) } return se } // This helper is used when trying to match a serviceExport record that is // represented by a wildcard. // Lock should be held on entry. func (a *Account) getWildcardServiceExport(from string) *serviceExport { tokens := strings.Split(from, tsep) for subj, se := range a.exports.services { if isSubsetMatch(tokens, subj) { return se } } return nil } // Will fetch the activation token for an import. func fetchActivation(url string) string { // FIXME(dlc) - Make configurable. 
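// The short timeout keeps a slow or unreachable activation URL from
// stalling account setup; callers treat an empty return as a failed
// activation.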
c := &http.Client{Timeout: 2 * time.Second} resp, err := c.Get(url) if err != nil || resp == nil { return "" } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "" } return string(body) } // These are import stream specific versions for when an activation expires. func (a *Account) streamActivationExpired(exportAcc *Account, subject string) { a.mu.RLock() if a.expired || a.imports.streams == nil { a.mu.RUnlock() return } var si *streamImport for _, si = range a.imports.streams { if si.acc == exportAcc && si.from == subject { break } } if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() if si.acc.checkActivation(a, si.claim, false) { // The token has been updated most likely and we are good to go. return } a.mu.Lock() si.invalid = true clients := make([]*client, 0, len(a.clients)) for c := range a.clients { clients = append(clients, c) } awcsti := map[string]struct{}{a.Name: {}} a.mu.Unlock() for _, c := range clients { c.processSubsOnConfigReload(awcsti) } } // These are import service specific versions for when an activation expires. func (a *Account) serviceActivationExpired(subject string) { a.mu.RLock() if a.expired || a.imports.services == nil { a.mu.RUnlock() return } si := a.imports.services[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() if si.acc.checkActivation(a, si.claim, false) { // The token has been updated most likely and we are good to go. return } a.mu.Lock() si.invalid = true a.mu.Unlock() } // Fires for expired activation tokens. We could track this with timers etc. // Instead we just re-analyze where we are and if we need to act. func (a *Account) activationExpired(exportAcc *Account, subject string, kind jwt.ExportType) { switch kind { case jwt.Stream: a.streamActivationExpired(exportAcc, subject) case jwt.Service: a.serviceActivationExpired(subject) } } func isRevoked(revocations map[string]int64, subject string, issuedAt int64) bool { if revocations == nil { return false } if t, ok := revocations[subject]; !ok || t < issuedAt { return false } return true } // checkActivation will check the activation token for validity. func (a *Account) checkActivation(importAcc *Account, claim *jwt.Import, expTimer bool) bool { if claim == nil || claim.Token == "" { return false } // Create a quick clone so we can inline Token JWT. clone := *claim // We grab the token from a URL by hand here since we need expiration etc. if url, err := url.Parse(clone.Token); err == nil && url.Scheme != "" { clone.Token = fetchActivation(url.String()) } vr := jwt.CreateValidationResults() clone.Validate(a.Name, vr) if vr.IsBlocking(true) { return false } act, err := jwt.DecodeActivationClaims(clone.Token) if err != nil { return false } if !a.isIssuerClaimTrusted(act) { return false } vr = jwt.CreateValidationResults() act.Validate(vr) if vr.IsBlocking(true) { return false } if act.Expires != 0 { tn := time.Now().Unix() if act.Expires <= tn { return false } if expTimer { expiresAt := time.Duration(act.Expires - tn) time.AfterFunc(expiresAt*time.Second, func() { importAcc.activationExpired(a, string(act.ImportSubject), claim.Type) }) } } // Check for token revocation.. return !isRevoked(a.actsRevoked, act.Subject, act.IssuedAt) } // Returns true if the activation claim is trusted. That is the issuer matches // the account or is an entry in the signing keys. 
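// When the activation was signed with a signing key, the claim carries
// IssuerAccount naming this account and the signing key itself must be
// registered in the account's signing keys.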
func (a *Account) isIssuerClaimTrusted(claims *jwt.ActivationClaims) bool { // if no issuer account, issuer is the account if claims.IssuerAccount == "" { return true } // If the IssuerAccount is not us, then this is considered an error. if a.Name != claims.IssuerAccount { if a.srv != nil { a.srv.Errorf("Invalid issuer account %q in activation claim (subject: %q - type: %q) for account %q", claims.IssuerAccount, claims.Activation.ImportSubject, claims.Activation.ImportType, a.Name) } return false } return a.hasIssuerNoLock(claims.Issuer) } // Returns true if `a` and `b` stream imports are the same. Note that the // check is done with the account's name, not the pointer. This is used // during config reload where we are comparing current and new config // in which pointers are different. // No lock is acquired in this function, so it is assumed that the // import maps are not changed while this executes. func (a *Account) checkStreamImportsEqual(b *Account) bool { if len(a.imports.streams) != len(b.imports.streams) { return false } // Load the b imports into a map index by what we are looking for. bm := make(map[string]*streamImport, len(b.imports.streams)) for _, bim := range b.imports.streams { bm[bim.acc.Name+bim.from+bim.to] = bim } for _, aim := range a.imports.streams { if _, ok := bm[aim.acc.Name+aim.from+aim.to]; !ok { return false } } return true } func (a *Account) checkStreamExportsEqual(b *Account) bool { if len(a.exports.streams) != len(b.exports.streams) { return false } for subj, aea := range a.exports.streams { bea, ok := b.exports.streams[subj] if !ok { return false } if !reflect.DeepEqual(aea, bea) { return false } } return true } func (a *Account) checkServiceExportsEqual(b *Account) bool { if len(a.exports.services) != len(b.exports.services) { return false } for subj, aea := range a.exports.services { bea, ok := b.exports.services[subj] if !ok { return false } if !reflect.DeepEqual(aea, bea) { return false } } return true } // Check if another account is authorized to route requests to this service. func (a *Account) checkServiceImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool { a.mu.RLock() authorized := a.checkServiceImportAuthorizedNoLock(account, subject, imClaim) a.mu.RUnlock() return authorized } // Check if another account is authorized to route requests to this service. func (a *Account) checkServiceImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool { // Find the subject in the services list. if a.exports.services == nil { return false } return a.checkServiceExportApproved(account, subject, imClaim) } // IsExpired returns expiration status. func (a *Account) IsExpired() bool { a.mu.RLock() exp := a.expired a.mu.RUnlock() return exp } // Called when an account has expired. func (a *Account) expiredTimeout() { // Mark expired first. a.mu.Lock() a.expired = true a.mu.Unlock() // Collect the clients and expire them. cs := make([]*client, 0, len(a.clients)) a.mu.RLock() for c := range a.clients { cs = append(cs, c) } a.mu.RUnlock() for _, c := range cs { c.accountAuthExpired() } } // Sets the expiration timer for an account JWT that has it set. func (a *Account) setExpirationTimer(d time.Duration) { a.etmr = time.AfterFunc(d, a.expiredTimeout) } // Lock should be held func (a *Account) clearExpirationTimer() bool { if a.etmr == nil { return true } stopped := a.etmr.Stop() a.etmr = nil return stopped } // checkUserRevoked will check if a user has been revoked. 
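// A user counts as revoked when a revocation entry exists for its nkey
// with a timestamp at or after the user JWT's IssuedAt (see isRevoked).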
func (a *Account) checkUserRevoked(nkey string, issuedAt int64) bool { a.mu.RLock() defer a.mu.RUnlock() return isRevoked(a.usersRevoked, nkey, issuedAt) } // Check expiration and set the proper state as needed. func (a *Account) checkExpiration(claims *jwt.ClaimsData) { a.mu.Lock() defer a.mu.Unlock() a.clearExpirationTimer() if claims.Expires == 0 { a.expired = false return } tn := time.Now().Unix() if claims.Expires <= tn { a.expired = true return } expiresAt := time.Duration(claims.Expires - tn) a.setExpirationTimer(expiresAt * time.Second) a.expired = false } // hasIssuer returns true if the issuer matches the account // issuer or it is a signing key for the account. func (a *Account) hasIssuer(issuer string) bool { a.mu.RLock() hi := a.hasIssuerNoLock(issuer) a.mu.RUnlock() return hi } // hasIssuerNoLock is the unlocked version of hasIssuer func (a *Account) hasIssuerNoLock(issuer string) bool { // same issuer if a.Issuer == issuer { return true } for i := 0; i < len(a.signingKeys); i++ { if a.signingKeys[i] == issuer { return true } } return false } // Returns the loop detection subject used for leafnodes func (a *Account) getLDSubject() string { a.mu.RLock() lds := a.lds a.mu.RUnlock() return lds } // Placeholder for signaling token auth required. var tokenAuthReq = []*Account{} func authAccounts(tokenReq bool) []*Account { if tokenReq { return tokenAuthReq } return nil } // SetAccountResolver will assign the account resolver. func (s *Server) SetAccountResolver(ar AccountResolver) { s.mu.Lock() s.accResolver = ar s.mu.Unlock() } // AccountResolver returns the registered account resolver. func (s *Server) AccountResolver() AccountResolver { s.mu.Lock() ar := s.accResolver s.mu.Unlock() return ar } // isClaimAccount returns if this account is backed by a JWT claim. // Lock should be held. func (a *Account) isClaimAccount() bool { return a.claimJWT != "" } // updateAccountClaims will update an existing account with new claims. // This will replace any exports or imports previously defined. // Lock MUST NOT be held upon entry. func (s *Server) UpdateAccountClaims(a *Account, ac *jwt.AccountClaims) { s.updateAccountClaimsWithRefresh(a, ac, true) } // updateAccountClaimsWithRefresh will update an existing account with new claims. // If refreshImportingAccounts is true it will also update incomplete dependent accounts // This will replace any exports or imports previously defined. // Lock MUST NOT be held upon entry. func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaims, refreshImportingAccounts bool) { if a == nil { return } s.Debugf("Updating account claims: %s", a.Name) a.checkExpiration(ac.Claims()) a.mu.Lock() // Clone to update, only select certain fields. old := &Account{Name: a.Name, exports: a.exports, limits: a.limits, signingKeys: a.signingKeys} // Reset exports and imports here. // Exports is creating a whole new map. a.exports = exportMap{} // Imports are checked unlocked in processInbound, so we can't change out the struct here. Need to process inline. if a.imports.streams != nil { old.imports.streams = a.imports.streams a.imports.streams = nil } if a.imports.services != nil { old.imports.services = make(map[string]*serviceImport, len(a.imports.services)) } for k, v := range a.imports.services { old.imports.services[k] = v delete(a.imports.services, k) } // Reset any notion of export revocations. 
a.actsRevoked = nil // update account signing keys a.signingKeys = nil signersChanged := false if len(ac.SigningKeys) > 0 { // insure copy the new keys and sort a.signingKeys = append(a.signingKeys, ac.SigningKeys...) sort.Strings(a.signingKeys) } if len(a.signingKeys) != len(old.signingKeys) { signersChanged = true } else { for i := 0; i < len(old.signingKeys); i++ { if a.signingKeys[i] != old.signingKeys[i] { signersChanged = true break } } } a.mu.Unlock() gatherClients := func() []*client { a.mu.RLock() clients := make([]*client, 0, len(a.clients)) for c := range a.clients { clients = append(clients, c) } a.mu.RUnlock() return clients } jsEnabled := s.JetStreamEnabled() if jsEnabled && a == s.SystemAccount() { for _, export := range allJsExports { s.Debugf("Adding jetstream service export %q for %s", export, a.Name) if err := a.AddServiceExport(export, nil); err != nil { s.Errorf("Error setting up jetstream service exports: %v", err) } } } for _, e := range ac.Exports { switch e.Type { case jwt.Stream: s.Debugf("Adding stream export %q for %s", e.Subject, a.Name) if err := a.AddStreamExport(string(e.Subject), authAccounts(e.TokenReq)); err != nil { s.Debugf("Error adding stream export to account [%s]: %v", a.Name, err.Error()) } case jwt.Service: s.Debugf("Adding service export %q for %s", e.Subject, a.Name) rt := Singleton switch e.ResponseType { case jwt.ResponseTypeStream: rt = Streamed case jwt.ResponseTypeChunked: rt = Chunked } if err := a.AddServiceExportWithResponse(string(e.Subject), rt, authAccounts(e.TokenReq)); err != nil { s.Debugf("Error adding service export to account [%s]: %v", a.Name, err) } if e.Latency != nil { if err := a.TrackServiceExportWithSampling(string(e.Subject), string(e.Latency.Results), e.Latency.Sampling); err != nil { s.Debugf("Error adding latency tracking for service export to account [%s]: %v", a.Name, err) } } } // We will track these at the account level. Should not have any collisions. if e.Revocations != nil { a.mu.Lock() if a.actsRevoked == nil { a.actsRevoked = make(map[string]int64) } for k, t := range e.Revocations { a.actsRevoked[k] = t } a.mu.Unlock() } } var incompleteImports []*jwt.Import for _, i := range ac.Imports { // check tmpAccounts with priority var acc *Account var err error if v, ok := s.tmpAccounts.Load(i.Account); ok { acc = v.(*Account) } else { acc, err = s.lookupAccount(i.Account) } if acc == nil || err != nil { s.Errorf("Can't locate account [%s] for import of [%v] %s (err=%v)", i.Account, i.Subject, i.Type, err) incompleteImports = append(incompleteImports, i) continue } switch i.Type { case jwt.Stream: s.Debugf("Adding stream import %s:%q for %s:%q", acc.Name, i.Subject, a.Name, i.To) if err := a.AddStreamImportWithClaim(acc, string(i.Subject), string(i.To), i); err != nil { s.Debugf("Error adding stream import to account [%s]: %v", a.Name, err.Error()) incompleteImports = append(incompleteImports, i) } case jwt.Service: // FIXME(dlc) - need to add in respThresh here eventually. s.Debugf("Adding service import %s:%q for %s:%q", acc.Name, i.Subject, a.Name, i.To) if err := a.AddServiceImportWithClaim(acc, string(i.Subject), string(i.To), i); err != nil { s.Debugf("Error adding service import to account [%s]: %v", a.Name, err.Error()) incompleteImports = append(incompleteImports, i) } } } // Now let's apply any needed changes from import/export changes. 
if !a.checkStreamImportsEqual(old) { awcsti := map[string]struct{}{a.Name: {}} for _, c := range gatherClients() { c.processSubsOnConfigReload(awcsti) } } // Now check if stream exports have changed. if !a.checkStreamExportsEqual(old) || signersChanged { clients := map[*client]struct{}{} // We need to check all accounts that have an import claim from this account. awcsti := map[string]struct{}{} s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) // Move to the next if this account is actually account "a". if acc.Name == a.Name { return true } // TODO: checkStreamImportAuthorized() stack should not be trying // to lock "acc". If we find that to be needed, we will need to // rework this to ensure we don't lock acc. acc.mu.Lock() for _, im := range acc.imports.streams { if im != nil && im.acc.Name == a.Name { // Check for if we are still authorized for an import. im.invalid = !a.checkStreamImportAuthorized(acc, im.from, im.claim) awcsti[acc.Name] = struct{}{} for c := range acc.clients { clients[c] = struct{}{} } } } acc.mu.Unlock() return true }) // Now walk clients. for c := range clients { c.processSubsOnConfigReload(awcsti) } } // Now check if service exports have changed. if !a.checkServiceExportsEqual(old) || signersChanged { s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) // Move to the next if this account is actually account "a". if acc.Name == a.Name { return true } // TODO: checkServiceImportAuthorized() stack should not be trying // to lock "acc". If we find that to be needed, we will need to // rework this to ensure we don't lock acc. acc.mu.Lock() for _, si := range acc.imports.services { if si != nil && si.acc.Name == a.Name { // Check for if we are still authorized for an import. si.invalid = !a.checkServiceImportAuthorized(acc, si.to, si.claim) if si.latency != nil && !si.response { // Make sure we should still be tracking latency. if se := a.getServiceExport(si.to); se != nil { si.latency = se.latency } } } } acc.mu.Unlock() return true }) } // Now make sure we shutdown the old service import subscriptions. var sids [][]byte a.mu.RLock() c := a.ic for _, si := range old.imports.services { if c != nil && si.sid != nil { sids = append(sids, si.sid) } } a.mu.RUnlock() for _, sid := range sids { c.processUnsub(sid) } // Now do limits if they are present. a.mu.Lock() a.msubs = int32(ac.Limits.Subs) a.mpay = int32(ac.Limits.Payload) a.mconns = int32(ac.Limits.Conn) a.mleafs = int32(ac.Limits.LeafNodeConn) // Check for any revocations if len(ac.Revocations) > 0 { // We will always replace whatever we had with most current, so no // need to look at what we have. 
a.usersRevoked = make(map[string]int64, len(ac.Revocations)) for pk, t := range ac.Revocations { a.usersRevoked[pk] = t } } else { a.usersRevoked = nil } a.defaultPerms = buildPermissionsFromJwt(&ac.DefaultPermissions) a.incomplete = len(incompleteImports) != 0 for _, i := range incompleteImports { s.incompleteAccExporterMap.Store(i.Account, struct{}{}) } if a.srv == nil { a.srv = s } if jsEnabled { if ac.Limits.JetStreamLimits.DiskStorage != 0 || ac.Limits.JetStreamLimits.MemoryStorage != 0 { // JetStreamAccountLimits and jwt.JetStreamLimits use same value for unlimited a.jsLimits = &JetStreamAccountLimits{ MaxMemory: ac.Limits.JetStreamLimits.MemoryStorage, MaxStore: ac.Limits.JetStreamLimits.DiskStorage, MaxStreams: int(ac.Limits.JetStreamLimits.Streams), MaxConsumers: int(ac.Limits.JetStreamLimits.Consumer), } } else if a.jsLimits != nil { // covers failed update followed by disable a.jsLimits = nil } } a.updated = time.Now() a.mu.Unlock() clients := gatherClients() // Sort if we are over the limit. if a.MaxTotalConnectionsReached() { sort.Slice(clients, func(i, j int) bool { return clients[i].start.After(clients[j].start) }) } if jsEnabled { if err := s.configJetStream(a); err != nil { s.Errorf("Error configuring jetstream for account [%s]: %v", a.Name, err.Error()) a.mu.Lock() // Absent reload of js server cfg, this is going to be broken until js is disabled a.incomplete = true a.mu.Unlock() } } for i, c := range clients { a.mu.RLock() exceeded := a.mconns != jwt.NoLimit && i >= int(a.mconns) a.mu.RUnlock() if exceeded { c.maxAccountConnExceeded() continue } c.mu.Lock() c.applyAccountLimits() theJWT := c.opts.JWT c.mu.Unlock() // Check for being revoked here. We use ac one to avoid the account lock. if ac.Revocations != nil && theJWT != "" { if juc, err := jwt.DecodeUserClaims(theJWT); err != nil { c.Debugf("User JWT not valid: %v", err) c.authViolation() continue } else if ok := ac.IsClaimRevoked(juc); ok { c.sendErrAndDebug("User Authentication Revoked") c.closeConnection(Revocation) continue } } } // Check if the signing keys changed, might have to evict if signersChanged { for _, c := range clients { c.mu.Lock() sk := c.user.SigningKey c.mu.Unlock() if sk != "" && !a.hasIssuer(sk) { c.closeConnection(AuthenticationViolation) } } } if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok && refreshImportingAccounts { s.incompleteAccExporterMap.Delete(old.Name) s.accounts.Range(func(key, value interface{}) bool { acc := value.(*Account) acc.mu.RLock() incomplete := acc.incomplete name := acc.Name // Must use jwt in account or risk failing on fetch // This jwt may not be the same that caused exportingAcc to be in incompleteAccExporterMap claimJWT := acc.claimJWT acc.mu.RUnlock() if incomplete && name != old.Name { if accClaims, _, err := s.verifyAccountClaims(claimJWT); err == nil { // Since claimJWT has not changed, acc can become complete // but it won't alter incomplete for it's dependents accounts. s.updateAccountClaimsWithRefresh(acc, accClaims, false) // old.Name was deleted before ranging over accounts // If it exists again, UpdateAccountClaims set it for failed imports of acc. // So there was one import of acc that imported this account and failed again. // Since this account just got updated, the import itself may be in error. So trace that. 
if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok { s.incompleteAccExporterMap.Delete(old.Name) s.Errorf("Account %s has issues importing account %s", name, old.Name) } } } return true }) } } // Helper to build an internal account structure from a jwt.AccountClaims. // Lock MUST NOT be held upon entry. func (s *Server) buildInternalAccount(ac *jwt.AccountClaims) *Account { acc := NewAccount(ac.Subject) acc.Issuer = ac.Issuer // Set this here since we are placing in s.tmpAccounts below and may be // referenced by an route RS+, etc. s.setAccountSublist(acc) // We don't want to register an account that is in the process of // being built, however, to solve circular import dependencies, we // need to store it here. s.tmpAccounts.Store(ac.Subject, acc) s.UpdateAccountClaims(acc, ac) return acc } // Helper to build Permissions from jwt.Permissions // or return nil if none were specified func buildPermissionsFromJwt(uc *jwt.Permissions) *Permissions { if uc == nil { return nil } var p *Permissions if len(uc.Pub.Allow) > 0 || len(uc.Pub.Deny) > 0 { if p == nil { p = &Permissions{} } p.Publish = &SubjectPermission{} p.Publish.Allow = uc.Pub.Allow p.Publish.Deny = uc.Pub.Deny } if len(uc.Sub.Allow) > 0 || len(uc.Sub.Deny) > 0 { if p == nil { p = &Permissions{} } p.Subscribe = &SubjectPermission{} p.Subscribe.Allow = uc.Sub.Allow p.Subscribe.Deny = uc.Sub.Deny } if uc.Resp != nil { if p == nil { p = &Permissions{} } p.Response = &ResponsePermission{ MaxMsgs: uc.Resp.MaxMsgs, Expires: uc.Resp.Expires, } validateResponsePermissions(p) } return p } // Helper to build internal NKeyUser. func buildInternalNkeyUser(uc *jwt.UserClaims, acts map[string]struct{}, acc *Account) *NkeyUser { nu := &NkeyUser{Nkey: uc.Subject, Account: acc, AllowedConnectionTypes: acts} if uc.IssuerAccount != "" { nu.SigningKey = uc.Issuer } // Now check for permissions. var p = buildPermissionsFromJwt(&uc.Permissions) if p == nil && acc.defaultPerms != nil { p = acc.defaultPerms.clone() } nu.Permissions = p return nu } const fetchTimeout = 2 * time.Second func fetchAccount(res AccountResolver, name string) (string, error) { if !nkeys.IsValidPublicAccountKey(name) { return "", fmt.Errorf("will only fetch valid account keys") } return res.Fetch(name) } // AccountResolver interface. This is to fetch Account JWTs by public nkeys type AccountResolver interface { Fetch(name string) (string, error) Store(name, jwt string) error IsReadOnly() bool Start(server *Server) error IsTrackingUpdate() bool Reload() error Close() } // Default implementations of IsReadOnly/Start so only need to be written when changed type resolverDefaultsOpsImpl struct{} func (*resolverDefaultsOpsImpl) IsReadOnly() bool { return true } func (*resolverDefaultsOpsImpl) IsTrackingUpdate() bool { return false } func (*resolverDefaultsOpsImpl) Start(*Server) error { return nil } func (*resolverDefaultsOpsImpl) Reload() error { return nil } func (*resolverDefaultsOpsImpl) Close() { } func (*resolverDefaultsOpsImpl) Store(_, _ string) error { return fmt.Errorf("Store operation not supported for URL Resolver") } // MemAccResolver is a memory only resolver. // Mostly for testing. type MemAccResolver struct { sm sync.Map resolverDefaultsOpsImpl } // Fetch will fetch the account jwt claims from the internal sync.Map. func (m *MemAccResolver) Fetch(name string) (string, error) { if j, ok := m.sm.Load(name); ok { return j.(string), nil } return _EMPTY_, ErrMissingAccount } // Store will store the account jwt claims in the internal sync.Map. 
func (m *MemAccResolver) Store(name, jwt string) error { m.sm.Store(name, jwt) return nil } func (ur *MemAccResolver) IsReadOnly() bool { return false } // URLAccResolver implements an http fetcher. type URLAccResolver struct { url string c *http.Client resolverDefaultsOpsImpl } // NewURLAccResolver returns a new resolver for the given base URL. func NewURLAccResolver(url string) (*URLAccResolver, error) { if !strings.HasSuffix(url, "/") { url += "/" } // FIXME(dlc) - Make timeout and others configurable. // We create our own transport to amortize TLS. tr := &http.Transport{ MaxIdleConns: 10, IdleConnTimeout: 30 * time.Second, } ur := &URLAccResolver{ url: url, c: &http.Client{Timeout: fetchTimeout, Transport: tr}, } return ur, nil } // Fetch will fetch the account jwt claims from the base url, appending the // account name onto the end. func (ur *URLAccResolver) Fetch(name string) (string, error) { url := ur.url + name resp, err := ur.c.Get(url) if err != nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, err) } else if resp == nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: no response", url) } else if resp.StatusCode != http.StatusOK { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, resp.Status) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return _EMPTY_, err } return string(body), nil } // Resolver based on nats for synchronization and backing directory for storage. type DirAccResolver struct { *DirJWTStore *Server syncInterval time.Duration } func (dr *DirAccResolver) IsTrackingUpdate() bool { return true } func (dr *DirAccResolver) Reload() error { return dr.DirJWTStore.Reload() } func respondToUpdate(s *Server, respSubj string, acc string, message string, err error) { if err == nil { if acc == "" { s.Debugf("%s", message) } else { s.Debugf("%s - %s", message, acc) } } else { if acc == "" { s.Errorf("%s - %s", message, err) } else { s.Errorf("%s - %s - %s", message, acc, err) } } if respSubj == "" { return } server := &ServerInfo{} response := map[string]interface{}{"server": server} m := map[string]interface{}{} if acc != "" { m["account"] = acc } if err == nil { m["code"] = http.StatusOK m["message"] = message response["data"] = m } else { m["code"] = http.StatusInternalServerError m["description"] = fmt.Sprintf("%s - %v", message, err) response["error"] = m } s.sendInternalMsgLocked(respSubj, _EMPTY_, server, response) } func handleListRequest(store *DirJWTStore, s *Server, reply string) { if reply == "" { return } accIds := make([]string, 0, 1024) if err := store.PackWalk(1, func(partialPackMsg string) { if tk := strings.Split(partialPackMsg, "|"); len(tk) == 2 { accIds = append(accIds, tk[0]) } }); err != nil { // let them timeout s.Errorf("list request error: %v", err) } else { s.Debugf("list request responded with %d account ids", len(accIds)) server := &ServerInfo{} response := map[string]interface{}{"server": server, "data": accIds} s.sendInternalMsgLocked(reply, _EMPTY_, server, response) } } func handleDeleteRequest(store *DirJWTStore, s *Server, msg []byte, reply string) { var accIds []interface{} var subj, sysAccName string if sysAcc := s.SystemAccount(); sysAcc != nil { sysAccName = sysAcc.GetName() } // TODO Can allow keys (issuer) to delete accounts they issued and operator key to delete all accounts. 
// For now only operator is allowed to delete gk, err := jwt.DecodeGeneric(string(msg)) if err == nil { subj = gk.Subject if store.deleteType == NoDelete { err = fmt.Errorf("delete must be enabled in server config") } else if subj != gk.Issuer { err = fmt.Errorf("not self signed") } else if !s.isTrustedIssuer(gk.Issuer) { err = fmt.Errorf("not trusted") } else if store.operator != gk.Issuer { err = fmt.Errorf("needs to be the operator operator") } else if list, ok := gk.Data["accounts"]; !ok { err = fmt.Errorf("malformed request") } else if accIds, ok = list.([]interface{}); !ok { err = fmt.Errorf("malformed request") } else { for _, entry := range accIds { if acc, ok := entry.(string); !ok || acc == "" || !nkeys.IsValidPublicAccountKey(acc) { err = fmt.Errorf("malformed request") break } else if acc == sysAccName { err = fmt.Errorf("not allowed to delete system account") break } } } } if err != nil { respondToUpdate(s, reply, "", fmt.Sprintf("delete accounts request by %s failed", subj), err) return } errs := []string{} passCnt := 0 for _, acc := range accIds { if err := store.delete(acc.(string)); err != nil { errs = append(errs, err.Error()) } else { passCnt++ } } if len(errs) == 0 { respondToUpdate(s, reply, "", fmt.Sprintf("deleted %d accounts", passCnt), nil) } else { respondToUpdate(s, reply, "", fmt.Sprintf("deleted %d accounts, failed for %d", passCnt, len(errs)), errors.New(strings.Join(errs, "<\n"))) } } func getOperator(s *Server) (string, error) { var op string if opts := s.getOpts(); opts != nil && len(opts.TrustedOperators) > 0 { op = opts.TrustedOperators[0].Subject } if op == "" { return "", fmt.Errorf("no operator found") } return op, nil } func (dr *DirAccResolver) Start(s *Server) error { op, err := getOperator(s) if err != nil { return err } dr.Lock() defer dr.Unlock() dr.Server = s dr.operator = op dr.DirJWTStore.changed = func(pubKey string) { if v, ok := s.accounts.Load(pubKey); !ok { } else if jwt, err := dr.LoadAcc(pubKey); err != nil { s.Errorf("update got error on load: %v", err) } else if err := s.updateAccountWithClaimJWT(v.(*Account), jwt); err != nil { s.Errorf("update resulted in error %v", err) } } packRespIb := s.newRespInbox() for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { // subscribe to account jwt update requests if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, subj, resp string, msg []byte) { pubKey := "" tk := strings.Split(subj, tsep) if len(tk) == accUpdateTokensNew { pubKey = tk[accReqAccIndex] } else if len(tk) == accUpdateTokensOld { pubKey = tk[accUpdateAccIdxOld] } else { s.Debugf("jwt update skipped due to bad subject %q", subj) return } if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else if claim.Subject != pubKey { err := errors.New("subject does not match jwt content") respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else if err := dr.save(pubKey, string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else { respondToUpdate(s, resp, pubKey, "jwt updated", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } } if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, subj, resp string, msg []byte) { if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update resulted in error", err) } else if err := 
dr.save(claim.Subject, string(msg)); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt update resulted in error", err) } else { respondToUpdate(s, resp, claim.Subject, "jwt updated", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } // respond to lookups with our version if _, err := s.sysSubscribe(fmt.Sprintf(accLookupReqSubj, "*"), func(_ *subscription, _ *client, subj, reply string, msg []byte) { if reply == "" { return } tk := strings.Split(subj, tsep) if len(tk) != accLookupReqTokens { return } if theJWT, err := dr.DirJWTStore.LoadAcc(tk[accReqAccIndex]); err != nil { s.Errorf("Merging resulted in error: %v", err) } else { s.sendInternalMsgLocked(reply, "", nil, []byte(theJWT)) } }); err != nil { return fmt.Errorf("error setting up lookup request handling: %v", err) } // respond to pack requests with one or more pack messages // an empty message signifies the end of the response responder if _, err := s.sysSubscribeQ(accPackReqSubj, "responder", func(_ *subscription, _ *client, _, reply string, theirHash []byte) { if reply == "" { return } ourHash := dr.DirJWTStore.Hash() if bytes.Equal(theirHash, ourHash[:]) { s.sendInternalMsgLocked(reply, "", nil, []byte{}) s.Debugf("pack request matches hash %x", ourHash[:]) } else if err := dr.DirJWTStore.PackWalk(1, func(partialPackMsg string) { s.sendInternalMsgLocked(reply, "", nil, []byte(partialPackMsg)) }); err != nil { // let them timeout s.Errorf("pack request error: %v", err) } else { s.Debugf("pack request hash %x - finished responding with hash %x", theirHash, ourHash) s.sendInternalMsgLocked(reply, "", nil, []byte{}) } }); err != nil { return fmt.Errorf("error setting up pack request handling: %v", err) } // respond to list requests with one message containing all account ids if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _, reply string, _ []byte) { handleListRequest(dr.DirJWTStore, s, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _, reply string, msg []byte) { handleDeleteRequest(dr.DirJWTStore, s, msg, reply) }); err != nil { return fmt.Errorf("error setting up delete request handling: %v", err) } // embed pack responses into store if _, err := s.sysSubscribe(packRespIb, func(_ *subscription, _ *client, _, _ string, msg []byte) { hash := dr.DirJWTStore.Hash() if len(msg) == 0 { // end of response stream s.Debugf("Merging Finished and resulting in: %x", dr.DirJWTStore.Hash()) return } else if err := dr.DirJWTStore.Merge(string(msg)); err != nil { s.Errorf("Merging resulted in error: %v", err) } else { s.Debugf("Merging succeeded and changed %x to %x", hash, dr.DirJWTStore.Hash()) } }); err != nil { return fmt.Errorf("error setting up pack response handling: %v", err) } // periodically send out pack message quit := s.quitCh s.startGoRoutine(func() { defer s.grWG.Done() ticker := time.NewTicker(dr.syncInterval) for { select { case <-quit: ticker.Stop() return case <-ticker.C: } ourHash := dr.DirJWTStore.Hash() s.Debugf("Checking store state: %x", ourHash) s.sendInternalMsgLocked(accPackReqSubj, packRespIb, nil, ourHash[:]) } }) s.Noticef("Managing all jwt in exclusive directory %s", dr.directory) return nil } func (dr *DirAccResolver) Fetch(name string) (string, error) { if theJWT, err := dr.LoadAcc(name); theJWT != "" { return theJWT, nil } else { dr.Lock() srv := dr.Server dr.Unlock() if srv == nil { return "", err } return 
srv.fetch(dr, name) // lookup from other server } } func (dr *DirAccResolver) Store(name, jwt string) error { return dr.saveIfNewer(name, jwt) } func NewDirAccResolver(path string, limit int64, syncInterval time.Duration, delete bool) (*DirAccResolver, error) { if limit == 0 { limit = math.MaxInt64 } if syncInterval <= 0 { syncInterval = time.Minute } deleteType := NoDelete if delete { deleteType = RenameDeleted } store, err := NewExpiringDirJWTStore(path, false, true, deleteType, 0, limit, false, 0, nil) if err != nil { return nil, err } return &DirAccResolver{store, nil, syncInterval}, nil } // Caching resolver using nats for lookups and making use of a directory for storage type CacheDirAccResolver struct { DirAccResolver ttl time.Duration } func (s *Server) fetch(res AccountResolver, name string) (string, error) { if s == nil { return "", ErrNoAccountResolver } respC := make(chan []byte, 1) accountLookupRequest := fmt.Sprintf(accLookupReqSubj, name) s.mu.Lock() if s.sys == nil || s.sys.replies == nil { s.mu.Unlock() return "", fmt.Errorf("eventing shut down") } replySubj := s.newRespInbox() replies := s.sys.replies // Store our handler. replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) { clone := make([]byte, len(msg)) copy(clone, msg) s.mu.Lock() if _, ok := replies[replySubj]; ok { select { case respC <- clone: // only use first response and only if there is still interest default: } } s.mu.Unlock() } s.sendInternalMsg(accountLookupRequest, replySubj, nil, []byte{}) quit := s.quitCh s.mu.Unlock() var err error var theJWT string select { case <-quit: err = errors.New("fetching jwt failed due to shutdown") case <-time.After(fetchTimeout): err = errors.New("fetching jwt timed out") case m := <-respC: if err = res.Store(name, string(m)); err == nil { theJWT = string(m) } } s.mu.Lock() delete(replies, replySubj) s.mu.Unlock() close(respC) return theJWT, err } func NewCacheDirAccResolver(path string, limit int64, ttl time.Duration, _ ...dirJWTStoreOption) (*CacheDirAccResolver, error) { if limit <= 0 { limit = 1_000 } store, err := NewExpiringDirJWTStore(path, false, true, HardDelete, 0, limit, true, ttl, nil) if err != nil { return nil, err } return &CacheDirAccResolver{DirAccResolver{store, nil, 0}, ttl}, nil } func (dr *CacheDirAccResolver) Start(s *Server) error { op, err := getOperator(s) if err != nil { return err } dr.Lock() defer dr.Unlock() dr.Server = s dr.operator = op dr.DirJWTStore.changed = func(pubKey string) { if v, ok := s.accounts.Load(pubKey); !ok { } else if jwt, err := dr.LoadAcc(pubKey); err != nil { s.Errorf("update got error on load: %v", err) } else if err := s.updateAccountWithClaimJWT(v.(*Account), jwt); err != nil { s.Errorf("update resulted in error %v", err) } } for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { // subscribe to account jwt update requests if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, subj, resp string, msg []byte) { pubKey := "" tk := strings.Split(subj, tsep) if len(tk) == accUpdateTokensNew { pubKey = tk[accReqAccIndex] } else if len(tk) == accUpdateTokensOld { pubKey = tk[accUpdateAccIdxOld] } else { s.Debugf("jwt update cache skipped due to bad subject %q", subj) return } if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err) } else if claim.Subject != pubKey { err := errors.New("subject does not match jwt content") respondToUpdate(s, resp, 
pubKey, "jwt update cache resulted in error", err) } else if _, ok := s.accounts.Load(pubKey); !ok { respondToUpdate(s, resp, pubKey, "jwt update cache skipped", nil) } else if err := dr.save(pubKey, string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err) } else { respondToUpdate(s, resp, pubKey, "jwt updated cache", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } } if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, subj, resp string, msg []byte) { if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update cache resulted in error", err) } else if _, ok := s.accounts.Load(claim.Subject); !ok { respondToUpdate(s, resp, claim.Subject, "jwt update cache skipped", nil) } else if err := dr.save(claim.Subject, string(msg)); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt update cache resulted in error", err) } else { respondToUpdate(s, resp, claim.Subject, "jwt updated cache", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } // respond to list requests with one message containing all account ids if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _, reply string, _ []byte) { handleListRequest(dr.DirJWTStore, s, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _, reply string, msg []byte) { handleDeleteRequest(dr.DirJWTStore, s, msg, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } s.Noticef("Managing some jwt in exclusive directory %s", dr.directory) return nil } func (dr *CacheDirAccResolver) Reload() error { return dr.DirAccResolver.Reload() } // Transforms for arbitrarily mapping subjects from one to another for maps, tees and filters. // These can also be used for proper mapping on wildcard exports/imports. // These will be grouped and caching and locking are assumed to be in the upper layers. type transform struct { src, dest string dtoks []string stoks []string dtpi []int8 } // Helper to pull raw place holder index. Returns -1 if not a place holder. func placeHolderIndex(token string) int { if len(token) > 1 && token[0] == '$' { var tp int if n, err := fmt.Sscanf(token, "$%d", &tp); err == nil && n == 1 { return tp } } return -1 } // newTransform will create a new transform checking the src and dest subjects for accuracy. func newTransform(src, dest string) (*transform, error) { // Both entries need to be valid subjects. sv, stokens, npwcs, hasFwc := subjectInfo(src) dv, dtokens, dnpwcs, dHasFwc := subjectInfo(dest) // Make sure both are valid, match fwc if present and there are no pwcs in the dest subject. if !sv || !dv || dnpwcs > 0 || hasFwc != dHasFwc { return nil, ErrBadSubject } var dtpi []int8 // If the src has partial wildcards then the dest needs to have the token place markers. if npwcs > 0 || hasFwc { // We need to count to make sure that the dest has token holders for the pwcs. sti := make(map[int]int) for i, token := range stokens { if len(token) == 1 && token[0] == pwc { sti[len(sti)+1] = i } } nphs := 0 for _, token := range dtokens { tp := placeHolderIndex(token) if tp >= 0 { if tp > npwcs { return nil, ErrBadSubject } nphs++ // Now build up our runtime mapping from dest to source tokens. 
dtpi = append(dtpi, int8(sti[tp])) } else { dtpi = append(dtpi, -1) } } if nphs != npwcs { return nil, ErrBadSubject } } return &transform{src: src, dest: dest, dtoks: dtokens, stoks: stokens, dtpi: dtpi}, nil } // match will take a literal published subject that is associated with a client and will match and transform // the subject if possible. // TODO(dlc) - We could add in client here to allow for things like foo -> foo.$ACCOUNT func (tr *transform) match(subject string) (string, error) { // Tokenize the subject. This should always be a literal subject. tsa := [32]string{} tts := tsa[:0] start := 0 for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) if !isValidLiteralSubject(tts) { return "", ErrBadSubject } if isSubsetMatch(tts, tr.src) { return tr.transform(tts) } return "", ErrNoTransforms } // Do not need to match, just transform. func (tr *transform) transformSubject(subject string) (string, error) { // Tokenize the subject. tsa := [32]string{} tts := tsa[:0] start := 0 for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) return tr.transform(tts) } // Do a transform on the subject to the dest subject. func (tr *transform) transform(tokens []string) (string, error) { if len(tr.dtpi) == 0 { return tr.dest, nil } var b strings.Builder var token string // We need to walk destination tokens and create the mapped subject pulling tokens from src. // This is slow and that is ok, transforms should have caching layer in front for mapping transforms // and export/import semantics with streams and services. li := len(tr.dtpi) - 1 for i, index := range tr.dtpi { // <0 means use destination token. if index < 0 { token = tr.dtoks[i] // Break if fwc if len(token) == 1 && token[0] == fwc { break } } else { // >= 0 means use source map index to figure out which source token to pull. token = tokens[index] } b.WriteString(token) if i < li { b.WriteByte(btsep) } } // We may have more source tokens available. This happens with ">". if tr.dtoks[len(tr.dtoks)-1] == ">" { for sli, i := len(tokens)-1, len(tr.stoks)-1; i < len(tokens); i++ { b.WriteString(tokens[i]) if i < sli { b.WriteByte(btsep) } } } return b.String(), nil } // Reverse a transform. func (tr *transform) reverse() *transform { if len(tr.dtpi) == 0 { rtr, _ := newTransform(tr.dest, tr.src) return rtr } // If we are here we need to dynamically get the correct reverse // of this transform. nsrc, phs := transformUntokenize(tr.dest) var nda []string for _, token := range tr.stoks { if token == "*" { if len(phs) == 0 { // TODO(dlc) - Should not happen return nil } nda = append(nda, phs[0]) phs = phs[1:] } else { nda = append(nda, token) } } ndest := strings.Join(nda, tsep) rtr, _ := newTransform(nsrc, ndest) return rtr }
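A note on the transform code that closes this file: the comments describe how "$N" place holders in the destination subject pull the N-th "*" wildcard token out of the source subject. Below is a minimal standalone sketch of that token mapping; it re-implements the idea with plain string handling and made-up subjects, and is not the server's unexported transform type.

package main

import (
	"fmt"
	"strings"
)

// mapSubject applies a wildcard-to-placeholder mapping in the spirit of the
// transform type above: each "*" token in src is numbered left to right, and
// each "$N" token in dest pulls the matching token from the published subject.
// Illustrative sketch only; it assumes subject already matches the src pattern.
func mapSubject(src, dest, subject string) string {
	srcToks := strings.Split(src, ".")
	subToks := strings.Split(subject, ".")
	// Record which subject token each "*" position refers to.
	var wildcards []string
	for i, t := range srcToks {
		if t == "*" {
			wildcards = append(wildcards, subToks[i])
		}
	}
	var out []string
	for _, t := range strings.Split(dest, ".") {
		var n int
		if _, err := fmt.Sscanf(t, "$%d", &n); err == nil && n >= 1 && n <= len(wildcards) {
			out = append(out, wildcards[n-1])
		} else {
			out = append(out, t)
		}
	}
	return strings.Join(out, ".")
}

func main() {
	// "orders.eu.new" published against src "orders.*.*" and dest "audit.$2.$1"
	// becomes "audit.new.eu": the second wildcard token moves first.
	fmt.Println(mapSubject("orders.*.*", "audit.$2.$1", "orders.eu.new"))
}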
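Earlier in the same file, Server.fetch resolves an account JWT by registering a one-shot reply handler, publishing the lookup request, and then selecting on shutdown, timeout, or the first response. Here is a tiny standalone sketch of that select pattern; the channel names and timeout value are illustrative, not the server's.

package main

import (
	"errors"
	"fmt"
	"time"
)

// requestWithTimeout waits for the first reply on respC, giving up after the
// timeout or when quit is closed. This mirrors the select in Server.fetch.
func requestWithTimeout(respC <-chan []byte, quit <-chan struct{}, timeout time.Duration) ([]byte, error) {
	select {
	case <-quit:
		return nil, errors.New("fetching jwt failed due to shutdown")
	case <-time.After(timeout):
		return nil, errors.New("fetching jwt timed out")
	case m := <-respC:
		return m, nil
	}
}

func main() {
	respC := make(chan []byte, 1)
	quit := make(chan struct{})
	// Simulate a responder delivering the first (and only) reply.
	go func() { respC <- []byte("example-jwt") }()
	m, err := requestWithTimeout(respC, quit, 2*time.Second)
	fmt.Println(string(m), err)
}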
1
12,006
Change the name of the function to match its functionality? Maybe account IsIssuing?

nats-io-nats-server
go
@@ -84,6 +84,19 @@ class _Frame(object): """ return _spark_col_apply(self, F.abs) + def groupby(self, by): + from databricks.koalas.groupby import GroupBy + from databricks.koalas.series import Series + if isinstance(by, str): + by = [by] + elif isinstance(by, Series): + by = [by] + else: + by = list(by) + if len(by) == 0: + raise ValueError('No group keys passed!') + return GroupBy(self, by=by) + def compute(self): """Alias of `toPandas()` to mimic dask for easily porting tests.""" return self.toPandas()
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A base class to be monkey-patched to DataFrame/Column to behave similar to pandas DataFrame/Series. """ import pandas as pd from pyspark.sql import functions as F from databricks.koalas.dask.utils import derived_from max_display_count = 1000 class _Frame(object): """ The base class for both dataframes and series. """ def to_numpy(self): """ A NumPy ndarray representing the values in this DataFrame :return: numpy.ndarray Numpy representation of DataFrame .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. """ return self.toPandas().values @derived_from(pd.DataFrame, ua_args=['axis', 'skipna', 'level', 'numeric_only']) def mean(self): return self._reduce_for_stat_function(F.mean) @derived_from(pd.DataFrame, ua_args=['axis', 'skipna', 'level', 'numeric_only', 'min_count']) def sum(self): return self._reduce_for_stat_function(F.sum) @derived_from(pd.DataFrame, ua_args=['axis', 'skipna', 'level', 'numeric_only']) def skew(self): return self._reduce_for_stat_function(F.skewness) @derived_from(pd.DataFrame, ua_args=['axis', 'skipna', 'level', 'numeric_only']) def kurtosis(self): return self._reduce_for_stat_function(F.kurtosis) kurt = kurtosis @derived_from(pd.DataFrame, ua_args=['axis', 'skipna', 'level', 'numeric_only']) def min(self): return self._reduce_for_stat_function(F.min) @derived_from(pd.DataFrame, ua_args=['axis', 'skipna', 'level', 'numeric_only']) def max(self): return self._reduce_for_stat_function(F.max) @derived_from(pd.DataFrame, ua_args=['axis', 'skipna', 'level', 'ddof', 'numeric_only']) def std(self): return self._reduce_for_stat_function(F.stddev) @derived_from(pd.DataFrame, ua_args=['axis', 'skipna', 'level', 'ddof', 'numeric_only']) def var(self): return self._reduce_for_stat_function(F.variance) @derived_from(pd.DataFrame) def abs(self): """ Return a Series/DataFrame with absolute numeric value of each element. :return: :class:`Series` or :class:`DataFrame` with the absolute value of each element. """ return _spark_col_apply(self, F.abs) def compute(self): """Alias of `toPandas()` to mimic dask for easily porting tests.""" return self.toPandas() def _spark_col_apply(kdf_or_ks, sfun): """ Performs a function to all cells on a dataframe, the function being a known sql function. """ from databricks.koalas.frame import DataFrame from databricks.koalas.series import Series if isinstance(kdf_or_ks, Series): ks = kdf_or_ks return Series(sfun(kdf_or_ks._scol), ks._kdf, ks._index_info) assert isinstance(kdf_or_ks, DataFrame) kdf = kdf_or_ks sdf = kdf._sdf sdf = sdf.select([sfun(sdf[col]).alias(col) for col in kdf.columns]) return DataFrame(sdf)
1
8,598
For later: add a docstring (can just copy-paste Pandas').
databricks-koalas
py
@@ -4,10 +4,10 @@ package cli import ( - "fmt" "testing" climocks "github.com/aws/amazon-ecs-cli-v2/internal/pkg/cli/mocks" + archerMocks "github.com/aws/amazon-ecs-cli-v2/mocks" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" )
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "fmt" "testing" climocks "github.com/aws/amazon-ecs-cli-v2/internal/pkg/cli/mocks" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) const githubRepo = "https://github.com/badGoose/chaOS" const githubToken = "hunter2" func TestInitPipelineOpts_Ask(t *testing.T) { testCases := map[string]struct { inEnvironments []string inGitHubRepo string inGitHubAccessToken string inProjectEnvs []string mockPrompt func(m *climocks.Mockprompter) expectedGitHubRepo string expectedGitHubAccessToken string expectedEnvironments []string expectedError error }{ "prompts for all input": { inEnvironments: []string{}, inGitHubRepo: "", inGitHubAccessToken: "", inProjectEnvs: []string{"test", "prod"}, mockPrompt: func(m *climocks.Mockprompter) { m.EXPECT().Confirm(pipelineAddEnvPrompt, gomock.Any()).Return(true, nil).Times(3) m.EXPECT().SelectOne(pipelineSelectEnvPrompt, gomock.Any(), []string{"test", "prod"}).Return("test", nil).Times(1) m.EXPECT().SelectOne(pipelineSelectEnvPrompt, gomock.Any(), []string{"prod"}).Return("prod", nil).Times(1) m.EXPECT().Get(gomock.Eq(pipelineEnterGitHubRepoPrompt), gomock.Any(), gomock.Any()).Return(githubRepo, nil).Times(1) m.EXPECT().GetSecret(gomock.Eq("Please enter your GitHub Personal Access Token for your repository: https://github.com/badGoose/chaOS"), gomock.Any()).Return(githubToken, nil).Times(1) }, expectedGitHubRepo: githubRepo, expectedGitHubAccessToken: githubToken, expectedEnvironments: []string{"test", "prod"}, expectedError: nil, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { // GIVEN ctrl := gomock.NewController(t) defer ctrl.Finish() mockPrompt := climocks.NewMockprompter(ctrl) opts := &InitPipelineOpts{ Environments: tc.inEnvironments, GitHubRepo: tc.inGitHubRepo, GitHubAccessToken: tc.inGitHubAccessToken, prompt: mockPrompt, projectEnvs: tc.inProjectEnvs, } tc.mockPrompt(mockPrompt) // WHEN fmt.Printf("BEFORE %+v\n", opts) err := opts.Ask() fmt.Printf("AFTER %+v\n", opts) // THEN if tc.expectedError != nil { require.Equal(t, tc.expectedError, err) } else { require.Nil(t, err) require.Equal(t, tc.expectedGitHubRepo, opts.GitHubRepo) require.Equal(t, tc.expectedGitHubRepo, opts.GitHubRepo) require.Equal(t, tc.expectedGitHubAccessToken, opts.GitHubAccessToken) require.ElementsMatch(t, tc.expectedEnvironments, opts.Environments) } }) } }
1
11,005
nit: @sonofachamp pointed out to me that the idiomatic way is "archermocks" (lowercase for package names)
aws-copilot-cli
go
@@ -31,8 +31,7 @@ import ( ) var ( - errRouterNotSet = errors.New("router not set") - errRouterHasNoProcedures = errors.New("router has no procedures") + errRouterNotSet = errors.New("router not set") _ transport.Inbound = (*Inbound)(nil) )
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package grpc import ( "errors" "net" "sync" "go.uber.org/yarpc/api/transport" "go.uber.org/yarpc/pkg/lifecycle" "google.golang.org/grpc" ) var ( errRouterNotSet = errors.New("router not set") errRouterHasNoProcedures = errors.New("router has no procedures") _ transport.Inbound = (*Inbound)(nil) ) // Inbound is a grpc transport.Inbound. type Inbound struct { once *lifecycle.Once lock sync.Mutex t *Transport listener net.Listener inboundOptions *inboundOptions router transport.Router server *grpc.Server } func newInbound(t *Transport, listener net.Listener, options ...InboundOption) *Inbound { return &Inbound{lifecycle.NewOnce(), sync.Mutex{}, t, listener, newInboundOptions(options), nil, nil} } // Start implements transport.Lifecycle#Start. func (i *Inbound) Start() error { return i.once.Start(i.start) } // Stop implements transport.Lifecycle#Stop. func (i *Inbound) Stop() error { return i.once.Stop(i.stop) } // IsRunning implements transport.Lifecycle#IsRunning. func (i *Inbound) IsRunning() bool { return i.once.IsRunning() } // SetRouter implements transport.Inbound#SetRouter. func (i *Inbound) SetRouter(router transport.Router) { i.lock.Lock() defer i.lock.Unlock() i.router = router } // Transports implements transport.Inbound#Transports. func (i *Inbound) Transports() []transport.Transport { return []transport.Transport{i.t} } func (i *Inbound) start() error { i.lock.Lock() defer i.lock.Unlock() if i.router == nil { return errRouterNotSet } serviceDescs, err := i.getServiceDescs() if err != nil { return err } server := grpc.NewServer( grpc.CustomCodec(customCodec{}), // TODO: does this actually work for yarpc // this needs a lot of review //grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(i.inboundOptions.getTracer())), // TODO grpc.UnaryInterceptor handles when parameter is nil, but should not rely on this grpc.UnaryInterceptor(i.inboundOptions.getUnaryInterceptor()), ) for _, serviceDesc := range serviceDescs { server.RegisterService(serviceDesc, noopGrpcStruct{}) } go func() { // TODO there should be some mechanism to block here // there is a race because the listener gets set in the grpc // Server implementation and we should be able to block // until Serve initialization is done // // It would be even better if we could do this outside the // lock in i // // TODO Server always returns a non-nil error but should // we do something with some or all errors? 
_ = server.Serve(i.listener) }() i.server = server return nil } func (i *Inbound) stop() error { i.lock.Lock() defer i.lock.Unlock() if i.server != nil { i.server.GracefulStop() } return nil } func (i *Inbound) getServiceDescs() ([]*grpc.ServiceDesc, error) { // TODO: router.Procedures() is not guaranteed to be immutable // https://github.com/yarpc/yarpc-go/issues/825 procedures := i.router.Procedures() if len(procedures) == 0 { return nil, errRouterHasNoProcedures } serviceNameToMethodNames := make(map[string]map[string]bool) for _, procedure := range procedures { serviceName, methodName, err := procedureNameToServiceNameMethodName(procedure.Name) if err != nil { return nil, err } methodNames, ok := serviceNameToMethodNames[serviceName] if !ok { methodNames = make(map[string]bool) serviceNameToMethodNames[serviceName] = methodNames } methodNames[methodName] = true } serviceDescs := make([]*grpc.ServiceDesc, 0, len(serviceNameToMethodNames)) for serviceName, methodNames := range serviceNameToMethodNames { serviceDesc := &grpc.ServiceDesc{ ServiceName: serviceName, HandlerType: (*noopGrpcInterface)(nil), Methods: make([]grpc.MethodDesc, 0, len(methodNames)), } for methodName := range methodNames { serviceDesc.Methods = append(serviceDesc.Methods, grpc.MethodDesc{ MethodName: methodName, Handler: newHandler( serviceName, methodName, i.router, ).handle, }) } serviceDescs = append(serviceDescs, serviceDesc) } return serviceDescs, nil } type noopGrpcInterface interface{} type noopGrpcStruct struct{}
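For context on getServiceDescs above: it buckets the router's procedures by service name and then builds one grpc.ServiceDesc per service. The standalone sketch below shows only that grouping step; the "Service::Method" name format is an assumption made for illustration, since the real splitting is done by procedureNameToServiceNameMethodName, which is not part of this excerpt.

package main

import (
	"fmt"
	"strings"
)

// groupProcedures buckets procedure names of the form "Service::Method" into
// a service -> set-of-methods map, mirroring serviceNameToMethodNames above.
// The "::" separator is an assumption for this sketch only.
func groupProcedures(procedures []string) (map[string]map[string]bool, error) {
	out := make(map[string]map[string]bool)
	for _, p := range procedures {
		parts := strings.SplitN(p, "::", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid procedure name %q", p)
		}
		service, method := parts[0], parts[1]
		if out[service] == nil {
			out[service] = make(map[string]bool)
		}
		out[service][method] = true
	}
	return out, nil
}

func main() {
	m, err := groupProcedures([]string{"Keys::Get", "Keys::Set", "Health::Check"})
	fmt.Println(m, err) // map[Health:map[Check:true] Keys:map[Get:true Set:true]] <nil>
}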
1
14,857
Ugh I'm stupid, can you change this to `yarpc.InternalErrorf`?
yarpc-yarpc-go
go
@@ -42,10 +42,16 @@ import org.apache.iceberg.util.ThreadPools; public class AllDataFilesTable extends BaseMetadataTable { private final TableOperations ops; private final Table table; + private final String name; public AllDataFilesTable(TableOperations ops, Table table) { + this(ops, table, table.name() + ".all_data_files"); + } + + public AllDataFilesTable(TableOperations ops, Table table, String name) { this.ops = ops; this.table = table; + this.name = name; } @Override
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.io.IOException; import java.util.List; import org.apache.iceberg.exceptions.RuntimeIOException; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.expressions.ResidualEvaluator; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.relocated.com.google.common.collect.Iterables; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.util.ParallelIterable; import org.apache.iceberg.util.ThreadPools; /** * A {@link Table} implementation that exposes a table's valid data files as rows. * <p> * A valid data file is one that is readable from any snapshot currently tracked by the table. * <p> * This table may return duplicate rows. */ public class AllDataFilesTable extends BaseMetadataTable { private final TableOperations ops; private final Table table; public AllDataFilesTable(TableOperations ops, Table table) { this.ops = ops; this.table = table; } @Override Table table() { return table; } @Override String metadataTableName() { return "all_data_files"; } @Override public TableScan newScan() { return new AllDataFilesTableScan(ops, table, schema()); } @Override public Schema schema() { Schema schema = new Schema(DataFile.getType(table.spec().partitionType()).fields()); if (table.spec().fields().size() < 1) { // avoid returning an empty struct, which is not always supported. 
instead, drop the partition field (id 102) return TypeUtil.selectNot(schema, Sets.newHashSet(102)); } else { return schema; } } public static class AllDataFilesTableScan extends BaseAllMetadataTableScan { private final Schema fileSchema; AllDataFilesTableScan(TableOperations ops, Table table, Schema fileSchema) { super(ops, table, fileSchema); this.fileSchema = fileSchema; } private AllDataFilesTableScan(TableOperations ops, Table table, Schema schema, Schema fileSchema, TableScanContext context) { super(ops, table, schema, context); this.fileSchema = fileSchema; } @Override protected TableScan newRefinedScan(TableOperations ops, Table table, Schema schema, TableScanContext context) { return new AllDataFilesTableScan(ops, table, schema, fileSchema, context); } @Override public TableScan useSnapshot(long scanSnapshotId) { throw new UnsupportedOperationException("Cannot select snapshot: all_data_files is for all snapshots"); } @Override public TableScan asOfTime(long timestampMillis) { throw new UnsupportedOperationException("Cannot select snapshot: all_data_files is for all snapshots"); } @Override protected long targetSplitSize(TableOperations ops) { return ops.current().propertyAsLong( TableProperties.METADATA_SPLIT_SIZE, TableProperties.METADATA_SPLIT_SIZE_DEFAULT); } @Override protected CloseableIterable<FileScanTask> planFiles( TableOperations ops, Snapshot snapshot, Expression rowFilter, boolean ignoreResiduals, boolean caseSensitive, boolean colStats) { CloseableIterable<ManifestFile> manifests = allDataManifestFiles(ops.current().snapshots()); String schemaString = SchemaParser.toJson(schema()); String specString = PartitionSpecParser.toJson(PartitionSpec.unpartitioned()); Expression filter = ignoreResiduals ? Expressions.alwaysTrue() : rowFilter; ResidualEvaluator residuals = ResidualEvaluator.unpartitioned(filter); // Data tasks produce the table schema, not the projection schema and projection is done by processing engines. // This data task needs to use the table schema, which may not include a partition schema to avoid having an // empty struct in the schema for unpartitioned tables. Some engines, like Spark, can't handle empty structs in // all cases. return CloseableIterable.transform(manifests, manifest -> new DataFilesTable.ManifestReadTask(ops.io(), manifest, fileSchema, schemaString, specString, residuals)); } } private static CloseableIterable<ManifestFile> allDataManifestFiles(List<Snapshot> snapshots) { try (CloseableIterable<ManifestFile> iterable = new ParallelIterable<>( Iterables.transform(snapshots, Snapshot::dataManifests), ThreadPools.getWorkerPool())) { return CloseableIterable.withNoopClose(Sets.newHashSet(iterable)); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to close parallel iterable"); } } }
1
25,827
We instantiate some metadata tables in tests, so I kept the old constructor as well. It also won't harm anyone who is using it directly.
apache-iceberg
java
@@ -4424,11 +4424,15 @@ function _execPopulateQuery(mod, match, select, assignmentOpts, callback) { limit: mod.options.limit, perDocumentLimit: mod.options.perDocumentLimit }, mod.options.options); + if (mod.count) { delete queryOptions.skip; } - if (queryOptions.perDocumentLimit != null) { + if (queryOptions.perDocumentLimit != null && queryOptions.limit != null) { + throw new Error('Can not use `limit` and `perDocumentLimit` at the same time. Model `' + mod.model.modelName + '`.' ); + } + else if (queryOptions.perDocumentLimit != null) { queryOptions.limit = queryOptions.perDocumentLimit; delete queryOptions.perDocumentLimit; } else if (queryOptions.limit != null) {
1
'use strict'; /*! * Module dependencies. */ const Aggregate = require('./aggregate'); const ChangeStream = require('./cursor/ChangeStream'); const Document = require('./document'); const DocumentNotFoundError = require('./error/notFound'); const DivergentArrayError = require('./error/divergentArray'); const EventEmitter = require('events').EventEmitter; const MongooseBuffer = require('./types/buffer'); const MongooseError = require('./error/index'); const OverwriteModelError = require('./error/overwriteModel'); const PromiseProvider = require('./promise_provider'); const Query = require('./query'); const RemoveOptions = require('./options/removeOptions'); const SaveOptions = require('./options/saveOptions'); const Schema = require('./schema'); const ServerSelectionError = require('./error/serverSelection'); const SkipPopulateValue = require('./helpers/populate/SkipPopulateValue'); const ValidationError = require('./error/validation'); const VersionError = require('./error/version'); const ParallelSaveError = require('./error/parallelSave'); const applyQueryMiddleware = require('./helpers/query/applyQueryMiddleware'); const applyHooks = require('./helpers/model/applyHooks'); const applyMethods = require('./helpers/model/applyMethods'); const applyStaticHooks = require('./helpers/model/applyStaticHooks'); const applyStatics = require('./helpers/model/applyStatics'); const applyWriteConcern = require('./helpers/schema/applyWriteConcern'); const assignVals = require('./helpers/populate/assignVals'); const castBulkWrite = require('./helpers/model/castBulkWrite'); const discriminator = require('./helpers/model/discriminator'); const each = require('./helpers/each'); const getDiscriminatorByValue = require('./helpers/discriminator/getDiscriminatorByValue'); const getModelsMapForPopulate = require('./helpers/populate/getModelsMapForPopulate'); const immediate = require('./helpers/immediate'); const internalToObjectOptions = require('./options').internalToObjectOptions; const isDefaultIdIndex = require('./helpers/indexes/isDefaultIdIndex'); const isPathSelectedInclusive = require('./helpers/projection/isPathSelectedInclusive'); const get = require('./helpers/get'); const leanPopulateMap = require('./helpers/populate/leanPopulateMap'); const modifiedPaths = require('./helpers/update/modifiedPaths'); const mpath = require('mpath'); const parallelLimit = require('./helpers/parallelLimit'); const promiseOrCallback = require('./helpers/promiseOrCallback'); const parseProjection = require('./helpers/projection/parseProjection'); const util = require('util'); const utils = require('./utils'); const VERSION_WHERE = 1; const VERSION_INC = 2; const VERSION_ALL = VERSION_WHERE | VERSION_INC; const arrayAtomicsSymbol = require('./helpers/symbols').arrayAtomicsSymbol; const modelCollectionSymbol = Symbol('mongoose#Model#collection'); const modelDbSymbol = Symbol('mongoose#Model#db'); const modelSymbol = require('./helpers/symbols').modelSymbol; const subclassedSymbol = Symbol('mongoose#Model#subclassed'); const saveToObjectOptions = Object.assign({}, internalToObjectOptions, { bson: true }); /** * A Model is a class that's your primary tool for interacting with MongoDB. * An instance of a Model is called a [Document](./api.html#Document). * * In Mongoose, the term "Model" refers to subclasses of the `mongoose.Model` * class. You should not use the `mongoose.Model` class directly. 
The * [`mongoose.model()`](./api.html#mongoose_Mongoose-model) and * [`connection.model()`](./api.html#connection_Connection-model) functions * create subclasses of `mongoose.Model` as shown below. * * ####Example: * * // `UserModel` is a "Model", a subclass of `mongoose.Model`. * const UserModel = mongoose.model('User', new Schema({ name: String })); * * // You can use a Model to create new documents using `new`: * const userDoc = new UserModel({ name: 'Foo' }); * await userDoc.save(); * * // You also use a model to create queries: * const userFromDb = await UserModel.findOne({ name: 'Foo' }); * * @param {Object} doc values for initial set * @param [fields] optional object containing the fields that were selected in the query which returned this document. You do **not** need to set this parameter to ensure Mongoose handles your [query projection](./api.html#query_Query-select). * @inherits Document http://mongoosejs.com/docs/api.html#document-js * @event `error`: If listening to this event, 'error' is emitted when a document was saved without passing a callback and an `error` occurred. If not listening, the event bubbles to the connection used to create this Model. * @event `index`: Emitted after `Model#ensureIndexes` completes. If an error occurred it is passed with the event. * @event `index-single-start`: Emitted when an individual index starts within `Model#ensureIndexes`. The fields and options being used to build the index are also passed with the event. * @event `index-single-done`: Emitted when an individual index finishes within `Model#ensureIndexes`. If an error occurred it is passed with the event. The fields, options, and index name are also passed. * @api public */ function Model(doc, fields, skipId) { if (fields instanceof Schema) { throw new TypeError('2nd argument to `Model` must be a POJO or string, ' + '**not** a schema. Make sure you\'re calling `mongoose.model()`, not ' + '`mongoose.Model()`.'); } Document.call(this, doc, fields, skipId); } /*! * Inherits from Document. * * All Model.prototype features are available on * top level (non-sub) documents. */ Model.prototype.__proto__ = Document.prototype; Model.prototype.$isMongooseModelPrototype = true; /** * Connection the model uses. * * @api public * @property db * @memberOf Model * @instance */ Model.prototype.db; /** * Collection the model uses. * * This property is read-only. Modifying this property is a no-op. * * @api public * @property collection * @memberOf Model * @instance */ Model.prototype.collection; /** * The name of the model * * @api public * @property modelName * @memberOf Model * @instance */ Model.prototype.modelName; /** * Additional properties to attach to the query when calling `save()` and * `isNew` is false. * * @api public * @property $where * @memberOf Model * @instance */ Model.prototype.$where; /** * If this is a discriminator model, `baseModelName` is the name of * the base model. * * @api public * @property baseModelName * @memberOf Model * @instance */ Model.prototype.baseModelName; /** * Event emitter that reports any errors that occurred. Useful for global error * handling. * * ####Example: * * MyModel.events.on('error', err => console.log(err.message)); * * // Prints a 'CastError' because of the above handler * await MyModel.findOne({ _id: 'notanid' }).catch(noop); * * @api public * @fires error whenever any query or model function errors * @memberOf Model * @static events */ Model.events; /*! * Compiled middleware for this model. Set in `applyHooks()`. 
* * @api private * @property _middleware * @memberOf Model * @static */ Model._middleware; /*! * ignore */ function _applyCustomWhere(doc, where) { if (doc.$where == null) { return; } const keys = Object.keys(doc.$where); const len = keys.length; for (let i = 0; i < len; ++i) { where[keys[i]] = doc.$where[keys[i]]; } } /*! * ignore */ Model.prototype.$__handleSave = function(options, callback) { const _this = this; let saveOptions = {}; if ('safe' in options) { _handleSafe(options); } applyWriteConcern(this.schema, options); if ('w' in options) { saveOptions.w = options.w; } if ('j' in options) { saveOptions.j = options.j; } if ('wtimeout' in options) { saveOptions.wtimeout = options.wtimeout; } if ('checkKeys' in options) { saveOptions.checkKeys = options.checkKeys; } const session = this.$session(); if (!saveOptions.hasOwnProperty('session')) { saveOptions.session = session; } if (Object.keys(saveOptions).length === 0) { saveOptions = null; } if (this.isNew) { // send entire doc const obj = this.toObject(saveToObjectOptions); if ((obj || {})._id === void 0) { // documents must have an _id else mongoose won't know // what to update later if more changes are made. the user // wouldn't know what _id was generated by mongodb either // nor would the ObjectId generated by mongodb necessarily // match the schema definition. setTimeout(function() { callback(new MongooseError('document must have an _id before saving')); }, 0); return; } this.$__version(true, obj); this[modelCollectionSymbol].insertOne(obj, saveOptions, function(err, ret) { if (err) { _setIsNew(_this, true); callback(err, null); return; } callback(null, ret); }); this.$__reset(); _setIsNew(this, false); // Make it possible to retry the insert this.$__.inserting = true; } else { // Make sure we don't treat it as a new object on error, // since it already exists this.$__.inserting = false; const delta = this.$__delta(); if (delta) { if (delta instanceof MongooseError) { callback(delta); return; } const where = this.$__where(delta[0]); if (where instanceof MongooseError) { callback(where); return; } _applyCustomWhere(this, where); this[modelCollectionSymbol].updateOne(where, delta[1], saveOptions, function(err, ret) { if (err) { callback(err); return; } ret.$where = where; callback(null, ret); }); } else { this.constructor.exists(this.$__where(), saveOptions) .then((documentExists)=>{ if (!documentExists) throw new DocumentNotFoundError(this.$__where(),this.constructor.modelName); this.$__reset(); callback(); }) .catch(callback); return; } _setIsNew(this, false); } }; /*! * ignore */ Model.prototype.$__save = function(options, callback) { this.$__handleSave(options, (error, result) => { const hooks = this.schema.s.hooks; if (error) { return hooks.execPost('save:error', this, [this], { error: error }, (error) => { callback(error, this); }); } // store the modified paths before the document is reset const modifiedPaths = this.modifiedPaths(); this.$__reset(); let numAffected = 0; if (get(options, 'safe.w') !== 0 && get(options, 'w') !== 0) { // Skip checking if write succeeded if writeConcern is set to // unacknowledged writes, because otherwise `numAffected` will always be 0 if (result) { if (Array.isArray(result)) { numAffected = result.length; } else if (result.result && result.result.n !== undefined) { numAffected = result.result.n; } else if (result.result && result.result.nModified !== undefined) { numAffected = result.result.nModified; } else { numAffected = result; } } // was this an update that required a version bump? 
if (this.$__.version && !this.$__.inserting) { const doIncrement = VERSION_INC === (VERSION_INC & this.$__.version); this.$__.version = undefined; const key = this.schema.options.versionKey; const version = this.$__getValue(key) || 0; if (numAffected <= 0) { // the update failed. pass an error back const err = this.$__.$versionError || new VersionError(this, version, modifiedPaths); return callback(err); } // increment version if was successful if (doIncrement) { this.$__setValue(key, version + 1); } } if (result != null && numAffected <= 0) { error = new DocumentNotFoundError(result.$where, this.constructor.modelName, numAffected, result); return hooks.execPost('save:error', this, [this], { error: error }, (error) => { callback(error, this); }); } } this.$__.saving = undefined; this.emit('save', this, numAffected); this.constructor.emit('save', this, numAffected); callback(null, this); }); }; /*! * ignore */ function generateVersionError(doc, modifiedPaths) { const key = doc.schema.options.versionKey; if (!key) { return null; } const version = doc.$__getValue(key) || 0; return new VersionError(doc, version, modifiedPaths); } /** * Saves this document. * * ####Example: * * product.sold = Date.now(); * product = await product.save(); * * If save is successful, the returned promise will fulfill with the document * saved. * * ####Example: * * const newProduct = await product.save(); * newProduct === product; // true * * @param {Object} [options] options optional options * @param {Session} [options.session=null] the [session](https://docs.mongodb.com/manual/reference/server-sessions/) associated with this save operation. If not specified, defaults to the [document's associated session](api.html#document_Document-$session). * @param {Object} [options.safe] (DEPRECATED) overrides [schema's safe option](http://mongoosejs.com//docs/guide.html#safe). Use the `w` option instead. * @param {Boolean} [options.validateBeforeSave] set to false to save without validating. * @param {Number|String} [options.w] set the [write concern](https://docs.mongodb.com/manual/reference/write-concern/#w-option). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern) * @param {Boolean} [options.j] set to true for MongoDB to wait until this `save()` has been [journaled before resolving the returned promise](https://docs.mongodb.com/manual/reference/write-concern/#j-option). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern) * @param {Number} [options.wtimeout] sets a [timeout for the write concern](https://docs.mongodb.com/manual/reference/write-concern/#wtimeout). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern). * @param {Boolean} [options.checkKeys=true] the MongoDB driver prevents you from saving keys that start with '$' or contain '.' by default. Set this option to `false` to skip that check. See [restrictions on field names](https://docs.mongodb.com/manual/reference/limits/#Restrictions-on-Field-Names) * @param {Boolean} [options.timestamps=true] if `false` and [timestamps](./guide.html#timestamps) are enabled, skip timestamps for this `save()`. * @param {Function} [fn] optional callback * @throws {DocumentNotFoundError} if this [save updates an existing document](api.html#document_Document-isNew) but the document doesn't exist in the database. For example, you will get this error if the document is [deleted between when you retrieved the document and when you saved it](documents.html#updating). 
* @return {Promise|undefined} Returns undefined if used with callback or a Promise otherwise. * @api public * @see middleware http://mongoosejs.com/docs/middleware.html */ Model.prototype.save = function(options, fn) { let parallelSave; this.$op = 'save'; if (this.$__.saving) { parallelSave = new ParallelSaveError(this); } else { this.$__.saving = new ParallelSaveError(this); } if (typeof options === 'function') { fn = options; options = undefined; } options = new SaveOptions(options); if (options.hasOwnProperty('session')) { this.$session(options.session); } this.$__.$versionError = generateVersionError(this, this.modifiedPaths()); fn = this.constructor.$handleCallbackError(fn); return promiseOrCallback(fn, cb => { cb = this.constructor.$wrapCallback(cb); if (parallelSave) { this.$__handleReject(parallelSave); return cb(parallelSave); } this.$__.saveOptions = options; this.$__save(options, error => { this.$__.saving = undefined; delete this.$__.saveOptions; delete this.$__.$versionError; this.$op = null; if (error) { this.$__handleReject(error); return cb(error); } cb(null, this); }); }, this.constructor.events); }; /*! * Determines whether versioning should be skipped for the given path * * @param {Document} self * @param {String} path * @return {Boolean} true if versioning should be skipped for the given path */ function shouldSkipVersioning(self, path) { const skipVersioning = self.schema.options.skipVersioning; if (!skipVersioning) return false; // Remove any array indexes from the path path = path.replace(/\.\d+\./, '.'); return skipVersioning[path]; } /*! * Apply the operation to the delta (update) clause as * well as track versioning for our where clause. * * @param {Document} self * @param {Object} where * @param {Object} delta * @param {Object} data * @param {Mixed} val * @param {String} [operation] */ function operand(self, where, delta, data, val, op) { // delta op || (op = '$set'); if (!delta[op]) delta[op] = {}; delta[op][data.path] = val; // disabled versioning? if (self.schema.options.versionKey === false) return; // path excluded from versioning? if (shouldSkipVersioning(self, data.path)) return; // already marked for versioning? if (VERSION_ALL === (VERSION_ALL & self.$__.version)) return; switch (op) { case '$set': case '$unset': case '$pop': case '$pull': case '$pullAll': case '$push': case '$addToSet': break; default: // nothing to do return; } // ensure updates sent with positional notation are // editing the correct array element. // only increment the version if an array position changes. // modifying elements of an array is ok if position does not change. if (op === '$push' || op === '$addToSet' || op === '$pullAll' || op === '$pull') { self.$__.version = VERSION_INC; } else if (/^\$p/.test(op)) { // potentially changing array positions self.increment(); } else if (Array.isArray(val)) { // $set an array self.increment(); } else if (/\.\d+\.|\.\d+$/.test(data.path)) { // now handling $set, $unset // subpath of array self.$__.version = VERSION_WHERE; } } /*! * Compiles an update and where clause for a `val` with _atomics. 
* * @param {Document} self * @param {Object} where * @param {Object} delta * @param {Object} data * @param {Array} value */ function handleAtomics(self, where, delta, data, value) { if (delta.$set && delta.$set[data.path]) { // $set has precedence over other atomics return; } if (typeof value.$__getAtomics === 'function') { value.$__getAtomics().forEach(function(atomic) { const op = atomic[0]; const val = atomic[1]; operand(self, where, delta, data, val, op); }); return; } // legacy support for plugins const atomics = value[arrayAtomicsSymbol]; const ops = Object.keys(atomics); let i = ops.length; let val; let op; if (i === 0) { // $set if (utils.isMongooseObject(value)) { value = value.toObject({depopulate: 1, _isNested: true}); } else if (value.valueOf) { value = value.valueOf(); } return operand(self, where, delta, data, value); } function iter(mem) { return utils.isMongooseObject(mem) ? mem.toObject({depopulate: 1, _isNested: true}) : mem; } while (i--) { op = ops[i]; val = atomics[op]; if (utils.isMongooseObject(val)) { val = val.toObject({depopulate: true, transform: false, _isNested: true}); } else if (Array.isArray(val)) { val = val.map(iter); } else if (val.valueOf) { val = val.valueOf(); } if (op === '$addToSet') { val = {$each: val}; } operand(self, where, delta, data, val, op); } } /** * Produces a special query document of the modified properties used in updates. * * @api private * @method $__delta * @memberOf Model * @instance */ Model.prototype.$__delta = function() { const dirty = this.$__dirty(); if (!dirty.length && VERSION_ALL !== this.$__.version) { return; } const where = {}; const delta = {}; const len = dirty.length; const divergent = []; let d = 0; where._id = this._doc._id; // If `_id` is an object, need to depopulate, but also need to be careful // because `_id` can technically be null (see gh-6406) if (get(where, '_id.$__', null) != null) { where._id = where._id.toObject({ transform: false, depopulate: true }); } for (; d < len; ++d) { const data = dirty[d]; let value = data.value; const match = checkDivergentArray(this, data.path, value); if (match) { divergent.push(match); continue; } const pop = this.populated(data.path, true); if (!pop && this.$__.selected) { // If any array was selected using an $elemMatch projection, we alter the path and where clause // NOTE: MongoDB only supports projected $elemMatch on top level array. 
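// Illustrative sketch (field names are hypothetical): if `comments` was selected with // `{ comments: { $elemMatch: { author: 'a' } } }` and `comments.0.body` was modified, the // block below copies that $elemMatch projection into `where` and rewrites the path to // `comments.$.body`, so the update targets the same projected element.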
const pathSplit = data.path.split('.'); const top = pathSplit[0]; if (this.$__.selected[top] && this.$__.selected[top].$elemMatch) { // If the selected array entry was modified if (pathSplit.length > 1 && pathSplit[1] == 0 && typeof where[top] === 'undefined') { where[top] = this.$__.selected[top]; pathSplit[1] = '$'; data.path = pathSplit.join('.'); } // if the selected array was modified in any other way, throw an error else { divergent.push(data.path); continue; } } } if (divergent.length) continue; if (value === undefined) { operand(this, where, delta, data, 1, '$unset'); } else if (value === null) { operand(this, where, delta, data, null); } else if (value.isMongooseArray && value.$path() && value[arrayAtomicsSymbol]) { // arrays and other custom types (support plugins etc) handleAtomics(this, where, delta, data, value); } else if (value[MongooseBuffer.pathSymbol] && Buffer.isBuffer(value)) { // MongooseBuffer value = value.toObject(); operand(this, where, delta, data, value); } else { value = utils.clone(value, { depopulate: true, transform: false, virtuals: false, getters: false, _isNested: true }); operand(this, where, delta, data, value); } } if (divergent.length) { return new DivergentArrayError(divergent); } if (this.$__.version) { this.$__version(where, delta); } return [where, delta]; }; /*! * Determine if array was populated with some form of filter and is now * being updated in a manner which could overwrite data unintentionally. * * @see https://github.com/Automattic/mongoose/issues/1334 * @param {Document} doc * @param {String} path * @return {String|undefined} */ function checkDivergentArray(doc, path, array) { // see if we populated this path const pop = doc.populated(path, true); if (!pop && doc.$__.selected) { // If any array was selected using an $elemMatch projection, we deny the update. // NOTE: MongoDB only supports projected $elemMatch on top level array. const top = path.split('.')[0]; if (doc.$__.selected[top + '.$']) { return top; } } if (!(pop && array && array.isMongooseArray)) return; // If the array was populated using options that prevented all // documents from being returned (match, skip, limit) or they // deselected the _id field, $pop and $set of the array are // not safe operations. If _id was deselected, we do not know // how to remove elements. $pop will pop off the _id from the end // of the array in the db which is not guaranteed to be the // same as the last element we have here. $set of the entire array // would be similarly destructive as we never received all // elements of the array and potentially would overwrite data. const check = pop.options.match || pop.options.options && utils.object.hasOwnProperty(pop.options.options, 'limit') || // 0 is not permitted pop.options.options && pop.options.options.skip || // 0 is permitted pop.options.select && // deselected _id? (pop.options.select._id === 0 || /\s?-_id\s?/.test(pop.options.select)); if (check) { const atomics = array[arrayAtomicsSymbol]; if (Object.keys(atomics).length === 0 || atomics.$set || atomics.$pop) { return path; } } } /** * Appends versioning to the where and update clauses. * * @api private * @method $__version * @memberOf Model * @instance */ Model.prototype.$__version = function(where, delta) { const key = this.schema.options.versionKey; if (where === true) { // this is an insert if (key) this.$__setValue(key, delta[key] = 0); return; } // updates // only apply versioning if our versionKey was selected. else // there is no way to select the correct version.
we could fail // fast here and force them to include the versionKey but // that's a bit intrusive. can we do this automatically? if (!this.isSelected(key)) { return; } // $push $addToSet don't need the where clause set if (VERSION_WHERE === (VERSION_WHERE & this.$__.version)) { const value = this.$__getValue(key); if (value != null) where[key] = value; } if (VERSION_INC === (VERSION_INC & this.$__.version)) { if (get(delta.$set, key, null) != null) { // Version key is getting set, means we'll increment the doc's version // after a successful save, so we should set the incremented version so // future saves don't fail (gh-5779) ++delta.$set[key]; } else { delta.$inc = delta.$inc || {}; delta.$inc[key] = 1; } } }; /** * Signal that we desire an increment of this document's version. * * ####Example: * * Model.findById(id, function (err, doc) { * doc.increment(); * doc.save(function (err) { .. }) * }) * * @see versionKeys http://mongoosejs.com/docs/guide.html#versionKey * @api public */ Model.prototype.increment = function increment() { this.$__.version = VERSION_ALL; return this; }; /** * Returns a query object * * @api private * @method $__where * @memberOf Model * @instance */ Model.prototype.$__where = function _where(where) { where || (where = {}); if (!where._id) { where._id = this._doc._id; } if (this._doc._id === void 0) { return new MongooseError('No _id found on document!'); } return where; }; /** * Removes this document from the db. * * ####Example: * product.remove(function (err, product) { * if (err) return handleError(err); * Product.findById(product._id, function (err, product) { * console.log(product) // null * }) * }) * * * As an extra measure of flow control, remove will return a Promise (bound to `fn` if passed) so it could be chained, or hooked to receive errors * * ####Example: * product.remove().then(function (product) { * ... * }).catch(function (err) { * assert.ok(err) * }) * * @param {Object} [options] * @param {Session} [options.session=null] the [session](https://docs.mongodb.com/manual/reference/server-sessions/) associated with this operation. If not specified, defaults to the [document's associated session](api.html#document_Document-$session). * @param {function(err,product)} [fn] optional callback * @return {Promise} Promise * @api public */ Model.prototype.remove = function remove(options, fn) { if (typeof options === 'function') { fn = options; options = undefined; } options = new RemoveOptions(options); if (options.hasOwnProperty('session')) { this.$session(options.session); } this.$op = 'remove'; fn = this.constructor.$handleCallbackError(fn); return promiseOrCallback(fn, cb => { cb = this.constructor.$wrapCallback(cb); this.$__remove(options, (err, res) => { this.$op = null; cb(err, res); }); }, this.constructor.events); }; /** * Alias for remove */ Model.prototype.delete = Model.prototype.remove; /** * Removes this document from the db. Equivalent to `.remove()`. * * ####Example: * product = await product.deleteOne(); * await Product.findById(product._id); // null * * @param {function(err,product)} [fn] optional callback * @return {Promise} Promise * @api public */ Model.prototype.deleteOne = function deleteOne(options, fn) { if (typeof options === 'function') { fn = options; options = undefined; } if (!options) { options = {}; } fn = this.constructor.$handleCallbackError(fn); return promiseOrCallback(fn, cb => { cb = this.constructor.$wrapCallback(cb); this.$__deleteOne(options, cb); }, this.constructor.events); }; /*!
* ignore */ Model.prototype.$__remove = function $__remove(options, cb) { if (this.$__.isDeleted) { return immediate(() => cb(null, this)); } const where = this.$__where(); if (where instanceof MongooseError) { return cb(where); } _applyCustomWhere(this, where); const session = this.$session(); if (!options.hasOwnProperty('session')) { options.session = session; } this[modelCollectionSymbol].deleteOne(where, options, err => { if (!err) { this.$__.isDeleted = true; this.emit('remove', this); this.constructor.emit('remove', this); return cb(null, this); } this.$__.isDeleted = false; cb(err); }); }; /*! * ignore */ Model.prototype.$__deleteOne = Model.prototype.$__remove; /** * Returns another Model instance. * * ####Example: * * var doc = new Tank; * doc.model('User').findById(id, callback); * * @param {String} name model name * @api public */ Model.prototype.model = function model(name) { return this[modelDbSymbol].model(name); }; /** * Returns true if at least one document exists in the database that matches * the given `filter`, and false otherwise. * * Under the hood, `MyModel.exists({ answer: 42 })` is equivalent to * `MyModel.findOne({ answer: 42 }).select({ _id: 1 }).lean().then(doc => !!doc)` * * ####Example: * await Character.deleteMany({}); * await Character.create({ name: 'Jean-Luc Picard' }); * * await Character.exists({ name: /picard/i }); // true * await Character.exists({ name: /riker/i }); // false * * This function triggers the following middleware. * * - `findOne()` * * @param {Object} filter * @param {Function} [callback] callback * @return {Promise} */ Model.exists = function exists(filter, options, callback) { _checkContext(this, 'exists'); if (typeof options === 'function') { callback = options; options = null; } const query = this.findOne(filter). select({ _id: 1 }). lean(). setOptions(options); if (typeof callback === 'function') { query.exec(function(err, doc) { if (err != null) { return callback(err); } callback(null, !!doc); }); return; } return query.then(doc => !!doc); }; /** * Adds a discriminator type. * * ####Example: * * function BaseSchema() { * Schema.apply(this, arguments); * * this.add({ * name: String, * createdAt: Date * }); * } * util.inherits(BaseSchema, Schema); * * var PersonSchema = new BaseSchema(); * var BossSchema = new BaseSchema({ department: String }); * * var Person = mongoose.model('Person', PersonSchema); * var Boss = Person.discriminator('Boss', BossSchema); * new Boss().__t; // "Boss". `__t` is the default `discriminatorKey` * * var employeeSchema = new Schema({ boss: ObjectId }); * var Employee = Person.discriminator('Employee', employeeSchema, 'staff'); * new Employee().__t; // "staff" because of 3rd argument above * * @param {String} name discriminator model name * @param {Schema} schema discriminator model schema * @param {String} [value] the string stored in the `discriminatorKey` property. If not specified, Mongoose uses the `name` parameter. 
* @return {Model} The newly created discriminator model * @api public */ Model.discriminator = function(name, schema, value) { let model; if (typeof name === 'function') { model = name; name = utils.getFunctionName(model); if (!(model.prototype instanceof Model)) { throw new MongooseError('The provided class ' + name + ' must extend Model'); } } _checkContext(this, 'discriminator'); schema = discriminator(this, name, schema, value, true); if (this.db.models[name]) { throw new OverwriteModelError(name); } schema.$isRootDiscriminator = true; schema.$globalPluginsApplied = true; model = this.db.model(model || name, schema, this.collection.name); this.discriminators[name] = model; const d = this.discriminators[name]; d.prototype.__proto__ = this.prototype; Object.defineProperty(d, 'baseModelName', { value: this.modelName, configurable: true, writable: false }); // apply methods and statics applyMethods(d, schema); applyStatics(d, schema); if (this[subclassedSymbol] != null) { for (const submodel of this[subclassedSymbol]) { submodel.discriminators = submodel.discriminators || {}; submodel.discriminators[name] = model.__subclass(model.db, schema, submodel.collection.name); } } return d; }; /*! * Make sure `this` is a model */ function _checkContext(ctx, fnName) { // Check context, because it is easy to mistakenly type // `new Model.discriminator()` and get an incomprehensible error if (ctx == null || ctx === global) { throw new MongooseError('`Model.' + fnName + '()` cannot run without a ' + 'model as `this`. Make sure you are calling `MyModel.' + fnName + '()` ' + 'where `MyModel` is a Mongoose model.'); } else if (ctx[modelSymbol] == null) { throw new MongooseError('`Model.' + fnName + '()` cannot run without a ' + 'model as `this`. Make sure you are not calling ' + '`new Model.' + fnName + '()`'); } } // Model (class) features /*! * Give the constructor the ability to emit events. */ for (const i in EventEmitter.prototype) { Model[i] = EventEmitter.prototype[i]; } /** * This function is responsible for building [indexes](https://docs.mongodb.com/manual/indexes/), * unless [`autoIndex`](http://mongoosejs.com/docs/guide.html#autoIndex) is turned off. * * Mongoose calls this function automatically when a model is created using * [`mongoose.model()`](/docs/api.html#mongoose_Mongoose-model) or * [`connection.model()`](/docs/api.html#connection_Connection-model), so you * don't need to call it. This function is also idempotent, so you may call it * to get back a promise that will resolve when your indexes are finished * building as an alternative to [`MyModel.on('index')`](/docs/guide.html#indexes) * * ####Example: * * var eventSchema = new Schema({ thing: { type: 'string', unique: true }}) * // This calls `Event.init()` implicitly, so you don't need to call * // `Event.init()` on your own. * var Event = mongoose.model('Event', eventSchema); * * Event.init().then(function(Event) { * // You can also use `Event.on('index')` if you prefer event emitters * // over promises. 
* console.log('Indexes are done building!'); * }); * * @api public * @param {Function} [callback] * @returns {Promise} */ Model.init = function init(callback) { _checkContext(this, 'init'); this.schema.emit('init', this); if (this.$init != null) { if (callback) { this.$init.then(() => callback(), err => callback(err)); return null; } return this.$init; } const Promise = PromiseProvider.get(); const autoIndex = utils.getOption('autoIndex', this.schema.options, this.db.config, this.db.base.options); const autoCreate = this.schema.options.autoCreate == null ? this.db.config.autoCreate : this.schema.options.autoCreate; const _ensureIndexes = autoIndex ? cb => this.ensureIndexes({ _automatic: true }, cb) : cb => cb(); const _createCollection = autoCreate ? cb => this.createCollection({}, cb) : cb => cb(); this.$init = new Promise((resolve, reject) => { _createCollection(error => { if (error) { return reject(error); } _ensureIndexes(error => { if (error) { return reject(error); } resolve(this); }); }); }); if (callback) { this.$init.then(() => callback(), err => callback(err)); this.$caught = true; return null; } else { const _catch = this.$init.catch; const _this = this; this.$init.catch = function() { this.$caught = true; return _catch.apply(_this.$init, arguments); }; } return this.$init; }; /** * Create the collection for this model. By default, if no indexes are specified, * mongoose will not create the collection for the model until any documents are * created. Use this method to create the collection explicitly. * * Note 1: You may need to call this before starting a transaction * See https://docs.mongodb.com/manual/core/transactions/#transactions-and-operations * * Note 2: You don't have to call this if your schema contains index or unique field. * In that case, just use `Model.init()` * * ####Example: * * var userSchema = new Schema({ name: String }) * var User = mongoose.model('User', userSchema); * * User.createCollection().then(function(collection) { * console.log('Collection is created!'); * }); * * @api public * @param {Object} [options] see [MongoDB driver docs](http://mongodb.github.io/node-mongodb-native/3.1/api/Db.html#createCollection) * @param {Function} [callback] * @returns {Promise} */ Model.createCollection = function createCollection(options, callback) { _checkContext(this, 'createCollection'); if (typeof options === 'string') { throw new MongooseError('You can\'t specify a new collection name in Model.createCollection.' + 'This is not like Connection.createCollection. Only options are accepted here.'); } else if (typeof options === 'function') { callback = options; options = null; } const schemaCollation = get(this, 'schema.options.collation', null); if (schemaCollation != null) { options = Object.assign({ collation: schemaCollation }, options); } callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { cb = this.$wrapCallback(cb); this.db.createCollection(this.collection.collectionName, options, utils.tick((error) => { if (error) { return cb(error); } this.collection = this.db.collection(this.collection.collectionName, options); cb(null, this.collection); })); }, this.events); }; /** * Makes the indexes in MongoDB match the indexes defined in this model's * schema. This function will drop any indexes that are not defined in * the model's schema except the `_id` index, and build any indexes that * are in your schema but not in MongoDB. 
* * See the [introductory blog post](http://thecodebarbarian.com/whats-new-in-mongoose-5-2-syncindexes) * for more information. * * ####Example: * * const schema = new Schema({ name: { type: String, unique: true } }); * const Customer = mongoose.model('Customer', schema); * await Customer.createIndex({ age: 1 }); // Index is not in schema * // Will drop the 'age' index and create an index on `name` * await Customer.syncIndexes(); * * @param {Object} [options] options to pass to `ensureIndexes()` * @param {Boolean} [options.background=null] if specified, overrides each index's `background` property * @param {Function} [callback] optional callback * @return {Promise|undefined} Returns `undefined` if callback is specified, returns a promise if no callback. * @api public */ Model.syncIndexes = function syncIndexes(options, callback) { _checkContext(this, 'syncIndexes'); callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { cb = this.$wrapCallback(cb); this.createCollection(err => { if (err) { return cb(err); } this.cleanIndexes((err, dropped) => { if (err != null) { return cb(err); } this.createIndexes(options, err => { if (err != null) { return cb(err); } cb(null, dropped); }); }); }); }, this.events); }; /** * Deletes all indexes that aren't defined in this model's schema. Used by * `syncIndexes()`. * * The returned promise resolves to a list of the dropped indexes' names as an array * * @param {Function} [callback] optional callback * @return {Promise|undefined} Returns `undefined` if callback is specified, returns a promise if no callback. * @api public */ Model.cleanIndexes = function cleanIndexes(callback) { _checkContext(this, 'cleanIndexes'); callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { const collection = this.collection; this.listIndexes((err, indexes) => { if (err != null) { return cb(err); } const schemaIndexes = this.schema.indexes(); const toDrop = []; for (const index of indexes) { let found = false; // Never try to drop `_id` index, MongoDB server doesn't allow it if (isDefaultIdIndex(index)) { continue; } for (const schemaIndex of schemaIndexes) { if (isIndexEqual(this, schemaIndex, index)) { found = true; } } if (!found) { toDrop.push(index.name); } } if (toDrop.length === 0) { return cb(null, []); } dropIndexes(toDrop, cb); }); function dropIndexes(toDrop, cb) { let remaining = toDrop.length; let error = false; toDrop.forEach(indexName => { collection.dropIndex(indexName, err => { if (err != null) { error = true; return cb(err); } if (!error) { --remaining || cb(null, toDrop); } }); }); } }); }; /*! 
* ignore */ function isIndexEqual(model, schemaIndex, dbIndex) { const key = schemaIndex[0]; const options = _decorateDiscriminatorIndexOptions(model, utils.clone(schemaIndex[1])); // If these options are different, need to rebuild the index const optionKeys = [ 'unique', 'partialFilterExpression', 'sparse', 'expireAfterSeconds', 'collation' ]; for (const key of optionKeys) { if (!(key in options) && !(key in dbIndex)) { continue; } if (!utils.deepEqual(options[key], dbIndex[key])) { return false; } } const schemaIndexKeys = Object.keys(key); const dbIndexKeys = Object.keys(dbIndex.key); if (schemaIndexKeys.length !== dbIndexKeys.length) { return false; } for (let i = 0; i < schemaIndexKeys.length; ++i) { if (schemaIndexKeys[i] !== dbIndexKeys[i]) { return false; } if (!utils.deepEqual(key[schemaIndexKeys[i]], dbIndex.key[dbIndexKeys[i]])) { return false; } } return true; } /** * Lists the indexes currently defined in MongoDB. This may or may not be * the same as the indexes defined in your schema depending on whether you * use the [`autoIndex` option](/docs/guide.html#autoIndex) and if you * build indexes manually. * * @param {Function} [cb] optional callback * @return {Promise|undefined} Returns `undefined` if callback is specified, returns a promise if no callback. * @api public */ Model.listIndexes = function init(callback) { _checkContext(this, 'listIndexes'); const _listIndexes = cb => { this.collection.listIndexes().toArray(cb); }; callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { cb = this.$wrapCallback(cb); // Buffering if (this.collection.buffer) { this.collection.addQueue(_listIndexes, [cb]); } else { _listIndexes(cb); } }, this.events); }; /** * Sends `createIndex` commands to mongo for each index declared in the schema. * The `createIndex` commands are sent in series. * * ####Example: * * Event.ensureIndexes(function (err) { * if (err) return handleError(err); * }); * * After completion, an `index` event is emitted on this `Model` passing an error if one occurred. * * ####Example: * * var eventSchema = new Schema({ thing: { type: 'string', unique: true }}) * var Event = mongoose.model('Event', eventSchema); * * Event.on('index', function (err) { * if (err) console.error(err); // error occurred during index creation * }) * * _NOTE: It is not recommended that you run this in production. Index creation may impact database performance depending on your load. Use with caution._ * * @param {Object} [options] internal options * @param {Function} [cb] optional callback * @return {Promise} * @api public */ Model.ensureIndexes = function ensureIndexes(options, callback) { _checkContext(this, 'ensureIndexes'); if (typeof options === 'function') { callback = options; options = null; } callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { cb = this.$wrapCallback(cb); _ensureIndexes(this, options || {}, error => { if (error) { return cb(error); } cb(null); }); }, this.events); }; /** * Similar to `ensureIndexes()`, except for it uses the [`createIndex`](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#createIndex) * function. 
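* * ####Example (illustrative sketch; assumes an `Event` model whose schema declares indexes): * * Event.createIndexes(function (err) { * if (err) return handleError(err); * });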
* * @param {Object} [options] internal options * @param {Function} [cb] optional callback * @return {Promise} * @api public */ Model.createIndexes = function createIndexes(options, callback) { _checkContext(this, 'createIndexes'); if (typeof options === 'function') { callback = options; options = {}; } options = options || {}; options.createIndex = true; return this.ensureIndexes(options, callback); }; /*! * ignore */ function _ensureIndexes(model, options, callback) { const indexes = model.schema.indexes(); let indexError; options = options || {}; const done = function(err) { if (err && !model.$caught) { model.emit('error', err); } model.emit('index', err || indexError); callback && callback(err); }; for (const index of indexes) { if (isDefaultIdIndex(index)) { console.warn('mongoose: Cannot specify a custom index on `_id` for ' + 'model name "' + model.modelName + '", ' + 'MongoDB does not allow overwriting the default `_id` index. See ' + 'http://bit.ly/mongodb-id-index'); } } if (!indexes.length) { immediate(function() { done(); }); return; } // Indexes are created one-by-one to support how MongoDB < 2.4 deals // with background indexes. const indexSingleDone = function(err, fields, options, name) { model.emit('index-single-done', err, fields, options, name); }; const indexSingleStart = function(fields, options) { model.emit('index-single-start', fields, options); }; const baseSchema = model.schema._baseSchema; const baseSchemaIndexes = baseSchema ? baseSchema.indexes() : []; const create = function() { if (options._automatic) { if (model.schema.options.autoIndex === false || (model.schema.options.autoIndex == null && model.db.config.autoIndex === false)) { return done(); } } const index = indexes.shift(); if (!index) { return done(); } if (baseSchemaIndexes.find(i => utils.deepEqual(i, index))) { return create(); } const indexFields = utils.clone(index[0]); const indexOptions = utils.clone(index[1]); _decorateDiscriminatorIndexOptions(model, indexOptions); if ('safe' in options) { _handleSafe(options); } applyWriteConcern(model.schema, indexOptions); indexSingleStart(indexFields, options); let useCreateIndex = !!model.base.options.useCreateIndex; if ('useCreateIndex' in model.db.config) { useCreateIndex = !!model.db.config.useCreateIndex; } if ('createIndex' in options) { useCreateIndex = !!options.createIndex; } if ('background' in options) { indexOptions.background = options.background; } const methodName = useCreateIndex ? 'createIndex' : 'ensureIndex'; model.collection[methodName](indexFields, indexOptions, utils.tick(function(err, name) { indexSingleDone(err, indexFields, indexOptions, name); if (err) { if (!indexError) { indexError = err; } if (!model.$caught) { model.emit('error', err); } } create(); })); }; immediate(function() { // If buffering is off, do this manually. if (options._automatic && !model.collection.collection) { model.collection.addQueue(create, []); } else { create(); } }); } function _decorateDiscriminatorIndexOptions(model, indexOptions) { // If the model is a discriminator and it has a unique index, add a // partialFilterExpression by default so the unique index will only apply // to that discriminator. 
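// Illustrative sketch (model name is assumed): with the default discriminator key `__t`, a // `{ unique: true }` index declared on an `Employee` discriminator is decorated as // `{ unique: true, partialFilterExpression: { __t: 'Employee' } }`, so uniqueness is only // enforced among `Employee` documents.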
if (model.baseModelName != null && indexOptions.unique && !('partialFilterExpression' in indexOptions) && !('sparse' in indexOptions)) { const value = ( model.schema.discriminatorMapping && model.schema.discriminatorMapping.value ) || model.modelName; indexOptions.partialFilterExpression = { [model.schema.options.discriminatorKey]: value }; } return indexOptions; } const safeDeprecationWarning = 'Mongoose: the `safe` option for `save()` is ' + 'deprecated. Use the `w` option instead: http://bit.ly/mongoose-save'; const _handleSafe = util.deprecate(function _handleSafe(options) { if (options.safe) { if (typeof options.safe === 'boolean') { options.w = options.safe; delete options.safe; } if (typeof options.safe === 'object') { options.w = options.safe.w; options.j = options.safe.j; options.wtimeout = options.safe.wtimeout; delete options.safe; } } }, safeDeprecationWarning); /** * Schema the model uses. * * @property schema * @receiver Model * @api public * @memberOf Model */ Model.schema; /*! * Connection instance the model uses. * * @property db * @api public * @memberOf Model */ Model.db; /*! * Collection the model uses. * * @property collection * @api public * @memberOf Model */ Model.collection; /** * Base Mongoose instance the model uses. * * @property base * @api public * @memberOf Model */ Model.base; /** * Registered discriminators for this model. * * @property discriminators * @api public * @memberOf Model */ Model.discriminators; /** * Translate any aliases fields/conditions so the final query or document object is pure * * ####Example: * * Character * .find(Character.translateAliases({ * '名': 'Eddard Stark' // Alias for 'name' * }) * .exec(function(err, characters) {}) * * ####Note: * Only translate arguments of object type anything else is returned raw * * @param {Object} raw fields/conditions that may contain aliased keys * @return {Object} the translated 'pure' fields/conditions */ Model.translateAliases = function translateAliases(fields) { _checkContext(this, 'translateAliases'); const translate = (key, value) => { let alias; const translated = []; const fieldKeys = key.split('.'); let currentSchema = this.schema; for (const i in fieldKeys) { const name = fieldKeys[i]; if (currentSchema && currentSchema.aliases[name]) { alias = currentSchema.aliases[name]; // Alias found, translated.push(alias); } else { // Alias not found, so treat as un-aliased key translated.push(name); } // Check if aliased path is a schema if (currentSchema && currentSchema.paths[alias]) { currentSchema = currentSchema.paths[alias].schema; } else currentSchema = null; } const translatedKey = translated.join('.'); if (fields instanceof Map) fields.set(translatedKey, value); else fields[translatedKey] = value; if (translatedKey !== key) { // We'll be using the translated key instead if (fields instanceof Map) { // Delete from map fields.delete(key); } else { // Delete from object delete fields[key]; // We'll be using the translated key instead } } return fields; }; if (typeof fields === 'object') { // Fields is an object (query conditions or document fields) if (fields instanceof Map) { // A Map was supplied for (const field of new Map(fields)) { fields = translate(field[0], field[1]); } } else { // Infer a regular object was supplied for (const key of Object.keys(fields)) { fields = translate(key, fields[key]); if (key[0] === '$') { if (Array.isArray(fields[key])) { for (const i in fields[key]) { // Recursively translate nested queries fields[key][i] = this.translateAliases(fields[key][i]); } } } } } 
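// Illustrative result (assumes a schema alias `名` -> `name`, as in the JSDoc example above): // `Character.translateAliases({ '名': 'Eddard Stark' })` returns `{ name: 'Eddard Stark' }`, // with the original aliased key removed.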
return fields; } else { // Don't know typeof fields return fields; } }; /** * Removes all documents that match `conditions` from the collection. * To remove just the first document that matches `conditions`, set the `single` * option to true. * * ####Example: * * const res = await Character.remove({ name: 'Eddard Stark' }); * res.deletedCount; // Number of documents removed * * ####Note: * * This method sends a remove command directly to MongoDB, no Mongoose documents * are involved. Because no Mongoose documents are involved, Mongoose does * not execute [document middleware](/docs/middleware.html#types-of-middleware). * * @param {Object} conditions * @param {Object} [options] * @param {Session} [options.session=null] the [session](https://docs.mongodb.com/manual/reference/server-sessions/) associated with this operation. * @param {Function} [callback] * @return {Query} * @api public */ Model.remove = function remove(conditions, options, callback) { _checkContext(this, 'remove'); if (typeof conditions === 'function') { callback = conditions; conditions = {}; options = null; } else if (typeof options === 'function') { callback = options; options = null; } // get the mongodb collection object const mq = new this.Query({}, {}, this, this.collection); mq.setOptions(options); callback = this.$handleCallbackError(callback); return mq.remove(conditions, callback); }; /** * Deletes the first document that matches `conditions` from the collection. * Behaves like `remove()`, but deletes at most one document regardless of the * `single` option. * * ####Example: * * Character.deleteOne({ name: 'Eddard Stark' }, function (err) {}); * * ####Note: * * Like `Model.remove()`, this function does **not** trigger `pre('remove')` or `post('remove')` hooks. * * @param {Object} conditions * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @api public */ Model.deleteOne = function deleteOne(conditions, options, callback) { _checkContext(this, 'deleteOne'); if (typeof conditions === 'function') { callback = conditions; conditions = {}; options = null; } else if (typeof options === 'function') { callback = options; options = null; } const mq = new this.Query({}, {}, this, this.collection); mq.setOptions(options); callback = this.$handleCallbackError(callback); return mq.deleteOne(conditions, callback); }; /** * Deletes all of the documents that match `conditions` from the collection. * Behaves like `remove()`, but deletes all documents that match `conditions` * regardless of the `single` option. * * ####Example: * * Character.deleteMany({ name: /Stark/, age: { $gte: 18 } }, function (err) {}); * * ####Note: * * Like `Model.remove()`, this function does **not** trigger `pre('remove')` or `post('remove')` hooks. 
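* * The promise form behaves the same way (illustrative sketch; `deletedCount` comes from the underlying driver result): * * const res = await Character.deleteMany({ name: /Stark/ }); * res.deletedCount; // number of documents removed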
* * @param {Object} conditions * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @api public */ Model.deleteMany = function deleteMany(conditions, options, callback) { _checkContext(this, 'deleteMany'); if (typeof conditions === 'function') { callback = conditions; conditions = {}; options = null; } else if (typeof options === 'function') { callback = options; options = null; } const mq = new this.Query({}, {}, this, this.collection); mq.setOptions(options); callback = this.$handleCallbackError(callback); return mq.deleteMany(conditions, callback); }; /** * Finds documents. * * The `filter` are cast to their respective SchemaTypes before the command is sent. * See our [query casting tutorial](/docs/tutorials/query_casting.html) for * more information on how Mongoose casts `filter`. * * ####Examples: * * // named john and at least 18 * MyModel.find({ name: 'john', age: { $gte: 18 }}); * * // executes, passing results to callback * MyModel.find({ name: 'john', age: { $gte: 18 }}, function (err, docs) {}); * * // executes, name LIKE john and only selecting the "name" and "friends" fields * MyModel.find({ name: /john/i }, 'name friends', function (err, docs) { }) * * // passing options * MyModel.find({ name: /john/i }, null, { skip: 10 }) * * // passing options and executes * MyModel.find({ name: /john/i }, null, { skip: 10 }, function (err, docs) {}); * * // executing a query explicitly * var query = MyModel.find({ name: /john/i }, null, { skip: 10 }) * query.exec(function (err, docs) {}); * * // using the promise returned from executing a query * var query = MyModel.find({ name: /john/i }, null, { skip: 10 }); * var promise = query.exec(); * promise.addBack(function (err, docs) {}); * * @param {Object|ObjectId} filter * @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](http://mongoosejs.com/docs/api.html#query_Query-select) * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see field selection #query_Query-select * @see query casting /docs/tutorials/query_casting.html * @api public */ Model.find = function find(conditions, projection, options, callback) { _checkContext(this, 'find'); if (typeof conditions === 'function') { callback = conditions; conditions = {}; projection = null; options = null; } else if (typeof projection === 'function') { callback = projection; projection = null; options = null; } else if (typeof options === 'function') { callback = options; options = null; } const mq = new this.Query({}, {}, this, this.collection); mq.select(projection); mq.setOptions(options); if (this.schema.discriminatorMapping && this.schema.discriminatorMapping.isRoot && mq.selectedInclusively()) { // Need to select discriminator key because original schema doesn't have it mq.select(this.schema.options.discriminatorKey); } callback = this.$handleCallbackError(callback); return mq.find(conditions, callback); }; /** * Finds a single document by its _id field. `findById(id)` is almost* * equivalent to `findOne({ _id: id })`. If you want to query by a document's * `_id`, use `findById()` instead of `findOne()`. * * The `id` is cast based on the Schema before sending the command. * * This function triggers the following middleware. * * - `findOne()` * * \* Except for how it treats `undefined`. 
If you use `findOne()`, you'll see * that `findOne(undefined)` and `findOne({ _id: undefined })` are equivalent * to `findOne({})` and return arbitrary documents. However, mongoose * translates `findById(undefined)` into `findOne({ _id: null })`. * * ####Example: * * // find adventure by id and execute * Adventure.findById(id, function (err, adventure) {}); * * // same as above * Adventure.findById(id).exec(callback); * * // select only the adventures name and length * Adventure.findById(id, 'name length', function (err, adventure) {}); * * // same as above * Adventure.findById(id, 'name length').exec(callback); * * // include all properties except for `length` * Adventure.findById(id, '-length').exec(function (err, adventure) {}); * * // passing options (in this case return the raw js objects, not mongoose documents by passing `lean` * Adventure.findById(id, 'name', { lean: true }, function (err, doc) {}); * * // same as above * Adventure.findById(id, 'name').lean().exec(function (err, doc) {}); * * @param {Any} id value of `_id` to query by * @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](#query_Query-select) * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see field selection #query_Query-select * @see lean queries /docs/tutorials/lean.html * @see findById in Mongoose https://masteringjs.io/tutorials/mongoose/find-by-id * @api public */ Model.findById = function findById(id, projection, options, callback) { _checkContext(this, 'findById'); if (typeof id === 'undefined') { id = null; } callback = this.$handleCallbackError(callback); return this.findOne({_id: id}, projection, options, callback); }; /** * Finds one document. * * The `conditions` are cast to their respective SchemaTypes before the command is sent. * * *Note:* `conditions` is optional, and if `conditions` is null or undefined, * mongoose will send an empty `findOne` command to MongoDB, which will return * an arbitrary document. If you're querying by `_id`, use `findById()` instead. * * ####Example: * * // find one iphone adventures - iphone adventures?? 
* Adventure.findOne({ type: 'iphone' }, function (err, adventure) {}); * * // same as above * Adventure.findOne({ type: 'iphone' }).exec(function (err, adventure) {}); * * // select only the adventures name * Adventure.findOne({ type: 'iphone' }, 'name', function (err, adventure) {}); * * // same as above * Adventure.findOne({ type: 'iphone' }, 'name').exec(function (err, adventure) {}); * * // specify options, in this case lean * Adventure.findOne({ type: 'iphone' }, 'name', { lean: true }, callback); * * // same as above * Adventure.findOne({ type: 'iphone' }, 'name', { lean: true }).exec(callback); * * // chaining findOne queries (same as above) * Adventure.findOne({ type: 'iphone' }).select('name').lean().exec(callback); * * @param {Object} [conditions] * @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](#query_Query-select) * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see field selection #query_Query-select * @see lean queries /docs/tutorials/lean.html * @api public */ Model.findOne = function findOne(conditions, projection, options, callback) { _checkContext(this, 'findOne'); if (typeof options === 'function') { callback = options; options = null; } else if (typeof projection === 'function') { callback = projection; projection = null; options = null; } else if (typeof conditions === 'function') { callback = conditions; conditions = {}; projection = null; options = null; } const mq = new this.Query({}, {}, this, this.collection); mq.select(projection); mq.setOptions(options); if (this.schema.discriminatorMapping && this.schema.discriminatorMapping.isRoot && mq.selectedInclusively()) { mq.select(this.schema.options.discriminatorKey); } callback = this.$handleCallbackError(callback); return mq.findOne(conditions, callback); }; /** * Estimates the number of documents in the MongoDB collection. Faster than * using `countDocuments()` for large collections because * `estimatedDocumentCount()` uses collection metadata rather than scanning * the entire collection. * * ####Example: * * const numAdventures = Adventure.estimatedDocumentCount(); * * @param {Object} [options] * @param {Function} [callback] * @return {Query} * @api public */ Model.estimatedDocumentCount = function estimatedDocumentCount(options, callback) { _checkContext(this, 'estimatedDocumentCount'); const mq = new this.Query({}, {}, this, this.collection); callback = this.$handleCallbackError(callback); return mq.estimatedDocumentCount(options, callback); }; /** * Counts number of documents matching `filter` in a database collection. * * ####Example: * * Adventure.countDocuments({ type: 'jungle' }, function (err, count) { * console.log('there are %d jungle adventures', count); * }); * * If you want to count all documents in a large collection, * use the [`estimatedDocumentCount()` function](/docs/api.html#model_Model.estimatedDocumentCount) * instead. If you call `countDocuments({})`, MongoDB will always execute * a full collection scan and **not** use any indexes. * * The `countDocuments()` function is similar to `count()`, but there are a * [few operators that `countDocuments()` does not support](https://mongodb.github.io/node-mongodb-native/3.1/api/Collection.html#countDocuments). 
* Below are the operators that `count()` supports but `countDocuments()` does not, * and the suggested replacement: * * - `$where`: [`$expr`](https://docs.mongodb.com/manual/reference/operator/query/expr/) * - `$near`: [`$geoWithin`](https://docs.mongodb.com/manual/reference/operator/query/geoWithin/) with [`$center`](https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center) * - `$nearSphere`: [`$geoWithin`](https://docs.mongodb.com/manual/reference/operator/query/geoWithin/) with [`$centerSphere`](https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere) * * @param {Object} filter * @param {Function} [callback] * @return {Query} * @api public */ Model.countDocuments = function countDocuments(conditions, callback) { _checkContext(this, 'countDocuments'); if (typeof conditions === 'function') { callback = conditions; conditions = {}; } const mq = new this.Query({}, {}, this, this.collection); callback = this.$handleCallbackError(callback); return mq.countDocuments(conditions, callback); }; /** * Counts number of documents that match `filter` in a database collection. * * This method is deprecated. If you want to count the number of documents in * a collection, e.g. `count({})`, use the [`estimatedDocumentCount()` function](/docs/api.html#model_Model.estimatedDocumentCount) * instead. Otherwise, use the [`countDocuments()`](/docs/api.html#model_Model.countDocuments) function instead. * * ####Example: * * Adventure.count({ type: 'jungle' }, function (err, count) { * if (err) .. * console.log('there are %d jungle adventures', count); * }); * * @deprecated * @param {Object} filter * @param {Function} [callback] * @return {Query} * @api public */ Model.count = function count(conditions, callback) { _checkContext(this, 'count'); if (typeof conditions === 'function') { callback = conditions; conditions = {}; } const mq = new this.Query({}, {}, this, this.collection); callback = this.$handleCallbackError(callback); return mq.count(conditions, callback); }; /** * Creates a Query for a `distinct` operation. * * Passing a `callback` executes the query. * * ####Example * * Link.distinct('url', { clicks: {$gt: 100}}, function (err, result) { * if (err) return handleError(err); * * assert(Array.isArray(result)); * console.log('unique urls with more than 100 clicks', result); * }) * * var query = Link.distinct('url'); * query.exec(callback); * * @param {String} field * @param {Object} [conditions] optional * @param {Function} [callback] * @return {Query} * @api public */ Model.distinct = function distinct(field, conditions, callback) { _checkContext(this, 'distinct'); const mq = new this.Query({}, {}, this, this.collection); if (typeof conditions === 'function') { callback = conditions; conditions = {}; } callback = this.$handleCallbackError(callback); return mq.distinct(field, conditions, callback); }; /** * Creates a Query, applies the passed conditions, and returns the Query. * * For example, instead of writing: * * User.find({age: {$gte: 21, $lte: 65}}, callback); * * we can instead write: * * User.where('age').gte(21).lte(65).exec(callback); * * Since the Query class also supports `where` you can continue chaining * * User * .where('age').gte(21).lte(65) * .where('name', /^b/i) * ... 
etc * * @param {String} path * @param {Object} [val] optional value * @return {Query} * @api public */ Model.where = function where(path, val) { _checkContext(this, 'where'); void val; // eslint const mq = new this.Query({}, {}, this, this.collection).find({}); return mq.where.apply(mq, arguments); }; /** * Creates a `Query` and specifies a `$where` condition. * * Sometimes you need to query for things in mongodb using a JavaScript expression. You can do so via `find({ $where: javascript })`, or you can use the mongoose shortcut method $where via a Query chain or from your mongoose Model. * * Blog.$where('this.username.indexOf("val") !== -1').exec(function (err, docs) {}); * * @param {String|Function} argument is a javascript string or anonymous function * @method $where * @memberOf Model * @return {Query} * @see Query.$where #query_Query-%24where * @api public */ Model.$where = function $where() { _checkContext(this, '$where'); const mq = new this.Query({}, {}, this, this.collection).find({}); return mq.$where.apply(mq, arguments); }; /** * Issues a mongodb findAndModify update command. * * Finds a matching document, updates it according to the `update` arg, passing any `options`, and returns the found document (if any) to the callback. The query executes if `callback` is passed else a Query object is returned. * * ####Options: * * - `new`: bool - if true, return the modified document rather than the original. defaults to false (changed in 4.0) * - `upsert`: bool - creates the object if it doesn't exist. defaults to false. * - `fields`: {Object|String} - Field selection. Equivalent to `.select(fields).findOneAndUpdate()` * - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0 * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema. * - `setDefaultsOnInsert`: if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/). * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findOneAndUpdate(conditions, update, options, callback) // executes * A.findOneAndUpdate(conditions, update, options) // returns Query * A.findOneAndUpdate(conditions, update, callback) // executes * A.findOneAndUpdate(conditions, update) // returns Query * A.findOneAndUpdate() // returns Query * * ####Note: * * All top level update keys which are not `atomic` operation names are treated as set operations: * * ####Example: * * var query = { name: 'borne' }; * Model.findOneAndUpdate(query, { name: 'jason bourne' }, options, callback) * * // is sent as * Model.findOneAndUpdate(query, { $set: { name: 'jason bourne' }}, options, callback) * * This helps prevent accidentally overwriting your document with `{ name: 'jason bourne' }`. * * ####Note: * * Values are cast to their appropriate types when using the findAndModify helpers. 
* However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * `findAndModify` helpers support limited validation. You can * enable these by setting the `runValidators` options, * respectively. * * If you need full-fledged validation, use the traditional approach of first * retrieving the document. * * Model.findById(id, function (err, doc) { * if (err) .. * doc.name = 'jason bourne'; * doc.save(callback); * }); * * @param {Object} [conditions] * @param {Object} [update] * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](/docs/api.html#query_Query-lean) and [the Mongoose lean tutorial](/docs/tutorials/lean.html). * @param {ClientSession} [options.session=null] The session associated with this query. See [transactions docs](/docs/transactions.html). * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Boolean} [options.omitUndefined=false] If true, delete any properties whose value is `undefined` when casting an update. In other words, if this is set, Mongoose will delete `baz` from the update in `Model.updateOne({}, { foo: 'bar', baz: undefined })` before sending the update to the server. * @param {Boolean} [options.timestamps=null] If set to `false` and [schema-level timestamps](/docs/guide.html#timestamps) are enabled, skip timestamps for this update. Note that this allows you to overwrite timestamps. Does nothing if schema-level timestamps are not set. * @param {Function} [callback] * @return {Query} * @see Tutorial /docs/tutorials/findoneandupdate.html * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command * @api public */ Model.findOneAndUpdate = function(conditions, update, options, callback) { _checkContext(this, 'findOneAndUpdate'); if (typeof options === 'function') { callback = options; options = null; } else if (arguments.length === 1) { if (typeof conditions === 'function') { const msg = 'Model.findOneAndUpdate(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findOneAndUpdate(conditions, update, options, callback)\n' + ' ' + this.modelName + '.findOneAndUpdate(conditions, update, options)\n' + ' ' + this.modelName + '.findOneAndUpdate(conditions, update)\n' + ' ' + this.modelName + '.findOneAndUpdate(update)\n' + ' ' + this.modelName + '.findOneAndUpdate()\n'; throw new TypeError(msg); } update = conditions; conditions = undefined; } callback = this.$handleCallbackError(callback); let fields; if (options) { fields = options.fields || options.projection; } update = utils.clone(update, { depopulate: true, _isNested: true }); _decorateUpdateWithVersionKey(update, options, this.schema.options.versionKey); const mq = new this.Query({}, {}, this, this.collection); mq.select(fields); return mq.findOneAndUpdate(conditions, update, options, callback); }; /*! 
* Decorate the update with a version key, if necessary */ function _decorateUpdateWithVersionKey(update, options, versionKey) { if (!versionKey || !get(options, 'upsert', false)) { return; } const updatedPaths = modifiedPaths(update); if (!updatedPaths[versionKey]) { if (options.overwrite) { update[versionKey] = 0; } else { if (!update.$setOnInsert) { update.$setOnInsert = {}; } update.$setOnInsert[versionKey] = 0; } } } /** * Issues a mongodb findAndModify update command by a document's _id field. * `findByIdAndUpdate(id, ...)` is equivalent to `findOneAndUpdate({ _id: id }, ...)`. * * Finds a matching document, updates it according to the `update` arg, * passing any `options`, and returns the found document (if any) to the * callback. The query executes if `callback` is passed. * * This function triggers the following middleware. * * - `findOneAndUpdate()` * * ####Options: * * - `new`: bool - true to return the modified document rather than the original. defaults to false * - `upsert`: bool - creates the object if it doesn't exist. defaults to false. * - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema. * - `setDefaultsOnInsert`: if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/). * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `select`: sets the document fields to return * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findByIdAndUpdate(id, update, options, callback) // executes * A.findByIdAndUpdate(id, update, options) // returns Query * A.findByIdAndUpdate(id, update, callback) // executes * A.findByIdAndUpdate(id, update) // returns Query * A.findByIdAndUpdate() // returns Query * * ####Note: * * All top level update keys which are not `atomic` operation names are treated as set operations: * * ####Example: * * Model.findByIdAndUpdate(id, { name: 'jason bourne' }, options, callback) * * // is sent as * Model.findByIdAndUpdate(id, { $set: { name: 'jason bourne' }}, options, callback) * * This helps prevent accidentally overwriting your document with `{ name: 'jason bourne' }`. * * ####Note: * * Values are cast to their appropriate types when using the findAndModify helpers. * However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * `findAndModify` helpers support limited validation. You can * enable these by setting the `runValidators` options, * respectively. * * If you need full-fledged validation, use the traditional approach of first * retrieving the document. * * Model.findById(id, function (err, doc) { * if (err) .. 
* doc.name = 'jason bourne'; * doc.save(callback); * }); * * @param {Object|Number|String} id value of `_id` to query by * @param {Object} [update] * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](/docs/api.html#query_Query-lean) and the [Mongoose lean tutorial](/docs/tutorials/lean.html). * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Boolean} [options.omitUndefined=false] If true, delete any properties whose value is `undefined` when casting an update. In other words, if this is set, Mongoose will delete `baz` from the update in `Model.updateOne({}, { foo: 'bar', baz: undefined })` before sending the update to the server. * @param {Boolean} [options.timestamps=null] If set to `false` and [schema-level timestamps](/docs/guide.html#timestamps) are enabled, skip timestamps for this update. Note that this allows you to overwrite timestamps. Does nothing if schema-level timestamps are not set. * @param {Function} [callback] * @return {Query} * @see Model.findOneAndUpdate #model_Model.findOneAndUpdate * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command * @api public */ Model.findByIdAndUpdate = function(id, update, options, callback) { _checkContext(this, 'findByIdAndUpdate'); callback = this.$handleCallbackError(callback); if (arguments.length === 1) { if (typeof id === 'function') { const msg = 'Model.findByIdAndUpdate(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findByIdAndUpdate(id, callback)\n' + ' ' + this.modelName + '.findByIdAndUpdate(id)\n' + ' ' + this.modelName + '.findByIdAndUpdate()\n'; throw new TypeError(msg); } return this.findOneAndUpdate({_id: id}, undefined); } // if a model is passed in instead of an id if (id instanceof Document) { id = id._id; } return this.findOneAndUpdate.call(this, {_id: id}, update, options, callback); }; /** * Issue a MongoDB `findOneAndDelete()` command. * * Finds a matching document, removes it, and passes the found document * (if any) to the callback. * * Executes the query if `callback` is passed. * * This function triggers the following middleware. * * - `findOneAndDelete()` * * This function differs slightly from `Model.findOneAndRemove()` in that * `findOneAndRemove()` becomes a [MongoDB `findAndModify()` command](https://docs.mongodb.com/manual/reference/method/db.collection.findAndModify/), * as opposed to a `findOneAndDelete()` command. For most mongoose use cases, * this distinction is purely pedantic. You should use `findOneAndDelete()` * unless you have a good reason not to. * * ####Options: * * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0 * - `select`: sets the document fields to return * - `projection`: like select, it determines which fields to return, ex. 
`{ projection: { _id: 0 } }` * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findOneAndDelete(conditions, options, callback) // executes * A.findOneAndDelete(conditions, options) // return Query * A.findOneAndDelete(conditions, callback) // executes * A.findOneAndDelete(conditions) // returns Query * A.findOneAndDelete() // returns Query * * Values are cast to their appropriate types when using the findAndModify helpers. * However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * `findAndModify` helpers support limited validation. You can * enable these by setting the `runValidators` options, * respectively. * * If you need full-fledged validation, use the traditional approach of first * retrieving the document. * * Model.findById(id, function (err, doc) { * if (err) .. * doc.name = 'jason bourne'; * doc.save(callback); * }); * * @param {Object} conditions * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {ClientSession} [options.session=null] The session associated with this query. See [transactions docs](/docs/transactions.html). * @param {Function} [callback] * @return {Query} * @api public */ Model.findOneAndDelete = function(conditions, options, callback) { _checkContext(this, 'findOneAndDelete'); if (arguments.length === 1 && typeof conditions === 'function') { const msg = 'Model.findOneAndDelete(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findOneAndDelete(conditions, callback)\n' + ' ' + this.modelName + '.findOneAndDelete(conditions)\n' + ' ' + this.modelName + '.findOneAndDelete()\n'; throw new TypeError(msg); } if (typeof options === 'function') { callback = options; options = undefined; } callback = this.$handleCallbackError(callback); let fields; if (options) { fields = options.select; options.select = undefined; } const mq = new this.Query({}, {}, this, this.collection); mq.select(fields); return mq.findOneAndDelete(conditions, options, callback); }; /** * Issue a MongoDB `findOneAndDelete()` command by a document's _id field. * In other words, `findByIdAndDelete(id)` is a shorthand for * `findOneAndDelete({ _id: id })`. * * This function triggers the following middleware. 
* * - `findOneAndDelete()` * * @param {Object|Number|String} id value of `_id` to query by * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Function} [callback] * @return {Query} * @see Model.findOneAndRemove #model_Model.findOneAndRemove * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command */ Model.findByIdAndDelete = function(id, options, callback) { _checkContext(this, 'findByIdAndDelete'); if (arguments.length === 1 && typeof id === 'function') { const msg = 'Model.findByIdAndDelete(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findByIdAndDelete(id, callback)\n' + ' ' + this.modelName + '.findByIdAndDelete(id)\n' + ' ' + this.modelName + '.findByIdAndDelete()\n'; throw new TypeError(msg); } callback = this.$handleCallbackError(callback); return this.findOneAndDelete({_id: id}, options, callback); }; /** * Issue a MongoDB `findOneAndReplace()` command. * * Finds a matching document, replaces it with the provided doc, and passes the * returned doc to the callback. * * Executes the query if `callback` is passed. * * This function triggers the following query middleware. * * - `findOneAndReplace()` * * ####Options: * * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0 * - `select`: sets the document fields to return * - `projection`: like select, it determines which fields to return, ex. `{ projection: { _id: 0 } }` * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findOneAndReplace(conditions, options, callback) // executes * A.findOneAndReplace(conditions, options) // return Query * A.findOneAndReplace(conditions, callback) // executes * A.findOneAndReplace(conditions) // returns Query * A.findOneAndReplace() // returns Query * * Values are cast to their appropriate types when using the findAndModify helpers. * However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * @param {Object} filter Replace the first document that matches this filter * @param {Object} [replacement] Replace with this document * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](http://mongoosejs.com/docs/api.html#query_Query-lean). * @param {ClientSession} [options.session=null] The session associated with this query. See [transactions docs](/docs/transactions.html). * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Boolean} [options.omitUndefined=false] If true, delete any properties whose value is `undefined` when casting an update. 
In other words, if this is set, Mongoose will delete `baz` from the update in `Model.updateOne({}, { foo: 'bar', baz: undefined })` before sending the update to the server. * @param {Function} [callback] * @return {Query} * @api public */ Model.findOneAndReplace = function(filter, replacement, options, callback) { _checkContext(this, 'findOneAndReplace'); if (arguments.length === 1 && typeof filter === 'function') { const msg = 'Model.findOneAndReplace(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findOneAndReplace(conditions, callback)\n' + ' ' + this.modelName + '.findOneAndReplace(conditions)\n' + ' ' + this.modelName + '.findOneAndReplace()\n'; throw new TypeError(msg); } if (arguments.length === 3 && typeof options === 'function') { callback = options; options = replacement; replacement = void 0; } if (arguments.length === 2 && typeof replacement === 'function') { callback = replacement; replacement = void 0; options = void 0; } callback = this.$handleCallbackError(callback); let fields; if (options) { fields = options.select; options.select = undefined; } const mq = new this.Query({}, {}, this, this.collection); mq.select(fields); return mq.findOneAndReplace(filter, replacement, options, callback); }; /** * Issue a mongodb findAndModify remove command. * * Finds a matching document, removes it, passing the found document (if any) to the callback. * * Executes the query if `callback` is passed. * * This function triggers the following middleware. * * - `findOneAndRemove()` * * ####Options: * * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0 * - `select`: sets the document fields to return * - `projection`: like select, it determines which fields to return, ex. `{ projection: { _id: 0 } }` * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findOneAndRemove(conditions, options, callback) // executes * A.findOneAndRemove(conditions, options) // return Query * A.findOneAndRemove(conditions, callback) // executes * A.findOneAndRemove(conditions) // returns Query * A.findOneAndRemove() // returns Query * * Values are cast to their appropriate types when using the findAndModify helpers. * However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * `findAndModify` helpers support limited validation. You can * enable these by setting the `runValidators` options, * respectively. * * If you need full-fledged validation, use the traditional approach of first * retrieving the document. * * Model.findById(id, function (err, doc) { * if (err) .. * doc.name = 'jason bourne'; * doc.save(callback); * }); * * @param {Object} conditions * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {ClientSession} [options.session=null] The session associated with this query. See [transactions docs](/docs/transactions.html). 
* @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Function} [callback] * @return {Query} * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command * @api public */ Model.findOneAndRemove = function(conditions, options, callback) { _checkContext(this, 'findOneAndRemove'); if (arguments.length === 1 && typeof conditions === 'function') { const msg = 'Model.findOneAndRemove(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findOneAndRemove(conditions, callback)\n' + ' ' + this.modelName + '.findOneAndRemove(conditions)\n' + ' ' + this.modelName + '.findOneAndRemove()\n'; throw new TypeError(msg); } if (typeof options === 'function') { callback = options; options = undefined; } callback = this.$handleCallbackError(callback); let fields; if (options) { fields = options.select; options.select = undefined; } const mq = new this.Query({}, {}, this, this.collection); mq.select(fields); return mq.findOneAndRemove(conditions, options, callback); }; /** * Issue a mongodb findAndModify remove command by a document's _id field. `findByIdAndRemove(id, ...)` is equivalent to `findOneAndRemove({ _id: id }, ...)`. * * Finds a matching document, removes it, passing the found document (if any) to the callback. * * Executes the query if `callback` is passed. * * This function triggers the following middleware. * * - `findOneAndRemove()` * * ####Options: * * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `select`: sets the document fields to return * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findByIdAndRemove(id, options, callback) // executes * A.findByIdAndRemove(id, options) // return Query * A.findByIdAndRemove(id, callback) // executes * A.findByIdAndRemove(id) // returns Query * A.findByIdAndRemove() // returns Query * * @param {Object|Number|String} id value of `_id` to query by * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {ClientSession} [options.session=null] The session associated with this query. See [transactions docs](/docs/transactions.html). * @param {Function} [callback] * @return {Query} * @see Model.findOneAndRemove #model_Model.findOneAndRemove * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command */ Model.findByIdAndRemove = function(id, options, callback) { _checkContext(this, 'findByIdAndRemove'); if (arguments.length === 1 && typeof id === 'function') { const msg = 'Model.findByIdAndRemove(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findByIdAndRemove(id, callback)\n' + ' ' + this.modelName + '.findByIdAndRemove(id)\n' + ' ' + this.modelName + '.findByIdAndRemove()\n'; throw new TypeError(msg); } callback = this.$handleCallbackError(callback); return this.findOneAndRemove({_id: id}, options, callback); }; /** * Shortcut for saving one or more documents to the database. * `MyModel.create(docs)` does `new MyModel(doc).save()` for every doc in * docs. 
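 *
 * For instance, here is a minimal sketch (using a hypothetical `Character`
 * model, for illustration only) of calling `create()` with async/await; the
 * examples further below show the callback and spread forms:
 *
 *     // Hypothetical model, not part of this module
 *     const Character = mongoose.model('Character', new Schema({ name: String }));
 *
 *     // With no callback, `create()` returns a promise
 *     const doc = await Character.create({ name: 'Ned Stark' });
 *     doc.name; // 'Ned Stark'
 *
 *     // Passing an array resolves to an array of saved documents
 *     const docs = await Character.create([{ name: 'Arya' }, { name: 'Sansa' }]);
 *     docs.length; // 2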
* * This function triggers the following middleware. * * - `save()` * * ####Example: * * // pass a spread of docs and a callback * Candy.create({ type: 'jelly bean' }, { type: 'snickers' }, function (err, jellybean, snickers) { * if (err) // ... * }); * * // pass an array of docs * var array = [{ type: 'jelly bean' }, { type: 'snickers' }]; * Candy.create(array, function (err, candies) { * if (err) // ... * * var jellybean = candies[0]; * var snickers = candies[1]; * // ... * }); * * // callback is optional; use the returned promise if you like: * var promise = Candy.create({ type: 'jawbreaker' }); * promise.then(function (jawbreaker) { * // ... * }) * * @param {Array|Object} docs Documents to insert, as a spread or array * @param {Object} [options] Options passed down to `save()`. To specify `options`, `docs` **must** be an array, not a spread. * @param {Function} [callback] callback * @return {Promise} * @api public */ Model.create = function create(doc, options, callback) { _checkContext(this, 'create'); let args; let cb; const discriminatorKey = this.schema.options.discriminatorKey; if (Array.isArray(doc)) { args = doc; cb = typeof options === 'function' ? options : callback; options = options != null && typeof options === 'object' ? options : {}; } else { const last = arguments[arguments.length - 1]; options = {}; // Handle falsy callbacks re: #5061 if (typeof last === 'function' || !last) { cb = last; args = utils.args(arguments, 0, arguments.length - 1); } else { args = utils.args(arguments); } if (args.length === 2 && args[0] != null && args[1] != null && args[0].session == null && last.session != null && last.session.constructor.name === 'ClientSession' && !this.schema.path('session')) { // Probably means the user is running into the common mistake of trying // to use a spread to specify options, see gh-7535 console.warn('WARNING: to pass a `session` to `Model.create()` in ' + 'Mongoose, you **must** pass an array as the first argument. See: ' + 'https://mongoosejs.com/docs/api.html#model_Model.create'); } } return promiseOrCallback(cb, cb => { cb = this.$wrapCallback(cb); if (args.length === 0) { return cb(null); } const toExecute = []; let firstError; args.forEach(doc => { toExecute.push(callback => { const Model = this.discriminators && doc[discriminatorKey] != null ? 
this.discriminators[doc[discriminatorKey]] || getDiscriminatorByValue(this, doc[discriminatorKey]) : this; if (Model == null) { throw new MongooseError(`Discriminator "${doc[discriminatorKey]}" not ` + `found for model "${this.modelName}"`); } let toSave = doc; const callbackWrapper = (error, doc) => { if (error) { if (!firstError) { firstError = error; } return callback(null, { error: error }); } callback(null, { doc: doc }); }; if (!(toSave instanceof Model)) { try { toSave = new Model(toSave); } catch (error) { return callbackWrapper(error); } } toSave.save(options, callbackWrapper); }); }); let numFns = toExecute.length; if (numFns === 0) { return cb(null, []); } const _done = (error, res) => { const savedDocs = []; const len = res.length; for (let i = 0; i < len; ++i) { if (res[i].doc) { savedDocs.push(res[i].doc); } } if (firstError) { return cb(firstError, savedDocs); } if (doc instanceof Array) { cb(null, savedDocs); } else { cb.apply(this, [null].concat(savedDocs)); } }; const _res = []; toExecute.forEach((fn, i) => { fn((err, res) => { _res[i] = res; if (--numFns <= 0) { return _done(null, _res); } }); }); }, this.events); }; /** * _Requires a replica set running MongoDB >= 3.6.0._ Watches the * underlying collection for changes using * [MongoDB change streams](https://docs.mongodb.com/manual/changeStreams/). * * This function does **not** trigger any middleware. In particular, it * does **not** trigger aggregate middleware. * * The ChangeStream object is an event emitter that emits the following events: * * - 'change': A change occurred, see below example * - 'error': An unrecoverable error occurred. In particular, change streams currently error out if they lose connection to the replica set primary. Follow [this GitHub issue](https://github.com/Automattic/mongoose/issues/6799) for updates. * - 'end': Emitted if the underlying stream is closed * - 'close': Emitted if the underlying stream is closed * * ####Example: * * const doc = await Person.create({ name: 'Ned Stark' }); * const changeStream = Person.watch().on('change', change => console.log(change)); * // Will print from the above `console.log()`: * // { _id: { _data: ... }, * // operationType: 'delete', * // ns: { db: 'mydb', coll: 'Person' }, * // documentKey: { _id: 5a51b125c5500f5aa094c7bd } } * await doc.remove(); * * @param {Array} [pipeline] * @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/3.0/api/Collection.html#watch) * @return {ChangeStream} mongoose-specific change stream wrapper, inherits from EventEmitter * @api public */ Model.watch = function(pipeline, options) { _checkContext(this, 'watch'); const changeStreamThunk = cb => { if (this.collection.buffer) { this.collection.addQueue(() => { if (this.closed) { return; } const driverChangeStream = this.collection.watch(pipeline, options); cb(null, driverChangeStream); }); } else { const driverChangeStream = this.collection.watch(pipeline, options); cb(null, driverChangeStream); } }; return new ChangeStream(changeStreamThunk, pipeline, options); }; /** * _Requires MongoDB >= 3.6.0._ Starts a [MongoDB session](https://docs.mongodb.com/manual/release-notes/3.6/#client-sessions) * for benefits like causal consistency, [retryable writes](https://docs.mongodb.com/manual/core/retryable-writes/), * and [transactions](http://thecodebarbarian.com/a-node-js-perspective-on-mongodb-4-transactions.html). * * Calling `MyModel.startSession()` is equivalent to calling `MyModel.db.startSession()`. 
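 *
 * As a sketch of how a session is commonly used for a transaction (this
 * assumes a replica set running MongoDB >= 4.0 and relies on the MongoDB
 * driver's `ClientSession#withTransaction()` helper; the hypothetical
 * `Person` model is for illustration only):
 *
 *     const session = await Person.startSession();
 *     try {
 *       await session.withTransaction(async () => {
 *         // Every operation that receives `session` joins the transaction
 *         await Person.create([{ name: 'Jean Valjean' }], { session });
 *         await Person.updateOne({ name: 'Javert' }, { retired: true }, { session });
 *       });
 *     } finally {
 *       session.endSession();
 *     }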
* * This function does not trigger any middleware. * * ####Example: * * const session = await Person.startSession(); * let doc = await Person.findOne({ name: 'Ned Stark' }, null, { session }); * await doc.remove(); * // `doc` will always be null, even if reading from a replica set * // secondary. Without causal consistency, it is possible to * // get a doc back from the below query if the query reads from a * // secondary that is experiencing replication lag. * doc = await Person.findOne({ name: 'Ned Stark' }, null, { session, readPreference: 'secondary' }); * * @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/3.0/api/MongoClient.html#startSession) * @param {Boolean} [options.causalConsistency=true] set to false to disable causal consistency * @param {Function} [callback] * @return {Promise<ClientSession>} promise that resolves to a MongoDB driver `ClientSession` * @api public */ Model.startSession = function() { _checkContext(this, 'startSession'); return this.db.startSession.apply(this.db, arguments); }; /** * Shortcut for validating an array of documents and inserting them into * MongoDB if they're all valid. This function is faster than `.create()` * because it only sends one operation to the server, rather than one for each * document. * * Mongoose always validates each document **before** sending `insertMany` * to MongoDB. So if one document has a validation error, no documents will * be saved, unless you set * [the `ordered` option to false](https://docs.mongodb.com/manual/reference/method/db.collection.insertMany/#error-handling). * * This function does **not** trigger save middleware. * * This function triggers the following middleware. * * - `insertMany()` * * ####Example: * * var arr = [{ name: 'Star Wars' }, { name: 'The Empire Strikes Back' }]; * Movies.insertMany(arr, function(error, docs) {}); * * @param {Array|Object|*} doc(s) * @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#insertMany) * @param {Boolean} [options.ordered = true] if true, will fail fast on the first error encountered. If false, will insert all the documents it can and report errors later. An `insertMany()` with `ordered = false` is called an "unordered" `insertMany()`. * @param {Boolean} [options.rawResult = false] if false, the returned promise resolves to the documents that passed mongoose document validation. If `true`, will return the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#~insertWriteOpCallback) with a `mongoose` property that contains `validationErrors` if this is an unordered `insertMany`. * @param {Boolean} [options.lean = false] if `true`, skips hydrating and validating the documents. This option is useful if you need the extra performance, but Mongoose won't validate the documents before inserting. * @param {Function} [callback] callback * @return {Promise} * @api public */ Model.insertMany = function(arr, options, callback) { _checkContext(this, 'insertMany'); if (typeof options === 'function') { callback = options; options = null; } return promiseOrCallback(callback, cb => { this.$__insertMany(arr, options, cb); }, this.events); }; /*! 
* ignore */ Model.$__insertMany = function(arr, options, callback) { const _this = this; if (typeof options === 'function') { callback = options; options = null; } if (callback) { callback = this.$handleCallbackError(callback); callback = this.$wrapCallback(callback); } callback = callback || utils.noop; options = options || {}; const limit = get(options, 'limit', 1000); const rawResult = get(options, 'rawResult', false); const ordered = get(options, 'ordered', true); const lean = get(options, 'lean', false); if (!Array.isArray(arr)) { arr = [arr]; } const validationErrors = []; const toExecute = arr.map(doc => callback => { if (!(doc instanceof _this)) { try { doc = new _this(doc); } catch (err) { return callback(err); } } if (options.session != null) { doc.$session(options.session); } // If option `lean` is set to true bypass validation if (lean) { // we have to execute callback at the nextTick to be compatible // with parallelLimit, as `results` variable has TDZ issue if we // execute the callback synchronously return process.nextTick(() => callback(null, doc)); } doc.validate({ __noPromise: true }, function(error) { if (error) { // Option `ordered` signals that insert should be continued after reaching // a failing insert. Therefore we delegate "null", meaning the validation // failed. It's up to the next function to filter out all failed models if (ordered === false) { validationErrors.push(error); return callback(null, null); } return callback(error); } callback(null, doc); }); }); parallelLimit(toExecute, limit, function(error, docs) { if (error) { callback(error, null); return; } // We filter all failed pre-validations by removing nulls const docAttributes = docs.filter(function(doc) { return doc != null; }); // Quickly escape while there aren't any valid docAttributes if (docAttributes.length < 1) { callback(null, []); return; } const docObjects = docAttributes.map(function(doc) { if (doc.schema.options.versionKey) { doc[doc.schema.options.versionKey] = 0; } if (doc.initializeTimestamps) { return doc.initializeTimestamps().toObject(internalToObjectOptions); } return doc.toObject(internalToObjectOptions); }); _this.collection.insertMany(docObjects, options, function(error, res) { if (error) { callback(error, null); return; } for (let i = 0; i < docAttributes.length; ++i) { docAttributes[i].$__reset(); _setIsNew(docAttributes[i], false); } if (rawResult) { if (ordered === false) { // Decorate with mongoose validation errors in case of unordered, // because then still do `insertMany()` res.mongoose = { validationErrors: validationErrors }; } return callback(null, res); } callback(null, docAttributes); }); }); }; /*! * ignore */ function _setIsNew(doc, val) { doc.isNew = val; doc.emit('isNew', val); doc.constructor.emit('isNew', val); const subdocs = doc.$__getAllSubdocs(); for (const subdoc of subdocs) { subdoc.isNew = val; } } /** * Sends multiple `insertOne`, `updateOne`, `updateMany`, `replaceOne`, * `deleteOne`, and/or `deleteMany` operations to the MongoDB server in one * command. This is faster than sending multiple independent operations (like) * if you use `create()`) because with `bulkWrite()` there is only one round * trip to MongoDB. * * Mongoose will perform casting on all operations you provide. * * This function does **not** trigger any middleware, not `save()` nor `update()`. * If you need to trigger * `save()` middleware for every document use [`create()`](http://mongoosejs.com/docs/api.html#model_Model.create) instead. 
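 *
 * In addition to the full example below, here is a minimal sketch (assuming
 * the same `Character` model used in the example below) of a single
 * `updateOne` op with `upsert: true`, which inserts the document when nothing
 * matches `filter`:
 *
 *     await Character.bulkWrite([
 *       {
 *         updateOne: {
 *           filter: { name: 'Tyrion Lannister' },
 *           update: { title: 'Hand of the Queen' },
 *           upsert: true // insert a new doc if no document matches `filter`
 *         }
 *       }
 *     ]);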
 *
 * ####Example:
 *
 *     Character.bulkWrite([
 *       {
 *         insertOne: {
 *           document: {
 *             name: 'Eddard Stark',
 *             title: 'Warden of the North'
 *           }
 *         }
 *       },
 *       {
 *         updateOne: {
 *           filter: { name: 'Eddard Stark' },
 *           // If you were using the MongoDB driver directly, you'd need to do
 *           // `update: { $set: { title: ... } }` but mongoose adds $set for
 *           // you.
 *           update: { title: 'Hand of the King' }
 *         }
 *       },
 *       {
 *         deleteOne: {
 *           filter: { name: 'Eddard Stark' }
 *         }
 *       }
 *     ]).then(res => {
 *       // Prints "1 1 1"
 *       console.log(res.insertedCount, res.modifiedCount, res.deletedCount);
 *     });
 *
 * The [supported operations](https://docs.mongodb.com/manual/reference/method/db.collection.bulkWrite/#db.collection.bulkWrite) are:
 *
 * - `insertOne`
 * - `updateOne`
 * - `updateMany`
 * - `deleteOne`
 * - `deleteMany`
 * - `replaceOne`
 *
 * @param {Array} ops
 * @param {Object} [ops.insertOne.document] The document to insert
 * @param {Object} [ops.updateOne.filter] Update the first document that matches this filter
 * @param {Object} [ops.updateOne.update] An object containing [update operators](https://docs.mongodb.com/manual/reference/operator/update/)
 * @param {Boolean} [ops.updateOne.upsert=false] If true, insert a doc if none match
 * @param {Object} [ops.updateOne.collation] The [MongoDB collation](https://thecodebarbarian.com/a-nodejs-perspective-on-mongodb-34-collations) to use
 * @param {Array} [ops.updateOne.arrayFilters] The [array filters](https://thecodebarbarian.com/a-nodejs-perspective-on-mongodb-36-array-filters.html) used in `update`
 * @param {Object} [ops.updateMany.filter] Update all the documents that match this filter
 * @param {Object} [ops.updateMany.update] An object containing [update operators](https://docs.mongodb.com/manual/reference/operator/update/)
 * @param {Boolean} [ops.updateMany.upsert=false] If true, insert a doc if no documents match `filter`
 * @param {Object} [ops.updateMany.collation] The [MongoDB collation](https://thecodebarbarian.com/a-nodejs-perspective-on-mongodb-34-collations) to use
 * @param {Array} [ops.updateMany.arrayFilters] The [array filters](https://thecodebarbarian.com/a-nodejs-perspective-on-mongodb-36-array-filters.html) used in `update`
 * @param {Object} [ops.deleteOne.filter] Delete the first document that matches this filter
 * @param {Object} [ops.deleteMany.filter] Delete all documents that match this filter
 * @param {Object} [ops.replaceOne.filter] Replace the first document that matches this filter
 * @param {Object} [ops.replaceOne.replacement] The replacement document
 * @param {Boolean} [ops.replaceOne.upsert=false] If true, insert a doc if no documents match `filter`
 * @param {Object} [options]
 * @param {Boolean} [options.ordered=true] If true, execute writes in order and stop at the first error. If false, execute writes in parallel and continue until all writes have either succeeded or errored.
 * @param {ClientSession} [options.session=null] The session associated with this bulk write. See [transactions docs](/docs/transactions.html).
 * @param {String|number} [options.w=1] The [write concern](https://docs.mongodb.com/manual/reference/write-concern/). See [`Query#w()`](/docs/api.html#query_Query-w) for more information.
 * @param {number} [options.wtimeout=null] The [write concern timeout](https://docs.mongodb.com/manual/reference/write-concern/#wtimeout).
* @param {Boolean} [options.j=true] If false, disable [journal acknowledgement](https://docs.mongodb.com/manual/reference/write-concern/#j-option) * @param {Boolean} [options.bypassDocumentValidation=false] If true, disable [MongoDB server-side schema validation](https://docs.mongodb.com/manual/core/schema-validation/) for all writes in this bulk. * @param {Function} [callback] callback `function(error, bulkWriteOpResult) {}` * @return {Promise} resolves to a [`BulkWriteOpResult`](http://mongodb.github.io/node-mongodb-native/3.1/api/Collection.html#~BulkWriteOpResult) if the operation succeeds * @api public */ Model.bulkWrite = function(ops, options, callback) { _checkContext(this, 'bulkWrite'); if (typeof options === 'function') { callback = options; options = null; } options = options || {}; const validations = ops.map(op => castBulkWrite(this, op, options)); callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { cb = this.$wrapCallback(cb); each(validations, (fn, cb) => fn(cb), error => { if (error) { return cb(error); } this.collection.bulkWrite(ops, options, (error, res) => { if (error) { return cb(error); } cb(null, res); }); }); }, this.events); }; /** * Shortcut for creating a new Document from existing raw data, pre-saved in the DB. * The document returned has no paths marked as modified initially. * * ####Example: * * // hydrate previous data into a Mongoose document * var mongooseCandy = Candy.hydrate({ _id: '54108337212ffb6d459f854c', type: 'jelly bean' }); * * @param {Object} obj * @return {Document} document instance * @api public */ Model.hydrate = function(obj) { _checkContext(this, 'hydrate'); const model = require('./queryhelpers').createModel(this, obj); model.init(obj); return model; }; /** * Updates one document in the database without returning it. * * This function triggers the following middleware. * * - `update()` * * ####Examples: * * MyModel.update({ age: { $gt: 18 } }, { oldEnough: true }, fn); * * const res = await MyModel.update({ name: 'Tobi' }, { ferret: true }); * res.n; // Number of documents that matched `{ name: 'Tobi' }` * // Number of documents that were changed. If every doc matched already * // had `ferret` set to `true`, `nModified` will be 0. * res.nModified; * * ####Valid options: * * - `strict` (boolean): overrides the [schema-level `strict` option](/docs/guide.html#strict) for this update * - `upsert` (boolean): whether to create the doc if it doesn't match (false) * - `writeConcern` (object): sets the [write concern](https://docs.mongodb.com/manual/reference/write-concern/) for replica sets. Overrides the [schema-level write concern](/docs/guide.html#writeConcern) * - `omitUndefined` (boolean): If true, delete any properties whose value is `undefined` when casting an update. In other words, if this is set, Mongoose will delete `baz` from the update in `Model.updateOne({}, { foo: 'bar', baz: undefined })` before sending the update to the server. * - `multi` (boolean): whether multiple documents should be updated (false) * - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema. * - `setDefaultsOnInsert` (boolean): if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. 
This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/). * - `timestamps` (boolean): If set to `false` and [schema-level timestamps](/docs/guide.html#timestamps) are enabled, skip timestamps for this update. Does nothing if schema-level timestamps are not set. * - `overwrite` (boolean): disables update-only mode, allowing you to overwrite the doc (false) * * All `update` values are cast to their appropriate SchemaTypes before being sent. * * The `callback` function receives `(err, rawResponse)`. * * - `err` is the error if any occurred * - `rawResponse` is the full response from Mongo * * ####Note: * * All top level keys which are not `atomic` operation names are treated as set operations: * * ####Example: * * var query = { name: 'borne' }; * Model.update(query, { name: 'jason bourne' }, options, callback); * * // is sent as * Model.update(query, { $set: { name: 'jason bourne' }}, options, function(err, res)); * // if overwrite option is false. If overwrite is true, sent without the $set wrapper. * * This helps prevent accidentally overwriting all documents in your collection with `{ name: 'jason bourne' }`. * * ####Note: * * Be careful to not use an existing model instance for the update clause (this won't work and can cause weird behavior like infinite loops). Also, ensure that the update clause does not have an _id property, which causes Mongo to return a "Mod on _id not allowed" error. * * ####Note: * * Mongoose casts values and runs setters when using update. The following * features are **not** applied by default. * * - [defaults](/docs/defaults.html#the-setdefaultsoninsert-option) * - [validators](/docs/validation.html#update-validators) * - middleware * * If you need document middleware and fully-featured validation, load the * document first and then use [`save()`](/docs/api.html#model_Model-save). * * Model.findOne({ name: 'borne' }, function (err, doc) { * if (err) .. * doc.name = 'jason bourne'; * doc.save(callback); * }) * * @see strict http://mongoosejs.com/docs/guide.html#strict * @see response http://docs.mongodb.org/v2.6/reference/command/update/#output * @param {Object} filter * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Boolean} [options.upsert=false] if true, and no documents found, insert a new document * @param {Object} [options.writeConcern=null] sets the [write concern](https://docs.mongodb.com/manual/reference/write-concern/) for replica sets. Overrides the [schema-level write concern](/docs/guide.html#writeConcern) * @param {Boolean} [options.omitUndefined=false] If true, delete any properties whose value is `undefined` when casting an update. In other words, if this is set, Mongoose will delete `baz` from the update in `Model.updateOne({}, { foo: 'bar', baz: undefined })` before sending the update to the server. * @param {Boolean} [options.multi=false] whether multiple documents should be updated or just the first one that matches `filter`. * @param {Boolean} [options.runValidators=false] if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema. 
* @param {Boolean} [options.setDefaultsOnInsert=false] if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/). * @param {Boolean} [options.timestamps=null] If set to `false` and [schema-level timestamps](/docs/guide.html#timestamps) are enabled, skip timestamps for this update. Does nothing if schema-level timestamps are not set. * @param {Boolean} [options.overwrite=false] By default, if you don't include any [update operators](https://docs.mongodb.com/manual/reference/operator/update/) in `doc`, Mongoose will wrap `doc` in `$set` for you. This prevents you from accidentally overwriting the document. This option tells Mongoose to skip adding `$set`. * @param {Function} [callback] params are (error, writeOpResult) * @param {Function} [callback] * @return {Query} * @see MongoDB docs https://docs.mongodb.com/manual/reference/command/update/#update-command-output * @see writeOpResult http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#~WriteOpResult * @see Query docs https://mongoosejs.com/docs/queries.html * @api public */ Model.update = function update(conditions, doc, options, callback) { _checkContext(this, 'update'); return _update(this, 'update', conditions, doc, options, callback); }; /** * Same as `update()`, except MongoDB will update _all_ documents that match * `filter` (as opposed to just the first one) regardless of the value of * the `multi` option. * * **Note** updateMany will _not_ fire update middleware. Use `pre('updateMany')` * and `post('updateMany')` instead. * * ####Example: * const res = await Person.updateMany({ name: /Stark$/ }, { isDeleted: true }); * res.n; // Number of documents matched * res.nModified; // Number of documents modified * * This function triggers the following middleware. * * - `updateMany()` * * @param {Object} filter * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Boolean} [options.upsert=false] if true, and no documents found, insert a new document * @param {Object} [options.writeConcern=null] sets the [write concern](https://docs.mongodb.com/manual/reference/write-concern/) for replica sets. Overrides the [schema-level write concern](/docs/guide.html#writeConcern) * @param {Boolean} [options.omitUndefined=false] If true, delete any properties whose value is `undefined` when casting an update. In other words, if this is set, Mongoose will delete `baz` from the update in `Model.updateOne({}, { foo: 'bar', baz: undefined })` before sending the update to the server. * @param {Boolean} [options.timestamps=null] If set to `false` and [schema-level timestamps](/docs/guide.html#timestamps) are enabled, skip timestamps for this update. Does nothing if schema-level timestamps are not set. * @param {Function} [callback] `function(error, res) {}` where `res` has 3 properties: `n`, `nModified`, `ok`. 
* @return {Query} * @see Query docs https://mongoosejs.com/docs/queries.html * @see MongoDB docs https://docs.mongodb.com/manual/reference/command/update/#update-command-output * @see writeOpResult http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#~WriteOpResult * @api public */ Model.updateMany = function updateMany(conditions, doc, options, callback) { _checkContext(this, 'updateMany'); return _update(this, 'updateMany', conditions, doc, options, callback); }; /** * Same as `update()`, except it does not support the `multi` or `overwrite` * options. * * - MongoDB will update _only_ the first document that matches `filter` regardless of the value of the `multi` option. * - Use `replaceOne()` if you want to overwrite an entire document rather than using atomic operators like `$set`. * * ####Example: * const res = await Person.updateOne({ name: 'Jean-Luc Picard' }, { ship: 'USS Enterprise' }); * res.n; // Number of documents matched * res.nModified; // Number of documents modified * * This function triggers the following middleware. * * - `updateOne()` * * @param {Object} filter * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Boolean} [options.upsert=false] if true, and no documents found, insert a new document * @param {Object} [options.writeConcern=null] sets the [write concern](https://docs.mongodb.com/manual/reference/write-concern/) for replica sets. Overrides the [schema-level write concern](/docs/guide.html#writeConcern) * @param {Boolean} [options.omitUndefined=false] If true, delete any properties whose value is `undefined` when casting an update. In other words, if this is set, Mongoose will delete `baz` from the update in `Model.updateOne({}, { foo: 'bar', baz: undefined })` before sending the update to the server. * @param {Boolean} [options.timestamps=null] If set to `false` and [schema-level timestamps](/docs/guide.html#timestamps) are enabled, skip timestamps for this update. Note that this allows you to overwrite timestamps. Does nothing if schema-level timestamps are not set. * @param {Function} [callback] params are (error, writeOpResult) * @return {Query} * @see Query docs https://mongoosejs.com/docs/queries.html * @see MongoDB docs https://docs.mongodb.com/manual/reference/command/update/#update-command-output * @see writeOpResult http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#~WriteOpResult * @api public */ Model.updateOne = function updateOne(conditions, doc, options, callback) { _checkContext(this, 'updateOne'); return _update(this, 'updateOne', conditions, doc, options, callback); }; /** * Same as `update()`, except MongoDB replace the existing document with the * given document (no atomic operators like `$set`). * * ####Example: * const res = await Person.replaceOne({ _id: 24601 }, { name: 'Jean Valjean' }); * res.n; // Number of documents matched * res.nModified; // Number of documents modified * * This function triggers the following middleware. 
* * - `replaceOne()` * * @param {Object} filter * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Boolean|String} [options.strict] overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) * @param {Boolean} [options.upsert=false] if true, and no documents found, insert a new document * @param {Object} [options.writeConcern=null] sets the [write concern](https://docs.mongodb.com/manual/reference/write-concern/) for replica sets. Overrides the [schema-level write concern](/docs/guide.html#writeConcern) * @param {Boolean} [options.omitUndefined=false] If true, delete any properties whose value is `undefined` when casting an update. In other words, if this is set, Mongoose will delete `baz` from the update in `Model.updateOne({}, { foo: 'bar', baz: undefined })` before sending the update to the server. * @param {Boolean} [options.timestamps=null] If set to `false` and [schema-level timestamps](/docs/guide.html#timestamps) are enabled, skip timestamps for this update. Does nothing if schema-level timestamps are not set. * @param {Function} [callback] `function(error, res) {}` where `res` has 3 properties: `n`, `nModified`, `ok`. * @return {Query} * @see Query docs https://mongoosejs.com/docs/queries.html * @see writeOpResult http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#~WriteOpResult * @return {Query} * @api public */ Model.replaceOne = function replaceOne(conditions, doc, options, callback) { _checkContext(this, 'replaceOne'); const versionKey = get(this, 'schema.options.versionKey', null); if (versionKey && !doc[versionKey]) { doc[versionKey] = 0; } return _update(this, 'replaceOne', conditions, doc, options, callback); }; /*! * Common code for `updateOne()`, `updateMany()`, `replaceOne()`, and `update()` * because they need to do the same thing */ function _update(model, op, conditions, doc, options, callback) { const mq = new model.Query({}, {}, model, model.collection); callback = model.$handleCallbackError(callback); // gh-2406 // make local deep copy of conditions if (conditions instanceof Document) { conditions = conditions.toObject(); } else { conditions = utils.clone(conditions); } options = typeof options === 'function' ? options : utils.clone(options); const versionKey = get(model, 'schema.options.versionKey', null); _decorateUpdateWithVersionKey(doc, options, versionKey); return mq[op](conditions, doc, options, callback); } /** * Executes a mapReduce command. * * `o` is an object specifying all mapReduce options as well as the map and reduce functions. All options are delegated to the driver implementation. See [node-mongodb-native mapReduce() documentation](http://mongodb.github.io/node-mongodb-native/api-generated/collection.html#mapreduce) for more detail about options. * * This function does not trigger any middleware. * * ####Example: * * var o = {}; * // `map()` and `reduce()` are run on the MongoDB server, not Node.js, * // these functions are converted to strings * o.map = function () { emit(this.name, 1) }; * o.reduce = function (k, vals) { return vals.length }; * User.mapReduce(o, function (err, results) { * console.log(results) * }) * * ####Other options: * * - `query` {Object} query filter object. 
* - `sort` {Object} sort input objects using this key * - `limit` {Number} max number of documents * - `keeptemp` {Boolean, default:false} keep temporary data * - `finalize` {Function} finalize function * - `scope` {Object} scope variables exposed to map/reduce/finalize during execution * - `jsMode` {Boolean, default:false} it is possible to make the execution stay in JS. Provided in MongoDB > 2.0.X * - `verbose` {Boolean, default:false} provide statistics on job execution time. * - `readPreference` {String} * - `out*` {Object, default: {inline:1}} sets the output target for the map reduce job. * * ####* out options: * * - `{inline:1}` the results are returned in an array * - `{replace: 'collectionName'}` add the results to collectionName: the results replace the collection * - `{reduce: 'collectionName'}` add the results to collectionName: if dups are detected, uses the reducer / finalize functions * - `{merge: 'collectionName'}` add the results to collectionName: if dups exist the new docs overwrite the old * * If `options.out` is set to `replace`, `merge`, or `reduce`, a Model instance is returned that can be used for further querying. Queries run against this model are all executed with the [`lean` option](/docs/tutorials/lean.html); meaning only the js object is returned and no Mongoose magic is applied (getters, setters, etc). * * ####Example: * * var o = {}; * // You can also define `map()` and `reduce()` as strings if your * // linter complains about `emit()` not being defined * o.map = 'function () { emit(this.name, 1) }'; * o.reduce = 'function (k, vals) { return vals.length }'; * o.out = { replace: 'createdCollectionNameForResults' } * o.verbose = true; * * User.mapReduce(o, function (err, model, stats) { * console.log('map reduce took %d ms', stats.processtime) * model.find().where('value').gt(10).exec(function (err, docs) { * console.log(docs); * }); * }) * * // `mapReduce()` returns a promise. 
However, ES6 promises can only * // resolve to exactly one value, * o.resolveToObject = true; * var promise = User.mapReduce(o); * promise.then(function (res) { * var model = res.model; * var stats = res.stats; * console.log('map reduce took %d ms', stats.processtime) * return model.find().where('value').gt(10).exec(); * }).then(function (docs) { * console.log(docs); * }).then(null, handleError).end() * * @param {Object} o an object specifying map-reduce options * @param {Function} [callback] optional callback * @see http://www.mongodb.org/display/DOCS/MapReduce * @return {Promise} * @api public */ Model.mapReduce = function mapReduce(o, callback) { _checkContext(this, 'mapReduce'); callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { cb = this.$wrapCallback(cb); if (!Model.mapReduce.schema) { const opts = {noId: true, noVirtualId: true, strict: false}; Model.mapReduce.schema = new Schema({}, opts); } if (!o.out) o.out = {inline: 1}; if (o.verbose !== false) o.verbose = true; o.map = String(o.map); o.reduce = String(o.reduce); if (o.query) { let q = new this.Query(o.query); q.cast(this); o.query = q._conditions; q = undefined; } this.collection.mapReduce(null, null, o, (err, res) => { if (err) { return cb(err); } if (res.collection) { // returned a collection, convert to Model const model = Model.compile('_mapreduce_' + res.collection.collectionName, Model.mapReduce.schema, res.collection.collectionName, this.db, this.base); model._mapreduce = true; res.model = model; return cb(null, res); } cb(null, res); }); }, this.events); }; /** * Performs [aggregations](http://docs.mongodb.org/manual/applications/aggregation/) on the models collection. * * If a `callback` is passed, the `aggregate` is executed and a `Promise` is returned. If a callback is not passed, the `aggregate` itself is returned. * * This function triggers the following middleware. * * - `aggregate()` * * ####Example: * * // Find the max balance of all accounts * Users.aggregate([ * { $group: { _id: null, maxBalance: { $max: '$balance' }}}, * { $project: { _id: 0, maxBalance: 1 }} * ]). * then(function (res) { * console.log(res); // [ { maxBalance: 98000 } ] * }); * * // Or use the aggregation pipeline builder. * Users.aggregate(). * group({ _id: null, maxBalance: { $max: '$balance' } }). * project('-id maxBalance'). * exec(function (err, res) { * if (err) return handleError(err); * console.log(res); // [ { maxBalance: 98 } ] * }); * * ####NOTE: * * - Mongoose does **not** cast aggregation pipelines to the model's schema because `$project` and `$group` operators allow redefining the "shape" of the documents at any stage of the pipeline, which may leave documents in an incompatible format. You can use the [mongoose-cast-aggregation plugin](https://github.com/AbdelrahmanHafez/mongoose-cast-aggregation) to enable minimal casting for aggregation pipelines. * - The documents returned are plain javascript objects, not mongoose documents (since any shape of document can be returned). 
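 *
 * Because Mongoose does not cast the pipeline (see the note above), here is a
 * sketch of the kind of manual cast you may need when matching on an
 * `ObjectId` (the `BlogPost` model and `req.params.authorId` are illustrative
 * only):
 *
 *     // The id arrives as a string, so cast it yourself before `$match`
 *     const authorId = new mongoose.Types.ObjectId(req.params.authorId);
 *     const counts = await BlogPost.aggregate([
 *       { $match: { author: authorId } },
 *       { $group: { _id: '$author', count: { $sum: 1 } } }
 *     ]);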
* * @see Aggregate #aggregate_Aggregate * @see MongoDB http://docs.mongodb.org/manual/applications/aggregation/ * @param {Array} [pipeline] aggregation pipeline as an array of objects * @param {Function} [callback] * @return {Aggregate} * @api public */ Model.aggregate = function aggregate(pipeline, callback) { _checkContext(this, 'aggregate'); if (arguments.length > 2 || get(pipeline, 'constructor.name') === 'Object') { throw new MongooseError('Mongoose 5.x disallows passing a spread of operators ' + 'to `Model.aggregate()`. Instead of ' + '`Model.aggregate({ $match }, { $skip })`, do ' + '`Model.aggregate([{ $match }, { $skip }])`'); } if (typeof pipeline === 'function') { callback = pipeline; pipeline = []; } const aggregate = new Aggregate(pipeline || []); aggregate.model(this); if (typeof callback === 'undefined') { return aggregate; } callback = this.$handleCallbackError(callback); callback = this.$wrapCallback(callback); aggregate.exec(callback); return aggregate; }; /** * Casts and validates the given object against this model's schema, passing the * given `context` to custom validators. * * ####Example: * * const Model = mongoose.model('Test', Schema({ * name: { type: String, required: true }, * age: { type: Number, required: true } * }); * * try { * await Model.validate({ name: null }, ['name']) * } catch (err) { * err instanceof mongoose.Error.ValidationError; // true * Object.keys(err.errors); // ['name'] * } * * @param {Object} obj * @param {Array} pathsToValidate * @param {Object} [context] * @param {Function} [callback] * @return {Promise|undefined} * @api public */ Model.validate = function validate(obj, pathsToValidate, context, callback) { return promiseOrCallback(callback, cb => { const schema = this.schema; let paths = Object.keys(schema.paths); if (pathsToValidate != null) { const _pathsToValidate = new Set(pathsToValidate); paths = paths.filter(p => { const pieces = p.split('.'); let cur = pieces[0]; for (let i = 0; i < pieces.length; ++i) { if (_pathsToValidate.has(cur)) { return true; } cur += '.' 
+ pieces[i]; } return _pathsToValidate.has(p); }); } let remaining = paths.length; let error = null; for (const path of paths) { const schematype = schema.path(path); if (schematype == null) { _checkDone(); continue; } const pieces = path.split('.'); let cur = obj; for (let i = 0; i < pieces.length - 1; ++i) { cur = cur[pieces[i]]; } let val = get(obj, path, void 0); if (val != null) { try { val = schematype.cast(val); cur[pieces[pieces.length - 1]] = val; } catch (err) { error = error || new ValidationError(); error.addError(path, err); _checkDone(); continue; } } schematype.doValidate(val, err => { if (err) { error = error || new ValidationError(); if (err instanceof ValidationError) { for (const _err of Object.keys(err.errors)) { error.addError(`${path}.${err.errors[_err].path}`, _err); } } else { error.addError(err.path, err); } } _checkDone(); }, context); } function _checkDone() { if (--remaining <= 0) { return cb(error); } } }); }; /** * Implements `$geoSearch` functionality for Mongoose * * This function does not trigger any middleware * * ####Example: * * var options = { near: [10, 10], maxDistance: 5 }; * Locations.geoSearch({ type : "house" }, options, function(err, res) { * console.log(res); * }); * * ####Options: * - `near` {Array} x,y point to search for * - `maxDistance` {Number} the maximum distance from the point near that a result can be * - `limit` {Number} The maximum number of results to return * - `lean` {Object|Boolean} return the raw object instead of the Mongoose Model * * @param {Object} conditions an object that specifies the match condition (required) * @param {Object} options for the geoSearch, some (near, maxDistance) are required * @param {Object|Boolean} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](/docs/api.html#query_Query-lean) and the [Mongoose lean tutorial](/docs/tutorials/lean.html). * @param {Function} [callback] optional callback * @return {Promise} * @see http://docs.mongodb.org/manual/reference/command/geoSearch/ * @see http://docs.mongodb.org/manual/core/geohaystack/ * @api public */ Model.geoSearch = function(conditions, options, callback) { _checkContext(this, 'geoSearch'); if (typeof options === 'function') { callback = options; options = {}; } callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { cb = this.$wrapCallback(cb); let error; if (conditions === undefined || !utils.isObject(conditions)) { error = new MongooseError('Must pass conditions to geoSearch'); } else if (!options.near) { error = new MongooseError('Must specify the near option in geoSearch'); } else if (!Array.isArray(options.near)) { error = new MongooseError('near option must be an array [x, y]'); } if (error) { return cb(error); } // send the conditions in the options object options.search = conditions; this.collection.geoHaystackSearch(options.near[0], options.near[1], options, (err, res) => { if (err) { return cb(err); } let count = res.results.length; if (options.lean || count === 0) { return cb(null, res.results); } const errSeen = false; function init(err) { if (err && !errSeen) { return cb(err); } if (!--count && !errSeen) { cb(null, res.results); } } for (let i = 0; i < res.results.length; ++i) { const temp = res.results[i]; res.results[i] = new this(); res.results[i].init(temp, {}, init); } }); }, this.events); }; /** * Populates document references. 
 *
 * ####Available top-level options:
 *
 * - path: space delimited path(s) to populate
 * - select: optional fields to select
 * - match: optional query conditions to match
 * - model: optional name of the model to use for population
 * - options: optional query options like sort, limit, etc
 * - justOne: optional boolean, if true Mongoose will always set `path` to a single document (or `null` if no document matches); if false, `path` will always be an array. Inferred from schema by default.
 *
 * ####Examples:
 *
 *     // populates a single object
 *     User.findById(id, function (err, user) {
 *       var opts = [
 *         { path: 'company', match: { x: 1 }, select: 'name' },
 *         { path: 'notes', options: { limit: 10 }, model: 'override' }
 *       ];
 *
 *       User.populate(user, opts, function (err, user) {
 *         console.log(user);
 *       });
 *     });
 *
 *     // populates an array of objects
 *     User.find(match, function (err, users) {
 *       var opts = [{ path: 'company', match: { x: 1 }, select: 'name' }];
 *
 *       var promise = User.populate(users, opts);
 *       promise.then(console.log);
 *     })
 *
 *     // imagine a Weapon model exists with two saved documents:
 *     //   { _id: 389, name: 'whip' }
 *     //   { _id: 8921, name: 'boomerang' }
 *     // and this schema:
 *     // new Schema({
 *     //   name: String,
 *     //   weapon: { type: ObjectId, ref: 'Weapon' }
 *     // });
 *
 *     var user = { name: 'Indiana Jones', weapon: 389 };
 *     Weapon.populate(user, { path: 'weapon', model: 'Weapon' }, function (err, user) {
 *       console.log(user.weapon.name); // whip
 *     })
 *
 *     // populate many plain objects
 *     var users = [{ name: 'Indiana Jones', weapon: 389 }]
 *     users.push({ name: 'Batman', weapon: 8921 })
 *     Weapon.populate(users, { path: 'weapon' }, function (err, users) {
 *       users.forEach(function (user) {
 *         console.log('%s uses a %s', user.name, user.weapon.name)
 *         // Indiana Jones uses a whip
 *         // Batman uses a boomerang
 *       });
 *     });
 *     // Note that we didn't need to specify the Weapon model because
 *     // it is in the schema's ref
 *
 * @param {Document|Array} docs Either a single document or array of documents to populate.
 * @param {Object} options A hash of key/val (path, options) used for population.
 * @param {boolean} [options.retainNullValues=false] by default, Mongoose removes null and undefined values from populated arrays. Use this option to make `populate()` retain `null` and `undefined` array entries.
 * @param {boolean} [options.getters=false] if true, Mongoose will call any getters defined on the `localField`. By default, Mongoose gets the raw value of `localField`. For example, you would need to set this option to `true` if you wanted to [add a `lowercase` getter to your `localField`](/docs/schematypes.html#schematype-options).
 * @param {boolean} [options.clone=false] When you do `BlogPost.find().populate('author')`, blog posts with the same author will share 1 copy of an `author` doc. Enable this option to make Mongoose clone populated docs before assigning them.
 * @param {Object|Function} [options.match=null] Add an additional filter to the populate query. Can be a filter object containing [MongoDB query syntax](https://docs.mongodb.com/manual/tutorial/query-documents/), or a function that returns a filter object.
 * @param {Boolean} [options.skipInvalidIds=false] By default, Mongoose throws a cast error if `localField` and `foreignField` schemas don't line up. If you enable this option, Mongoose will instead filter out any `localField` properties that cannot be cast to `foreignField`'s schema type.
* @param {Number} [options.perDocumentLimit=null] For legacy reasons, `limit` with `populate()` may give incorrect results because it only executes a single query for every document being populated. If you set `perDocumentLimit`, Mongoose will ensure correct `limit` per document by executing a separate query for each document to `populate()`. For example, `.find().populate({ path: 'test', perDocumentLimit: 2 })` will execute 2 additional queries if `.find()` returns 2 documents. * @param {Object} [options.options=null] Additional options like `limit` and `lean`. * @param {Function} [callback(err,doc)] Optional callback, executed upon completion. Receives `err` and the `doc(s)`. * @return {Promise} * @api public */ Model.populate = function(docs, paths, callback) { _checkContext(this, 'populate'); const _this = this; // normalized paths paths = utils.populate(paths); // data that should persist across subPopulate calls const cache = {}; callback = this.$handleCallbackError(callback); return promiseOrCallback(callback, cb => { cb = this.$wrapCallback(cb); _populate(_this, docs, paths, cache, cb); }, this.events); }; /*! * Populate helper * * @param {Model} model the model to use * @param {Document|Array} docs Either a single document or array of documents to populate. * @param {Object} paths * @param {Function} [cb(err,doc)] Optional callback, executed upon completion. Receives `err` and the `doc(s)`. * @return {Function} * @api private */ function _populate(model, docs, paths, cache, callback) { const length = paths.length; let pending = paths.length; if (length === 0) { return callback(null, docs); } // each path has its own query options and must be executed separately for (let i = 0; i < length; ++i) { populate(model, docs, paths[i], next); } function next(err) { if (err) { return callback(err, null); } if (--pending) { return; } callback(null, docs); } } /*! * Populates `docs` */ const excludeIdReg = /\s?-_id\s?/; const excludeIdRegGlobal = /\s?-_id\s?/g; function populate(model, docs, options, callback) { // normalize single / multiple docs passed if (!Array.isArray(docs)) { docs = [docs]; } if (docs.length === 0 || docs.every(utils.isNullOrUndefined)) { return callback(); } const modelsMap = getModelsMapForPopulate(model, docs, options); if (modelsMap instanceof MongooseError) { return immediate(function() { callback(modelsMap); }); } const len = modelsMap.length; let vals = []; function flatten(item) { // no need to include undefined values in our query return undefined !== item; } let _remaining = len; let hasOne = false; const params = []; for (let i = 0; i < len; ++i) { const mod = modelsMap[i]; let select = mod.options.select; const match = _formatMatch(mod.match); let ids = utils.array.flatten(mod.ids, flatten); ids = utils.array.unique(ids); const assignmentOpts = {}; assignmentOpts.sort = get(mod, 'options.options.sort', void 0); assignmentOpts.excludeId = excludeIdReg.test(select) || (select && select._id === 0); if (ids.length === 0 || ids.every(utils.isNullOrUndefined)) { // Ensure that we set populate virtuals to 0 or empty array even // if we don't actually execute a query because they don't have // a value by default. 
See gh-7731, gh-8230 --_remaining; if (mod.count || mod.isVirtual) { _assign(model, [], mod, assignmentOpts); } continue; } hasOne = true; if (mod.foreignField.size === 1) { const foreignField = Array.from(mod.foreignField)[0]; const foreignSchemaType = mod.model.schema.path(foreignField); if (foreignField !== '_id' || !match['_id']) { ids = _filterInvalidIds(ids, foreignSchemaType, mod.options.skipInvalidIds); match[foreignField] = { $in: ids }; } } else { const $or = []; if (Array.isArray(match.$or)) { match.$and = [{ $or: match.$or }, { $or: $or }]; delete match.$or; } else { match.$or = $or; } for (const foreignField of mod.foreignField) { if (foreignField !== '_id' || !match['_id']) { const foreignSchemaType = mod.model.schema.path(foreignField); ids = _filterInvalidIds(ids, foreignSchemaType, mod.options.skipInvalidIds); $or.push({ [foreignField]: { $in: ids } }); } } } if (assignmentOpts.excludeId) { // override the exclusion from the query so we can use the _id // for document matching during assignment. we'll delete the // _id back off before returning the result. if (typeof select === 'string') { select = select.replace(excludeIdRegGlobal, ' '); } else { // preserve original select conditions by copying select = utils.object.shallowCopy(select); delete select._id; } } if (mod.options.options && mod.options.options.limit) { assignmentOpts.originalLimit = mod.options.options.limit; } params.push([mod, match, select, assignmentOpts, _next]); } if (!hasOne) { return callback(); } for (const arr of params) { _execPopulateQuery.apply(null, arr); } function _next(err, valsFromDb) { if (err != null) { return callback(err, null); } vals = vals.concat(valsFromDb); if (--_remaining === 0) { _done(); } } function _done() { for (const arr of params) { const mod = arr[0]; const assignmentOpts = arr[3]; _assign(model, vals, mod, assignmentOpts); } callback(); } } /*! * ignore */ function _execPopulateQuery(mod, match, select, assignmentOpts, callback) { const subPopulate = utils.clone(mod.options.populate); const queryOptions = Object.assign({ skip: mod.options.skip, limit: mod.options.limit, perDocumentLimit: mod.options.perDocumentLimit }, mod.options.options); if (mod.count) { delete queryOptions.skip; } if (queryOptions.perDocumentLimit != null) { queryOptions.limit = queryOptions.perDocumentLimit; delete queryOptions.perDocumentLimit; } else if (queryOptions.limit != null) { queryOptions.limit = queryOptions.limit * mod.ids.length; } const query = mod.model.find(match, select, queryOptions); // If we're doing virtual populate and projection is inclusive and foreign // field is not selected, automatically select it because mongoose needs it. // If projection is exclusive and client explicitly unselected the foreign // field, that's the client's fault. for (const foreignField of mod.foreignField) { if (foreignField !== '_id' && query.selectedInclusively() && !isPathSelectedInclusive(query._fields, foreignField)) { query.select(foreignField); } } // If using count, still need the `foreignField` so we can match counts // to documents, otherwise we would need a separate `count()` for every doc. if (mod.count) { for (const foreignField of mod.foreignField) { query.select(foreignField); } } // If we need to sub-populate, call populate recursively if (subPopulate) { query.populate(subPopulate); } query.exec(callback); } /*! 
* ignore */ function _assign(model, vals, mod, assignmentOpts) { const options = mod.options; const isVirtual = mod.isVirtual; const justOne = mod.justOne; let _val; const lean = get(options, 'options.lean', false); const projection = parseProjection(get(options, 'select', null), true) || parseProjection(get(options, 'options.select', null), true); const len = vals.length; const rawOrder = {}; const rawDocs = {}; let key; let val; // Clone because `assignRawDocsToIdStructure` will mutate the array const allIds = utils.clone(mod.allIds); // optimization: // record the document positions as returned by // the query result. for (let i = 0; i < len; i++) { val = vals[i]; if (val == null) { continue; } for (const foreignField of mod.foreignField) { _val = utils.getValue(foreignField, val); if (Array.isArray(_val)) { _val = utils.array.flatten(_val); const _valLength = _val.length; for (let j = 0; j < _valLength; ++j) { let __val = _val[j]; if (__val instanceof Document) { __val = __val._id; } key = String(__val); if (rawDocs[key]) { if (Array.isArray(rawDocs[key])) { rawDocs[key].push(val); rawOrder[key].push(i); } else { rawDocs[key] = [rawDocs[key], val]; rawOrder[key] = [rawOrder[key], i]; } } else { if (isVirtual && !justOne) { rawDocs[key] = [val]; rawOrder[key] = [i]; } else { rawDocs[key] = val; rawOrder[key] = i; } } } } else { if (_val instanceof Document) { _val = _val._id; } key = String(_val); if (rawDocs[key]) { if (Array.isArray(rawDocs[key])) { rawDocs[key].push(val); rawOrder[key].push(i); } else { rawDocs[key] = [rawDocs[key], val]; rawOrder[key] = [rawOrder[key], i]; } } else { rawDocs[key] = val; rawOrder[key] = i; } } // flag each as result of population if (lean) { leanPopulateMap.set(val, mod.model); } else { val.$__.wasPopulated = true; } // gh-8460: if user used `-foreignField`, assume this means they // want the foreign field unset even if it isn't excluded in the query. if (projection != null && projection.hasOwnProperty('-' + foreignField)) { if (val.$__ != null) { val.set(foreignField, void 0); } else { mpath.unset(foreignField, val); } } } } assignVals({ originalModel: model, // If virtual, make sure to not mutate original field rawIds: mod.isVirtual ? allIds : mod.allIds, allIds: allIds, foreignField: mod.foreignField, rawDocs: rawDocs, rawOrder: rawOrder, docs: mod.docs, path: options.path, options: assignmentOpts, justOne: mod.justOne, isVirtual: mod.isVirtual, allOptions: mod, lean: lean, virtual: mod.virtual, count: mod.count, match: mod.match }); } /*! * Optionally filter out invalid ids that don't conform to foreign field's schema * to avoid cast errors (gh-7706) */ function _filterInvalidIds(ids, foreignSchemaType, skipInvalidIds) { ids = ids.filter(v => !(v instanceof SkipPopulateValue)); if (!skipInvalidIds) { return ids; } return ids.filter(id => { try { foreignSchemaType.cast(id); return true; } catch (err) { return false; } }); } /*! * Format `mod.match` given that it may be an array that we need to $or if * the client has multiple docs with match functions */ function _formatMatch(match) { if (Array.isArray(match)) { if (match.length > 1) { return { $or: [].concat(match.map(m => Object.assign({}, m))) }; } return Object.assign({}, match[0]); } return Object.assign({}, match); } /*! * Compiler utility. 
* * @param {String|Function} name model name or class extending Model * @param {Schema} schema * @param {String} collectionName * @param {Connection} connection * @param {Mongoose} base mongoose instance */ Model.compile = function compile(name, schema, collectionName, connection, base) { const versioningEnabled = schema.options.versionKey !== false; if (versioningEnabled && !schema.paths[schema.options.versionKey]) { // add versioning to top level documents only const o = {}; o[schema.options.versionKey] = Number; schema.add(o); } let model; if (typeof name === 'function' && name.prototype instanceof Model) { model = name; name = model.name; schema.loadClass(model, false); model.prototype.$isMongooseModelPrototype = true; } else { // generate new class model = function model(doc, fields, skipId) { model.hooks.execPreSync('createModel', doc); if (!(this instanceof model)) { return new model(doc, fields, skipId); } const discriminatorKey = model.schema.options.discriminatorKey; if (model.discriminators == null || doc == null || doc[discriminatorKey] == null) { Model.call(this, doc, fields, skipId); return; } // If discriminator key is set, use the discriminator instead (gh-7586) const Discriminator = model.discriminators[doc[discriminatorKey]] || getDiscriminatorByValue(model, doc[discriminatorKey]); if (Discriminator != null) { return new Discriminator(doc, fields, skipId); } // Otherwise, just use the top-level model Model.call(this, doc, fields, skipId); }; } model.hooks = schema.s.hooks.clone(); model.base = base; model.modelName = name; if (!(model.prototype instanceof Model)) { model.__proto__ = Model; model.prototype.__proto__ = Model.prototype; } model.model = function model(name) { return this.db.model(name); }; model.db = connection; model.prototype.db = connection; model.prototype[modelDbSymbol] = connection; model.discriminators = model.prototype.discriminators = undefined; model[modelSymbol] = true; model.events = new EventEmitter(); model.prototype.$__setSchema(schema); const _userProvidedOptions = schema._userProvidedOptions || {}; // `bufferCommands` is true by default... let bufferCommands = true; // First, take the global option if (connection.base.get('bufferCommands') != null) { bufferCommands = connection.base.get('bufferCommands'); } // Connection-specific overrides the global option if (connection.config.bufferCommands != null) { bufferCommands = connection.config.bufferCommands; } // And schema options override global and connection if (_userProvidedOptions.bufferCommands != null) { bufferCommands = _userProvidedOptions.bufferCommands; } const collectionOptions = { bufferCommands: bufferCommands, capped: schema.options.capped, autoCreate: schema.options.autoCreate, Promise: model.base.Promise }; model.prototype.collection = connection.collection( collectionName, collectionOptions ); model.prototype[modelCollectionSymbol] = model.prototype.collection; // apply methods and statics applyMethods(model, schema); applyStatics(model, schema); applyHooks(model, schema); applyStaticHooks(model, schema.s.hooks, schema.statics); model.schema = model.prototype.schema; model.collection = model.prototype.collection; // Create custom query constructor model.Query = function() { Query.apply(this, arguments); }; model.Query.prototype = Object.create(Query.prototype); model.Query.base = Query.base; applyQueryMiddleware(model.Query, model); applyQueryMethods(model, schema.query); return model; }; /*! 
* Register custom query methods for this model * * @param {Model} model * @param {Schema} schema */ function applyQueryMethods(model, methods) { for (const i in methods) { model.Query.prototype[i] = methods[i]; } } /*! * Subclass this model with `conn`, `schema`, and `collection` settings. * * @param {Connection} conn * @param {Schema} [schema] * @param {String} [collection] * @return {Model} */ Model.__subclass = function subclass(conn, schema, collection) { // subclass model using this connection and collection name const _this = this; const Model = function Model(doc, fields, skipId) { if (!(this instanceof Model)) { return new Model(doc, fields, skipId); } _this.call(this, doc, fields, skipId); }; Model.__proto__ = _this; Model.prototype.__proto__ = _this.prototype; Model.db = conn; Model.prototype.db = conn; Model.prototype[modelDbSymbol] = conn; _this[subclassedSymbol] = _this[subclassedSymbol] || []; _this[subclassedSymbol].push(Model); if (_this.discriminators != null) { Model.discriminators = {}; for (const key of Object.keys(_this.discriminators)) { Model.discriminators[key] = _this.discriminators[key]. __subclass(_this.db, _this.discriminators[key].schema, collection); } } const s = schema && typeof schema !== 'string' ? schema : _this.prototype.schema; const options = s.options || {}; const _userProvidedOptions = s._userProvidedOptions || {}; if (!collection) { collection = _this.prototype.schema.get('collection') || utils.toCollectionName(_this.modelName, this.base.pluralize()); } let bufferCommands = true; if (s) { if (conn.config.bufferCommands != null) { bufferCommands = conn.config.bufferCommands; } if (_userProvidedOptions.bufferCommands != null) { bufferCommands = _userProvidedOptions.bufferCommands; } } const collectionOptions = { bufferCommands: bufferCommands, capped: s && options.capped }; Model.prototype.collection = conn.collection(collection, collectionOptions); Model.prototype[modelCollectionSymbol] = Model.prototype.collection; Model.collection = Model.prototype.collection; // Errors handled internally, so ignore Model.init(() => {}); return Model; }; Model.$handleCallbackError = function(callback) { if (callback == null) { return callback; } if (typeof callback !== 'function') { throw new MongooseError('Callback must be a function, got ' + callback); } const _this = this; return function() { try { callback.apply(null, arguments); } catch (error) { _this.emit('error', error); } }; }; /*! * ignore */ Model.$wrapCallback = function(callback) { const serverSelectionError = new ServerSelectionError(); const _this = this; return function(err) { if (err != null && err.name === 'MongoServerSelectionError') { arguments[0] = serverSelectionError.assimilateError(err); } if (err != null && err.name === 'MongoNetworkError' && err.message.endsWith('timed out')) { _this.db.emit('timeout'); } return callback.apply(null, arguments); }; }; /** * Helper for console.log. Given a model named 'MyModel', returns the string * `'Model { MyModel }'`. * * ####Example: * * const MyModel = mongoose.model('Test', Schema({ name: String })); * MyModel.inspect(); // 'Model { Test }' * console.log(MyModel); // Prints 'Model { Test }' * * @api public */ Model.inspect = function() { return `Model { ${this.modelName} }`; }; if (util.inspect.custom) { /*! * Avoid Node deprecation warning DEP0079 */ Model[util.inspect.custom] = Model.inspect; } /*! * Module exports. */ module.exports = exports = Model;
1
14,148
I think a better place to put this might be `lib/options/PopulateOptions.js`. That should make it easier to follow, since checking options in `populate()` can get confusing.
Automattic-mongoose
js
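The comment above suggests centralizing populate option handling in a dedicated options class. A minimal sketch of what such a `lib/options/PopulateOptions.js` could look like, with the field list inferred from the options documented in this record; this is an assumption, not mongoose's actual implementation:

```js
'use strict';

// Hypothetical sketch: a plain options class so `populate()` can normalize
// and inspect its options in one place. Field names mirror the documented
// populate options; the defaults here are assumptions.
class PopulateOptions {
  constructor(obj) {
    obj = obj || {};
    this.path = obj.path;
    this.select = obj.select;
    this.match = obj.match;
    this.model = obj.model;
    this.options = obj.options;
    this.justOne = obj.justOne;
    this.perDocumentLimit = obj.perDocumentLimit;
    this.skipInvalidIds = obj.skipInvalidIds || false;
    this.retainNullValues = obj.retainNullValues || false;
    this.clone = obj.clone || false;
    this.getters = obj.getters || false;
  }
}

module.exports = PopulateOptions;
```

Keeping the recognized option names in a single constructor makes it easier to see what `populate()` actually accepts.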
@@ -41,13 +41,13 @@ var ( // NATProviderPinger pings provider and optionally hands off connection to consumer proxy. type NATProviderPinger interface { - PingProvider(ip string, providerPort, consumerPort, proxyPort int, stop <-chan struct{}) error + PingProvider(params Params, proxyPort int) (*net.UDPConn, error) } // NATPinger is responsible for pinging nat holes type NATPinger interface { NATProviderPinger - PingTarget(*Params) + PingTarget(Params) BindServicePort(key string, port int) Start() Stop()
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package traversal import ( "fmt" "net" "sync" "time" "github.com/mysteriumnetwork/node/core/port" "github.com/mysteriumnetwork/node/eventbus" "github.com/mysteriumnetwork/node/nat/event" "github.com/pkg/errors" "github.com/rs/zerolog/log" "golang.org/x/net/ipv4" ) // StageName represents hole-punching stage of NAT traversal const StageName = "hole_punching" var ( errNATPunchAttemptStopped = errors.New("NAT punch attempt stopped") errNATPunchAttemptTimedOut = errors.New("NAT punch attempt timed out") ) // NATProviderPinger pings provider and optionally hands off connection to consumer proxy. type NATProviderPinger interface { PingProvider(ip string, providerPort, consumerPort, proxyPort int, stop <-chan struct{}) error } // NATPinger is responsible for pinging nat holes type NATPinger interface { NATProviderPinger PingTarget(*Params) BindServicePort(key string, port int) Start() Stop() SetProtectSocketCallback(SocketProtect func(socket int) bool) Valid() bool } // PingConfig represents NAT pinger config. type PingConfig struct { Interval time.Duration Timeout time.Duration } // DefaultPingConfig returns default NAT pinger config. 
func DefaultPingConfig() *PingConfig { return &PingConfig{ Interval: 200 * time.Millisecond, Timeout: 10 * time.Second, } } // Pinger represents NAT pinger structure type Pinger struct { pingConfig *PingConfig pingTarget chan *Params stop chan struct{} stopNATProxy chan struct{} once sync.Once natProxy *NATProxy eventPublisher eventbus.Publisher } // PortSupplier provides port needed to run a service on type PortSupplier interface { Acquire() (port.Port, error) } // NewPinger returns Pinger instance func NewPinger(pingConfig *PingConfig, proxy *NATProxy, publisher eventbus.Publisher) NATPinger { return &Pinger{ pingConfig: pingConfig, pingTarget: make(chan *Params), stop: make(chan struct{}), stopNATProxy: make(chan struct{}), natProxy: proxy, eventPublisher: publisher, } } // Params contains session parameters needed to NAT ping remote peer type Params struct { ProviderPort int ConsumerPort int ConsumerPublicIP string ProxyPortMappingKey string Cancel chan struct{} } // Start starts NAT pinger and waits for PingTarget to ping func (p *Pinger) Start() { log.Info().Msg("Starting a NAT pinger") for { select { case <-p.stop: log.Info().Msg("NAT pinger is stopped") return case pingParams := <-p.pingTarget: if isPunchingRequired(pingParams) { go p.pingTargetConsumer(pingParams) } } } } func isPunchingRequired(params *Params) bool { return params.ConsumerPort > 0 } // Stop stops pinger loop func (p *Pinger) Stop() { p.once.Do(func() { close(p.stopNATProxy) close(p.stop) }) } // PingProvider pings provider determined by destination provided in sessionConfig func (p *Pinger) PingProvider(ip string, providerPort, consumerPort, proxyPort int, stop <-chan struct{}) error { log.Info().Msg("NAT pinging to provider") conn, err := p.getConnection(ip, providerPort, consumerPort) if err != nil { return errors.Wrap(err, "failed to get connection") } // Add read deadline to prevent possible conn.Read hang when remote peer doesn't send ping ack. conn.SetReadDeadline(time.Now().Add(p.pingConfig.Timeout * 2)) pingStop := make(chan struct{}) defer close(pingStop) go func() { err := p.ping(conn, pingStop) if err != nil { log.Warn().Err(err).Msg("Error while pinging") } }() time.Sleep(p.pingConfig.Interval) err = p.pingReceiver(conn, stop) if err != nil { return err } // send one last ping request to end hole punching procedure gracefully err = p.sendPingRequest(conn, 128) if err != nil { return errors.Wrap(err, "remote ping failed") } if proxyPort > 0 { consumerAddr := fmt.Sprintf("127.0.0.1:%d", proxyPort) log.Info().Msg("Handing connection to consumer NATProxy: " + consumerAddr) // Set higher read deadline when NAT proxy is used. conn.SetReadDeadline(time.Now().Add(12 * time.Hour)) p.stopNATProxy = p.natProxy.consumerHandOff(consumerAddr, conn) } else { log.Info().Msg("Closing ping connection") if err := conn.Close(); err != nil { return errors.Wrap(err, "could not close ping conn") } } return nil } func (p *Pinger) ping(conn *net.UDPConn, stop <-chan struct{}) error { // Windows detects that 1 TTL is too low and throws an exception during send ttl := 0 i := 0 for { select { case <-stop: return nil case <-time.After(p.pingConfig.Interval): log.Debug().Msg("Pinging... ") // This is the essence of the TTL based udp punching. // We're slowly increasing the TTL so that the packet is held. // After a few attempts we're setting the value to 128 and assuming we're through. 
// We could stop sending ping to Consumer beyond 4 hops to prevent from possible Consumer's router's // DOS block, but we plan, that Consumer at the same time will be Provider too in near future. ttl++ if ttl > 4 { ttl = 128 } err := p.sendPingRequest(conn, ttl) if err != nil { p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildFailureEvent(StageName, err)) return err } i++ if time.Duration(i)*p.pingConfig.Interval > p.pingConfig.Timeout { err := errors.New("timeout while waiting for ping ack, trying to continue") p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildFailureEvent(StageName, err)) return err } } } } func (p *Pinger) sendPingRequest(conn *net.UDPConn, ttl int) error { err := ipv4.NewConn(conn).SetTTL(ttl) if err != nil { return errors.Wrap(err, "pinger setting ttl failed") } _, err = conn.Write([]byte("continuously pinging to " + conn.RemoteAddr().String())) return errors.Wrap(err, "pinging request failed") } func (p *Pinger) getConnection(ip string, port int, pingerPort int) (*net.UDPConn, error) { udpAddr, err := net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", ip, port)) if err != nil { return nil, err } log.Info().Msg("Remote socket: " + udpAddr.String()) conn, err := net.DialUDP("udp", &net.UDPAddr{Port: pingerPort}, udpAddr) if err != nil { return nil, err } log.Info().Msg("Local socket: " + conn.LocalAddr().String()) return conn, nil } // PingTarget relays ping target address data func (p *Pinger) PingTarget(target *Params) { select { case p.pingTarget <- target: return // do not block if ping target is not received case <-time.After(100 * time.Millisecond): log.Info().Msgf("Ping target timeout: %v", target) return } } // BindServicePort register service port to forward connection to func (p *Pinger) BindServicePort(key string, port int) { p.natProxy.registerServicePort(key, port) } func (p *Pinger) pingReceiver(conn *net.UDPConn, stop <-chan struct{}) error { timeout := time.After(p.pingConfig.Timeout) buf := make([]byte, bufferLen) for { select { case <-timeout: return errNATPunchAttemptTimedOut case <-stop: return errNATPunchAttemptStopped default: n, err := conn.Read(buf) if err != nil { log.Error().Err(err).Msgf("Failed to read remote peer: %s - attempting to continue", conn.RemoteAddr().String()) continue } if n > 0 { log.Info().Msgf("Remote peer data received: %s, len: %d", string(buf[:n]), n) return nil } } } } // SetProtectSocketCallback sets socket protection callback to be called when new socket is created in consumer NATProxy func (p *Pinger) SetProtectSocketCallback(socketProtect func(socket int) bool) { p.natProxy.setProtectSocketCallback(socketProtect) } // Valid returns that this pinger is a valid pinger func (p *Pinger) Valid() bool { return true } func (p *Pinger) pingTargetConsumer(pingParams *Params) { log.Info().Msgf("Pinging peer with: %+v", pingParams) if pingParams.ProxyPortMappingKey == "" { log.Error().Msg("Service proxy connection port mapping key is missing") return } log.Info().Msgf("Ping target received: IP: %v, port: %v", pingParams.ConsumerPublicIP, pingParams.ConsumerPort) if !p.natProxy.isAvailable(pingParams.ProxyPortMappingKey) { log.Warn().Msgf("NATProxy is not available for this transport protocol key %v", pingParams.ProxyPortMappingKey) return } conn, err := p.getConnection(pingParams.ConsumerPublicIP, pingParams.ConsumerPort, pingParams.ProviderPort) if err != nil { log.Error().Err(err).Msg("Failed to get connection") return } pingStop := make(chan struct{}) defer close(pingStop) go func() { err := p.ping(conn, 
pingStop) if err != nil { log.Warn().Err(err).Msg("Error while pinging") } }() err = p.pingReceiver(conn, pingParams.Cancel) if err != nil { log.Error().Err(err).Msg("Ping receiver error") return } p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildSuccessfulEvent(StageName)) log.Info().Msg("Ping received, waiting for a new connection") go p.natProxy.handOff(pingParams.ProxyPortMappingKey, conn) }
1
15,786
Let's have simple function arguments here, because the struct `traversal.Params` now serves two purposes: 1. as the DTO contract between consumer and provider, and 2. as a parameter bag for internal function calls.
mysteriumnetwork-node
go
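The review above asks to keep `traversal.Params` purely as the consumer-provider contract and to pass plain arguments for internal calls. A rough Go sketch of that split, with illustrative type and method names that are not the project's final API:

```go
package traversal

import "net"

// ConnectParams stays a pure DTO: only the fields that actually travel
// between consumer and provider (illustrative subset).
type ConnectParams struct {
	ProviderPort     int
	ConsumerPort     int
	ConsumerPublicIP string
}

// providerPinger keeps plain arguments for the internal call instead of
// reusing the DTO as a parameter bag (illustrative interface).
type providerPinger interface {
	PingProvider(ip string, providerPort, consumerPort, proxyPort int, stop <-chan struct{}) (*net.UDPConn, error)
}
```

With this split the wire format and the internal call signatures can evolve independently.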
@@ -0,0 +1,5 @@ +package org.openqa.selenium.grid.distributor.remote; + +public class RemoteDistributorTest { + +}
1
1
16,857
Probably best not to have an empty test.
SeleniumHQ-selenium
js
@@ -0,0 +1,7 @@ +<figure> + <img src="/assets/upcase/testimonial_thumbs/anthony-lee.jpg" alt="Anthony"> + <p class="quotee">Anthony Lee<strong>Professional</strong> </p> +</figure> +<blockquote> + <p><strong>I am really loving upcase.</strong> The most valuable part for me was how I was able to dissect "upcase" app and see how it was built. Having the access to the repo and seeing how codes were structured was such a priceless lesson.</p> +</blockquote>
1
1
11,601
Could we use `image_tag` in the `testimonials` files?
thoughtbot-upcase
rb
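The question above points at the Rails `image_tag` helper; a sketch of the same figure using it instead of a raw `<img>` element (path taken from the patch, with the `/assets` prefix supplied by the asset pipeline):

```erb
<figure>
  <%= image_tag "upcase/testimonial_thumbs/anthony-lee.jpg", alt: "Anthony" %>
  <p class="quotee">Anthony Lee<strong>Professional</strong></p>
</figure>
```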
@@ -255,9 +255,10 @@ public class ExecutionFlowDao { + "SET status=?,update_time=?,start_time=?,end_time=?,enc_type=?,flow_data=? " + "WHERE exec_id=?"; - final String json = JSONUtils.toJSON(flow.toObject()); byte[] data = null; try { + // If this action fails, the execution must be failed. + final String json = JSONUtils.toJSON(flow.toObject()); final byte[] stringData = json.getBytes("UTF-8"); data = stringData; // Todo kunkun-tang: use a common method to transform stringData to data.
1
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.db.DatabaseOperator; import azkaban.db.EncodingType; import azkaban.db.SQLTransaction; import azkaban.utils.GZIPUtils; import azkaban.utils.JSONUtils; import azkaban.utils.Pair; import java.io.IOException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.List; import javax.inject.Inject; import javax.inject.Singleton; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.dbutils.ResultSetHandler; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; @Singleton public class ExecutionFlowDao { private static final Logger logger = Logger.getLogger(ExecutionFlowDao.class); private final DatabaseOperator dbOperator; private final MysqlNamedLock mysqlNamedLock; @Inject public ExecutionFlowDao(final DatabaseOperator dbOperator, final MysqlNamedLock mysqlNamedLock) { this.dbOperator = dbOperator; this.mysqlNamedLock = mysqlNamedLock; } public void uploadExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException { final String useExecutorParam = flow.getExecutionOptions().getFlowParameters().get(ExecutionOptions.USE_EXECUTOR); final String executorId = StringUtils.isNotEmpty(useExecutorParam) ? useExecutorParam : null; final String flowPriorityParam = flow.getExecutionOptions().getFlowParameters().get(ExecutionOptions.FLOW_PRIORITY); final int flowPriority = StringUtils.isNotEmpty(flowPriorityParam) ? Integer.parseInt(flowPriorityParam) : ExecutionOptions.DEFAULT_FLOW_PRIORITY; final String INSERT_EXECUTABLE_FLOW = "INSERT INTO execution_flows " + "(project_id, flow_id, version, status, submit_time, submit_user, update_time, " + "use_executor, flow_priority) values (?,?,?,?,?,?,?,?,?)"; final long submitTime = flow.getSubmitTime(); /** * Why we need a transaction to get last insert ID? * Because "SELECT LAST_INSERT_ID()" needs to have the same connection * as inserting the new entry. 
* See https://dev.mysql.com/doc/refman/5.7/en/information-functions.html#function_last-insert-id */ final SQLTransaction<Long> insertAndGetLastID = transOperator -> { transOperator.update(INSERT_EXECUTABLE_FLOW, flow.getProjectId(), flow.getFlowId(), flow.getVersion(), flow.getStatus().getNumVal(), submitTime, flow.getSubmitUser(), submitTime, executorId, flowPriority); transOperator.getConnection().commit(); return transOperator.getLastInsertId(); }; try { final long id = this.dbOperator.transaction(insertAndGetLastID); logger.info("Flow given " + flow.getFlowId() + " given id " + id); flow.setExecutionId((int) id); updateExecutableFlow(flow); } catch (final SQLException e) { throw new ExecutorManagerException("Error creating execution.", e); } } List<ExecutableFlow> fetchFlowHistory(final int skip, final int num) throws ExecutorManagerException { try { return this.dbOperator.query(FetchExecutableFlows.FETCH_ALL_EXECUTABLE_FLOW_HISTORY, new FetchExecutableFlows(), skip, num); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching flow History", e); } } List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final int skip, final int num) throws ExecutorManagerException { try { return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_HISTORY, new FetchExecutableFlows(), projectId, flowId, skip, num); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching flow history", e); } } public List<Pair<ExecutionReference, ExecutableFlow>> fetchQueuedFlows() throws ExecutorManagerException { try { return this.dbOperator.query(FetchQueuedExecutableFlows.FETCH_QUEUED_EXECUTABLE_FLOW, new FetchQueuedExecutableFlows()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active flows", e); } } /** * fetch flow execution history with specified {@code projectId}, {@code flowId} and flow start * time >= {@code startTime} * * @return the list of flows meeting the specified criteria */ public List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final long startTime) throws ExecutorManagerException { try { return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_BY_START_TIME, new FetchExecutableFlows(), projectId, flowId, startTime); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching historic flows", e); } } List<ExecutableFlow> fetchFlowHistory(final int projectId, final String flowId, final int skip, final int num, final Status status) throws ExecutorManagerException { try { return this.dbOperator.query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW_BY_STATUS, new FetchExecutableFlows(), projectId, flowId, status.getNumVal(), skip, num); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active flows", e); } } List<ExecutableFlow> fetchRecentlyFinishedFlows(final Duration maxAge) throws ExecutorManagerException { try { return this.dbOperator.query(FetchRecentlyFinishedFlows.FETCH_RECENTLY_FINISHED_FLOW, new FetchRecentlyFinishedFlows(), System.currentTimeMillis() - maxAge.toMillis(), Status.SUCCEEDED.getNumVal(), Status.KILLED.getNumVal(), Status.FAILED.getNumVal()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching recently finished flows", e); } } List<ExecutableFlow> fetchFlowHistory(final String projectNameContains, final String flowNameContains, final String userNameContains, final int status, final long startTime, final long endTime, 
final int skip, final int num) throws ExecutorManagerException { String query = FetchExecutableFlows.FETCH_BASE_EXECUTABLE_FLOW_QUERY; final List<Object> params = new ArrayList<>(); boolean first = true; if (projectNameContains != null && !projectNameContains.isEmpty()) { query += " JOIN projects p ON ef.project_id = p.id WHERE name LIKE ?"; params.add('%' + projectNameContains + '%'); first = false; } // todo kunkun-tang: we don't need the below complicated logics. We should just use a simple way. if (flowNameContains != null && !flowNameContains.isEmpty()) { if (first) { query += " WHERE "; first = false; } else { query += " AND "; } query += " flow_id LIKE ?"; params.add('%' + flowNameContains + '%'); } if (userNameContains != null && !userNameContains.isEmpty()) { if (first) { query += " WHERE "; first = false; } else { query += " AND "; } query += " submit_user LIKE ?"; params.add('%' + userNameContains + '%'); } if (status != 0) { if (first) { query += " WHERE "; first = false; } else { query += " AND "; } query += " status = ?"; params.add(status); } if (startTime > 0) { if (first) { query += " WHERE "; first = false; } else { query += " AND "; } query += " start_time > ?"; params.add(startTime); } if (endTime > 0) { if (first) { query += " WHERE "; } else { query += " AND "; } query += " end_time < ?"; params.add(endTime); } if (skip > -1 && num > 0) { query += " ORDER BY exec_id DESC LIMIT ?, ?"; params.add(skip); params.add(num); } try { return this.dbOperator.query(query, new FetchExecutableFlows(), params.toArray()); } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching active flows", e); } } void updateExecutableFlow(final ExecutableFlow flow) throws ExecutorManagerException { updateExecutableFlow(flow, EncodingType.GZIP); } private void updateExecutableFlow(final ExecutableFlow flow, final EncodingType encType) throws ExecutorManagerException { final String UPDATE_EXECUTABLE_FLOW_DATA = "UPDATE execution_flows " + "SET status=?,update_time=?,start_time=?,end_time=?,enc_type=?,flow_data=? " + "WHERE exec_id=?"; final String json = JSONUtils.toJSON(flow.toObject()); byte[] data = null; try { final byte[] stringData = json.getBytes("UTF-8"); data = stringData; // Todo kunkun-tang: use a common method to transform stringData to data. if (encType == EncodingType.GZIP) { data = GZIPUtils.gzipBytes(stringData); } } catch (final IOException e) { throw new ExecutorManagerException("Error encoding the execution flow."); } try { this.dbOperator.update(UPDATE_EXECUTABLE_FLOW_DATA, flow.getStatus() .getNumVal(), flow.getUpdateTime(), flow.getStartTime(), flow .getEndTime(), encType.getNumVal(), data, flow.getExecutionId()); } catch (final SQLException e) { throw new ExecutorManagerException("Error updating flow.", e); } } public ExecutableFlow fetchExecutableFlow(final int execId) throws ExecutorManagerException { final FetchExecutableFlows flowHandler = new FetchExecutableFlows(); try { final List<ExecutableFlow> properties = this.dbOperator .query(FetchExecutableFlows.FETCH_EXECUTABLE_FLOW, flowHandler, execId); if (properties.isEmpty()) { return null; } else { return properties.get(0); } } catch (final SQLException e) { throw new ExecutorManagerException("Error fetching flow id " + execId, e); } } /** * set executor id to null for the execution id */ public void unsetExecutorIdForExecution(final int executionId) throws ExecutorManagerException { final String UNSET_EXECUTOR = "UPDATE execution_flows SET executor_id = null, update_time = ? 
where exec_id = ?"; final SQLTransaction<Integer> unsetExecutor = transOperator -> transOperator.update(UNSET_EXECUTOR, System.currentTimeMillis(), executionId); try { this.dbOperator.transaction(unsetExecutor); } catch (final SQLException e) { throw new ExecutorManagerException("Error unsetting executor id for execution " + executionId, e); } } public int selectAndUpdateExecution(final int executorId, final boolean isActive) throws ExecutorManagerException { final String UPDATE_EXECUTION = "UPDATE execution_flows SET executor_id = ?, update_time = ? " + "where exec_id = ?"; final String selectExecutionForUpdate = isActive ? SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_ACTIVE : SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_INACTIVE; final SQLTransaction<Integer> selectAndUpdateExecution = transOperator -> { transOperator.getConnection().setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); final List<Integer> execIds = transOperator.query(selectExecutionForUpdate, new SelectFromExecutionFlows(), executorId); int execId = -1; if (!execIds.isEmpty()) { execId = execIds.get(0); transOperator.update(UPDATE_EXECUTION, executorId, System.currentTimeMillis(), execId); } transOperator.getConnection().commit(); return execId; }; try { return this.dbOperator.transaction(selectAndUpdateExecution); } catch (final SQLException e) { throw new ExecutorManagerException("Error selecting and updating execution with executor " + executorId, e); } } public int selectAndUpdateExecutionWithLocking(final int executorId, final boolean isActive) throws ExecutorManagerException { final String UPDATE_EXECUTION = "UPDATE execution_flows SET executor_id = ?, update_time = ? " + "where exec_id = ?"; final String selectExecutionForUpdate = isActive ? SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_ACTIVE : SelectFromExecutionFlows.SELECT_EXECUTION_FOR_UPDATE_INACTIVE; final SQLTransaction<Integer> selectAndUpdateExecution = transOperator -> { final String POLLING_LOCK_NAME = "execution_flows_polling"; final int GET_LOCK_TIMEOUT_IN_SECONDS = 5; int execId = -1; final boolean hasLocked = this.mysqlNamedLock.getLock(transOperator, POLLING_LOCK_NAME, GET_LOCK_TIMEOUT_IN_SECONDS); logger.info("ExecutionFlow polling lock value: " + hasLocked + " for executorId: " + executorId); if (hasLocked) { try { final List<Integer> execIds = transOperator.query(selectExecutionForUpdate, new SelectFromExecutionFlows(), executorId); if (CollectionUtils.isNotEmpty(execIds)) { execId = execIds.get(0); transOperator.update(UPDATE_EXECUTION, executorId, System.currentTimeMillis(), execId); } } finally { this.mysqlNamedLock.releaseLock(transOperator, POLLING_LOCK_NAME); logger.info("Released polling lock for executorId: " + executorId); } } else { logger.info("Could not acquire polling lock for executorId: " + executorId); } return execId; }; try { return this.dbOperator.transaction(selectAndUpdateExecution); } catch (final SQLException e) { throw new ExecutorManagerException("Error selecting and updating execution with executor " + executorId, e); } } public static class SelectFromExecutionFlows implements ResultSetHandler<List<Integer>> { private static final String SELECT_EXECUTION_FOR_UPDATE_FORMAT = "SELECT exec_id from execution_flows WHERE exec_id = (SELECT exec_id from execution_flows" + " WHERE status = " + Status.PREPARING.getNumVal() + " and executor_id is NULL and flow_data is NOT NULL and %s" + " ORDER BY flow_priority DESC, update_time ASC, exec_id ASC LIMIT 1) and executor_id is NULL FOR UPDATE"; 
public static final String SELECT_EXECUTION_FOR_UPDATE_ACTIVE = String.format(SELECT_EXECUTION_FOR_UPDATE_FORMAT, "(use_executor is NULL or use_executor = ?)"); public static final String SELECT_EXECUTION_FOR_UPDATE_INACTIVE = String.format(SELECT_EXECUTION_FOR_UPDATE_FORMAT, "use_executor = ?"); @Override public List<Integer> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<Integer> execIds = new ArrayList<>(); do { final int execId = rs.getInt(1); execIds.add(execId); } while (rs.next()); return execIds; } } public static class FetchExecutableFlows implements ResultSetHandler<List<ExecutableFlow>> { static String FETCH_EXECUTABLE_FLOW_BY_START_TIME = "SELECT ef.exec_id, ef.enc_type, ef.flow_data, ef.status FROM execution_flows ef WHERE " + "project_id=? AND flow_id=? AND start_time >= ? ORDER BY start_time DESC"; static String FETCH_BASE_EXECUTABLE_FLOW_QUERY = "SELECT ef.exec_id, ef.enc_type, ef.flow_data, ef.status FROM execution_flows ef"; static String FETCH_EXECUTABLE_FLOW = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "WHERE exec_id=?"; static String FETCH_ALL_EXECUTABLE_FLOW_HISTORY = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "ORDER BY exec_id DESC LIMIT ?, ?"; static String FETCH_EXECUTABLE_FLOW_HISTORY = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "WHERE project_id=? AND flow_id=? " + "ORDER BY exec_id DESC LIMIT ?, ?"; static String FETCH_EXECUTABLE_FLOW_BY_STATUS = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "WHERE project_id=? AND flow_id=? AND status=? " + "ORDER BY exec_id DESC LIMIT ?, ?"; @Override public List<ExecutableFlow> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<ExecutableFlow> execFlows = new ArrayList<>(); do { final int id = rs.getInt(1); final int encodingType = rs.getInt(2); final byte[] data = rs.getBytes(3); if (data != null) { final EncodingType encType = EncodingType.fromInteger(encodingType); final Status status = Status.fromInteger(rs.getInt(4)); try { final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow( GZIPUtils.transformBytesToObject(data, encType), status); execFlows.add(exFlow); } catch (final IOException e) { throw new SQLException("Error retrieving flow data " + id, e); } } } while (rs.next()); return execFlows; } } /** * JDBC ResultSetHandler to fetch queued executions */ private static class FetchQueuedExecutableFlows implements ResultSetHandler<List<Pair<ExecutionReference, ExecutableFlow>>> { // Select queued unassigned flows private static final String FETCH_QUEUED_EXECUTABLE_FLOW = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows" + " WHERE executor_id is NULL AND status = " + Status.PREPARING.getNumVal(); @Override public List<Pair<ExecutionReference, ExecutableFlow>> handle(final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<Pair<ExecutionReference, ExecutableFlow>> execFlows = new ArrayList<>(); do { final int id = rs.getInt(1); final int encodingType = rs.getInt(2); final byte[] data = rs.getBytes(3); if (data == null) { ExecutionFlowDao.logger.error("Found a flow with empty data blob exec_id: " + id); } else { final EncodingType encType = EncodingType.fromInteger(encodingType); final Status status = Status.fromInteger(rs.getInt(4)); try { final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow( 
GZIPUtils.transformBytesToObject(data, encType), status); final ExecutionReference ref = new ExecutionReference(id); execFlows.add(new Pair<>(ref, exFlow)); } catch (final IOException e) { throw new SQLException("Error retrieving flow data " + id, e); } } } while (rs.next()); return execFlows; } } private static class FetchRecentlyFinishedFlows implements ResultSetHandler<List<ExecutableFlow>> { // Execution_flows table is already indexed by end_time private static final String FETCH_RECENTLY_FINISHED_FLOW = "SELECT exec_id, enc_type, flow_data, status FROM execution_flows " + "WHERE end_time > ? AND status IN (?, ?, ?)"; @Override public List<ExecutableFlow> handle( final ResultSet rs) throws SQLException { if (!rs.next()) { return Collections.emptyList(); } final List<ExecutableFlow> execFlows = new ArrayList<>(); do { final int id = rs.getInt(1); final int encodingType = rs.getInt(2); final byte[] data = rs.getBytes(3); if (data != null) { final EncodingType encType = EncodingType.fromInteger(encodingType); final Status status = Status.fromInteger(rs.getInt(4)); try { final ExecutableFlow exFlow = ExecutableFlow.createExecutableFlow( GZIPUtils.transformBytesToObject(data, encType), status); execFlows.add(exFlow); } catch (final IOException e) { throw new SQLException("Error retrieving flow data " + id, e); } } } while (rs.next()); return execFlows; } } }
1
19,263
I believe the `flow.toObject()` method throws an NPE if the SLA option list (i.e. `this.executionOptions.getSlaOptions()`) is null, or if one of the values in that list is null. If that is the case, we could fix the root cause of the NPE in the `ExecutableFlow.toObject()` method. The corresponding code to populate SLA options could be modified as follows: `final List<Map<String, Object>> slaOptions = Optional.ofNullable(this.executionOptions.getSlaOptions()).orElse(Collections.emptyList()).stream().filter(Objects::nonNull).map(slaOption -> slaOption.toObject()).collect(Collectors.toList());`
azkaban-azkaban
java
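The snippet in the comment above can be written as a small null-safe helper. This is a sketch of the same pattern with its imports, not azkaban code; the real call site would pass `this.executionOptions.getSlaOptions()` and `SlaOption::toObject` (both assumed from the comment):

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;

final class SlaOptionMapper {
  // Null-safe mapping: tolerates a null list and null entries, which is
  // what the comment identifies as the root cause of the NPE.
  static <T> List<Map<String, Object>> toObjects(
      final List<T> options, final Function<T, Map<String, Object>> toObject) {
    return Optional.ofNullable(options)
        .orElse(Collections.emptyList())
        .stream()
        .filter(Objects::nonNull)
        .map(toObject)
        .collect(Collectors.toList());
  }
}
```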
@@ -33,7 +33,7 @@ export default Controller.extend(SettingsSaveMixin, { }), iconImageSource: computed('model.icon', function () { - return this.get('model.icon') || ''; + return this.get('model.icon') || '/favicon.ico'; }), coverImageSource: computed('model.cover', function () {
1
import Controller from 'ember-controller'; import computed, {notEmpty} from 'ember-computed'; import injectService from 'ember-service/inject'; import observer from 'ember-metal/observer'; import run from 'ember-runloop'; import SettingsSaveMixin from 'ghost-admin/mixins/settings-save'; import randomPassword from 'ghost-admin/utils/random-password'; import $ from 'jquery'; export default Controller.extend(SettingsSaveMixin, { availableTimezones: null, themeToDelete: null, showUploadLogoModal: false, showUploadCoverModal: false, showUploadIconModal: false, showDeleteThemeModal: notEmpty('themeToDelete'), ajax: injectService(), config: injectService(), ghostPaths: injectService(), notifications: injectService(), session: injectService(), _scratchFacebook: null, _scratchTwitter: null, iconMimeTypes: 'image/png,image/x-icon', iconExtensions: ['ico', 'png'], logoImageSource: computed('model.logo', function () { return this.get('model.logo') || ''; }), iconImageSource: computed('model.icon', function () { return this.get('model.icon') || ''; }), coverImageSource: computed('model.cover', function () { return this.get('model.cover') || ''; }), isDatedPermalinks: computed('model.permalinks', { set(key, value) { this.set('model.permalinks', value ? '/:year/:month/:day/:slug/' : '/:slug/'); let slugForm = this.get('model.permalinks'); return slugForm !== '/:slug/'; }, get() { let slugForm = this.get('model.permalinks'); return slugForm !== '/:slug/'; } }), generatePassword: observer('model.isPrivate', function () { this.get('model.errors').remove('password'); if (this.get('model.isPrivate') && this.get('model.hasDirtyAttributes')) { this.get('model').set('password', randomPassword()); } }), _deleteTheme() { let theme = this.get('themeToDelete'); let themeURL = `${this.get('ghostPaths.apiRoot')}/themes/${theme.name}/`; if (!theme) { return; } return this.get('ajax').del(themeURL).then(() => { this.send('reloadSettings'); }).catch((error) => { this.get('notifications').showAPIError(error); }); }, save() { let notifications = this.get('notifications'); let config = this.get('config'); return this.get('model').save().then((model) => { config.set('blogTitle', model.get('title')); // this forces the document title to recompute after // a blog title change this.send('collectTitleTokens', []); return model; }).catch((error) => { if (error) { notifications.showAPIError(error, {key: 'settings.save'}); } throw error; }); }, actions: { setTheme(theme) { this.set('model.activeTheme', theme.name); this.send('save'); }, downloadTheme(theme) { let themeURL = `${this.get('ghostPaths.apiRoot')}/themes/${theme.name}`; let accessToken = this.get('session.data.authenticated.access_token'); let downloadURL = `${themeURL}/download/?access_token=${accessToken}`; let iframe = $('#iframeDownload'); if (iframe.length === 0) { iframe = $('<iframe>', {id: 'iframeDownload'}).hide().appendTo('body'); } iframe.attr('src', downloadURL); }, deleteTheme(theme) { if (theme) { return this.set('themeToDelete', theme); } return this._deleteTheme(); }, hideDeleteThemeModal() { this.set('themeToDelete', null); }, setTimezone(timezone) { this.set('model.activeTimezone', timezone.name); }, toggleUploadCoverModal() { this.toggleProperty('showUploadCoverModal'); }, toggleUploadLogoModal() { this.toggleProperty('showUploadLogoModal'); }, toggleUploadIconModal() { this.toggleProperty('showUploadIconModal'); }, validateFacebookUrl() { let newUrl = this.get('_scratchFacebook'); let oldUrl = this.get('model.facebook'); let errMessage = ''; if 
(newUrl === '') { // Clear out the Facebook url this.set('model.facebook', ''); this.get('model.errors').remove('facebook'); return; } // _scratchFacebook will be null unless the user has input something if (!newUrl) { newUrl = oldUrl; } // If new url didn't change, exit if (newUrl === oldUrl) { this.get('model.errors').remove('facebook'); return; } if (newUrl.match(/(?:facebook\.com\/)(\S+)/) || newUrl.match(/([a-z\d\.]+)/i)) { let username = []; if (newUrl.match(/(?:facebook\.com\/)(\S+)/)) { [, username] = newUrl.match(/(?:facebook\.com\/)(\S+)/); } else { [, username] = newUrl.match(/(?:https\:\/\/|http\:\/\/)?(?:www\.)?(?:\w+\.\w+\/+)?(\S+)/mi); } // check if we have a /page/username or without if (username.match(/^(?:\/)?(pages?\/\S+)/mi)) { // we got a page url, now save the username without the / in the beginning [, username] = username.match(/^(?:\/)?(pages?\/\S+)/mi); } else if (username.match(/^(http|www)|(\/)/) || !username.match(/^([a-z\d\.]{5,50})$/mi)) { errMessage = !username.match(/^([a-z\d\.]{5,50})$/mi) ? 'Your Page name is not a valid Facebook Page name' : 'The URL must be in a format like https://www.facebook.com/yourPage'; this.get('model.errors').add('facebook', errMessage); this.get('model.hasValidated').pushObject('facebook'); return; } newUrl = `https://www.facebook.com/${username}`; this.set('model.facebook', newUrl); this.get('model.errors').remove('facebook'); this.get('model.hasValidated').pushObject('facebook'); // User input is validated return this.save().then(() => { this.set('model.facebook', ''); run.schedule('afterRender', this, function () { this.set('model.facebook', newUrl); }); }); } else { errMessage = 'The URL must be in a format like ' + 'https://www.facebook.com/yourPage'; this.get('model.errors').add('facebook', errMessage); this.get('model.hasValidated').pushObject('facebook'); return; } }, validateTwitterUrl() { let newUrl = this.get('_scratchTwitter'); let oldUrl = this.get('model.twitter'); let errMessage = ''; if (newUrl === '') { // Clear out the Twitter url this.set('model.twitter', ''); this.get('model.errors').remove('twitter'); return; } // _scratchTwitter will be null unless the user has input something if (!newUrl) { newUrl = oldUrl; } // If new url didn't change, exit if (newUrl === oldUrl) { this.get('model.errors').remove('twitter'); return; } if (newUrl.match(/(?:twitter\.com\/)(\S+)/) || newUrl.match(/([a-z\d\.]+)/i)) { let username = []; if (newUrl.match(/(?:twitter\.com\/)(\S+)/)) { [, username] = newUrl.match(/(?:twitter\.com\/)(\S+)/); } else { [username] = newUrl.match(/([^/]+)\/?$/mi); } // check if username starts with http or www and show error if so if (username.match(/^(http|www)|(\/)/) || !username.match(/^[a-z\d\.\_]{1,15}$/mi)) { errMessage = !username.match(/^[a-z\d\.\_]{1,15}$/mi) ? 
'Your Username is not a valid Twitter Username' : 'The URL must be in a format like https://twitter.com/yourUsername'; this.get('model.errors').add('twitter', errMessage); this.get('model.hasValidated').pushObject('twitter'); return; } newUrl = `https://twitter.com/${username}`; this.set('model.twitter', newUrl); this.get('model.errors').remove('twitter'); this.get('model.hasValidated').pushObject('twitter'); // User input is validated return this.save().then(() => { this.set('model.twitter', ''); run.schedule('afterRender', this, function () { this.set('model.twitter', newUrl); }); }); } else { errMessage = 'The URL must be in a format like ' + 'https://twitter.com/yourUsername'; this.get('model.errors').add('twitter', errMessage); this.get('model.hasValidated').pushObject('twitter'); return; } } } });
1
7,808
So I haven't tested this, but since the icon location is dumped directly into the img `src` attribute, won't this cause issues for Ghost blogs served from a subdirectory? If I'm misunderstanding the purpose of the default, let me know.
TryGhost-Admin
js
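If the concern above holds, one way to keep a default while staying subdirectory-safe is to build the fallback from the injected `ghostPaths` service. This is a hypothetical fragment of the computed property in the controller above, assuming the service exposes the blog's subdirectory (shown here as `subdir`):

```js
iconImageSource: computed('model.icon', function () {
    // hypothetical: prefix the default with the blog subdirectory, if any
    let subdir = this.get('ghostPaths.subdir') || '';
    return this.get('model.icon') || `${subdir}/favicon.ico`;
}),
```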
@@ -120,8 +120,16 @@ int main(int argc, char **argv) MPI_Init(nullptr, nullptr); #endif - ::testing::InitGoogleTest(&argc, argv); - int result = RUN_ALL_TESTS(); + int result = -1; + try + { + ::testing::InitGoogleTest(&argc, argv); + result = RUN_ALL_TESTS(); + } + catch (std::exception &e) + { + result = 1; + } #ifdef ADIOS2_HAVE_MPI MPI_Finalize();
1
/* * Distributed under the OSI-approved Apache License, Version 2.0. See * accompanying file Copyright.txt for details. * * TestBPWriteTypes.c * * Created on: Aug 9, 2017 * Author: Haocheng */ #include <adios2_c.h> #ifdef ADIOS2_HAVE_MPI #include <mpi.h> #endif #include <gtest/gtest.h> #include "SmallTestData_c.h" class BPWriteTypes : public ::testing::Test { public: BPWriteTypes() = default; }; TEST_F(BPWriteTypes, ADIOS2BPWriteTypes) { #ifdef ADIOS2_HAVE_MPI int rank(0); int size(0); adios2_adios *adiosH = adios2_init(MPI_COMM_WORLD, adios2_debug_mode_on); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); #else adios2_adios *adiosH = adios2_init_nompi(adios2_debug_mode_on); #endif // IO adios2_io *ioH = adios2_declare_io(adiosH, "CArrayTypes"); // Set engine parameters adios2_set_engine(ioH, "BPFile"); adios2_set_parameter(ioH, "ProfileUnits", "Microseconds"); adios2_set_parameter(ioH, "Threads", "1"); // Set transport and parameters const unsigned int transportID = adios2_add_transport(ioH, "File"); adios2_set_transport_parameter(ioH, transportID, "library", "fstream"); // count dims are allocated in stack size_t count[1]; count[0] = data_Nx; // Define variables in ioH { adios2_define_variable(ioH, "varI8", adios2_type_int8_t, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varI16", adios2_type_int16_t, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varI32", adios2_type_int32_t, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varI64", adios2_type_int64_t, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varU8", adios2_type_uint8_t, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varU16", adios2_type_uint16_t, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varU32", adios2_type_uint32_t, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varU64", adios2_type_uint64_t, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varR32", adios2_type_float, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); adios2_define_variable(ioH, "varR64", adios2_type_double, 1, NULL, NULL, count, adios2_constant_dims_true, NULL); } // inquire variables adios2_variable *varI8 = adios2_inquire_variable(ioH, "varI8"); adios2_variable *varI16 = adios2_inquire_variable(ioH, "varI16"); adios2_variable *varI32 = adios2_inquire_variable(ioH, "varI32"); adios2_variable *varI64 = adios2_inquire_variable(ioH, "varI64"); adios2_variable *varU8 = adios2_inquire_variable(ioH, "varU8"); adios2_variable *varU16 = adios2_inquire_variable(ioH, "varU16"); adios2_variable *varU32 = adios2_inquire_variable(ioH, "varU32"); adios2_variable *varU64 = adios2_inquire_variable(ioH, "varU64"); adios2_variable *varR32 = adios2_inquire_variable(ioH, "varR32"); adios2_variable *varR64 = adios2_inquire_variable(ioH, "varR64"); adios2_engine *engineH = adios2_open(ioH, "ctypes.bp", adios2_mode_write); adios2_put_sync(engineH, varI8, data_I8); adios2_put_sync(engineH, varI16, data_I16); adios2_put_sync(engineH, varI32, data_I32); adios2_put_sync(engineH, varI64, data_I64); adios2_put_sync(engineH, varU8, data_U8); adios2_put_sync(engineH, varU16, data_U16); adios2_put_sync(engineH, varU32, data_U32); adios2_put_sync(engineH, varU64, data_U64); adios2_put_sync(engineH, varR32, data_R32); adios2_put_sync(engineH, varR64, 
data_R64); adios2_close(engineH); // deallocate adiosH adios2_finalize(adiosH); } //****************************************************************************** // main //****************************************************************************** int main(int argc, char **argv) { #ifdef ADIOS2_HAVE_MPI MPI_Init(nullptr, nullptr); #endif ::testing::InitGoogleTest(&argc, argv); int result = RUN_ALL_TESTS(); #ifdef ADIOS2_HAVE_MPI MPI_Finalize(); #endif return result; }
1
12,132
Why swallow the exception here rather than propagate it?
ornladios-ADIOS2
cpp
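The review question above asks why the patch discards the caught exception instead of propagating it. As a minimal sketch of one alternative, assuming the same GoogleTest/MPI driver shown in the diff (illustrative only, not the patch author's code): report the exception and still return a failure code, so the MPI build still reaches MPI_Finalize() but the cause of the failure is visible in the test log rather than silently swallowed.

// Hypothetical variant of the test driver from the diff above.
// It keeps the non-zero exit code but surfaces the exception text.
#include <gtest/gtest.h>

#include <exception>
#include <iostream>

#ifdef ADIOS2_HAVE_MPI
#include <mpi.h>
#endif

int main(int argc, char **argv)
{
#ifdef ADIOS2_HAVE_MPI
    MPI_Init(nullptr, nullptr);
#endif

    int result = 1;
    try
    {
        ::testing::InitGoogleTest(&argc, argv);
        result = RUN_ALL_TESTS();
    }
    catch (const std::exception &e)
    {
        // Report why the run failed so CI logs are not silent about it.
        std::cerr << "Exception in test driver: " << e.what() << std::endl;
        result = 1;
    }

#ifdef ADIOS2_HAVE_MPI
    MPI_Finalize();
#endif

    return result;
}

One plausible reason to catch rather than let the exception propagate is to guarantee MPI_Finalize() still runs on the MPI build; the sketch keeps that property while avoiding a completely silent swallow.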
@@ -340,7 +340,8 @@ class _InternalFrame(object):
                  index_map: Optional[List[IndexMap]] = None,
                  scol: Optional[spark.Column] = None,
                  data_columns: Optional[List[str]] = None,
-                 column_index: Optional[List[Tuple[str]]] = None) -> None:
+                 column_index: Optional[List[Tuple[str]]] = None,
+                 column_names: Optional[List[str]] = None) -> None:
         """
         Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and
         index fields and names.
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ An internal immutable DataFrame with some metadata to manage indexes. """ from typing import List, Optional, Tuple, Union import numpy as np import pandas as pd from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype from pyspark import sql as spark from pyspark._globals import _NoValue, _NoValueType from pyspark.sql.types import DataType, StructField, StructType, to_arrow_type from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.typedef import infer_pd_series_spark_type from databricks.koalas.utils import default_session, lazy_property, scol_for IndexMap = Tuple[str, Optional[str]] class _InternalFrame(object): """ The internal immutable DataFrame which manages Spark DataFrame and column names and index information. :ivar _sdf: Spark DataFrame :ivar _index_map: list of pair holding the Spark field names for indexes, and the index name to be seen in Koalas DataFrame. :ivar _scol: Spark Column :ivar _data_columns: list of the Spark field names to be seen as columns in Koalas DataFrame. .. note:: this is an internal class. It is not supposed to be exposed to users and users should not directly access to it. The internal immutable DataFrame represents the index information for a DataFrame it belongs to. For instance, if we have a Koalas DataFrame as below, Pandas DataFrame does not store the index as columns. >>> kdf = ks.DataFrame({ ... 'A': [1, 2, 3, 4], ... 'B': [5, 6, 7, 8], ... 'C': [9, 10, 11, 12], ... 'D': [13, 14, 15, 16], ... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E']) >>> kdf # doctest: +NORMALIZE_WHITESPACE A B C D E 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 However, all columns including index column are also stored in Spark DataFrame internally as below. >>> kdf.to_spark().show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ In order to fill this gap, the current metadata is used by mapping Spark's internal column to Koalas' index. 
See the method below: * `sdf` represents the internal Spark DataFrame * `data_columns` represents non-indexing columns * `index_columns` represents internal index columns * `columns` represents all columns * `index_names` represents the external index name * `index_map` is zipped pairs of `index_columns` and `index_names` * `spark_df` represents Spark DataFrame derived by the metadata * `pandas_df` represents pandas DataFrame derived by the metadata >>> internal = kdf._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.data_columns ['A', 'B', 'C', 'D', 'E'] >>> internal.index_columns ['__index_level_0__'] >>> internal.columns ['__index_level_0__', 'A', 'B', 'C', 'D', 'E'] >>> internal.index_names [None] >>> internal.index_map [('__index_level_0__', None)] >>> internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.pandas_df A B C D E 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 In case that index is set to one of the existing column as below: >>> kdf1 = kdf.set_index("A") >>> kdf1 # doctest: +NORMALIZE_WHITESPACE B C D E A 1 5 9 13 17 2 6 10 14 18 3 7 11 15 19 4 8 12 16 20 >>> kdf1.to_spark().show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal = kdf1._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal.data_columns ['B', 'C', 'D', 'E'] >>> internal.index_columns ['A'] >>> internal.columns ['A', 'B', 'C', 'D', 'E'] >>> internal.index_names ['A'] >>> internal.index_map [('A', 'A')] >>> internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B C D E A 1 5 9 13 17 2 6 10 14 18 3 7 11 15 19 4 8 12 16 20 In case that index becomes a multi index as below: >>> kdf2 = kdf.set_index("A", append=True) >>> kdf2 # doctest: +NORMALIZE_WHITESPACE B C D E A 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 >>> kdf2.to_spark().show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal = kdf2._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.data_columns ['B', 'C', 'D', 'E'] >>> 
internal.index_columns ['__index_level_0__', 'A'] >>> internal.columns ['__index_level_0__', 'A', 'B', 'C', 'D', 'E'] >>> internal.index_names [None, 'A'] >>> internal.index_map [('__index_level_0__', None), ('A', 'A')] >>> internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+---+---+---+---+---+ |__index_level_0__| A| B| C| D| E| +-----------------+---+---+---+---+---+ | 0| 1| 5| 9| 13| 17| | 1| 2| 6| 10| 14| 18| | 2| 3| 7| 11| 15| 19| | 3| 4| 8| 12| 16| 20| +-----------------+---+---+---+---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B C D E A 0 1 5 9 13 17 1 2 6 10 14 18 2 3 7 11 15 19 3 4 8 12 16 20 For multi-level columns, it also holds column_index >>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ... ('Y', 'C'), ('Y', 'D')]) >>> kdf3 = ks.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12], ... [13, 14, 15, 16], ... [17, 18, 19, 20]], columns = columns) >>> kdf3 # doctest: +NORMALIZE_WHITESPACE X Y A B C D 0 1 2 3 4 1 5 6 7 8 2 9 10 11 12 3 13 14 15 16 4 17 18 19 20 >>> internal = kdf3._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +-----------------+----------+----------+----------+----------+ |__index_level_0__|('X', 'A')|('X', 'B')|('Y', 'C')|('Y', 'D')| +-----------------+----------+----------+----------+----------+ | 0| 1| 2| 3| 4| | 1| 5| 6| 7| 8| | 2| 9| 10| 11| 12| | 3| 13| 14| 15| 16| | 4| 17| 18| 19| 20| +-----------------+----------+----------+----------+----------+ >>> internal.data_columns ["('X', 'A')", "('X', 'B')", "('Y', 'C')", "('Y', 'D')"] >>> internal.column_index [('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')] For series, it also holds scol to represent the column. >>> kseries = kdf1.B >>> kseries A 1 5 2 6 3 7 4 8 Name: B, dtype: int64 >>> internal = kseries._internal >>> internal.sdf.show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+---+---+ | A| B| C| D| E| +---+---+---+---+---+ | 1| 5| 9| 13| 17| | 2| 6| 10| 14| 18| | 3| 7| 11| 15| 19| | 4| 8| 12| 16| 20| +---+---+---+---+---+ >>> internal.scol Column<b'B'> >>> internal.data_columns ['B'] >>> internal.index_columns ['A'] >>> internal.columns ['A', 'B'] >>> internal.index_names ['A'] >>> internal.index_map [('A', 'A')] >>> internal.spark_df.show() # doctest: +NORMALIZE_WHITESPACE +---+---+ | A| B| +---+---+ | 1| 5| | 2| 6| | 3| 7| | 4| 8| +---+---+ >>> internal.pandas_df # doctest: +NORMALIZE_WHITESPACE B A 1 5 2 6 3 7 4 8 """ def __init__(self, sdf: spark.DataFrame, index_map: Optional[List[IndexMap]] = None, scol: Optional[spark.Column] = None, data_columns: Optional[List[str]] = None, column_index: Optional[List[Tuple[str]]] = None) -> None: """ Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and index fields and names. :param sdf: Spark DataFrame to be managed. :param index_map: list of string pair Each pair holds the index field name which exists in Spark fields, and the index name. :param scol: Spark Column to be managed. :param data_columns: list of string Field names to appear as columns. If scol is not None, this argument is ignored, otherwise if this is None, calculated from sdf. :param column_index: list of tuples with the same length The multi-level values in the tuples. 
""" assert isinstance(sdf, spark.DataFrame) assert index_map is None \ or all(isinstance(index_field, str) and (index_name is None or isinstance(index_name, str)) for index_field, index_name in index_map) assert scol is None or isinstance(scol, spark.Column) assert data_columns is None or all(isinstance(col, str) for col in data_columns) self._sdf = sdf # type: spark.DataFrame self._index_map = (index_map if index_map is not None else []) # type: List[IndexMap] self._scol = scol # type: Optional[spark.Column] if scol is not None: self._data_columns = sdf.select(scol).columns column_index = None elif data_columns is None: index_columns = set(index_column for index_column, _ in self._index_map) self._data_columns = [column for column in sdf.columns if column not in index_columns] else: self._data_columns = data_columns assert column_index is None or (len(column_index) == len(self._data_columns) and all(isinstance(i, tuple) for i in column_index) and len(set(len(i) for i in column_index)) <= 1) self._column_index = column_index def scol_for(self, column_name: str) -> spark.Column: """ Return Spark Column for the given column name. """ if self._scol is not None and column_name == self._data_columns[0]: return self._scol else: return scol_for(self._sdf, column_name) def spark_type_for(self, column_name: str) -> DataType: """ Return DataType for the given column name. """ return self._sdf.schema[column_name].dataType @property def sdf(self) -> spark.DataFrame: """ Return the managed Spark DataFrame. """ return self._sdf @property def data_columns(self) -> List[str]: """ Return the managed column field names. """ return self._data_columns @lazy_property def data_scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed data columns. """ return [self.scol_for(column) for column in self.data_columns] @lazy_property def index_columns(self) -> List[str]: """ Return the managed index field names. """ return [index_column for index_column, _ in self._index_map] @lazy_property def index_scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed index columns. """ return [self.scol_for(column) for column in self.index_columns] @lazy_property def columns(self) -> List[str]: """ Return all the field names including index field names. """ index_columns = set(self.index_columns) return self.index_columns + [column for column in self._data_columns if column not in index_columns] @lazy_property def scols(self) -> List[spark.Column]: """ Return Spark Columns for the managed columns including index columns. """ return [self.scol_for(column) for column in self.columns] @property def index_map(self) -> List[IndexMap]: """ Return the managed index information. """ return self._index_map @lazy_property def index_names(self) -> List[Optional[str]]: """ Return the managed index names. """ return [index_name for _, index_name in self.index_map] @property def scol(self) -> Optional[spark.Column]: """ Return the managed Spark Column. """ return self._scol @property def column_index(self) -> Optional[List[Tuple[str]]]: """ Return the managed column index. """ return self._column_index @lazy_property def spark_df(self) -> spark.DataFrame: """ Return as Spark DataFrame. """ return self._sdf.select(self.scols) @lazy_property def pandas_df(self): """ Return as pandas DataFrame. 
""" sdf = self.spark_df pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype() for field in sdf.schema}) index_columns = self.index_columns if len(index_columns) > 0: append = False for index_field in index_columns: drop = index_field not in self.data_columns pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[self.data_columns] if self._column_index is not None: pdf.columns = pd.MultiIndex.from_tuples(self._column_index) index_names = self.index_names if len(index_names) > 0: if isinstance(pdf.index, pd.MultiIndex): pdf.index.names = index_names else: pdf.index.name = index_names[0] return pdf def copy(self, sdf: Union[spark.DataFrame, _NoValueType] = _NoValue, index_map: Union[List[IndexMap], _NoValueType] = _NoValue, scol: Union[spark.Column, _NoValueType] = _NoValue, data_columns: Union[List[str], _NoValueType] = _NoValue, column_index: Union[List[Tuple[str]], _NoValueType] = _NoValue) -> '_InternalFrame': """ Copy the immutable DataFrame. :param sdf: the new Spark DataFrame. If None, then the original one is used. :param index_map: the new index information. If None, then the original one is used. :param scol: the new Spark Column. If None, then the original one is used. :param data_columns: the new column field names. If None, then the original ones are used. :param column_index: the new column index. :return: the copied immutable DataFrame. """ if sdf is _NoValue: sdf = self._sdf if index_map is _NoValue: index_map = self._index_map if scol is _NoValue: scol = self._scol if data_columns is _NoValue: data_columns = self._data_columns if column_index is _NoValue: column_index = self._column_index return _InternalFrame(sdf, index_map=index_map, scol=scol, data_columns=data_columns, column_index=column_index) @staticmethod def from_pandas(pdf: pd.DataFrame) -> '_InternalFrame': """ Create an immutable DataFrame from pandas DataFrame. :param pdf: :class:`pd.DataFrame` :return: the created immutable DataFrame """ columns = pdf.columns data_columns = [str(col) for col in columns] if isinstance(columns, pd.MultiIndex): column_index = columns.tolist() else: column_index = None index = pdf.index index_map = [] # type: List[IndexMap] if isinstance(index, pd.MultiIndex): if index.names is None: index_map = [('__index_level_{}__'.format(i), None) for i in range(len(index.levels))] else: index_map = [('__index_level_{}__'.format(i) if name is None else name, name) for i, name in enumerate(index.names)] else: index_map = [(index.name if index.name is not None else '__index_level_0__', index.name)] index_columns = [index_column for index_column, _ in index_map] reset_index = pdf.reset_index() reset_index.columns = index_columns + data_columns schema = StructType([StructField(name, infer_pd_series_spark_type(col), nullable=bool(col.isnull().any())) for name, col in reset_index.iteritems()]) for name, col in reset_index.iteritems(): dt = col.dtype if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt): continue reset_index[name] = col.replace({np.nan: None}) sdf = default_session().createDataFrame(reset_index, schema=schema) return _InternalFrame(sdf=sdf, index_map=index_map, data_columns=data_columns, column_index=column_index)
1
10,854
How about `column_index_names`? `column_names` sounds ambiguous.
databricks-koalas
py
@@ -89,6 +89,7 @@ public class TableProperties {
   public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
   public static final String METADATA_COMPRESSION_DEFAULT = "none";
 
+  public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
   public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
   public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
 }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; public class TableProperties { private TableProperties() {} public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries"; public static final int COMMIT_NUM_RETRIES_DEFAULT = 4; public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms"; public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100; public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms"; public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms"; public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes"; public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge"; public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100; public static final String DEFAULT_FILE_FORMAT = "write.format.default"; public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet"; public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes"; public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes"; public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes"; public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec"; public static final String PARQUET_COMPRESSION_DEFAULT = "gzip"; public static final String AVRO_COMPRESSION = "write.avro.compression-codec"; public static final String AVRO_COMPRESSION_DEFAULT = "gzip"; public static final String SPLIT_SIZE = "read.split.target-size"; public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB public static final String SPLIT_LOOKBACK = "read.split.planning-lookback"; public static final int SPLIT_LOOKBACK_DEFAULT = 10; public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost"; public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled"; public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false; public static final String OBJECT_STORE_PATH = "write.object-storage.path"; // This only applies to files written after this property is set. Files previously written aren't // relocated to reflect this parameter. 
// If not set, defaults to a "data" folder underneath the root path of the table. public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path"; // This only applies to files written after this property is set. Files previously written aren't // relocated to reflect this parameter. // If not set, defaults to a "meatdata" folder underneath the root path of the table. public static final String WRITE_METADATA_LOCATION = "write.metadata.path"; public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled"; public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true; public static final String METADATA_COMPRESSION = "write.metadata.compression-codec"; public static final String METADATA_COMPRESSION_DEFAULT = "none"; public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default"; public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)"; }
1
14,522
+1 on this. Do we want to have it as `WRITE_METRICS_MODE_COLUMN_CONF_PREFIX` to be consistent with defaults? Is there a possibility we will have `READ_METRICS_MODE_COLUMN_CONF_PREFIX`? Not sure.
apache-iceberg
java
@@ -2264,8 +2264,8 @@ class DataFrameTest(ReusedSQLTestCase, SQLTestUtils):
 
     def test_cumprod(self):
         pdf = pd.DataFrame(
-            [[2.0, 1.0], [5, None], [1.0, 1.0], [2.0, 4.0], [4.0, 9.0]],
-            columns=list("AB"),
+            [[2.0, 1.0, 1], [5, None, 2], [1.0, 1.0, 3], [2.0, 4.0, 4], [4.0, 9.0, 5]],
+            columns=list("ABC"),
             index=np.random.rand(5),
         )
         kdf = ks.from_pandas(pdf)
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from datetime import datetime from distutils.version import LooseVersion import inspect import sys import unittest from io import StringIO import numpy as np import pandas as pd import pyspark from pyspark import StorageLevel from pyspark.ml.linalg import SparseVector from databricks import koalas as ks from databricks.koalas.config import option_context from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils from databricks.koalas.exceptions import PandasNotImplementedError from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.frame import CachedDataFrame class DataFrameTest(ReusedSQLTestCase, SQLTestUtils): @property def pdf(self): return pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],}, index=np.random.rand(9), ) @property def kdf(self): return ks.from_pandas(self.pdf) @property def df_pair(self): pdf = self.pdf kdf = ks.from_pandas(pdf) return pdf, kdf def test_dataframe(self): pdf, kdf = self.df_pair self.assert_eq(kdf["a"] + 1, pdf["a"] + 1) self.assert_eq(kdf.columns, pd.Index(["a", "b"])) self.assert_eq(kdf[kdf["b"] > 2], pdf[pdf["b"] > 2]) self.assert_eq(kdf[["a", "b"]], pdf[["a", "b"]]) self.assert_eq(kdf.a, pdf.a) self.assert_eq(kdf.b.mean(), pdf.b.mean()) self.assert_eq(kdf.b.var(), pdf.b.var()) self.assert_eq(kdf.b.std(), pdf.b.std()) pdf, kdf = self.df_pair self.assert_eq(kdf[["a", "b"]], pdf[["a", "b"]]) self.assertEqual(kdf.a.notnull().alias("x").name, "x") # check ks.DataFrame(ks.Series) pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3)) kser = ks.from_pandas(pser) self.assert_eq(pd.DataFrame(pser), ks.DataFrame(kser)) # check kdf[pd.Index] pdf, kdf = self.df_pair column_mask = pdf.columns.isin(["a", "b"]) index_cols = pdf.columns[column_mask] self.assert_eq(kdf[index_cols], pdf[index_cols]) def test_inplace(self): pdf, kdf = self.df_pair pser = pdf.a kser = kdf.a pdf["a"] = pdf["a"] + 10 kdf["a"] = kdf["a"] + 10 self.assert_eq(kdf, pdf) self.assert_eq(kser, pser) def test_assign_list(self): pdf, kdf = self.df_pair pser = pdf.a kser = kdf.a pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90] kdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90] self.assert_eq(kdf.sort_index(), pdf.sort_index()) self.assert_eq(kser, pser) with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"): kdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80] def test_dataframe_multiindex_columns(self): pdf = pd.DataFrame( { ("x", "a", "1"): [1, 2, 3], ("x", "b", "2"): [4, 5, 6], ("y.z", "c.d", "3"): [7, 8, 9], ("x", "b", "4"): [10, 11, 12], }, index=np.random.rand(3), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) self.assert_eq(kdf["x"], pdf["x"]) self.assert_eq(kdf["y.z"], pdf["y.z"]) self.assert_eq(kdf["x"]["b"], pdf["x"]["b"]) self.assert_eq(kdf["x"]["b"]["2"], pdf["x"]["b"]["2"]) self.assert_eq(kdf.x, pdf.x) self.assert_eq(kdf.x.b, pdf.x.b) self.assert_eq(kdf.x.b["2"], pdf.x.b["2"]) 
self.assertRaises(KeyError, lambda: kdf["z"]) self.assertRaises(AttributeError, lambda: kdf.z) self.assert_eq(kdf[("x",)], pdf[("x",)]) self.assert_eq(kdf[("x", "a")], pdf[("x", "a")]) self.assert_eq(kdf[("x", "a", "1")], pdf[("x", "a", "1")]) def test_dataframe_column_level_name(self): column = pd.Index(["A", "B", "C"], name="X") pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) self.assert_eq(kdf.columns.names, pdf.columns.names) self.assert_eq(kdf.to_pandas().columns.names, pdf.columns.names) def test_dataframe_multiindex_names_level(self): columns = pd.MultiIndex.from_tuples( [("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")], names=["lvl_1", "lvl_2", "lv_3"], ) pdf = pd.DataFrame( [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]], columns=columns, index=np.random.rand(5), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.columns.names, pdf.columns.names) self.assert_eq(kdf.to_pandas().columns.names, pdf.columns.names) kdf1 = ks.from_pandas(pdf) self.assert_eq(kdf1.columns.names, pdf.columns.names) with self.assertRaisesRegex( ValueError, "Column_index_names should " "be list-like or None for a MultiIndex" ): ks.DataFrame(kdf1._internal.copy(column_label_names="level")) self.assert_eq(kdf["X"], pdf["X"]) self.assert_eq(kdf["X"].columns.names, pdf["X"].columns.names) self.assert_eq(kdf["X"].to_pandas().columns.names, pdf["X"].columns.names) self.assert_eq(kdf["X"]["A"], pdf["X"]["A"]) self.assert_eq(kdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names) self.assert_eq(kdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names) self.assert_eq(kdf[("X", "A")], pdf[("X", "A")]) self.assert_eq(kdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names) self.assert_eq(kdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names) self.assert_eq(kdf[("X", "A", "Z")], pdf[("X", "A", "Z")]) def test_iterrows(self): pdf = pd.DataFrame( { ("x", "a", "1"): [1, 2, 3], ("x", "b", "2"): [4, 5, 6], ("y.z", "c.d", "3"): [7, 8, 9], ("x", "b", "4"): [10, 11, 12], }, index=np.random.rand(3), ) kdf = ks.from_pandas(pdf) for (pdf_k, pdf_v), (kdf_k, kdf_v) in zip(pdf.iterrows(), kdf.iterrows()): self.assert_eq(pdf_k, kdf_k) self.assert_eq(pdf_v, kdf_v) def test_reset_index(self): pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.reset_index(), pdf.reset_index()) self.assert_eq(kdf.reset_index(drop=True), pdf.reset_index(drop=True)) pdf.index.name = "a" kdf.index.name = "a" with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"): kdf.reset_index() self.assert_eq(kdf.reset_index(drop=True), pdf.reset_index(drop=True)) # inplace pser = pdf.a kser = kdf.a pdf.reset_index(drop=True, inplace=True) kdf.reset_index(drop=True, inplace=True) self.assert_eq(kdf, pdf) self.assert_eq(kser, pser) def test_reset_index_with_default_index_types(self): pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3)) kdf = ks.from_pandas(pdf) with ks.option_context("compute.default_index_type", "sequence"): self.assert_eq(kdf.reset_index(), pdf.reset_index()) with ks.option_context("compute.default_index_type", "distributed-sequence"): self.assert_eq(kdf.reset_index(), pdf.reset_index()) with ks.option_context("compute.default_index_type", "distributed"): # the index is different. 
self.assert_eq(kdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index()) def test_reset_index_with_multiindex_columns(self): index = pd.MultiIndex.from_tuples( [("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")], names=["class", "name"], ) columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")]) pdf = pd.DataFrame( [(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")], index=index, columns=columns, ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) self.assert_eq(kdf.reset_index(), pdf.reset_index()) self.assert_eq(kdf.reset_index(level="class"), pdf.reset_index(level="class")) self.assert_eq( kdf.reset_index(level="class", col_level=1), pdf.reset_index(level="class", col_level=1) ) self.assert_eq( kdf.reset_index(level="class", col_level=1, col_fill="species"), pdf.reset_index(level="class", col_level=1, col_fill="species"), ) self.assert_eq( kdf.reset_index(level="class", col_level=1, col_fill="genus"), pdf.reset_index(level="class", col_level=1, col_fill="genus"), ) with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"): kdf.reset_index(col_level=2) pdf.index.names = [("x", "class"), ("y", "name")] kdf.index.names = [("x", "class"), ("y", "name")] self.assert_eq(kdf.reset_index(), pdf.reset_index()) with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."): kdf.reset_index(col_level=1) def test_multiindex_column_access(self): columns = pd.MultiIndex.from_tuples( [ ("a", "", "", "b"), ("c", "", "d", ""), ("e", "", "f", ""), ("e", "g", "", ""), ("", "", "", "h"), ("i", "", "", ""), ] ) pdf = pd.DataFrame( [ (1, "a", "x", 10, 100, 1000), (2, "b", "y", 20, 200, 2000), (3, "c", "z", 30, 300, 3000), ], columns=columns, index=np.random.rand(3), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) self.assert_eq(kdf["a"], pdf["a"]) self.assert_eq(kdf["a"]["b"], pdf["a"]["b"]) self.assert_eq(kdf["c"], pdf["c"]) self.assert_eq(kdf["c"]["d"], pdf["c"]["d"]) self.assert_eq(kdf["e"], pdf["e"]) self.assert_eq(kdf["e"][""]["f"], pdf["e"][""]["f"]) self.assert_eq(kdf["e"]["g"], pdf["e"]["g"]) self.assert_eq(kdf[""], pdf[""]) self.assert_eq(kdf[""]["h"], pdf[""]["h"]) self.assert_eq(kdf["i"], pdf["i"]) self.assert_eq(kdf[["a", "e"]], pdf[["a", "e"]]) self.assert_eq(kdf[["e", "a"]], pdf[["e", "a"]]) self.assert_eq(kdf[("a",)], pdf[("a",)]) self.assert_eq(kdf[("e", "g")], pdf[("e", "g")]) # self.assert_eq(kdf[("i",)], pdf[("i",)]) self.assert_eq(kdf[("i", "")], pdf[("i", "")]) self.assertRaises(KeyError, lambda: kdf[("a", "b")]) def test_repr_cache_invalidation(self): # If there is any cache, inplace operations should invalidate it. df = ks.range(10) df.__repr__() df["a"] = df["id"] self.assertEqual(df.__repr__(), df.to_pandas().__repr__()) def test_repr_html_cache_invalidation(self): # If there is any cache, inplace operations should invalidate it. 
df = ks.range(10) df._repr_html_() df["a"] = df["id"] self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_()) def test_empty_dataframe(self): pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")}) self.assertRaises(ValueError, lambda: ks.from_pandas(pdf)) with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self.assertRaises(ValueError, lambda: ks.from_pandas(pdf)) def test_all_null_dataframe(self): pdf = pd.DataFrame( { "a": pd.Series([None, None, None], dtype="float64"), "b": pd.Series([None, None, None], dtype="str"), }, index=np.random.rand(3), ) self.assertRaises(ValueError, lambda: ks.from_pandas(pdf)) with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self.assertRaises(ValueError, lambda: ks.from_pandas(pdf)) def test_nullable_object(self): pdf = pd.DataFrame( { "a": list("abc") + [np.nan], "b": list(range(1, 4)) + [np.nan], "c": list(np.arange(3, 6).astype("i1")) + [np.nan], "d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan], "e": [True, False, True, np.nan], "f": list(pd.date_range("20130101", periods=3)) + [np.nan], }, index=np.random.rand(4), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) def test_assign(self): pdf, kdf = self.df_pair kdf["w"] = 1.0 pdf["w"] = 1.0 self.assert_eq(kdf, pdf) kdf = kdf.assign(a=kdf["a"] * 2) pdf = pdf.assign(a=pdf["a"] * 2) self.assert_eq(kdf, pdf) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w")]) pdf.columns = columns kdf.columns = columns kdf[("a", "c")] = "def" pdf[("a", "c")] = "def" self.assert_eq(kdf, pdf) kdf = kdf.assign(Z="ZZ") pdf = pdf.assign(Z="ZZ") self.assert_eq(kdf, pdf) kdf["x"] = "ghi" pdf["x"] = "ghi" self.assert_eq(kdf, pdf) def test_head_tail(self): pdf, kdf = self.df_pair self.assert_eq(kdf.head(2), pdf.head(2)) self.assert_eq(kdf.head(3), pdf.head(3)) self.assert_eq(kdf.head(0), pdf.head(0)) self.assert_eq(kdf.head(-3), pdf.head(-3)) self.assert_eq(kdf.head(-10), pdf.head(-10)) def test_attributes(self): kdf = self.kdf self.assertIn("a", dir(kdf)) self.assertNotIn("foo", dir(kdf)) self.assertRaises(AttributeError, lambda: kdf.foo) kdf = ks.DataFrame({"a b c": [1, 2, 3]}) self.assertNotIn("a b c", dir(kdf)) kdf = ks.DataFrame({"a": [1, 2], 5: [1, 2]}) self.assertIn("a", dir(kdf)) self.assertNotIn(5, dir(kdf)) def test_column_names(self): kdf = self.kdf self.assert_eq(kdf.columns, pd.Index(["a", "b"])) self.assert_eq(kdf[["b", "a"]].columns, pd.Index(["b", "a"])) self.assertEqual(kdf["a"].name, "a") self.assertEqual((kdf["a"] + 1).name, "a") self.assertEqual((kdf["a"] + kdf["b"]).name, "a") # TODO: None def test_rename_columns(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7) ) kdf = ks.from_pandas(pdf) kdf.columns = ["x", "y"] pdf.columns = ["x", "y"] self.assert_eq(kdf.columns, pd.Index(["x", "y"])) self.assert_eq(kdf, pdf) self.assert_eq(kdf._internal.data_spark_column_names, ["x", "y"]) self.assert_eq(kdf.to_spark().columns, ["x", "y"]) self.assert_eq(kdf.to_spark(index_col="index").columns, ["index", "x", "y"]) columns = pdf.columns columns.name = "lvl_1" kdf.columns = columns self.assert_eq(kdf.columns.names, ["lvl_1"]) self.assert_eq(kdf, pdf) msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements" with self.assertRaisesRegex(ValueError, msg): kdf.columns = [1, 2, 3, 4] # Multi-index columns pdf = 
pd.DataFrame( {("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4) ) kdf = ks.from_pandas(pdf) columns = pdf.columns self.assert_eq(kdf.columns, columns) self.assert_eq(kdf, pdf) pdf.columns = ["x", "y"] kdf.columns = ["x", "y"] self.assert_eq(kdf.columns, pd.Index(["x", "y"])) self.assert_eq(kdf, pdf) self.assert_eq(kdf._internal.data_spark_column_names, ["x", "y"]) self.assert_eq(kdf.to_spark().columns, ["x", "y"]) self.assert_eq(kdf.to_spark(index_col="index").columns, ["index", "x", "y"]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.columns, columns) self.assert_eq(kdf, pdf) self.assert_eq(kdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"]) self.assert_eq(kdf.to_spark().columns, ["(A, 0)", "(B, 1)"]) self.assert_eq(kdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"]) columns.names = ["lvl_1", "lvl_2"] kdf.columns = columns self.assert_eq(kdf.columns.names, ["lvl_1", "lvl_2"]) self.assert_eq(kdf, pdf) self.assert_eq(kdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"]) self.assert_eq(kdf.to_spark().columns, ["(A, 0)", "(B, 1)"]) self.assert_eq(kdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"]) def test_rename_dataframe(self): pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) kdf1 = ks.from_pandas(pdf1) self.assert_eq( kdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"}) ) result_kdf = kdf1.rename(index={1: 10, 2: 20}) result_pdf = pdf1.rename(index={1: 10, 2: 20}) self.assert_eq(result_kdf, result_pdf) # inplace pser = result_pdf.A kser = result_kdf.A result_kdf.rename(index={10: 100, 20: 200}, inplace=True) result_pdf.rename(index={10: 100, 20: 200}, inplace=True) self.assert_eq(result_kdf, result_pdf) self.assert_eq(kser, pser) def str_lower(s) -> str: return str.lower(s) self.assert_eq( kdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns") ) def mul10(x) -> int: return x * 10 self.assert_eq(kdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index")) self.assert_eq( kdf1.rename(columns=str_lower, index={1: 10, 2: 20}), pdf1.rename(columns=str_lower, index={1: 10, 2: 20}), ) idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")]) pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx) kdf2 = ks.from_pandas(pdf2) self.assert_eq(kdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower)) self.assert_eq( kdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0) ) self.assert_eq( kdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1) ) pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab")) kdf3 = ks.from_pandas(pdf3) self.assert_eq(kdf3.rename(index=str_lower), pdf3.rename(index=str_lower)) self.assert_eq(kdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)) self.assert_eq(kdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)) pdf4 = pdf2 + 1 kdf4 = kdf2 + 1 self.assert_eq(kdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower)) pdf5 = pdf3 + 1 kdf5 = kdf3 + 1 self.assert_eq(kdf5.rename(index=str_lower), pdf5.rename(index=str_lower)) def test_dot_in_column_name(self): self.assert_eq( ks.DataFrame(ks.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"], ks.Series([1], name="a.b"), ) def test_droplevel(self): # droplevel is new in pandas 0.24.0 if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"): pdf = ( pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 
8], [9, 10, 11, 12]]) .set_index([0, 1]) .rename_axis(["a", "b"]) ) pdf.columns = pd.MultiIndex.from_tuples( [("c", "e"), ("d", "f")], names=["level_1", "level_2"] ) kdf = ks.from_pandas(pdf) self.assert_eq(pdf.droplevel("a"), kdf.droplevel("a")) self.assert_eq(pdf.droplevel(0), kdf.droplevel(0)) self.assert_eq(pdf.droplevel(-1), kdf.droplevel(-1)) self.assert_eq(pdf.droplevel("level_1", axis=1), kdf.droplevel("level_1", axis=1)) self.assert_eq(pdf.droplevel(0, axis=1), kdf.droplevel(0, axis=1)) self.assertRaises(ValueError, lambda: kdf.droplevel(["a", "b"])) self.assertRaises(ValueError, lambda: kdf.droplevel(["level_1", "level_2"], axis=1)) self.assertRaises(ValueError, lambda: kdf.droplevel([1, 1, 1, 1, 1])) self.assertRaises(IndexError, lambda: kdf.droplevel(-3)) # Tupled names pdf.index.names = [("a", "b"), ("x", "y")] kdf = ks.from_pandas(pdf) self.assert_eq(pdf.droplevel([("a", "b")]), kdf.droplevel([("a", "b")])) def test_drop(self): pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2)) kdf = ks.from_pandas(pdf) # Assert 'labels' or 'columns' parameter is set expected_error_message = "Need to specify at least one of 'labels' or 'columns'" with self.assertRaisesRegex(ValueError, expected_error_message): kdf.drop() # Assert axis cannot be 0 with self.assertRaisesRegex(NotImplementedError, "Drop currently only works for axis=1"): kdf.drop("x", axis=0) # Assert using a str for 'labels' works self.assert_eq(kdf.drop("x", axis=1), pdf.drop("x", axis=1)) # Assert axis is 1 by default self.assert_eq(kdf.drop("x"), pdf.drop("x", axis=1)) # Assert using a list for 'labels' works self.assert_eq(kdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1)) # Assert using 'columns' instead of 'labels' produces the same results self.assert_eq(kdf.drop(columns="x"), pdf.drop(columns="x")) self.assert_eq(kdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"])) # Assert 'labels' being used when both 'labels' and 'columns' are specified # TODO: should throw an error? 
expected_output = pd.DataFrame({"y": [3, 4], "z": [5, 6]}, index=kdf.index.to_pandas()) self.assert_eq(kdf.drop(labels=["x"], columns=["y"]), expected_output) columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")]) pdf.columns = columns kdf = ks.from_pandas(pdf) self.assert_eq(kdf.drop(columns="a"), pdf.drop(columns="a")) self.assert_eq(kdf.drop(columns=("a", "x")), pdf.drop(columns=("a", "x"))) self.assert_eq(kdf.drop(columns=[("a", "x"), "b"]), pdf.drop(columns=[("a", "x"), "b"])) self.assertRaises(KeyError, lambda: kdf.drop(columns="c")) self.assertRaises(KeyError, lambda: kdf.drop(columns=("a", "z"))) def _test_dropna(self, pdf, axis): kdf = ks.from_pandas(pdf) self.assert_eq(kdf.dropna(axis=axis), pdf.dropna(axis=axis)) self.assert_eq(kdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all")) self.assert_eq(kdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"])) self.assert_eq(kdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"])) self.assert_eq( kdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"]) ) self.assert_eq( kdf.dropna(axis=axis, subset=["y", "z"], how="all"), pdf.dropna(axis=axis, subset=["y", "z"], how="all"), ) self.assert_eq(kdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2)) self.assert_eq( kdf.dropna(axis=axis, thresh=1, subset=["y", "z"]), pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]), ) pdf2 = pdf.copy() kdf2 = kdf.copy() pser = pdf2[pdf2.columns[0]] kser = kdf2[kdf2.columns[0]] pdf2.dropna(inplace=True) kdf2.dropna(inplace=True) self.assert_eq(kdf2, pdf2) self.assert_eq(kser, pser) # multi-index columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")]) if axis == 0: pdf.columns = columns else: pdf.index = columns kdf = ks.from_pandas(pdf) self.assert_eq(kdf.dropna(axis=axis), pdf.dropna(axis=axis)) self.assert_eq(kdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all")) self.assert_eq( kdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")]) ) self.assert_eq( kdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")]) ) self.assert_eq( kdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]), pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]), ) self.assert_eq( kdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"), pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"), ) self.assert_eq(kdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2)) self.assert_eq( kdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]), pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]), ) def test_dropna_axis_index(self): pdf = pd.DataFrame( { "x": [np.nan, 2, 3, 4, np.nan, 6], "y": [1, 2, np.nan, 4, np.nan, np.nan], "z": [1, 2, 3, 4, np.nan, np.nan], }, index=np.random.rand(6), ) kdf = ks.from_pandas(pdf) self._test_dropna(pdf, axis=0) # empty pdf = pd.DataFrame(index=np.random.rand(6)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.dropna(), pdf.dropna()) self.assert_eq(kdf.dropna(how="all"), pdf.dropna(how="all")) self.assert_eq(kdf.dropna(thresh=0), pdf.dropna(thresh=0)) self.assert_eq(kdf.dropna(thresh=1), pdf.dropna(thresh=1)) with self.assertRaisesRegex(ValueError, "No axis named foo"): kdf.dropna(axis="foo") self.assertRaises(KeyError, lambda: kdf.dropna(subset="1")) with self.assertRaisesRegex(ValueError, "invalid how option: 1"): kdf.dropna(how=1) with self.assertRaisesRegex(TypeError, "must specify how or thresh"): kdf.dropna(how=None) 
def test_dropna_axis_column(self): pdf = pd.DataFrame( { "x": [np.nan, 2, 3, 4, np.nan, 6], "y": [1, 2, np.nan, 4, np.nan, np.nan], "z": [1, 2, 3, 4, np.nan, np.nan], }, index=[str(r) for r in np.random.rand(6)], ).T self._test_dropna(pdf, axis=1) # empty pdf = pd.DataFrame({"x": [], "y": [], "z": []}) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.dropna(axis=1), pdf.dropna(axis=1)) self.assert_eq(kdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all")) self.assert_eq(kdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0)) self.assert_eq(kdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1)) def test_dtype(self): pdf = pd.DataFrame( { "a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1"), "d": np.arange(4.0, 7.0, dtype="float64"), "e": [True, False, True], "f": pd.date_range("20130101", periods=3), }, index=np.random.rand(3), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) self.assertTrue((kdf.dtypes == pdf.dtypes).all()) # multi-index columns columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef"))) pdf.columns = columns kdf.columns = columns self.assertTrue((kdf.dtypes == pdf.dtypes).all()) def test_fillna(self): pdf = pd.DataFrame( { "x": [np.nan, 2, 3, 4, np.nan, 6], "y": [1, 2, np.nan, 4, np.nan, np.nan], "z": [1, 2, 3, 4, np.nan, np.nan], }, index=np.random.rand(6), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) self.assert_eq(kdf.fillna(-1), pdf.fillna(-1)) self.assert_eq( kdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5}) ) self.assert_eq(pdf.fillna(method="ffill"), kdf.fillna(method="ffill")) self.assert_eq(pdf.fillna(method="ffill", limit=2), kdf.fillna(method="ffill", limit=2)) self.assert_eq(pdf.fillna(method="bfill"), kdf.fillna(method="bfill")) self.assert_eq(pdf.fillna(method="bfill", limit=2), kdf.fillna(method="bfill", limit=2)) pdf = pdf.set_index(["x", "y"]) kdf = ks.from_pandas(pdf) # check multi index self.assert_eq(kdf.fillna(-1), pdf.fillna(-1)) self.assert_eq(pdf.fillna(method="bfill"), kdf.fillna(method="bfill")) self.assert_eq(pdf.fillna(method="ffill"), kdf.fillna(method="ffill")) pser = pdf.z kser = kdf.z pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True) kdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True) self.assert_eq(kdf, pdf) self.assert_eq(kser, pser) s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int) self.assert_eq(kdf.fillna(s_nan), pdf.fillna(s_nan)) with self.assertRaisesRegex(NotImplementedError, "fillna currently only"): kdf.fillna(-1, axis=1) with self.assertRaisesRegex(NotImplementedError, "fillna currently only"): kdf.fillna(-1, axis="columns") with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"): kdf.fillna(-1, limit=1) with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"): kdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]})) with self.assertRaisesRegex(TypeError, "Unsupported.*numpy.int64"): kdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5}) with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."): kdf.fillna(method="xxx") with self.assertRaisesRegex( ValueError, "Must specify a fillna 'value' or 'method' parameter." 
): kdf.fillna() # multi-index columns pdf = pd.DataFrame( { ("x", "a"): [np.nan, 2, 3, 4, np.nan, 6], ("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan], ("y", "c"): [1, 2, 3, 4, np.nan, np.nan], }, index=np.random.rand(6), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.fillna(-1), pdf.fillna(-1)) self.assert_eq( kdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}), pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}), ) self.assert_eq(pdf.fillna(method="ffill"), kdf.fillna(method="ffill")) self.assert_eq(pdf.fillna(method="ffill", limit=2), kdf.fillna(method="ffill", limit=2)) self.assert_eq(pdf.fillna(method="bfill"), kdf.fillna(method="bfill")) self.assert_eq(pdf.fillna(method="bfill", limit=2), kdf.fillna(method="bfill", limit=2)) self.assert_eq(kdf.fillna({"x": -1}), pdf.fillna({"x": -1})) if sys.version_info >= (3, 6): # flaky in Python 3.5. self.assert_eq( kdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2}) ) self.assert_eq( kdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1}) ) # check multi index pdf = pdf.set_index([("x", "a"), ("x", "b")]) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.fillna(-1), pdf.fillna(-1)) self.assert_eq( kdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}), pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}), ) def test_isnull(self): pdf = pd.DataFrame( {"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6) ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.notnull(), pdf.notnull()) self.assert_eq(kdf.isnull(), pdf.isnull()) def test_to_datetime(self): pdf = pd.DataFrame( {"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2) ) kdf = ks.from_pandas(pdf) self.assert_eq(pd.to_datetime(pdf), ks.to_datetime(kdf)) def test_nunique(self): pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3)) kdf = ks.from_pandas(pdf) # Assert NaNs are dropped by default self.assert_eq(kdf.nunique(), pdf.nunique()) # Assert including NaN values self.assert_eq(kdf.nunique(dropna=False), pdf.nunique(dropna=False)) # Assert approximate counts self.assert_eq( ks.DataFrame({"A": range(100)}).nunique(approx=True), pd.Series([103], index=["A"]), ) self.assert_eq( ks.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01), pd.Series([100], index=["A"]), ) # Assert unsupported axis value yet msg = 'axis should be either 0 or "index" currently.' 
with self.assertRaisesRegex(NotImplementedError, msg): kdf.nunique(axis=1) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.nunique(), pdf.nunique()) self.assert_eq(kdf.nunique(dropna=False), pdf.nunique(dropna=False)) def test_sort_values(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7) ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.sort_values("b"), pdf.sort_values("b")) self.assert_eq(kdf.sort_values(["b", "a"]), pdf.sort_values(["b", "a"])) self.assert_eq( kdf.sort_values(["b", "a"], ascending=[False, True]), pdf.sort_values(["b", "a"], ascending=[False, True]), ) self.assertRaises(ValueError, lambda: kdf.sort_values(["b", "a"], ascending=[False])) self.assert_eq( kdf.sort_values(["b", "a"], na_position="first"), pdf.sort_values(["b", "a"], na_position="first"), ) self.assertRaises(ValueError, lambda: kdf.sort_values(["b", "a"], na_position="invalid")) pserA = pdf.a kserA = kdf.a self.assert_eq(kdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True)) self.assert_eq(kdf, pdf) self.assert_eq(kserA, pserA) columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) kdf.columns = columns self.assertRaisesRegex( ValueError, "For a multi-index, the label must be a tuple with elements", lambda: kdf.sort_values(["X"]), ) def test_sort_index(self): pdf = pd.DataFrame( {"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan] ) kdf = ks.from_pandas(pdf) # Assert invalid parameters self.assertRaises(NotImplementedError, lambda: kdf.sort_index(axis=1)) self.assertRaises(NotImplementedError, lambda: kdf.sort_index(kind="mergesort")) self.assertRaises(ValueError, lambda: kdf.sort_index(na_position="invalid")) # Assert default behavior without parameters self.assert_eq(kdf.sort_index(), pdf.sort_index()) # Assert sorting descending self.assert_eq(kdf.sort_index(ascending=False), pdf.sort_index(ascending=False)) # Assert sorting NA indices first self.assert_eq(kdf.sort_index(na_position="first"), pdf.sort_index(na_position="first")) # Assert sorting inplace pserA = pdf.A kserA = kdf.A self.assertEqual(kdf.sort_index(inplace=True), pdf.sort_index(inplace=True)) self.assert_eq(kdf, pdf) self.assert_eq(kserA, pserA) # Assert multi-indices pdf = pd.DataFrame( {"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]] ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.sort_index(), pdf.sort_index()) self.assert_eq(kdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0])) self.assert_eq(kdf.reset_index().sort_index(), pdf.reset_index().sort_index()) # Assert with multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.sort_index(), pdf.sort_index()) def test_nlargest(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7) ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a")) self.assert_eq(kdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"])) def test_nsmallest(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7) ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a")) self.assert_eq(kdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", 
"b"])) def test_xs(self): d = { "num_legs": [4, 4, 2, 2], "num_wings": [0, 0, 2, 2], "class": ["mammal", "mammal", "mammal", "bird"], "animal": ["cat", "dog", "bat", "penguin"], "locomotion": ["walks", "walks", "flies", "walks"], } pdf = pd.DataFrame(data=d) pdf = pdf.set_index(["class", "animal", "locomotion"]) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks"))) msg = "'key' should be string or tuple that contains strings" with self.assertRaisesRegex(ValueError, msg): kdf.xs(1) msg = ( "'key' should have index names as only strings " "or a tuple that contain index names as only strings" ) with self.assertRaisesRegex(ValueError, msg): kdf.xs(("mammal", 1)) msg = 'axis should be either 0 or "index" currently.' with self.assertRaisesRegex(NotImplementedError, msg): kdf.xs("num_wings", axis=1) msg = r"'Key length \(4\) exceeds index depth \(3\)'" with self.assertRaisesRegex(KeyError, msg): kdf.xs(("mammal", "dog", "walks", "foo")) def test_missing(self): kdf = self.kdf missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction) unsupported_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function" ] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kdf, name)() deprecated_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function" ] for name in deprecated_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name) ): getattr(kdf, name)() missing_properties = inspect.getmembers( _MissingPandasLikeDataFrame, lambda o: isinstance(o, property) ) unsupported_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "unsupported_property" ] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kdf, name) deprecated_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "deprecated_property" ] for name in deprecated_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name) ): getattr(kdf, name) def test_to_numpy(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 9, 4, 2, 4], "c": ["one", "three", "six", "seven", "one", "5"], }, index=np.random.rand(6), ) kdf = ks.from_pandas(pdf) np.testing.assert_equal(kdf.to_numpy(), pdf.values) def test_to_pandas(self): pdf, kdf = self.df_pair self.assert_eq(kdf.toPandas(), pdf) self.assert_eq(kdf.to_pandas(), pdf) def test_isin(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 9, 4, 2, 4], "c": ["one", "three", "six", "seven", "one", "5"], }, index=np.random.rand(6), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.isin([4, "six"]), pdf.isin([4, "six"])) self.assert_eq( kdf.isin({"a": [2, 8], "c": ["three", "one"]}), pdf.isin({"a": [2, 8], "c": ["three", "one"]}), ) msg = "'DataFrame' object has no attribute {'e'}" with self.assertRaisesRegex(AttributeError, msg): kdf.isin({"e": [5, 7], "a": [1, 6]}) msg = "DataFrame and Series are not supported" with self.assertRaisesRegex(NotImplementedError, msg): kdf.isin(pdf) msg = "Values should be iterable, Series, DataFrame or dict." 
with self.assertRaisesRegex(TypeError, msg): kdf.isin(1) def test_merge(self): left_pdf = pd.DataFrame( { "lkey": ["foo", "bar", "baz", "foo", "bar", "l"], "value": [1, 2, 3, 5, 6, 7], "x": list("abcdef"), }, columns=["lkey", "value", "x"], ) right_pdf = pd.DataFrame( { "rkey": ["baz", "foo", "bar", "baz", "foo", "r"], "value": [4, 5, 6, 7, 8, 9], "y": list("efghij"), }, columns=["rkey", "value", "y"], ) right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10]) left_kdf = ks.from_pandas(left_pdf) right_kdf = ks.from_pandas(right_pdf) right_kser = ks.from_pandas(right_ps) def check(op, right_kdf=right_kdf, right_pdf=right_pdf): k_res = op(left_kdf, right_kdf) k_res = k_res.to_pandas() k_res = k_res.sort_values(by=list(k_res.columns)) k_res = k_res.reset_index(drop=True) p_res = op(left_pdf, right_pdf) p_res = p_res.sort_values(by=list(p_res.columns)) p_res = p_res.reset_index(drop=True) self.assert_eq(k_res, p_res) check(lambda left, right: left.merge(right)) check(lambda left, right: left.merge(right, on="value")) check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey")) check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey"))) check( lambda left, right: left.set_index("lkey").merge( right, left_index=True, right_on="rkey" ) ) check( lambda left, right: left.merge( right.set_index("rkey"), left_on="lkey", right_index=True ) ) check( lambda left, right: left.set_index("lkey").merge( right.set_index("rkey"), left_index=True, right_index=True ) ) # MultiIndex check( lambda left, right: left.merge( right, left_on=["lkey", "value"], right_on=["rkey", "value"] ) ) check( lambda left, right: left.set_index(["lkey", "value"]).merge( right, left_index=True, right_on=["rkey", "value"] ) ) check( lambda left, right: left.merge( right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True ) ) # TODO: when both left_index=True and right_index=True with multi-index # check(lambda left, right: left.set_index(['lkey', 'value']).merge( # right.set_index(['rkey', 'value']), left_index=True, right_index=True)) # join types for how in ["inner", "left", "right", "outer"]: check(lambda left, right: left.merge(right, on="value", how=how)) check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how)) # suffix check( lambda left, right: left.merge( right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"] ) ) # Test Series on the right # pd.DataFrame.merge with Series is implemented since version 0.24.0 if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"): check(lambda left, right: left.merge(right), right_kser, right_ps) check( lambda left, right: left.merge(right, left_on="x", right_on="x"), right_kser, right_ps, ) check( lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"), right_kser, right_ps, ) # Test join types with Series for how in ["inner", "left", "right", "outer"]: check(lambda left, right: left.merge(right, how=how), right_kser, right_ps) check( lambda left, right: left.merge(right, left_on="x", right_on="x", how=how), right_kser, right_ps, ) # suffix with Series check( lambda left, right: left.merge( right, suffixes=["_left", "_right"], how="outer", left_index=True, right_index=True, ), right_kser, right_ps, ) # multi-index columns left_columns = pd.MultiIndex.from_tuples([("a", "lkey"), ("a", "value"), ("b", "x")]) left_pdf.columns = left_columns left_kdf.columns = left_columns right_columns = pd.MultiIndex.from_tuples([("a", "rkey"), ("a", "value"), 
("c", "y")]) right_pdf.columns = right_columns right_kdf.columns = right_columns check(lambda left, right: left.merge(right)) check(lambda left, right: left.merge(right, on=[("a", "value")])) check( lambda left, right: ( left.set_index(("a", "lkey")).merge(right.set_index(("a", "rkey"))) ) ) check( lambda left, right: ( left.set_index(("a", "lkey")).merge( right.set_index(("a", "rkey")), left_index=True, right_index=True ) ) ) # TODO: when both left_index=True and right_index=True with multi-index columns # check(lambda left, right: left.merge(right, # left_on=[('a', 'lkey')], right_on=[('a', 'rkey')])) # check(lambda left, right: (left.set_index(('a', 'lkey')) # .merge(right, left_index=True, right_on=[('a', 'rkey')]))) def test_merge_retains_indices(self): left_pdf = pd.DataFrame({"A": [0, 1]}) right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2]) left_kdf = ks.from_pandas(left_pdf) right_kdf = ks.from_pandas(right_pdf) self.assert_eq( left_kdf.merge(right_kdf, left_index=True, right_index=True), left_pdf.merge(right_pdf, left_index=True, right_index=True), ) self.assert_eq( left_kdf.merge(right_kdf, left_on="A", right_index=True), left_pdf.merge(right_pdf, left_on="A", right_index=True), ) self.assert_eq( left_kdf.merge(right_kdf, left_index=True, right_on="B"), left_pdf.merge(right_pdf, left_index=True, right_on="B"), ) self.assert_eq( left_kdf.merge(right_kdf, left_on="A", right_on="B"), left_pdf.merge(right_pdf, left_on="A", right_on="B"), ) def test_merge_how_parameter(self): left_pdf = pd.DataFrame({"A": [1, 2]}) right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2]) left_kdf = ks.from_pandas(left_pdf) right_kdf = ks.from_pandas(right_pdf) kdf = left_kdf.merge(right_kdf, left_index=True, right_index=True) pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True) self.assert_eq( kdf.sort_values(by=list(kdf.columns)).reset_index(drop=True), pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True), ) kdf = left_kdf.merge(right_kdf, left_index=True, right_index=True, how="left") pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left") self.assert_eq( kdf.sort_values(by=list(kdf.columns)).reset_index(drop=True), pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True), ) kdf = left_kdf.merge(right_kdf, left_index=True, right_index=True, how="right") pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right") self.assert_eq( kdf.sort_values(by=list(kdf.columns)).reset_index(drop=True), pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True), ) kdf = left_kdf.merge(right_kdf, left_index=True, right_index=True, how="outer") pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer") self.assert_eq( kdf.sort_values(by=list(kdf.columns)).reset_index(drop=True), pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True), ) def test_merge_raises(self): left = ks.DataFrame( {"value": [1, 2, 3, 5, 6], "x": list("abcde")}, columns=["value", "x"], index=["foo", "bar", "baz", "foo", "bar"], ) right = ks.DataFrame( {"value": [4, 5, 6, 7, 8], "y": list("fghij")}, columns=["value", "y"], index=["baz", "foo", "bar", "baz", "foo"], ) with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"): left[["x"]].merge(right[["y"]]) with self.assertRaisesRegex(ValueError, "not a combination of both"): left.merge(right, on="value", left_on="x") with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"): left.merge(right, left_on="x") with self.assertRaisesRegex(ValueError, "Must 
pass right_on or right_index=True"): left.merge(right, left_index=True) with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"): left.merge(right, right_on="y") with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"): left.merge(right, right_index=True) with self.assertRaisesRegex( ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)" ): left.merge(right, left_on="value", right_on=["value", "y"]) with self.assertRaisesRegex( ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)" ): left.merge(right, left_on=["value", "x"], right_on="value") with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"): left.merge(right, left_index=True, right_index=True, how="foo") with self.assertRaisesRegex(KeyError, "id"): left.merge(right, on="id") def test_append(self): pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")) kdf = ks.from_pandas(pdf) other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3]) other_kdf = ks.from_pandas(other_pdf) self.assert_eq(kdf.append(kdf), pdf.append(pdf)) self.assert_eq(kdf.append(kdf, ignore_index=True), pdf.append(pdf, ignore_index=True)) # Assert DataFrames with non-matching columns self.assert_eq(kdf.append(other_kdf), pdf.append(other_pdf)) # Assert appending a Series fails msg = "DataFrames.append() does not support appending Series to DataFrames" with self.assertRaises(ValueError, msg=msg): kdf.append(kdf["A"]) # Assert using the sort parameter raises an exception msg = "The 'sort' parameter is currently not supported" with self.assertRaises(NotImplementedError, msg=msg): kdf.append(kdf, sort=True) # Assert using 'verify_integrity' only raises an exception for overlapping indices self.assert_eq( kdf.append(other_kdf, verify_integrity=True), pdf.append(other_pdf, verify_integrity=True), ) msg = "Indices have overlapping values" with self.assertRaises(ValueError, msg=msg): kdf.append(kdf, verify_integrity=True) # Skip integrity verification when ignore_index=True self.assert_eq( kdf.append(kdf, ignore_index=True, verify_integrity=True), pdf.append(pdf, ignore_index=True, verify_integrity=True), ) # Assert appending multi-index DataFrames multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]]) multi_index_kdf = ks.from_pandas(multi_index_pdf) other_multi_index_pdf = pd.DataFrame( [[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]] ) other_multi_index_kdf = ks.from_pandas(other_multi_index_pdf) self.assert_eq( multi_index_kdf.append(multi_index_kdf), multi_index_pdf.append(multi_index_pdf) ) # Assert DataFrames with non-matching columns self.assert_eq( multi_index_kdf.append(other_multi_index_kdf), multi_index_pdf.append(other_multi_index_pdf), ) # Assert using 'verify_integrity' only raises an exception for overlapping indices self.assert_eq( multi_index_kdf.append(other_multi_index_kdf, verify_integrity=True), multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True), ) with self.assertRaises(ValueError, msg=msg): multi_index_kdf.append(multi_index_kdf, verify_integrity=True) # Skip integrity verification when ignore_index=True self.assert_eq( multi_index_kdf.append(multi_index_kdf, ignore_index=True, verify_integrity=True), multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True), ) # Assert trying to append DataFrames with different index levels msg = "Both DataFrames have to have the same number of index levels" with self.assertRaises(ValueError, msg=msg): 
            kdf.append(multi_index_kdf)

        # Skip index level check when ignore_index=True
        self.assert_eq(
            kdf.append(multi_index_kdf, ignore_index=True),
            pdf.append(multi_index_pdf, ignore_index=True),
        )

        columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
        pdf.columns = columns
        kdf.columns = columns

        self.assert_eq(kdf.append(kdf), pdf.append(pdf))

    def test_clip(self):
        pdf = pd.DataFrame(
            {"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
        )
        kdf = ks.from_pandas(pdf)

        # Assert list-like values are not accepted for 'lower' and 'upper'
        msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
        with self.assertRaises(ValueError, msg=msg):
            kdf.clip(lower=[1])
        with self.assertRaises(ValueError, msg=msg):
            kdf.clip(upper=[1])

        # Assert no lower or upper
        self.assert_eq(kdf.clip(), pdf.clip())
        # Assert lower only
        self.assert_eq(kdf.clip(1), pdf.clip(1))
        # Assert upper only
        self.assert_eq(kdf.clip(upper=3), pdf.clip(upper=3))
        # Assert lower and upper
        self.assert_eq(kdf.clip(1, 3), pdf.clip(1, 3))

        pdf["clip"] = pdf.A.clip(lower=1, upper=3)
        kdf["clip"] = kdf.A.clip(lower=1, upper=3)
        self.assert_eq(kdf, pdf)

        # Assert behavior on string values
        str_kdf = ks.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
        self.assert_eq(str_kdf.clip(1, 3), str_kdf)

    def test_binary_operators(self):
        pdf = pd.DataFrame(
            {"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
        )
        kdf = ks.from_pandas(pdf)

        self.assert_eq(kdf + kdf.copy(), pdf + pdf.copy())

        self.assertRaisesRegex(
            ValueError,
            "it comes from a different dataframe",
            lambda: ks.range(10).add(ks.range(10)),
        )
        self.assertRaisesRegex(
            ValueError,
            "add with a sequence is currently not supported",
            lambda: ks.range(10).add(ks.range(10).id),
        )

    def test_sample(self):
        pdf = pd.DataFrame({"A": [0, 2, 4]})
        kdf = ks.from_pandas(pdf)

        # Make sure the tests run, but we can't check the result because they are non-deterministic.
kdf.sample(frac=0.1) kdf.sample(frac=0.2, replace=True) kdf.sample(frac=0.2, random_state=5) kdf["A"].sample(frac=0.2) kdf["A"].sample(frac=0.2, replace=True) kdf["A"].sample(frac=0.2, random_state=5) with self.assertRaises(ValueError): kdf.sample() with self.assertRaises(NotImplementedError): kdf.sample(n=1) def test_add_prefix(self): pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4)) kdf = ks.from_pandas(pdf) self.assert_eq(pdf.add_prefix("col_"), kdf.add_prefix("col_")) columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) pdf.columns = columns kdf.columns = columns self.assert_eq(pdf.add_prefix("col_"), kdf.add_prefix("col_")) def test_add_suffix(self): pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4)) kdf = ks.from_pandas(pdf) self.assert_eq(pdf.add_suffix("first_series"), kdf.add_suffix("first_series")) columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) pdf.columns = columns kdf.columns = columns self.assert_eq(pdf.add_suffix("first_series"), kdf.add_suffix("first_series")) def test_join(self): # check basic function pdf1 = pd.DataFrame( {"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"] ) pdf2 = pd.DataFrame( {"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"] ) kdf1 = ks.DataFrame( {"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"] ) kdf2 = ks.DataFrame( {"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"] ) ks1 = ks.Series(["A1", "A5"], index=[1, 2], name="A") join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right") join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_kdf = kdf1.join(kdf2, lsuffix="_left", rsuffix="_right") join_kdf.sort_values(by=list(join_kdf.columns), inplace=True) self.assert_eq(join_pdf, join_kdf) # join with duplicated columns in Series with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"): kdf1.join(ks1, how="outer") # join with duplicated columns in DataFrame with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"): kdf1.join(kdf2, how="outer") # check `on` parameter join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right") join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_kdf = kdf1.join(kdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right") join_kdf.sort_values(by=list(join_kdf.columns), inplace=True) self.assert_eq(join_pdf.reset_index(drop=True), join_kdf.reset_index(drop=True)) # multi-index columns columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")]) columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")]) pdf1.columns = columns1 pdf2.columns = columns2 kdf1.columns = columns1 kdf2.columns = columns2 join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right") join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_kdf = kdf1.join(kdf2, lsuffix="_left", rsuffix="_right") join_kdf.sort_values(by=list(join_kdf.columns), inplace=True) self.assert_eq(join_pdf, join_kdf) # check `on` parameter join_pdf = pdf1.join( pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right" ) join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_kdf = kdf1.join( kdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right" ) join_kdf.sort_values(by=list(join_kdf.columns), inplace=True) self.assert_eq(join_pdf.reset_index(drop=True), 
join_kdf.reset_index(drop=True)) def test_replace(self): pdf = pd.DataFrame( { "name": ["Ironman", "Captain America", "Thor", "Hulk"], "weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"], }, index=np.random.rand(4), ) kdf = ks.from_pandas(pdf) with self.assertRaisesRegex( NotImplementedError, "replace currently works only for method='pad" ): kdf.replace(method="bfill") with self.assertRaisesRegex( NotImplementedError, "replace currently works only when limit=None" ): kdf.replace(limit=10) with self.assertRaisesRegex( NotImplementedError, "replace currently doesn't supports regex" ): kdf.replace(regex="") with self.assertRaisesRegex(TypeError, "Unsupported type <class 'tuple'>"): kdf.replace(value=(1, 2, 3)) with self.assertRaisesRegex(TypeError, "Unsupported type <class 'tuple'>"): kdf.replace(to_replace=(1, 2, 3)) with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"): kdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"]) self.assert_eq(kdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman")) self.assert_eq( kdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]), pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]), ) # inplace pser = pdf.name kser = kdf.name pdf.replace("Ironman", "Spiderman", inplace=True) kdf.replace("Ironman", "Spiderman", inplace=True) self.assert_eq(kdf, pdf) self.assert_eq(kser, pser) pdf = pd.DataFrame( {"A": [0, 1, 2, 3, 4], "B": [5, 6, 7, 8, 9], "C": ["a", "b", "c", "d", "e"]}, index=np.random.rand(5), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4)) self.assert_eq( kdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]), pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]), ) self.assert_eq(kdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200})) self.assert_eq(kdf.replace({"A": 0, "B": 5}, 100), pdf.replace({"A": 0, "B": 5}, 100)) self.assert_eq(kdf.replace({"A": {0: 100, 4: 400}}), pdf.replace({"A": {0: 100, 4: 400}})) self.assert_eq(kdf.replace({"X": {0: 100, 4: 400}}), pdf.replace({"X": {0: 100, 4: 400}})) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4)) self.assert_eq( kdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]), pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]), ) self.assert_eq(kdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200})) self.assert_eq( kdf.replace({("X", "A"): 0, ("X", "B"): 5}, 100), pdf.replace({("X", "A"): 0, ("X", "B"): 5}, 100), ) self.assert_eq( kdf.replace({("X", "A"): {0: 100, 4: 400}}), pdf.replace({("X", "A"): {0: 100, 4: 400}}) ) self.assert_eq( kdf.replace({("X", "B"): {0: 100, 4: 400}}), pdf.replace({("X", "B"): {0: 100, 4: 400}}) ) def test_update(self): # check base function def get_data(left_columns=None, right_columns=None): left_pdf = pd.DataFrame( {"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"] ) right_pdf = pd.DataFrame( {"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]}, columns=["B", "C"], ) left_kdf = ks.DataFrame( {"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"] ) right_kdf = ks.DataFrame( {"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"] ) if left_columns is not None: left_pdf.columns = left_columns left_kdf.columns = 
left_columns if right_columns is not None: right_pdf.columns = right_columns right_kdf.columns = right_columns return left_kdf, left_pdf, right_kdf, right_pdf left_kdf, left_pdf, right_kdf, right_pdf = get_data() pser = left_pdf.B kser = left_kdf.B left_pdf.update(right_pdf) left_kdf.update(right_kdf) self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_kdf.sort_values(by=["A", "B"])) self.assert_eq(kser.sort_index(), pser.sort_index()) left_kdf, left_pdf, right_kdf, right_pdf = get_data() left_pdf.update(right_pdf, overwrite=False) left_kdf.update(right_kdf, overwrite=False) self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_kdf.sort_values(by=["A", "B"])) with self.assertRaises(NotImplementedError): left_kdf.update(right_kdf, join="right") # multi-index columns left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")]) left_kdf, left_pdf, right_kdf, right_pdf = get_data( left_columns=left_columns, right_columns=right_columns ) left_pdf.update(right_pdf) left_kdf.update(right_kdf) self.assert_eq( left_pdf.sort_values(by=[("X", "A"), ("X", "B")]), left_kdf.sort_values(by=[("X", "A"), ("X", "B")]), ) left_kdf, left_pdf, right_kdf, right_pdf = get_data( left_columns=left_columns, right_columns=right_columns ) left_pdf.update(right_pdf, overwrite=False) left_kdf.update(right_kdf, overwrite=False) self.assert_eq( left_pdf.sort_values(by=[("X", "A"), ("X", "B")]), left_kdf.sort_values(by=[("X", "A"), ("X", "B")]), ) right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")]) left_kdf, left_pdf, right_kdf, right_pdf = get_data( left_columns=left_columns, right_columns=right_columns ) left_pdf.update(right_pdf) left_kdf.update(right_kdf) self.assert_eq( left_pdf.sort_values(by=[("X", "A"), ("X", "B")]), left_kdf.sort_values(by=[("X", "A"), ("X", "B")]), ) def test_pivot_table_dtypes(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 2, 4, 2, 4], "e": [1, 2, 2, 4, 2, 4], "c": [1, 2, 9, 4, 7, 4], }, index=np.random.rand(6), ) kdf = ks.from_pandas(pdf) # Skip columns comparison by reset_index res_df = kdf.pivot_table( index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"} ).dtypes.reset_index(drop=True) exp_df = pdf.pivot_table( index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"} ).dtypes.reset_index(drop=True) self.assert_eq(res_df, exp_df) # Results don't have the same column's name # Todo: self.assert_eq(kdf.pivot_table(columns="a", values="b").dtypes, # pdf.pivot_table(columns="a", values="b").dtypes) # Todo: self.assert_eq(kdf.pivot_table(index=['c'], columns="a", values="b").dtypes, # pdf.pivot_table(index=['c'], columns="a", values="b").dtypes) # Todo: self.assert_eq(kdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes, # pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes) # Todo: self.assert_eq(kdf.pivot_table(index=['e', 'c'], # columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'], # columns="a", values="b", fill_value=999).dtypes) def test_pivot_table(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 2, 4, 2, 4], "e": [10, 20, 20, 40, 20, 40], "c": [1, 2, 9, 4, 7, 4], "d": [-1, -2, -3, -4, -5, -6], }, index=np.random.rand(6), ) kdf = ks.from_pandas(pdf) # Checking if both DataFrames have the same results self.assert_eq( kdf.pivot_table(columns="a", values="b").sort_index(), pdf.pivot_table(columns="a", values="b").sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table(index=["c"], 
columns="a", values="b").sort_index(), pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(), pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(), pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table( index=["c"], columns="a", values=["b", "e"], aggfunc="sum" ).sort_index(), pdf.pivot_table( index=["c"], columns="a", values=["b", "e"], aggfunc="sum" ).sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table( index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum" ).sort_index(), pdf.pivot_table( index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum" ).sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table( index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"} ).sort_index(), pdf.pivot_table( index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"} ).sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(), pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(), pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(), almost=True, ) # multi-index columns columns = pd.MultiIndex.from_tuples( [("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")] ) pdf.columns = columns kdf.columns = columns self.assert_eq( kdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(), pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table( index=[("z", "c")], columns=("x", "a"), values=[("x", "b")] ).sort_index(), pdf.pivot_table( index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")] ).sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table( index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")] ).sort_index(), pdf.pivot_table( index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")] ).sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table( index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")] ).sort_index(), pdf.pivot_table( index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e"), ("w", "d")], ).sort_index(), almost=True, ) self.assert_eq( kdf.pivot_table( index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")], aggfunc={("x", "b"): "mean", ("y", "e"): "sum"}, ).sort_index(), pdf.pivot_table( index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")], aggfunc={("x", "b"): "mean", ("y", "e"): "sum"}, ).sort_index(), almost=True, ) def test_pivot_table_and_index(self): # https://github.com/databricks/koalas/issues/805 pdf = pd.DataFrame( { "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], "C": [ "small", "large", "large", "small", "small", "large", "small", "small", "large", ], "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], }, columns=["A", "B", "C", "D", "E"], index=np.random.rand(9), ) kdf = ks.from_pandas(pdf) ptable = pdf.pivot_table( values="D", 
            index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
        ).sort_index()
        ktable = kdf.pivot_table(
            values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
        ).sort_index()
        self.assert_eq(ktable, ptable)
        self.assert_eq(ktable.index, ptable.index)
        self.assert_eq(repr(ktable.index), repr(ptable.index))

    @unittest.skipIf(
        LooseVersion(pyspark.__version__) < LooseVersion("2.4"),
        "stack won't work properly with PySpark<2.4",
    )
    def test_stack(self):
        pdf_single_level_cols = pd.DataFrame(
            [[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
        )
        kdf_single_level_cols = ks.from_pandas(pdf_single_level_cols)

        self.assert_eq(
            kdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
        )

        multicol1 = pd.MultiIndex.from_tuples(
            [("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
        )
        pdf_multi_level_cols1 = pd.DataFrame(
            [[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
        )
        kdf_multi_level_cols1 = ks.from_pandas(pdf_multi_level_cols1)

        self.assert_eq(
            kdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
        )

        multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
        pdf_multi_level_cols2 = pd.DataFrame(
            [[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
        )
        kdf_multi_level_cols2 = ks.from_pandas(pdf_multi_level_cols2)

        self.assert_eq(
            kdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
        )

        pdf = pd.DataFrame(
            {
                ("y", "c"): [True, True],
                ("x", "b"): [False, False],
                ("x", "c"): [True, False],
                ("y", "a"): [False, True],
            }
        )
        kdf = ks.from_pandas(pdf)

        self.assert_eq(kdf.stack().sort_index(), pdf.stack().sort_index())
        self.assert_eq(kdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)

    def test_unstack(self):
        pdf = pd.DataFrame(
            np.random.randn(3, 3),
            index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
        )
        kdf = ks.from_pandas(pdf)

        self.assert_eq(kdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)

    def test_pivot_errors(self):
        kdf = ks.range(10)

        with self.assertRaisesRegex(ValueError, "columns should be set"):
            kdf.pivot(index="id")

        with self.assertRaisesRegex(ValueError, "values should be set"):
            kdf.pivot(index="id", columns="id")

    def test_pivot_table_errors(self):
        pdf = pd.DataFrame(
            {
                "a": [4, 2, 3, 4, 8, 6],
                "b": [1, 2, 2, 4, 2, 4],
                "e": [1, 2, 2, 4, 2, 4],
                "c": [1, 2, 9, 4, 7, 4],
            },
            index=np.random.rand(6),
        )
        kdf = ks.from_pandas(pdf)

        msg = "values should be string or list of one column."
        with self.assertRaisesRegex(ValueError, msg):
            kdf.pivot_table(index=["c"], columns="a", values=5)

        msg = "index should be a None or a list of columns."
        with self.assertRaisesRegex(ValueError, msg):
            kdf.pivot_table(index="c", columns="a", values="b")

        msg = "pivot_table doesn't support aggfunc as dict and without index."
        with self.assertRaisesRegex(NotImplementedError, msg):
            kdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})

        msg = "columns should be string."
        with self.assertRaisesRegex(ValueError, msg):
            kdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})

        msg = "Columns in aggfunc must be the same as values."
        with self.assertRaisesRegex(ValueError, msg):
            kdf.pivot_table(
                index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
            )

        msg = "values can't be a list without index."
        with self.assertRaisesRegex(NotImplementedError, msg):
            kdf.pivot_table(columns="a", values=["b", "e"])

        msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg): kdf.pivot_table( index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"} ) kdf = ks.DataFrame( { "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], "C": [ "small", "large", "large", "small", "small", "large", "small", "small", "large", ], "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], }, columns=["A", "B", "C", "D", "E"], index=np.random.rand(9), ) msg = "values should be a numeric type." with self.assertRaisesRegex(TypeError, msg): kdf.pivot_table( index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"} ) msg = "values should be a numeric type." with self.assertRaisesRegex(TypeError, msg): kdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"}) def test_transpose(self): # TODO: what if with random index? pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"]) kdf1 = ks.from_pandas(pdf1) pdf2 = pd.DataFrame( data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]}, columns=["score", "kids", "age"], ) kdf2 = ks.from_pandas(pdf2) self.assert_eq( pdf1.transpose().sort_index().rename(columns=str), kdf1.transpose().sort_index() ) self.assert_eq( pdf2.transpose().sort_index().rename(columns=str), kdf2.transpose().sort_index() ) with option_context("compute.max_rows", None): self.assert_eq( pdf1.transpose().sort_index().rename(columns=str), kdf1.transpose().sort_index() ) self.assert_eq( pdf2.transpose().sort_index().rename(columns=str), kdf2.transpose().sort_index() ) pdf3 = pd.DataFrame( { ("cg1", "a"): [1, 2, 3], ("cg1", "b"): [4, 5, 6], ("cg2", "c"): [7, 8, 9], ("cg3", "d"): [9, 9, 9], }, index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]), ) kdf3 = ks.from_pandas(pdf3) self.assert_eq(pdf3.transpose().sort_index(), kdf3.transpose().sort_index()) with option_context("compute.max_rows", None): self.assert_eq(pdf3.transpose().sort_index(), kdf3.transpose().sort_index()) def _test_cummin(self, pdf, kdf): self.assert_eq(pdf.cummin(), kdf.cummin()) self.assert_eq(pdf.cummin(skipna=False), kdf.cummin(skipna=False)) self.assert_eq(pdf.cummin().sum(), kdf.cummin().sum()) def test_cummin(self): pdf = pd.DataFrame( [[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]], columns=list("AB"), index=np.random.rand(5), ) kdf = ks.from_pandas(pdf) self._test_cummin(pdf, kdf) def test_cummin_multiindex_columns(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays) pdf.at["C", ("A", "two")] = None kdf = ks.from_pandas(pdf) self._test_cummin(pdf, kdf) def _test_cummax(self, pdf, kdf): self.assert_eq(pdf.cummax(), kdf.cummax()) self.assert_eq(pdf.cummax(skipna=False), kdf.cummax(skipna=False)) self.assert_eq(pdf.cummax().sum(), kdf.cummax().sum()) def test_cummax(self): pdf = pd.DataFrame( [[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]], columns=list("AB"), index=np.random.rand(5), ) kdf = ks.from_pandas(pdf) self._test_cummax(pdf, kdf) def test_cummax_multiindex_columns(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays) pdf.at["C", ("A", "two")] = None kdf = ks.from_pandas(pdf) self._test_cummax(pdf, kdf) def _test_cumsum(self, pdf, kdf): self.assert_eq(pdf.cumsum(), kdf.cumsum()) 
self.assert_eq(pdf.cumsum(skipna=False), kdf.cumsum(skipna=False)) self.assert_eq(pdf.cumsum().sum(), kdf.cumsum().sum()) def test_cumsum(self): pdf = pd.DataFrame( [[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]], columns=list("AB"), index=np.random.rand(5), ) kdf = ks.from_pandas(pdf) self._test_cumsum(pdf, kdf) def test_cumsum_multiindex_columns(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays) pdf.at["C", ("A", "two")] = None kdf = ks.from_pandas(pdf) self._test_cumsum(pdf, kdf) def _test_cumprod(self, pdf, kdf): self.assert_eq(pdf.cumprod(), kdf.cumprod(), almost=True) self.assert_eq(pdf.cumprod(skipna=False), kdf.cumprod(skipna=False), almost=True) self.assert_eq(pdf.cumprod().sum(), kdf.cumprod().sum(), almost=True) def test_cumprod(self): pdf = pd.DataFrame( [[2.0, 1.0], [5, None], [1.0, 1.0], [2.0, 4.0], [4.0, 9.0]], columns=list("AB"), index=np.random.rand(5), ) kdf = ks.from_pandas(pdf) self._test_cumprod(pdf, kdf) def test_cumprod_multiindex_columns(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays) pdf.at["C", ("A", "two")] = None kdf = ks.from_pandas(pdf) self._test_cumprod(pdf, kdf) def test_drop_duplicates(self): pdf = pd.DataFrame( {"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5) ) kdf = ks.from_pandas(pdf) # inplace is False for keep in ["first", "last", False]: with self.subTest(keep=keep): self.assert_eq( pdf.drop_duplicates(keep=keep).sort_index(), kdf.drop_duplicates(keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates("a", keep=keep).sort_index(), kdf.drop_duplicates("a", keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(), kdf.drop_duplicates(["a", "b"], keep=keep).sort_index(), ) self.assert_eq( pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(), kdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(), ) self.assert_eq( pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(), kdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(), ) columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")]) pdf.columns = columns kdf.columns = columns # inplace is False for keep in ["first", "last", False]: with self.subTest("multi-index columns", keep=keep): self.assert_eq( pdf.drop_duplicates(keep=keep).sort_index(), kdf.drop_duplicates(keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(), kdf.drop_duplicates(("x", "a"), keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(), kdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(), ) # inplace is True subset_list = [None, "a", ["a", "b"]] for subset in subset_list: pdf = pd.DataFrame( {"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5) ) kdf = ks.from_pandas(pdf) pser = pdf.a kser = kdf.a pdf.drop_duplicates(subset=subset, inplace=True) kdf.drop_duplicates(subset=subset, inplace=True) self.assert_eq(kdf.sort_index(), pdf.sort_index()) self.assert_eq(kser.sort_index(), pser.sort_index()) # multi-index columns, inplace is True subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]] for subset in subset_list: pdf = pd.DataFrame( {"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", 
"c", "d"]}, index=np.random.rand(5) ) kdf = ks.from_pandas(pdf) columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")]) pdf.columns = columns kdf.columns = columns pser = pdf[("x", "a")] kser = kdf[("x", "a")] pdf.drop_duplicates(subset=subset, inplace=True) kdf.drop_duplicates(subset=subset, inplace=True) self.assert_eq(kdf.sort_index(), pdf.sort_index()) self.assert_eq(kser.sort_index(), pser.sort_index()) def test_reindex(self): index = ["A", "B", "C", "D", "E"] pdf = pd.DataFrame({"numbers": [1.0, 2.0, 3.0, 4.0, None]}, index=index) kdf = ks.from_pandas(pdf) self.assert_eq( pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(), kdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(), ) self.assert_eq( pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(), kdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(), ) self.assert_eq( pdf.reindex(index=["A", "B"]).sort_index(), kdf.reindex(index=["A", "B"]).sort_index() ) self.assert_eq( pdf.reindex(index=["A", "B", "2", "3"]).sort_index(), kdf.reindex(index=["A", "B", "2", "3"]).sort_index(), ) self.assert_eq( pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(), kdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(), ) self.assert_eq( pdf.reindex(columns=["numbers"]).sort_index(), kdf.reindex(columns=["numbers"]).sort_index(), ) # Using float as fill_value to avoid int64/32 clash self.assert_eq( pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(), kdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(), ) self.assertRaises(TypeError, lambda: kdf.reindex(columns=["numbers", "2", "3"], axis=1)) self.assertRaises(TypeError, lambda: kdf.reindex(columns=["numbers", "2", "3"], axis=2)) self.assertRaises(TypeError, lambda: kdf.reindex(index=["A", "B", "C"], axis=1)) self.assertRaises(TypeError, lambda: kdf.reindex(index=123)) columns = pd.MultiIndex.from_tuples([("X", "numbers")]) pdf.columns = columns kdf.columns = columns self.assert_eq( pdf.reindex(columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")]).sort_index(), kdf.reindex(columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")]).sort_index(), ) # Using float as fill_value to avoid int64/32 clash self.assert_eq( pdf.reindex( columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=0.0 ).sort_index(), kdf.reindex( columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=0.0 ).sort_index(), ) self.assertRaises(TypeError, lambda: kdf.reindex(columns=["X"])) self.assertRaises(ValueError, lambda: kdf.reindex(columns=[("X",)])) def test_melt(self): pdf = pd.DataFrame( {"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3) ) kdf = ks.from_pandas(pdf) self.assert_eq( kdf.melt().sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt().sort_values(["variable", "value"]), ) self.assert_eq( kdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt(id_vars="A").sort_values(["variable", "value"]), ) self.assert_eq( kdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]), ) self.assert_eq( kdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]), ) self.assert_eq( kdf.melt(id_vars=["A"], value_vars=["C"]) .sort_values(["variable", "value"]) .reset_index(drop=True), pdf.melt(id_vars=["A"], 
value_vars=["C"]).sort_values(["variable", "value"]), ) self.assert_eq( kdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname") .sort_values(["myVarname", "myValname"]) .reset_index(drop=True), pdf.melt( id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname" ).sort_values(["myVarname", "myValname"]), ) self.assert_eq( kdf.melt(value_vars=("A", "B")) .sort_values(["variable", "value"]) .reset_index(drop=True), pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]), ) self.assertRaises(KeyError, lambda: kdf.melt(id_vars="Z")) self.assertRaises(KeyError, lambda: kdf.melt(value_vars="Z")) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) pdf.columns = columns kdf.columns = columns self.assert_eq( kdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True), pdf.melt().sort_values(["variable_0", "variable_1", "value"]), ) self.assert_eq( kdf.melt(id_vars=[("X", "A")]) .sort_values(["variable_0", "variable_1", "value"]) .reset_index(drop=True), pdf.melt(id_vars=[("X", "A")]).sort_values(["variable_0", "variable_1", "value"]), almost=True, ) self.assert_eq( kdf.melt(id_vars=[("X", "A")], value_vars=[("Y", "C")]) .sort_values(["variable_0", "variable_1", "value"]) .reset_index(drop=True), pdf.melt(id_vars=[("X", "A")], value_vars=[("Y", "C")]).sort_values( ["variable_0", "variable_1", "value"] ), almost=True, ) self.assert_eq( kdf.melt( id_vars=[("X", "A")], value_vars=[("X", "B")], var_name=["myV1", "myV2"], value_name="myValname", ) .sort_values(["myV1", "myV2", "myValname"]) .reset_index(drop=True), pdf.melt( id_vars=[("X", "A")], value_vars=[("X", "B")], var_name=["myV1", "myV2"], value_name="myValname", ).sort_values(["myV1", "myV2", "myValname"]), almost=True, ) columns.names = ["v0", "v1"] pdf.columns = columns kdf.columns = columns self.assert_eq( kdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True), pdf.melt().sort_values(["v0", "v1", "value"]), ) self.assertRaises(ValueError, lambda: kdf.melt(id_vars=("X", "A"))) self.assertRaises(ValueError, lambda: kdf.melt(value_vars=("X", "A"))) self.assertRaises(KeyError, lambda: kdf.melt(id_vars=[("Y", "A")])) self.assertRaises(KeyError, lambda: kdf.melt(value_vars=[("Y", "A")])) def test_all(self): pdf = pd.DataFrame( { "col1": [False, False, False], "col2": [True, False, False], "col3": [0, 0, 1], "col4": [0, 1, 2], "col5": [False, False, None], "col6": [True, False, None], }, index=np.random.rand(3), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.all(), pdf.all()) columns = pd.MultiIndex.from_tuples( [ ("a", "col1"), ("a", "col2"), ("a", "col3"), ("b", "col4"), ("b", "col5"), ("c", "col6"), ] ) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.all(), pdf.all()) columns.names = ["X", "Y"] pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.all(), pdf.all()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' 
): kdf.all(axis=1) def test_any(self): pdf = pd.DataFrame( { "col1": [False, False, False], "col2": [True, False, False], "col3": [0, 0, 1], "col4": [0, 1, 2], "col5": [False, False, None], "col6": [True, False, None], }, index=np.random.rand(3), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.any(), pdf.any()) columns = pd.MultiIndex.from_tuples( [ ("a", "col1"), ("a", "col2"), ("a", "col3"), ("b", "col4"), ("b", "col5"), ("c", "col6"), ] ) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.any(), pdf.any()) columns.names = ["X", "Y"] pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.any(), pdf.any()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' ): kdf.any(axis=1) def test_rank(self): pdf = pd.DataFrame( data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]}, columns=["col1", "col2"], index=np.random.rand(4), ) kdf = ks.from_pandas(pdf) self.assert_eq(pdf.rank().sort_index(), kdf.rank().sort_index()) self.assert_eq( pdf.rank(ascending=False).sort_index(), kdf.rank(ascending=False).sort_index() ) self.assert_eq(pdf.rank(method="min").sort_index(), kdf.rank(method="min").sort_index()) self.assert_eq(pdf.rank(method="max").sort_index(), kdf.rank(method="max").sort_index()) self.assert_eq(pdf.rank(method="first").sort_index(), kdf.rank(method="first").sort_index()) self.assert_eq(pdf.rank(method="dense").sort_index(), kdf.rank(method="dense").sort_index()) msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'" with self.assertRaisesRegex(ValueError, msg): kdf.rank(method="nothing") # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")]) pdf.columns = columns kdf.columns = columns self.assert_eq(pdf.rank().sort_index(), kdf.rank().sort_index()) def test_round(self): pdf = pd.DataFrame( { "A": [0.028208, 0.038683, 0.877076], "B": [0.992815, 0.645646, 0.149370], "C": [0.173891, 0.577595, 0.491027], }, columns=["A", "B", "C"], index=np.random.rand(3), ) kdf = ks.from_pandas(pdf) pser = pd.Series([1, 0, 2], index=["A", "B", "C"]) kser = ks.Series([1, 0, 2], index=["A", "B", "C"]) self.assert_eq(pdf.round(2), kdf.round(2)) self.assert_eq(pdf.round({"A": 1, "C": 2}), kdf.round({"A": 1, "C": 2})) self.assert_eq(pdf.round({"A": 1, "D": 2}), kdf.round({"A": 1, "D": 2})) self.assert_eq(pdf.round(pser), kdf.round(kser)) msg = "decimals must be an integer, a dict-like or a Series" with self.assertRaisesRegex(ValueError, msg): kdf.round(1.5) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) pdf.columns = columns kdf.columns = columns pser = pd.Series([1, 0, 2], index=columns) kser = ks.Series([1, 0, 2], index=columns) self.assert_eq(pdf.round(2), kdf.round(2)) self.assert_eq( pdf.round({("X", "A"): 1, ("Y", "C"): 2}), kdf.round({("X", "A"): 1, ("Y", "C"): 2}) ) self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), kdf.round({("X", "A"): 1, "Y": 2})) self.assert_eq(pdf.round(pser), kdf.round(kser)) def test_shift(self): pdf = pd.DataFrame( { "Col1": [10, 20, 15, 30, 45], "Col2": [13, 23, 18, 33, 48], "Col3": [17, 27, 22, 37, 52], }, index=np.random.rand(5), ) kdf = ks.from_pandas(pdf) self.assert_eq(pdf.shift(3), kdf.shift(3)) # Need the expected result since pandas 0.23 does not support `fill_value` argument. 
        pdf1 = pd.DataFrame(
            {"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
            index=pdf.index,
        )
        self.assert_eq(pdf1, kdf.shift(periods=3, fill_value=0))

        msg = "should be an int"
        with self.assertRaisesRegex(ValueError, msg):
            kdf.shift(1.5)

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
        pdf.columns = columns
        kdf.columns = columns
        self.assert_eq(pdf.shift(3), kdf.shift(3))

    def test_diff(self):
        pdf = pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
            index=np.random.rand(6),
        )
        kdf = ks.from_pandas(pdf)

        self.assert_eq(pdf.diff(), kdf.diff())

        msg = "should be an int"
        with self.assertRaisesRegex(ValueError, msg):
            kdf.diff(1.5)
        msg = 'axis should be either 0 or "index" currently.'
        with self.assertRaisesRegex(NotImplementedError, msg):
            kdf.diff(axis=1)

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
        pdf.columns = columns
        kdf.columns = columns
        self.assert_eq(pdf.diff(), kdf.diff())

    def test_duplicated(self):
        pdf = pd.DataFrame(
            {"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
        )
        kdf = ks.from_pandas(pdf)

        self.assert_eq(pdf.duplicated().sort_index(), kdf.duplicated().sort_index())
        self.assert_eq(
            pdf.duplicated(keep="last").sort_index(), kdf.duplicated(keep="last").sort_index(),
        )
        self.assert_eq(
            pdf.duplicated(keep=False).sort_index(), kdf.duplicated(keep=False).sort_index(),
        )
        self.assert_eq(
            pdf.duplicated(subset=["b"]).sort_index(), kdf.duplicated(subset=["b"]).sort_index(),
        )
        with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
            kdf.duplicated(keep="false")
        with self.assertRaisesRegex(KeyError, "'d'"):
            kdf.duplicated(subset=["d"])

        pdf.index.name = "x"
        kdf.index.name = "x"
        self.assert_eq(pdf.duplicated().sort_index(), kdf.duplicated().sort_index())

        # multi-index
        self.assert_eq(
            pdf.set_index("a", append=True).duplicated().sort_index(),
            kdf.set_index("a", append=True).duplicated().sort_index(),
        )
        self.assert_eq(
            pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
            kdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
        )
        self.assert_eq(
            pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
            kdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
        )

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
        pdf.columns = columns
        kdf.columns = columns
        self.assert_eq(pdf.duplicated().sort_index(), kdf.duplicated().sort_index())
        self.assert_eq(
            pdf.duplicated(subset=[("x", "b")]).sort_index(),
            kdf.duplicated(subset=[("x", "b")]).sort_index(),
        )

    def test_ffill(self):
        idx = np.random.rand(6)
        pdf = pd.DataFrame(
            {
                "x": [np.nan, 2, 3, 4, np.nan, 6],
                "y": [1, 2, np.nan, 4, np.nan, np.nan],
                "z": [1, 2, 3, 4, np.nan, np.nan],
            },
            index=idx,
        )
        kdf = ks.from_pandas(pdf)

        self.assert_eq(kdf.ffill(), pdf.ffill())
        self.assert_eq(kdf.ffill(limit=1), pdf.ffill(limit=1))

        pser = pdf.y
        kser = kdf.y

        kdf.ffill(inplace=True)
        pdf.ffill(inplace=True)
        self.assert_eq(kdf, pdf)
        self.assert_eq(kser, pser)
        self.assert_eq(kser[idx[2]], pser[idx[2]])

    def test_bfill(self):
        idx = np.random.rand(6)
        pdf = pd.DataFrame(
            {
                "x": [np.nan, 2, 3, 4, np.nan, 6],
                "y": [1, 2, np.nan, 4, np.nan, np.nan],
                "z": [1, 2, 3, 4, np.nan, np.nan],
            },
            index=idx,
        )
        kdf = ks.from_pandas(pdf)

        self.assert_eq(kdf.bfill(), pdf.bfill())
        self.assert_eq(kdf.bfill(limit=1), pdf.bfill(limit=1))

        pser = pdf.x
        kser = kdf.x
kdf.bfill(inplace=True) pdf.bfill(inplace=True) self.assert_eq(kdf, pdf) self.assert_eq(kser, pser) self.assert_eq(kser[idx[0]], pser[idx[0]]) def test_filter(self): pdf = pd.DataFrame( { "aa": ["aa", "bd", "bc", "ab", "ce"], "ba": [1, 2, 3, 4, 5], "cb": [1.0, 2.0, 3.0, 4.0, 5.0], "db": [1.0, np.nan, 3.0, np.nan, 5.0], } ) pdf = pdf.set_index("aa") kdf = ks.from_pandas(pdf) self.assert_eq( kdf.filter(items=["ab", "aa"], axis=0).sort_index(), pdf.filter(items=["ab", "aa"], axis=0).sort_index(), ) self.assert_eq( kdf.filter(items=["ba", "db"], axis=1).sort_index(), pdf.filter(items=["ba", "db"], axis=1).sort_index(), ) self.assert_eq(kdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index")) self.assert_eq(kdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns")) self.assert_eq(kdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")) self.assert_eq( kdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns") ) pdf = pdf.set_index("ba", append=True) kdf = ks.from_pandas(pdf) self.assert_eq( kdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(), pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(), ) with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"): kdf.filter(items=[["aa", 1], ("bd", 2)], axis=0) with self.assertRaisesRegex(ValueError, "The item should not be empty."): kdf.filter(items=[(), ("bd", 2)], axis=0) self.assert_eq(kdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0)) self.assert_eq(kdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0)) with self.assertRaisesRegex(ValueError, "items should be a list-like object"): kdf.filter(items="b") with self.assertRaisesRegex(ValueError, "No axis named"): kdf.filter(regex="b.*", axis=123) with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"): kdf.filter() with self.assertRaisesRegex(TypeError, "mutually exclusive"): kdf.filter(regex="b.*", like="aaa") # multi-index columns pdf = pd.DataFrame( { ("x", "aa"): ["aa", "ab", "bc", "bd", "ce"], ("x", "ba"): [1, 2, 3, 4, 5], ("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0], ("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0], } ) pdf = pdf.set_index(("x", "aa")) kdf = ks.from_pandas(pdf) self.assert_eq( kdf.filter(items=["ab", "aa"], axis=0).sort_index(), pdf.filter(items=["ab", "aa"], axis=0).sort_index(), ) self.assert_eq( kdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(), pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(), ) self.assert_eq(kdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index")) self.assert_eq(kdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns")) self.assert_eq(kdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")) self.assert_eq( kdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns") ) def test_pipe(self): kdf = ks.DataFrame( {"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]}, columns=["category", "col1", "col2"], ) self.assertRaisesRegex( ValueError, "arg is both the pipe target and a keyword argument", lambda: kdf.pipe((lambda x: x, "arg"), arg="1"), ) def test_transform(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 100, "b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100, "c": [1, 4, 9, 16, 25, 36] * 100, }, columns=["a", "b", "c"], index=np.random.rand(600), ) kdf = ks.DataFrame(pdf) self.assert_eq( kdf.transform(lambda x: x + 1).sort_index(), pdf.transform(lambda x: x + 1).sort_index() ) self.assert_eq( 
kdf.transform(lambda x, y: x + y, y=2).sort_index(), pdf.transform(lambda x, y: x + y, y=2).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.transform(lambda x: x + 1).sort_index(), pdf.transform(lambda x: x + 1).sort_index(), ) self.assert_eq( kdf.transform(lambda x, y: x + y, y=1).sort_index(), pdf.transform(lambda x, y: x + y, y=1).sort_index(), ) with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"): kdf.transform(1) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) pdf.columns = columns kdf.columns = columns self.assert_eq( kdf.transform(lambda x: x + 1).sort_index(), pdf.transform(lambda x: x + 1).sort_index() ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.transform(lambda x: x + 1).sort_index(), pdf.transform(lambda x: x + 1).sort_index(), ) def test_apply(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 100, "b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100, "c": [1, 4, 9, 16, 25, 36] * 100, }, columns=["a", "b", "c"], index=np.random.rand(600), ) kdf = ks.DataFrame(pdf) self.assert_eq( kdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index() ) self.assert_eq( kdf.apply(lambda x, b: x + b, args=(1,)).sort_index(), pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(), ) self.assert_eq( kdf.apply(lambda x, b: x + b, b=1).sort_index(), pdf.apply(lambda x, b: x + b, b=1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index() ) self.assert_eq( kdf.apply(lambda x, b: x + b, args=(1,)).sort_index(), pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(), ) self.assert_eq( kdf.apply(lambda x, b: x + b, b=1).sort_index(), pdf.apply(lambda x, b: x + b, b=1).sort_index(), ) # returning a Series self.assert_eq( kdf.apply(lambda x: len(x), axis=1).sort_index(), pdf.apply(lambda x: len(x), axis=1).sort_index(), ) self.assert_eq( kdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(), pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.apply(lambda x: len(x), axis=1).sort_index(), pdf.apply(lambda x: len(x), axis=1).sort_index(), ) self.assert_eq( kdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(), pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(), ) with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"): kdf.apply(1) with self.assertRaisesRegex(TypeError, "The given function.*1 or 'column'; however"): def f1(_) -> ks.DataFrame[int]: pass kdf.apply(f1, axis=0) with self.assertRaisesRegex(TypeError, "The given function.*0 or 'index'; however"): def f2(_) -> ks.Series[int]: pass kdf.apply(f2, axis=1) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) pdf.columns = columns kdf.columns = columns self.assert_eq( kdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index() ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index() ) # returning a Series self.assert_eq( kdf.apply(lambda x: len(x), axis=1).sort_index(), pdf.apply(lambda x: len(x), axis=1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.apply(lambda x: len(x), axis=1).sort_index(), pdf.apply(lambda x: len(x), 
axis=1).sort_index(), ) def test_apply_batch(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 100, "b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100, "c": [1, 4, 9, 16, 25, 36] * 100, }, columns=["a", "b", "c"], index=np.random.rand(600), ) kdf = ks.DataFrame(pdf) # One to test alias. self.assert_eq(kdf.apply_batch(lambda pdf: pdf + 1).sort_index(), (pdf + 1).sort_index()) self.assert_eq( kdf.koalas.apply_batch(lambda pdf, a: pdf + a, args=(1,)).sort_index(), (pdf + 1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.koalas.apply_batch(lambda pdf: pdf + 1).sort_index(), (pdf + 1).sort_index() ) self.assert_eq( kdf.koalas.apply_batch(lambda pdf, b: pdf + b, b=1).sort_index(), (pdf + 1).sort_index(), ) with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"): kdf.koalas.apply_batch(1) with self.assertRaisesRegex(TypeError, "The given function.*frame as its type hints"): def f2(_) -> ks.Series[int]: pass kdf.koalas.apply_batch(f2) with self.assertRaisesRegex(ValueError, "The given function should return a frame"): kdf.koalas.apply_batch(lambda pdf: 1) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.koalas.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index()) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.koalas.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index() ) def test_transform_batch(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 100, "b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100, "c": [1, 4, 9, 16, 25, 36] * 100, }, columns=["a", "b", "c"], index=np.random.rand(600), ) kdf = ks.DataFrame(pdf) # One to test alias. self.assert_eq( kdf.transform_batch(lambda pdf: pdf + 1).sort_index(), (pdf + 1).sort_index() ) self.assert_eq( kdf.koalas.transform_batch(lambda pdf: pdf.c + 1).sort_index(), (pdf.c + 1).sort_index() ) self.assert_eq( kdf.koalas.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(), (pdf + 1).sort_index(), ) self.assert_eq( kdf.koalas.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(), (pdf.c + 1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.koalas.transform_batch(lambda pdf: pdf + 1).sort_index(), (pdf + 1).sort_index() ) self.assert_eq( kdf.koalas.transform_batch(lambda pdf: pdf.b + 1).sort_index(), (pdf.b + 1).sort_index(), ) self.assert_eq( kdf.koalas.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(), (pdf + 1).sort_index(), ) self.assert_eq( kdf.koalas.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(), (pdf.c + 1).sort_index(), ) with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"): kdf.koalas.transform_batch(1) with self.assertRaisesRegex(ValueError, "The given function should return a frame"): kdf.koalas.transform_batch(lambda pdf: 1) with self.assertRaisesRegex( ValueError, "transform_batch cannot produce aggregated results" ): kdf.koalas.transform_batch(lambda pdf: pd.Series(1)) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) pdf.columns = columns kdf.columns = columns self.assert_eq( kdf.koalas.transform_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index() ) with option_context("compute.shortcut_limit", 500): self.assert_eq( kdf.koalas.transform_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index() ) def test_transform_batch_same_anchor(self): kdf = ks.range(10) kdf["d"] = 
kdf.koalas.transform_batch(lambda pdf: pdf.id + 1) self.assert_eq( kdf, pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]) ) kdf = ks.range(10) # One to test alias. kdf["d"] = kdf.id.transform_batch(lambda ser: ser + 1) self.assert_eq( kdf, pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]) ) kdf = ks.range(10) def plus_one(pdf) -> ks.Series[np.int64]: return pdf.id + 1 kdf["d"] = kdf.koalas.transform_batch(plus_one) self.assert_eq( kdf, pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]) ) kdf = ks.range(10) def plus_one(ser) -> ks.Series[np.int64]: return ser + 1 kdf["d"] = kdf.id.koalas.transform_batch(plus_one) self.assert_eq( kdf, pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]) ) def test_empty_timestamp(self): pdf = pd.DataFrame( { "t": [ datetime(2019, 1, 1, 0, 0, 0), datetime(2019, 1, 2, 0, 0, 0), datetime(2019, 1, 3, 0, 0, 0), ] }, index=np.random.rand(3), ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf[kdf["t"] != kdf["t"]], pdf[pdf["t"] != pdf["t"]]) self.assert_eq(kdf[kdf["t"] != kdf["t"]].dtypes, pdf[pdf["t"] != pdf["t"]].dtypes) def test_to_spark(self): kdf = ks.from_pandas(self.pdf) with self.assertRaisesRegex(ValueError, "'index_col' cannot be overlapped"): kdf.to_spark(index_col="a") with self.assertRaisesRegex(ValueError, "length of index columns.*1.*3"): kdf.to_spark(index_col=["x", "y", "z"]) def test_keys(self): pdf = pd.DataFrame( [[1, 2], [4, 5], [7, 8]], index=["cobra", "viper", "sidewinder"], columns=["max_speed", "shield"], ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.keys(), pdf.keys()) def test_quantile(self): kdf = ks.from_pandas(self.pdf) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' 
): kdf.quantile(0.5, axis=1) with self.assertRaisesRegex( NotImplementedError, "quantile currently doesn't supports numeric_only" ): kdf.quantile(0.5, numeric_only=False) def test_pct_change(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [300, 200, 400, 200]}, index=np.random.rand(4), ) pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.pct_change(2), pdf.pct_change(2), check_exact=False) def test_where(self): kdf = ks.from_pandas(self.pdf) with self.assertRaisesRegex(ValueError, "type of cond must be a DataFrame or Series"): kdf.where(1) def test_mask(self): kdf = ks.from_pandas(self.pdf) with self.assertRaisesRegex(ValueError, "type of cond must be a DataFrame or Series"): kdf.mask(1) def test_query(self): pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C": range(10, 5, -1)}) kdf = ks.from_pandas(pdf) exprs = ("A > B", "A < C", "C == B") for expr in exprs: self.assert_eq(kdf.query(expr), pdf.query(expr)) # test `inplace=True` for expr in exprs: dummy_kdf = kdf.copy() dummy_pdf = pdf.copy() pser = dummy_pdf.A kser = dummy_kdf.A dummy_pdf.query(expr, inplace=True) dummy_kdf.query(expr, inplace=True) self.assert_eq(dummy_kdf, dummy_pdf) self.assert_eq(kser, pser) # invalid values for `expr` invalid_exprs = (1, 1.0, (exprs[0],), [exprs[0]]) for expr in invalid_exprs: with self.assertRaisesRegex( ValueError, "expr must be a string to be evaluated, {} given".format(type(expr)) ): kdf.query(expr) # invalid values for `inplace` invalid_inplaces = (1, 0, "True", "False") for inplace in invalid_inplaces: with self.assertRaisesRegex( ValueError, 'For argument "inplace" expected type bool, received type {}.'.format( type(inplace).__name__ ), ): kdf.query("a < b", inplace=inplace) # doesn't support for MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")]) kdf.columns = columns with self.assertRaisesRegex(ValueError, "Doesn't support for MultiIndex columns"): kdf.query("('A', 'Z') > ('B', 'X')") def test_take(self): pdf = pd.DataFrame( {"A": range(0, 50000), "B": range(100000, 0, -2), "C": range(100000, 50000, -1)} ) kdf = ks.from_pandas(pdf) # axis=0 (default) self.assert_eq(kdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index()) self.assert_eq(kdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index()) self.assert_eq( kdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index() ) self.assert_eq( kdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index() ) self.assert_eq( kdf.take([10, 100, 1000, 10000]).sort_index(), pdf.take([10, 100, 1000, 10000]).sort_index(), ) self.assert_eq( kdf.take([-10, -100, -1000, -10000]).sort_index(), pdf.take([-10, -100, -1000, -10000]).sort_index(), ) # axis=1 self.assert_eq(kdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()) self.assert_eq( kdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index() ) self.assert_eq( kdf.take(range(1, 3), axis=1).sort_index(), pdf.take(range(1, 3), axis=1).sort_index(), ) self.assert_eq( kdf.take(range(-1, -3), axis=1).sort_index(), pdf.take(range(-1, -3), axis=1).sort_index(), ) self.assert_eq( kdf.take([2, 1], axis=1).sort_index(), pdf.take([2, 1], axis=1).sort_index(), ) self.assert_eq( kdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index(), ) # MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")]) kdf.columns = 
columns pdf.columns = columns # MultiIndex columns with axis=0 (default) self.assert_eq(kdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index()) self.assert_eq(kdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index()) self.assert_eq( kdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index() ) self.assert_eq( kdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index() ) self.assert_eq( kdf.take([10, 100, 1000, 10000]).sort_index(), pdf.take([10, 100, 1000, 10000]).sort_index(), ) self.assert_eq( kdf.take([-10, -100, -1000, -10000]).sort_index(), pdf.take([-10, -100, -1000, -10000]).sort_index(), ) # axis=1 self.assert_eq(kdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()) self.assert_eq( kdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index() ) self.assert_eq( kdf.take(range(1, 3), axis=1).sort_index(), pdf.take(range(1, 3), axis=1).sort_index(), ) self.assert_eq( kdf.take(range(-1, -3), axis=1).sort_index(), pdf.take(range(-1, -3), axis=1).sort_index(), almost=True, ) self.assert_eq( kdf.take([2, 1], axis=1).sort_index(), pdf.take([2, 1], axis=1).sort_index(), ) self.assert_eq( kdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index(), ) # Checking the type of indices. self.assertRaises(ValueError, lambda: kdf.take(1)) self.assertRaises(ValueError, lambda: kdf.take("1")) self.assertRaises(ValueError, lambda: kdf.take({1, 2})) self.assertRaises(ValueError, lambda: kdf.take({1: None, 2: None})) def test_axes(self): pdf = self.pdf kdf = ks.from_pandas(pdf) self.assert_list_eq(pdf.axes, kdf.axes) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")]) pdf.columns = columns kdf.columns = columns self.assert_list_eq(pdf.axes, kdf.axes) def test_udt(self): sparse_values = {0: 0.1, 1: 1.1} sparse_vector = SparseVector(len(sparse_values), sparse_values) pdf = pd.DataFrame({"a": [sparse_vector], "b": [10]}) if LooseVersion(pyspark.__version__) < LooseVersion("2.4"): with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) else: kdf = ks.from_pandas(pdf) self.assert_eq(kdf, pdf) def test_eval(self): pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)}) kdf = ks.from_pandas(pdf) # operation between columns (returns Series) self.assert_eq(pdf.eval("A + B"), kdf.eval("A + B")) self.assert_eq(pdf.eval("A + A"), kdf.eval("A + A")) # assignment (returns DataFrame) self.assert_eq(pdf.eval("C = A + B"), kdf.eval("C = A + B")) self.assert_eq(pdf.eval("A = A + A"), kdf.eval("A = A + A")) # operation between scalars (returns scalar) self.assert_eq(pdf.eval("1 + 1"), kdf.eval("1 + 1")) # complicated operations with assignment self.assert_eq( pdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"), kdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"), ) # inplace=True (only support for assignment) pdf.eval("C = A + B", inplace=True) kdf.eval("C = A + B", inplace=True) self.assert_eq(pdf, kdf) pser = pdf.A kser = kdf.A pdf.eval("A = B + C", inplace=True) kdf.eval("A = B + C", inplace=True) self.assert_eq(pdf, kdf) self.assert_eq(pser, kser) # doesn't support for multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b"), ("z", "c")]) kdf.columns = columns self.assertRaises(ValueError, lambda: kdf.eval("x.a + y.b")) def test_to_markdown(self): pdf = pd.DataFrame(data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}) kdf = ks.from_pandas(pdf) # 
`to_markdown()` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0. if LooseVersion(pd.__version__) < LooseVersion("1.0.0"): self.assertRaises(NotImplementedError, lambda: kdf.to_markdown()) else: self.assert_eq(pdf.to_markdown(), kdf.to_markdown()) def test_cache(self): pdf = pd.DataFrame( [(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"] ) kdf = ks.from_pandas(pdf) with kdf.cache() as cached_df: self.assert_eq(isinstance(cached_df, CachedDataFrame), True) self.assert_eq( repr(cached_df.storage_level), repr(StorageLevel(True, True, False, True)) ) def test_persist(self): pdf = pd.DataFrame( [(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"] ) kdf = ks.from_pandas(pdf) storage_levels = [ StorageLevel.DISK_ONLY, StorageLevel.MEMORY_AND_DISK, StorageLevel.MEMORY_ONLY, StorageLevel.OFF_HEAP, ] for storage_level in storage_levels: with kdf.persist(storage_level) as cached_df: self.assert_eq(isinstance(cached_df, CachedDataFrame), True) self.assert_eq(repr(cached_df.storage_level), repr(storage_level)) self.assertRaises(TypeError, lambda: kdf.persist("DISK_ONLY")) def test_squeeze(self): axises = [None, 0, 1, "rows", "index", "columns"] # Multiple columns pdf = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"], index=["x", "y"]) kdf = ks.from_pandas(pdf) for axis in axises: self.assert_eq(pdf.squeeze(axis), kdf.squeeze(axis)) # Multiple columns with MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")]) pdf.columns = columns kdf.columns = columns for axis in axises: self.assert_eq(pdf.squeeze(axis), kdf.squeeze(axis)) # Single column with single value pdf = pd.DataFrame([[1]], columns=["a"], index=["x"]) kdf = ks.from_pandas(pdf) for axis in axises: self.assert_eq(pdf.squeeze(axis), kdf.squeeze(axis)) # Single column with single value with MultiIndex column columns = pd.MultiIndex.from_tuples([("A", "Z")]) pdf.columns = columns kdf.columns = columns for axis in axises: self.assert_eq(pdf.squeeze(axis), kdf.squeeze(axis)) # Single column with multiple values pdf = pd.DataFrame([1, 2, 3, 4], columns=["a"]) kdf = ks.from_pandas(pdf) for axis in axises: self.assert_eq(pdf.squeeze(axis), kdf.squeeze(axis)) # Single column with multiple values with MultiIndex column pdf.columns = columns kdf.columns = columns for axis in axises: self.assert_eq(pdf.squeeze(axis), kdf.squeeze(axis)) def test_rfloordiv(self): pdf = pd.DataFrame( {"angles": [0, 3, 4], "degrees": [360, 180, 360]}, index=["circle", "triangle", "rectangle"], columns=["angles", "degrees"], ) kdf = ks.from_pandas(pdf) if LooseVersion(pd.__version__) < LooseVersion("1.0.0") and LooseVersion( pd.__version__ ) >= LooseVersion("0.24.0"): expected_result = pd.DataFrame( {"angles": [np.inf, 3.0, 2.0], "degrees": [0.0, 0.0, 0.0]}, index=["circle", "triangle", "rectangle"], columns=["angles", "degrees"], ) else: expected_result = pdf.rfloordiv(10) self.assert_eq(kdf.rfloordiv(10), expected_result) def test_truncate(self): pdf1 = pd.DataFrame( { "A": ["a", "b", "c", "d", "e", "f", "g"], "B": ["h", "i", "j", "k", "l", "m", "n"], "C": ["o", "p", "q", "r", "s", "t", "u"], }, index=[-500, -20, -1, 0, 400, 550, 1000], ) kdf1 = ks.from_pandas(pdf1) pdf2 = pd.DataFrame( { "A": ["a", "b", "c", "d", "e", "f", "g"], "B": ["h", "i", "j", "k", "l", "m", "n"], "C": ["o", "p", "q", "r", "s", "t", "u"], }, index=[1000, 550, 400, 0, -1, -20, -500], ) kdf2 = ks.from_pandas(pdf2) self.assert_eq(kdf1.truncate(), pdf1.truncate()) self.assert_eq(kdf1.truncate(before=-20), 
pdf1.truncate(before=-20)) self.assert_eq(kdf1.truncate(after=400), pdf1.truncate(after=400)) self.assert_eq(kdf1.truncate(copy=False), pdf1.truncate(copy=False)) self.assert_eq(kdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False)) self.assert_eq(kdf2.truncate(0, 550), pdf2.truncate(0, 550)) self.assert_eq(kdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False)) # axis = 1 self.assert_eq(kdf1.truncate(axis=1), pdf1.truncate(axis=1)) self.assert_eq(kdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1)) self.assert_eq(kdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1)) self.assert_eq(kdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1)) self.assert_eq(kdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1)) self.assert_eq( kdf1.truncate("B", "C", copy=False, axis=1), pdf1.truncate("B", "C", copy=False, axis=1) ) # MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "Z")]) pdf1.columns = columns kdf1.columns = columns pdf2.columns = columns kdf2.columns = columns self.assert_eq(kdf1.truncate(), pdf1.truncate()) self.assert_eq(kdf1.truncate(before=-20), pdf1.truncate(before=-20)) self.assert_eq(kdf1.truncate(after=400), pdf1.truncate(after=400)) self.assert_eq(kdf1.truncate(copy=False), pdf1.truncate(copy=False)) self.assert_eq(kdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False)) self.assert_eq(kdf2.truncate(0, 550), pdf2.truncate(0, 550)) self.assert_eq(kdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False)) # axis = 1 self.assert_eq(kdf1.truncate(axis=1), pdf1.truncate(axis=1)) self.assert_eq(kdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1)) self.assert_eq(kdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1)) self.assert_eq(kdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1)) self.assert_eq(kdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1)) self.assert_eq( kdf1.truncate("B", "C", copy=False, axis=1), pdf1.truncate("B", "C", copy=False, axis=1) ) # Exceptions kdf = ks.DataFrame( { "A": ["a", "b", "c", "d", "e", "f", "g"], "B": ["h", "i", "j", "k", "l", "m", "n"], "C": ["o", "p", "q", "r", "s", "t", "u"], }, index=[-500, 100, 400, 0, -1, 550, -20], ) msg = "truncate requires a sorted index" with self.assertRaisesRegex(ValueError, msg): kdf.truncate() kdf = ks.DataFrame( { "A": ["a", "b", "c", "d", "e", "f", "g"], "B": ["h", "i", "j", "k", "l", "m", "n"], "C": ["o", "p", "q", "r", "s", "t", "u"], }, index=[-500, -20, -1, 0, 400, 550, 1000], ) msg = "Truncate: -20 must be after 400" with self.assertRaisesRegex(ValueError, msg): kdf.truncate(400, -20) msg = "Truncate: B must be after C" with self.assertRaisesRegex(ValueError, msg): kdf.truncate("C", "B", axis=1) def test_explode(self): pdf = pd.DataFrame({"A": [[-1.0, np.nan], [0.0, np.inf], [1.0, -np.inf]], "B": 1}) pdf.index.name = "index" pdf.columns.name = "columns" kdf = ks.from_pandas(pdf) if LooseVersion(pd.__version__) >= LooseVersion("0.25.0"): expected_result1 = pdf.explode("A") expected_result2 = pdf.explode("B") else: expected_result1 = pd.DataFrame( {"A": [-1, np.nan, 0, np.inf, 1, -np.inf], "B": [1, 1, 1, 1, 1, 1]}, index=pd.Index([0, 0, 1, 1, 2, 2]), ) expected_result1.index.name = "index" expected_result1.columns.name = "columns" expected_result2 = pdf self.assert_eq(kdf.explode("A"), expected_result1, almost=True) self.assert_eq(repr(kdf.explode("B")), repr(expected_result2)) 
self.assert_eq(kdf.explode("A").index.name, expected_result1.index.name) self.assert_eq(kdf.explode("A").columns.name, expected_result1.columns.name) self.assertRaises(ValueError, lambda: kdf.explode(["A", "B"])) # MultiIndex midx = pd.MultiIndex.from_tuples( [("x", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"] ) pdf.index = midx kdf = ks.from_pandas(pdf) if LooseVersion(pd.__version__) >= LooseVersion("0.25.0"): expected_result1 = pdf.explode("A") expected_result2 = pdf.explode("B") else: midx = pd.MultiIndex.from_tuples( [("x", "a"), ("x", "a"), ("x", "b"), ("x", "b"), ("y", "c"), ("y", "c")], names=["index1", "index2"], ) expected_result1.index = midx expected_result2 = pdf self.assert_eq(kdf.explode("A"), expected_result1, almost=True) self.assert_eq(repr(kdf.explode("B")), repr(expected_result2)) self.assert_eq(kdf.explode("A").index.names, expected_result1.index.names) self.assert_eq(kdf.explode("A").columns.name, expected_result1.columns.name) self.assertRaises(ValueError, lambda: kdf.explode(["A", "B"])) # MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")], names=["column1", "column2"]) pdf.columns = columns kdf.columns = columns if LooseVersion(pd.__version__) >= LooseVersion("0.25.0"): expected_result1 = pdf.explode(("A", "Z")) expected_result2 = pdf.explode(("B", "X")) expected_result3 = pdf.A.explode("Z") else: expected_result1.columns = columns expected_result2 = pdf expected_result3 = pd.DataFrame({"Z": [-1, np.nan, 0, np.inf, 1, -np.inf]}, index=midx) expected_result3.index.name = "index" expected_result3.columns.name = "column2" self.assert_eq(kdf.explode(("A", "Z")), expected_result1, almost=True) self.assert_eq(repr(kdf.explode(("B", "X"))), repr(expected_result2)) self.assert_eq(kdf.explode(("A", "Z")).index.names, expected_result1.index.names) self.assert_eq(kdf.explode(("A", "Z")).columns.names, expected_result1.columns.names) self.assert_eq(kdf.A.explode("Z"), expected_result3, almost=True) self.assertRaises(ValueError, lambda: kdf.explode(["A", "B"])) self.assertRaises(ValueError, lambda: kdf.explode("A")) def test_spark_schema(self): kdf = ks.DataFrame( { "a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1"), "d": np.arange(4.0, 7.0, dtype="float64"), "e": [True, False, True], "f": pd.date_range("20130101", periods=3), }, columns=["a", "b", "c", "d", "e", "f"], ) self.assertEqual(kdf.spark_schema(), kdf.spark.schema()) self.assertEqual(kdf.spark_schema("index"), kdf.spark.schema("index")) def test_print_schema(self): kdf = ks.DataFrame( {"a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1")}, columns=["a", "b", "c"], ) prev = sys.stdout try: out = StringIO() sys.stdout = out kdf.print_schema() actual = out.getvalue().strip() out = StringIO() sys.stdout = out kdf.spark.print_schema() expected = out.getvalue().strip() self.assertEqual(actual, expected) finally: sys.stdout = prev def test_explain_hint(self): kdf1 = ks.DataFrame( {"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]}, columns=["lkey", "value"] ) kdf2 = ks.DataFrame( {"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]}, columns=["rkey", "value"] ) merged = kdf1.merge(kdf2.hint("broadcast"), left_on="lkey", right_on="rkey") prev = sys.stdout try: out = StringIO() sys.stdout = out merged.explain() actual = out.getvalue().strip() out = StringIO() sys.stdout = out merged.spark.explain() expected = out.getvalue().strip() self.assertEqual(actual, expected) finally: sys.stdout = prev def test_mad(self): pdf = 
pd.DataFrame( { "A": [1, 2, None, 4, np.nan], "B": [-0.1, 0.2, -0.3, np.nan, 0.5], "C": ["a", "b", "c", "d", "e"], } ) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.mad(), pdf.mad()) self.assert_eq(kdf.mad(axis=1), pdf.mad(axis=1)) with self.assertRaises(ValueError): kdf.mad(axis=2) # MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "Z")]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.mad(), pdf.mad()) self.assert_eq(kdf.mad(axis=1), pdf.mad(axis=1)) pdf = pd.DataFrame({"A": [True, True, False, False], "B": [True, False, False, True]}) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.mad(), pdf.mad()) self.assert_eq(kdf.mad(axis=1), pdf.mad(axis=1)) def test_abs(self): pdf = pd.DataFrame({"a": [-2, -1, 0, 1]}) kdf = ks.from_pandas(pdf) self.assert_eq(abs(kdf), abs(pdf)) self.assert_eq(np.abs(kdf), np.abs(pdf)) def test_iteritems(self): pdf = pd.DataFrame( {"species": ["bear", "bear", "marsupial"], "population": [1864, 22000, 80000]}, index=["panda", "polar", "koala"], columns=["species", "population"], ) kdf = ks.from_pandas(pdf) for (p_name, p_items), (k_name, k_items) in zip(pdf.iteritems(), kdf.iteritems()): self.assert_eq(p_name, k_name) self.assert_eq(p_items, k_items) def test_tail(self): if LooseVersion(pyspark.__version__) >= LooseVersion("3.0"): pdf = pd.DataFrame({"x": range(1000)}) kdf = ks.from_pandas(pdf) self.assert_eq(pdf.tail(), kdf.tail()) self.assert_eq(pdf.tail(10), kdf.tail(10)) self.assert_eq(pdf.tail(-990), kdf.tail(-990)) self.assert_eq(pdf.tail(0), kdf.tail(0)) self.assert_eq(pdf.tail(-1001), kdf.tail(-1001)) self.assert_eq(pdf.tail(1001), kdf.tail(1001)) with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"): kdf.tail("10") def test_last_valid_index(self): # `pyspark.sql.dataframe.DataFrame.tail` is new in pyspark >= 3.0. if LooseVersion(pyspark.__version__) >= LooseVersion("3.0"): pdf = pd.DataFrame( {"a": [1, 2, 3, None], "b": [1.0, 2.0, 3.0, None], "c": [100, 200, 400, None]}, index=["Q", "W", "E", "R"], ) kdf = ks.from_pandas(pdf) self.assert_eq(pdf.last_valid_index(), kdf.last_valid_index()) # MultiIndex columns pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) kdf = ks.from_pandas(pdf) self.assert_eq(pdf.last_valid_index(), kdf.last_valid_index()) # Empty DataFrame pdf = pd.Series([]).to_frame() kdf = ks.Series([]).to_frame() self.assert_eq(pdf.last_valid_index(), kdf.last_valid_index()) def test_first_valid_index(self): # Empty DataFrame pdf = pd.Series([]).to_frame() kdf = ks.Series([]).to_frame() self.assert_eq(pdf.first_valid_index(), kdf.first_valid_index())
1
16,192
Shall we use different inputs for `PySpark < 2.4`, where `transpose` won't work with different data types?
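To make the suggestion concrete, here is a minimal sketch of how the test inputs could be gated on the PySpark version, following the same `LooseVersion` pattern this test file already uses elsewhere (e.g. in `test_udt`); the column names and values below are purely hypothetical:

```python
from distutils.version import LooseVersion

import pandas as pd
import pyspark

# Hypothetical illustration: keep a single dtype on PySpark < 2.4 so that
# `transpose` does not hit the mixed-dtype limitation, and exercise mixed
# dtypes only on newer versions.
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
    pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})        # int columns only
else:
    pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})  # int and float columns
```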
databricks-koalas
py
@@ -20,8 +20,11 @@ Wrappers around spark that correspond to common pandas functions. import pyspark import numpy as np import pandas as pd +from ._dask_stubs.compatibility import string_types +from ._dask_stubs.utils import derived_from from .typing import Col, pandas_wrap -from pyspark.sql import Column, DataFrame +from pyspark.sql import Column, DataFrame, functions as F +from pyspark.sql.types import NumericType def default_session():
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Wrappers around spark that correspond to common pandas functions. """ import pyspark import numpy as np import pandas as pd from .typing import Col, pandas_wrap from pyspark.sql import Column, DataFrame def default_session(): return pyspark.sql.SparkSession.builder.getOrCreate() def from_pandas(pdf): """Create DataFrame from pandas DataFrame. This is similar to `DataFrame.createDataFrame()` with pandas DataFrame, but this also picks the index in the given pandas DataFrame. :param pdf: :class:`pandas.DataFrame` """ return default_session().from_pandas(pdf) def read_csv(path, header='infer', names=None, usecols=None, mangle_dupe_cols=True, parse_dates=False, comment=None): """Read CSV (comma-separated) file into DataFrame. :param path: The path string storing the CSV file to be read. :param header: Whether to to use as the column names, and the start of the data. Default behavior is to infer the column names: if no names are passed the behavior is identical to `header=0` and column names are inferred from the first line of the file, if column names are passed explicitly then the behavior is identical to `header=None`. Explicitly pass `header=0` to be able to replace existing names :param names: List of column names to use. If file contains no header row, then you should explicitly pass `header=None`. Duplicates in this list will cause an error to be issued. :param usecols: Return a subset of the columns. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in names or inferred from the document header row(s). If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to `True`. :param mangle_dupe_cols: Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather than 'X' ... 'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. Currently only `True` is allowed. :param parse_dates: boolean or list of ints or names or list of lists or dict, default `False`. Currently only `False` is allowed. :param comment: Indicates the line should not be parsed. :return: :class:`DataFrame` """ return default_session().read_csv(path=path, header=header, names=names, usecols=usecols, mangle_dupe_cols=mangle_dupe_cols, parse_dates=parse_dates, comment=comment) def read_parquet(path, columns=None): """Load a parquet object from the file path, returning a DataFrame. :param path: File path :param columns: If not None, only these columns will be read from the file. 
:return: :class:`DataFrame` """ return default_session().read_parquet(path=path, columns=columns) def to_datetime(arg, errors='raise', format=None, infer_datetime_format=False): if isinstance(arg, Column): return _to_datetime1( arg, errors=errors, format=format, infer_datetime_format=infer_datetime_format) if isinstance(arg, (dict, DataFrame)): return _to_datetime2( arg_year=arg['year'], arg_month=arg['month'], arg_day=arg['day'], errors=errors, format=format, infer_datetime_format=infer_datetime_format) # @pandas_wrap(return_col=np.datetime64) @pandas_wrap def _to_datetime1(arg, errors, format, infer_datetime_format) -> Col[np.datetime64]: return pd.to_datetime( arg, errors=errors, format=format, infer_datetime_format=infer_datetime_format) # @pandas_wrap(return_col=np.datetime64) @pandas_wrap def _to_datetime2(arg_year, arg_month, arg_day, errors, format, infer_datetime_format) -> Col[np.datetime64]: arg = dict(year=arg_year, month=arg_month, day=arg_day) for key in arg: if arg[key] is None: del arg[key] return pd.to_datetime( arg, errors=errors, format=format, infer_datetime_format=infer_datetime_format)
1
8,215
We should sort the import headers like in Spark: public packages first, then pyspark, then internal modules.
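For illustration, the imports touched by the patch above could be regrouped in the order described here (public packages, then pyspark, then internal modules). This is only a rearrangement of the lines already present in the diff, assuming the module keeps living inside the koalas package so the relative imports resolve:

```python
# Public (third-party) packages
import numpy as np
import pandas as pd

# pyspark
import pyspark
from pyspark.sql import Column, DataFrame, functions as F
from pyspark.sql.types import NumericType

# Internal modules
from ._dask_stubs.compatibility import string_types
from ._dask_stubs.utils import derived_from
from .typing import Col, pandas_wrap
```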
databricks-koalas
py
@@ -53,7 +53,9 @@ func newPeer(address string, t *Transport) (*grpcPeer, error) { grpc.MaxCallSendMsgSize(t.options.clientMaxSendMsgSize), ), } - if t.options.clientTLS { + if t.options.clientTLSConfig != nil { + dialOptions = append(dialOptions, grpc.WithTransportCredentials(credentials.NewTLS(t.options.clientTLSConfig))) + } else if t.options.clientTLS { dialOptions = append(dialOptions, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))) } else { dialOptions = append(dialOptions, grpc.WithInsecure())
1
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package grpc import ( "context" "sync" "go.uber.org/yarpc/api/peer" "go.uber.org/yarpc/peer/hostport" "go.uber.org/yarpc/yarpcerrors" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" ) type grpcPeer struct { *hostport.Peer t *Transport clientConn *grpc.ClientConn stoppingC chan struct{} stoppedC chan error lock sync.Mutex stopping bool stopped bool stoppedErr error } func newPeer(address string, t *Transport) (*grpcPeer, error) { dialOptions := []grpc.DialOption{ grpc.WithUserAgent(UserAgent), grpc.WithDefaultCallOptions( grpc.CallCustomCodec(customCodec{}), grpc.MaxCallRecvMsgSize(t.options.clientMaxRecvMsgSize), grpc.MaxCallSendMsgSize(t.options.clientMaxSendMsgSize), ), } if t.options.clientTLS { dialOptions = append(dialOptions, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))) } else { dialOptions = append(dialOptions, grpc.WithInsecure()) } clientConn, err := grpc.Dial(address, dialOptions...) 
if err != nil { return nil, err } grpcPeer := &grpcPeer{ Peer: hostport.NewPeer(hostport.PeerIdentifier(address), t), t: t, clientConn: clientConn, stoppingC: make(chan struct{}, 1), stoppedC: make(chan error, 1), } go grpcPeer.monitor() return grpcPeer, nil } func (p *grpcPeer) monitor() { if !p.monitorStart() { p.monitorStop(nil) return } var attempts uint backoff := p.t.options.backoffStrategy.Backoff() connectivityState := p.clientConn.GetState() changed := true for { var peerConnectionStatus peer.ConnectionStatus var err error // will be called the first time since changed is initialized to true if changed { peerConnectionStatus, err = connectivityStateToPeerConnectionStatus(connectivityState) if err != nil { p.monitorStop(err) return } p.Peer.SetStatus(peerConnectionStatus) } var ctx context.Context var cancel context.CancelFunc if peerConnectionStatus == peer.Available { attempts = 0 ctx = context.Background() } else { attempts++ ctx, cancel = context.WithTimeout(context.Background(), backoff.Duration(attempts)) } newConnectivityState, loop := p.monitorLoopWait(ctx, cancel, connectivityState) if !loop { p.monitorStop(nil) return } changed = connectivityState != newConnectivityState connectivityState = newConnectivityState } } // return true if the transport is started // return false is monitor was stopped in the meantime // this should only be called by monitor() func (p *grpcPeer) monitorStart() bool { select { // wait for start so we can be certain that we have a channel case <-p.t.once.Started(): return true case <-p.stoppingC: return false } } // this should only be called by monitor() func (p *grpcPeer) monitorStop(err error) { p.Peer.SetStatus(peer.Unavailable) // Close always returns an error _ = p.clientConn.Close() p.stoppedC <- err close(p.stoppedC) } // this should only be called by monitor() // this does not correlate to wait() at all // // return true to continue looping func (p *grpcPeer) monitorLoopWait(ctx context.Context, cancel context.CancelFunc, connectivityState connectivity.State) (connectivity.State, bool) { changedC := make(chan bool, 1) go func() { changedC <- p.clientConn.WaitForStateChange(ctx, connectivityState) }() loop := false select { case changed := <-changedC: if cancel != nil { cancel() } if changed { connectivityState = p.clientConn.GetState() } loop = true case <-p.stoppingC: case <-p.t.once.Stopping(): if cancel != nil { cancel() } } return connectivityState, loop } func (p *grpcPeer) stop() { p.lock.Lock() defer p.lock.Unlock() if !p.stopping { // this is selected on in monitor() p.stoppingC <- struct{}{} close(p.stoppingC) p.stopping = true } } func (p *grpcPeer) wait() error { p.lock.Lock() defer p.lock.Unlock() if p.stopped { return p.stoppedErr } p.stoppedErr = <-p.stoppedC p.stopped = true return p.stoppedErr } func connectivityStateToPeerConnectionStatus(connectivityState connectivity.State) (peer.ConnectionStatus, error) { switch connectivityState { case connectivity.Idle, connectivity.TransientFailure, connectivity.Shutdown: return peer.Unavailable, nil case connectivity.Connecting: return peer.Connecting, nil case connectivity.Ready: return peer.Available, nil default: return 0, yarpcerrors.Newf(yarpcerrors.CodeInternal, "unknown connectivity.State: %v", connectivityState) } }
1
16,632
Looks like we should drop this var from the transport options struct.
yarpc-yarpc-go
go
@@ -721,17 +721,6 @@ func TestValidateDuration(t *testing.T) { }, errs: []*field.Error{field.Invalid(fldPath.Child("renewBefore"), usefulDurations["ten years"].Duration, fmt.Sprintf("certificate duration %s must be greater than renewBefore %s", cmapi.DefaultCertificateDuration, usefulDurations["ten years"].Duration))}, }, - "default renewBefore is bigger than the set duration": { - cfg: &internalcmapi.Certificate{ - Spec: internalcmapi.CertificateSpec{ - Duration: usefulDurations["one hour"], - CommonName: "testcn", - SecretName: "abc", - IssuerRef: validIssuerRef, - }, - }, - errs: []*field.Error{field.Invalid(fldPath.Child("renewBefore"), cmapi.DefaultRenewBefore, fmt.Sprintf("certificate duration %s must be greater than renewBefore %s", usefulDurations["one hour"].Duration, cmapi.DefaultRenewBefore))}, - }, "renewBefore is bigger than the duration": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{
1
/* Copyright 2020 The cert-manager Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package validation import ( "fmt" "reflect" "testing" "time" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" cmapiv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" cmapiv1alpha3 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha3" cmapiv1beta1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1beta1" "github.com/jetstack/cert-manager/pkg/internal/api/validation" internalcmapi "github.com/jetstack/cert-manager/pkg/internal/apis/certmanager" cmmeta "github.com/jetstack/cert-manager/pkg/internal/apis/meta" ) var ( validIssuerRef = cmmeta.ObjectReference{ Name: "name", Kind: "ClusterIssuer", } someAdmissionRequest = &admissionv1.AdmissionRequest{ RequestKind: &metav1.GroupVersionKind{ Group: "test", Kind: "test", Version: "test", }, } ) func strPtr(s string) *string { return &s } func int32Ptr(i int32) *int32 { return &i } func TestValidateCertificate(t *testing.T) { fldPath := field.NewPath("spec") scenarios := map[string]struct { cfg *internalcmapi.Certificate a *admissionv1.AdmissionRequest errs []*field.Error warnings validation.WarningList }{ "valid basic certificate": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, }, "valid with blank issuerRef kind": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: cmmeta.ObjectReference{ Name: "valid", }, }, }, a: someAdmissionRequest, }, "valid with 'Issuer' issuerRef kind": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: cmmeta.ObjectReference{ Name: "valid", Kind: "Issuer", }, }, }, a: someAdmissionRequest, }, "valid with org set": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", Subject: &internalcmapi.X509Subject{ Organizations: []string{"testorg"}, }, IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, }, "invalid issuerRef kind": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: cmmeta.ObjectReference{ Name: "valid", Kind: "invalid", }, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("issuerRef", "kind"), "invalid", "must be one of Issuer or ClusterIssuer"), }, }, "certificate missing secretName": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", IssuerRef: validIssuerRef, }, }, errs: []*field.Error{ field.Required(fldPath.Child("secretName"), "must be specified"), }, a: someAdmissionRequest, }, "certificate with no domains, URIs or common name": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ 
SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath, "", "at least one of commonName, dnsNames, uris ipAddresses, or emailAddresses must be set"), }, }, "certificate with no issuerRef": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Required(fldPath.Child("issuerRef", "name"), "must be specified"), }, }, "valid certificate with only dnsNames": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ DNSNames: []string{"validdnsname"}, SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, }, "valid certificate with rsa keyAlgorithm specified and no keySize": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Algorithm: internalcmapi.RSAKeyAlgorithm, }, }, }, a: someAdmissionRequest, }, "valid certificate with rsa keyAlgorithm specified with keySize 2048": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Algorithm: internalcmapi.RSAKeyAlgorithm, Size: 2048, }, }, }, a: someAdmissionRequest, }, "valid certificate with rsa keyAlgorithm specified with keySize 4096": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Algorithm: internalcmapi.RSAKeyAlgorithm, Size: 4096, }, }, }, a: someAdmissionRequest, }, "valid certificate with rsa keyAlgorithm specified with keySize 8192": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Algorithm: internalcmapi.RSAKeyAlgorithm, Size: 8192, }, }, }, a: someAdmissionRequest, }, "valid certificate with ecdsa keyAlgorithm specified and no keySize": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Algorithm: internalcmapi.ECDSAKeyAlgorithm, }, }, }, a: someAdmissionRequest, }, "valid certificate with ecdsa keyAlgorithm specified with keySize 256": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Size: 256, Algorithm: internalcmapi.ECDSAKeyAlgorithm, }, }, }, a: someAdmissionRequest, }, "valid certificate with ecdsa keyAlgorithm specified with keySize 384": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Size: 384, Algorithm: internalcmapi.ECDSAKeyAlgorithm, }, }, }, a: someAdmissionRequest, }, "valid certificate with ecdsa keyAlgorithm specified with keySize 521": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Size: 521, Algorithm: internalcmapi.ECDSAKeyAlgorithm, }, }, }, a: someAdmissionRequest, }, "valid certificate with 
keyAlgorithm not specified and keySize specified": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Size: 2048, }, }, }, a: someAdmissionRequest, }, "certificate with rsa keyAlgorithm specified and invalid keysize 1024": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Algorithm: internalcmapi.RSAKeyAlgorithm, Size: 1024, }, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("privateKey", "size"), 1024, "must be between 2048 & 8192 for rsa keyAlgorithm"), }, }, "certificate with rsa keyAlgorithm specified and invalid keysize 8196": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Algorithm: internalcmapi.RSAKeyAlgorithm, Size: 8196, }, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("privateKey", "size"), 8196, "must be between 2048 & 8192 for rsa keyAlgorithm"), }, }, "certificate with ecdsa keyAlgorithm specified and invalid keysize": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Size: 100, Algorithm: internalcmapi.ECDSAKeyAlgorithm, }, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.NotSupported(fldPath.Child("privateKey", "size"), 100, []string{"256", "384", "521"}), }, }, "certificate with invalid keyAlgorithm": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, PrivateKey: &internalcmapi.CertificatePrivateKey{ Algorithm: internalcmapi.PrivateKeyAlgorithm("blah"), }, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("privateKey", "algorithm"), internalcmapi.PrivateKeyAlgorithm("blah"), "must be either empty or one of rsa or ecdsa"), }, }, "valid certificate with ipAddresses": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", IPAddresses: []string{"127.0.0.1"}, SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, }, "certificate with invalid ipAddresses": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", IPAddresses: []string{"blah"}, SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("ipAddresses").Index(0), "blah", "invalid IP address"), }, }, "valid certificate with commonName exactly 64 bytes": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "this-is-a-big-long-string-which-is-exactly-sixty-four-characters", SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, errs: []*field.Error{}, }, "invalid certificate with commonName longer than 64 bytes": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "this-is-a-big-long-string-which-has-exactly-sixty-five-characters", SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.TooLong(fldPath.Child("commonName"), 
"this-is-a-big-long-string-which-has-exactly-sixty-five-characters", 64), }, }, "valid certificate with no commonName and second dnsName longer than 64 bytes": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ SecretName: "abc", IssuerRef: validIssuerRef, DNSNames: []string{ "dnsName", "this-is-a-big-long-string-which-has-exactly-sixty-five-characters", }, }, }, a: someAdmissionRequest, }, "valid certificate with commonName and first dnsName longer than 64 bytes": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, DNSNames: []string{ "this-is-a-big-long-string-which-has-exactly-sixty-five-characters", "dnsName", }, }, }, a: someAdmissionRequest, }, "valid certificate with basic keyusage": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, Usages: []internalcmapi.KeyUsage{"signing"}, }, }, a: someAdmissionRequest, }, "valid certificate with multiple keyusage": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, Usages: []internalcmapi.KeyUsage{"signing", "s/mime"}, }, }, a: someAdmissionRequest, }, "invalid certificate with nonexistent keyusage": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, Usages: []internalcmapi.KeyUsage{"nonexistent"}, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("usages").Index(0), internalcmapi.KeyUsage("nonexistent"), "unknown keyusage"), }, }, "valid certificate with only URI SAN name": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ SecretName: "abc", IssuerRef: validIssuerRef, URISANs: []string{ "foo.bar", }, }, }, a: someAdmissionRequest, }, "valid certificate with only email SAN": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ EmailSANs: []string{"[email protected]"}, SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, }, "invalid certificate with incorrect email": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ EmailSANs: []string{"aliceexample.com"}, SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("emailAddresses").Index(0), "aliceexample.com", "invalid email address: mail: missing '@' or angle-addr"), }, }, "invalid certificate with email formatted with name": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ EmailSANs: []string{"Alice <[email protected]>"}, SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("emailAddresses").Index(0), "Alice <[email protected]>", "invalid email address: make sure the supplied value only contains the email address itself"), }, }, "invalid certificate with email formatted with mailto": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ EmailSANs: []string{"mailto:[email protected]"}, SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("emailAddresses").Index(0), "mailto:[email protected]", "invalid email address: mail: expected comma"), }, }, "valid certificate with revision history limit == 1": { cfg: &internalcmapi.Certificate{ Spec: 
internalcmapi.CertificateSpec{ CommonName: "abc", SecretName: "abc", IssuerRef: validIssuerRef, RevisionHistoryLimit: int32Ptr(1), }, }, a: someAdmissionRequest, }, "invalid certificate with revision history limit < 1": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "abc", SecretName: "abc", IssuerRef: validIssuerRef, RevisionHistoryLimit: int32Ptr(0), }, }, a: someAdmissionRequest, errs: []*field.Error{ field.Invalid(fldPath.Child("revisionHistoryLimit"), int32(0), "must not be less than 1"), }, }, "v1alpha2 certificate created": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "abc", SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: &admissionv1.AdmissionRequest{ RequestKind: &metav1.GroupVersionKind{Group: "cert-manager.io", Version: "v1alpha2", Kind: "Certificate"}, }, warnings: validation.WarningList{ fmt.Sprintf(deprecationMessageTemplate, cmapiv1alpha2.SchemeGroupVersion.String(), "Certificate", cmapi.SchemeGroupVersion.String(), "Certificate"), }, }, "v1alpha3 certificate created": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "abc", SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: &admissionv1.AdmissionRequest{ RequestKind: &metav1.GroupVersionKind{Group: "cert-manager.io", Version: "v1alpha3", Kind: "Certificate"}, }, warnings: validation.WarningList{ fmt.Sprintf(deprecationMessageTemplate, cmapiv1alpha3.SchemeGroupVersion.String(), "Certificate", cmapi.SchemeGroupVersion.String(), "Certificate"), }, }, "v1beta1 certificate created": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "abc", SecretName: "abc", IssuerRef: validIssuerRef, }, }, a: &admissionv1.AdmissionRequest{ RequestKind: &metav1.GroupVersionKind{Group: "cert-manager.io", Version: "v1beta1", Kind: "Certificate"}, }, warnings: validation.WarningList{ fmt.Sprintf(deprecationMessageTemplate, cmapiv1beta1.SchemeGroupVersion.String(), "Certificate", cmapi.SchemeGroupVersion.String(), "Certificate"), }, }, } for n, s := range scenarios { t.Run(n, func(t *testing.T) { errs, warnings := ValidateCertificate(s.a, s.cfg) if len(errs) != len(s.errs) { t.Errorf("Expected errors %v but got %v", s.errs, errs) return } if len(warnings) != len(s.warnings) { t.Errorf("Expected warnings %v but got %v", s.warnings, warnings) } for i, e := range errs { expectedErr := s.errs[i] if !reflect.DeepEqual(e, expectedErr) { t.Errorf("Expected error %v but got %v", expectedErr, e) } } for i, w := range warnings { expectedWarning := s.warnings[i] if w != expectedWarning { t.Errorf("Expected warning %q but got %q", expectedWarning, w) } } }) } } func TestValidateDuration(t *testing.T) { usefulDurations := map[string]*metav1.Duration{ "one second": {Duration: time.Second}, "ten minutes": {Duration: time.Minute * 10}, "half hour": {Duration: time.Minute * 30}, "one hour": {Duration: time.Hour}, "one month": {Duration: time.Hour * 24 * 30}, "half year": {Duration: time.Hour * 24 * 180}, "one year": {Duration: time.Hour * 24 * 365}, "ten years": {Duration: time.Hour * 24 * 365 * 10}, } fldPath := field.NewPath("spec") scenarios := map[string]struct { cfg *internalcmapi.Certificate errs []*field.Error }{ "default duration and renewBefore": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, }, "valid duration and renewBefore": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ Duration: 
usefulDurations["one year"], RenewBefore: usefulDurations["half year"], CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, }, "unset duration, valid renewBefore for default": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ RenewBefore: usefulDurations["one month"], CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, }, "unset renewBefore, valid duration for default": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ Duration: usefulDurations["one year"], CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, }, "renewBefore is bigger than the default duration": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ RenewBefore: usefulDurations["ten years"], CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, errs: []*field.Error{field.Invalid(fldPath.Child("renewBefore"), usefulDurations["ten years"].Duration, fmt.Sprintf("certificate duration %s must be greater than renewBefore %s", cmapi.DefaultCertificateDuration, usefulDurations["ten years"].Duration))}, }, "default renewBefore is bigger than the set duration": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ Duration: usefulDurations["one hour"], CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, errs: []*field.Error{field.Invalid(fldPath.Child("renewBefore"), cmapi.DefaultRenewBefore, fmt.Sprintf("certificate duration %s must be greater than renewBefore %s", usefulDurations["one hour"].Duration, cmapi.DefaultRenewBefore))}, }, "renewBefore is bigger than the duration": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ Duration: usefulDurations["one month"], RenewBefore: usefulDurations["one year"], CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, errs: []*field.Error{field.Invalid(fldPath.Child("renewBefore"), usefulDurations["one year"].Duration, fmt.Sprintf("certificate duration %s must be greater than renewBefore %s", usefulDurations["one month"].Duration, usefulDurations["one year"].Duration))}, }, "renewBefore is less than the minimum permitted value": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ RenewBefore: usefulDurations["one second"], CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, errs: []*field.Error{field.Invalid(fldPath.Child("renewBefore"), usefulDurations["one second"].Duration, fmt.Sprintf("certificate renewBefore must be greater than %s", cmapi.MinimumRenewBefore))}, }, "duration is less than the minimum permitted value": { cfg: &internalcmapi.Certificate{ Spec: internalcmapi.CertificateSpec{ Duration: usefulDurations["half hour"], RenewBefore: usefulDurations["ten minutes"], CommonName: "testcn", SecretName: "abc", IssuerRef: validIssuerRef, }, }, errs: []*field.Error{field.Invalid(fldPath.Child("duration"), usefulDurations["half hour"].Duration, fmt.Sprintf("certificate duration must be greater than %s", cmapi.MinimumCertificateDuration))}, }, } for n, s := range scenarios { t.Run(n, func(t *testing.T) { errs := ValidateDuration(&s.cfg.Spec, fldPath) if len(errs) != len(s.errs) { t.Errorf("Expected %v but got %v", s.errs, errs) return } for i, e := range errs { expectedErr := s.errs[i] if !reflect.DeepEqual(e, expectedErr) { t.Errorf("Expected %v but got %v", expectedErr, e) } } }) } }
1
27,348
Ah, yeah, this is the test for the validation that I mentioned in a remark above about relaxing the validation.
jetstack-cert-manager
go
@@ -21,6 +21,19 @@ def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0): + """Check whether the anchors are inside the border + + Args: + flat_anchors (torch.Tensor): Flatten anchors + valid_flags (torch.Tensor): An existing valid flags of anchors + img_shape (tuple(int)): Shape of current image + allowed_border (int, optional): The border to allow the valid anchor. + Defaults to 0. + + Returns: + torch.Tensor: Flags indicating whether the anchors are inside a + valid range. + """ img_h, img_w = img_shape[:2] if allowed_border >= 0: inside_flags = valid_flags & \
1
import torch def images_to_levels(target, num_levels): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] """ target = torch.stack(target, 0) level_targets = [] start = 0 for n in num_levels: end = start + n # level_targets.append(target[:, start:end].squeeze(0)) level_targets.append(target[:, start:end]) start = end return level_targets def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0): img_h, img_w = img_shape[:2] if allowed_border >= 0: inside_flags = valid_flags & \ (flat_anchors[:, 0] >= -allowed_border) & \ (flat_anchors[:, 1] >= -allowed_border) & \ (flat_anchors[:, 2] < img_w + allowed_border) & \ (flat_anchors[:, 3] < img_h + allowed_border) else: inside_flags = valid_flags return inside_flags def calc_region(bbox, ratio, featmap_size=None): """Calculate a proportional bbox region. The bbox center are fixed and the new h' and w' is h * ratio and w * ratio. Args: bbox (Tensor): Bboxes to calculate regions, shape (n, 4) ratio (float): Ratio of the output region. featmap_size (tuple): Feature map size used for clipping the boundary. Returns: tuple: x1, y1, x2, y2 """ x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long() y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long() x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long() y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long() if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1]) y1 = y1.clamp(min=0, max=featmap_size[0]) x2 = x2.clamp(min=0, max=featmap_size[1]) y2 = y2.clamp(min=0, max=featmap_size[0]) return (x1, y1, x2, y2)
1
20,472
For tensors, it is better to illustrate the shape.
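A minimal sketch of what the reviewer may have in mind: the same docstring from the patch above, extended with shape annotations. The (n, 4) and (n,) shapes and the (x1, y1, x2, y2) ordering are assumptions inferred from how the tensors are indexed in the function body, not something stated in the original comment.

import torch


def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0):
    """Check whether the anchors are inside the border.

    Args:
        flat_anchors (torch.Tensor): Flattened anchors, assumed shape (n, 4)
            in (x1, y1, x2, y2) order.
        valid_flags (torch.Tensor): An existing valid flags of anchors,
            assumed shape (n,).
        img_shape (tuple(int)): Shape of the current image as (h, w).
        allowed_border (int, optional): The border to allow the valid anchor.
            Defaults to 0.

    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a
            valid range, shape (n,).
    """
    img_h, img_w = img_shape[:2]
    if allowed_border >= 0:
        # An anchor is kept only if it was already valid and all four of its
        # coordinates fall within the (optionally padded) image bounds.
        inside_flags = valid_flags & \
            (flat_anchors[:, 0] >= -allowed_border) & \
            (flat_anchors[:, 1] >= -allowed_border) & \
            (flat_anchors[:, 2] < img_w + allowed_border) & \
            (flat_anchors[:, 3] < img_h + allowed_border)
    else:
        inside_flags = valid_flags
    return inside_flags

Only the documentation changes; the function body is unchanged from the file above.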
open-mmlab-mmdetection
py
@@ -42,6 +42,11 @@ describe( 'Site Kit admin bar component display', () => { status: 200, body: JSON.stringify( mockBatchResponse[ 'modules::search-console::searchanalytics::e74216dd17533dcb67fa2d433c23467c' ] ), } ); + } else if ( request.url().match( 'google-site-kit/v1/data/' ) ) { + request.respond( { + status: 200, + body: JSON.stringify( mockBatchResponse ), + } ); } else { request.continue(); }
1
/** * WordPress dependencies */ import { activatePlugin, createURL } from '@wordpress/e2e-test-utils'; /** * Internal dependencies */ import { setEditPostFeature, setSiteVerification, setSearchConsoleProperty, useRequestInterception, } from '../../../utils'; import * as adminBarMockResponses from './fixtures/admin-bar'; let mockBatchResponse; // Editor utilities are no-op if WP is pre v5. async function exitFullscreenEditor() { const bodyClasses = await page.evaluate( () => Array.from( document.body.classList ) ); if ( bodyClasses.includes( 'is-fullscreen-mode' ) ) { await setEditPostFeature( 'fullscreenMode', false ); } } async function dismissEditorWelcome() { await setEditPostFeature( 'welcomeGuide', false ); } describe( 'Site Kit admin bar component display', () => { beforeAll( async () => { await activatePlugin( 'e2e-tests-proxy-auth-plugin' ); await activatePlugin( 'e2e-tests-admin-bar-visibility' ); await setSiteVerification(); await setSearchConsoleProperty(); await page.setRequestInterception( true ); useRequestInterception( ( request ) => { if ( request.url().match( 'google-site-kit/v1/modules/search-console/data/searchanalytics?' ) ) { request.respond( { status: 200, body: JSON.stringify( mockBatchResponse[ 'modules::search-console::searchanalytics::e74216dd17533dcb67fa2d433c23467c' ] ), } ); } else { request.continue(); } } ); } ); beforeEach( async () => { mockBatchResponse = []; await page.goto( createURL( '/hello-world' ), { waitUntil: 'load' } ); } ); it( 'loads when viewing the front end of a post with data in Search Console', async () => { const { searchConsole } = adminBarMockResponses; // Data is requested when the Admin Bar app loads on first hover mockBatchResponse = Object.assign( {}, searchConsole ); await Promise.all( [ page.hover( '#wp-admin-bar-google-site-kit' ), page.waitForResponse( ( res ) => res.url().match( 'google-site-kit/v1/modules/search-console/data/searchanalytics?' ) ), ] ); const adminBarApp = await page.$( '#js-googlesitekit-adminbar' ); await expect( adminBarApp ).toMatchElement( '.googlesitekit-data-block__title', { text: /total clicks/i } ); await expect( adminBarApp ).toMatchElement( '.googlesitekit-data-block__title', { text: /total impressions/i } ); // Ensure Analytics CTA is displayed await expect( adminBarApp ).toMatchElement( '.googlesitekit-cta-link', { text: /Set up analytics/i } ); // More details link await expect( adminBarApp ).toMatchElement( '.googlesitekit-cta-link', { text: /More details/i } ); await adminBarApp.dispose(); } ); it( 'loads when editing a post with data in Search Console', async () => { const { searchConsole } = adminBarMockResponses; // Data is requested when the Admin Bar app loads on first hover mockBatchResponse = Object.assign( {}, searchConsole ); // Navigate to edit view for this post await Promise.all( [ expect( page ).toClick( '#wp-admin-bar-edit a', { text: /edit post/i } ), page.waitForNavigation( { waitUntil: 'load' } ), ] ); // We're now in Gutenberg. await dismissEditorWelcome(); await exitFullscreenEditor(); await Promise.all( [ page.hover( '#wp-admin-bar-google-site-kit' ), page.waitForResponse( ( res ) => res.url().match( 'google-site-kit/v1/modules/search-console/data/searchanalytics?' ) ), ] ); await page.evaluate( () => { // Temporarily replace XMLHttpRequest.send with a no-op to prevent a DOMException on navigation. 
// https://github.com/WordPress/gutenberg/blob/d635ca96f8c5dbdc993f30b1f3a3a0b4359e3e2e/packages/editor/src/components/post-locked-modal/index.js#L114 window.XMLHttpRequest.prototype.send = function() {}; } ); const adminBarApp = await page.$( '#js-googlesitekit-adminbar' ); await expect( adminBarApp ).toMatchElement( '.googlesitekit-data-block__title', { text: /total clicks/i } ); await expect( adminBarApp ).toMatchElement( '.googlesitekit-data-block__title', { text: /total impressions/i } ); // Ensure Analytics CTA is displayed await expect( adminBarApp ).toMatchElement( '.googlesitekit-cta-link', { text: /Set up analytics/i } ); // More details link await expect( adminBarApp ).toMatchElement( '.googlesitekit-cta-link', { text: /More details/i } ); await adminBarApp.dispose(); } ); it( 'links "More details" to the dashboard details view for the current post', async () => { const { searchConsole } = adminBarMockResponses; // Data is requested when the Admin Bar app loads on first hover mockBatchResponse = Object.assign( {}, searchConsole ); await Promise.all( [ page.hover( '#wp-admin-bar-google-site-kit' ), page.waitForResponse( ( res ) => res.url().match( 'google-site-kit/v1/modules/search-console/data/searchanalytics?' ) ), ] ); await expect( page ).toMatchElement( '#js-googlesitekit-adminbar .googlesitekit-cta-link', { text: /More details/i, visible: true, timeout: 5000 } ); // Follow more details await Promise.all( [ expect( page ).toClick( '#js-googlesitekit-adminbar .googlesitekit-cta-link', { text: /More details/i } ), // Waiting for navigation here does not work as expected as this is a JS navigation. page.waitForSelector( '.googlesitekit-page-header__title' ), ] ); await expect( page ).toMatchElement( '.googlesitekit-page-header__title', { title: /Detailed Page Stats/i } ); } ); } );
1
36,078
This was removed in a recent PR for the admin bar, but should have been kept. It's been restored in the other admin bar PR, but I've added it here for completeness.
google-site-kit-wp
js
@@ -127,6 +127,15 @@ namespace OpenTelemetry.Instrumentation.AspNetCore.Tests Assert.Equal(2, activityProcessor.Invocations.Count); // begin and end was called var activity = (Activity)activityProcessor.Invocations[1].Arguments[0]; +#if !NETCOREAPP2_1 + // ASP.NET Core after 2.x is W3C aware and hence Activity created by it + // must be used. + Assert.Equal("Microsoft.AspNetCore.Hosting.HttpRequestIn", activity.OperationName); +#else + // ASP.NET Core before 3.x is not W3C aware and hence Activity created by it + // is always ignored and new one is created by the Instrumentation + Assert.Equal("ActivityCreatedByHttpInListener", activity.OperationName); +#endif Assert.Equal(ActivityKind.Server, activity.Kind); Assert.Equal("api/Values/{id}", activity.DisplayName); Assert.Equal("/api/values/2", activity.GetTagValue(SpanAttributeConstants.HttpPathKey) as string);
1
// <copyright file="BasicTests.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Net.Http; using System.Threading; using System.Threading.Tasks; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Mvc.Testing; using Microsoft.AspNetCore.TestHost; using Microsoft.Extensions.DependencyInjection; using Moq; using OpenTelemetry.Context.Propagation; using OpenTelemetry.Instrumentation.AspNetCore.Implementation; using OpenTelemetry.Tests; using OpenTelemetry.Trace; #if NETCOREAPP2_1 using TestApp.AspNetCore._2._1; #else using TestApp.AspNetCore._3._1; #endif using Xunit; namespace OpenTelemetry.Instrumentation.AspNetCore.Tests { // See https://github.com/aspnet/Docs/tree/master/aspnetcore/test/integration-tests/samples/2.x/IntegrationTestsSample public class BasicTests : IClassFixture<WebApplicationFactory<Startup>>, IDisposable { private readonly WebApplicationFactory<Startup> factory; private TracerProvider openTelemetrySdk = null; public BasicTests(WebApplicationFactory<Startup> factory) { this.factory = factory; } [Fact] public void AddAspNetCoreInstrumentation_BadArgs() { TracerProviderBuilder builder = null; Assert.Throws<ArgumentNullException>(() => builder.AddAspNetCoreInstrumentation()); } [Fact] public async Task SuccessfulTemplateControllerCallGeneratesASpan() { var expectedResource = Resources.Resources.CreateServiceResource("test-service"); var activityProcessor = new Mock<ActivityProcessor>(); void ConfigureTestServices(IServiceCollection services) { this.openTelemetrySdk = Sdk.CreateTracerProviderBuilder() .AddAspNetCoreInstrumentation() .SetResource(expectedResource) .AddProcessor(activityProcessor.Object) .Build(); } // Arrange using (var client = this.factory .WithWebHostBuilder(builder => builder.ConfigureTestServices(ConfigureTestServices)) .CreateClient()) { // Act var response = await client.GetAsync("/api/values"); // Assert response.EnsureSuccessStatusCode(); // Status Code 200-299 WaitForProcessorInvocations(activityProcessor, 2); } Assert.Equal(2, activityProcessor.Invocations.Count); // begin and end was called var activity = (Activity)activityProcessor.Invocations[1].Arguments[0]; ValidateAspNetCoreActivity(activity, "/api/values", expectedResource); } [Fact] public async Task SuccessfulTemplateControllerCallUsesParentContext() { var activityProcessor = new Mock<ActivityProcessor>(); var expectedTraceId = ActivityTraceId.CreateRandom(); var expectedSpanId = ActivitySpanId.CreateRandom(); // Arrange using (var testFactory = this.factory .WithWebHostBuilder(builder => builder.ConfigureTestServices(services => { this.openTelemetrySdk = Sdk.CreateTracerProviderBuilder().AddAspNetCoreInstrumentation() .AddProcessor(activityProcessor.Object) .Build(); }))) { using var client = testFactory.CreateClient(); var request = new HttpRequestMessage(HttpMethod.Get, "/api/values/2"); 
request.Headers.Add("traceparent", $"00-{expectedTraceId}-{expectedSpanId}-01"); // Act var response = await client.SendAsync(request); // Assert response.EnsureSuccessStatusCode(); // Status Code 200-299 WaitForProcessorInvocations(activityProcessor, 2); } Assert.Equal(2, activityProcessor.Invocations.Count); // begin and end was called var activity = (Activity)activityProcessor.Invocations[1].Arguments[0]; Assert.Equal(ActivityKind.Server, activity.Kind); Assert.Equal("api/Values/{id}", activity.DisplayName); Assert.Equal("/api/values/2", activity.GetTagValue(SpanAttributeConstants.HttpPathKey) as string); Assert.Equal(expectedTraceId, activity.Context.TraceId); Assert.Equal(expectedSpanId, activity.ParentSpanId); } [Fact] public async Task CustomPropagator() { var activityProcessor = new Mock<ActivityProcessor>(); var expectedTraceId = ActivityTraceId.CreateRandom(); var expectedSpanId = ActivitySpanId.CreateRandom(); var propagator = new Mock<IPropagator>(); propagator.Setup(m => m.Extract(It.IsAny<PropagationContext>(), It.IsAny<HttpRequest>(), It.IsAny<Func<HttpRequest, string, IEnumerable<string>>>())).Returns( new PropagationContext( new ActivityContext( expectedTraceId, expectedSpanId, ActivityTraceFlags.Recorded), default)); // Arrange using (var testFactory = this.factory .WithWebHostBuilder(builder => builder.ConfigureTestServices(services => { this.openTelemetrySdk = Sdk.CreateTracerProviderBuilder() .AddAspNetCoreInstrumentation((opt) => opt.Propagator = propagator.Object) .AddProcessor(activityProcessor.Object) .Build(); }))) { using var client = testFactory.CreateClient(); var response = await client.GetAsync("/api/values/2"); response.EnsureSuccessStatusCode(); // Status Code 200-299 WaitForProcessorInvocations(activityProcessor, 2); } // begin and end was called once each. 
Assert.Equal(2, activityProcessor.Invocations.Count); var activity = (Activity)activityProcessor.Invocations[1].Arguments[0]; Assert.Equal(ActivityKind.Server, activity.Kind); Assert.True(activity.Duration != TimeSpan.Zero); Assert.Equal("api/Values/{id}", activity.DisplayName); Assert.Equal("/api/values/2", activity.GetTagValue(SpanAttributeConstants.HttpPathKey) as string); Assert.Equal(expectedTraceId, activity.Context.TraceId); Assert.Equal(expectedSpanId, activity.ParentSpanId); } [Fact] public async Task RequestNotCollectedWhenFilterIsApplied() { var activityProcessor = new Mock<ActivityProcessor>(); void ConfigureTestServices(IServiceCollection services) { this.openTelemetrySdk = Sdk.CreateTracerProviderBuilder() .AddAspNetCoreInstrumentation((opt) => opt.Filter = (ctx) => ctx.Request.Path != "/api/values/2") .AddProcessor(activityProcessor.Object) .Build(); } // Arrange using (var testFactory = this.factory .WithWebHostBuilder(builder => builder.ConfigureTestServices(ConfigureTestServices))) { using var client = testFactory.CreateClient(); // Act var response1 = await client.GetAsync("/api/values"); var response2 = await client.GetAsync("/api/values/2"); // Assert response1.EnsureSuccessStatusCode(); // Status Code 200-299 response2.EnsureSuccessStatusCode(); // Status Code 200-299 WaitForProcessorInvocations(activityProcessor, 2); } // we should only create one span and never call processor with another Assert.Equal(2, activityProcessor.Invocations.Count); // begin and end was called var activity = (Activity)activityProcessor.Invocations[1].Arguments[0]; Assert.Equal(ActivityKind.Server, activity.Kind); Assert.Equal("/api/values", activity.GetTagValue(SpanAttributeConstants.HttpPathKey) as string); } [Fact] public async Task RequestNotCollectedWhenFilterThrowException() { var activityProcessor = new Mock<ActivityProcessor>(); void ConfigureTestServices(IServiceCollection services) { this.openTelemetrySdk = Sdk.CreateTracerProviderBuilder() .AddAspNetCoreInstrumentation((opt) => opt.Filter = (ctx) => { if (ctx.Request.Path == "/api/values/2") { throw new Exception("from InstrumentationFilter"); } else { return true; } }) .AddProcessor(activityProcessor.Object) .Build(); } // Arrange using (var testFactory = this.factory .WithWebHostBuilder(builder => builder.ConfigureTestServices(ConfigureTestServices))) { using var client = testFactory.CreateClient(); // Act using (var inMemoryEventListener = new InMemoryEventListener(AspNetCoreInstrumentationEventSource.Log)) { var response1 = await client.GetAsync("/api/values"); var response2 = await client.GetAsync("/api/values/2"); response1.EnsureSuccessStatusCode(); // Status Code 200-299 response2.EnsureSuccessStatusCode(); // Status Code 200-299 Assert.Single(inMemoryEventListener.Events.Where((e) => e.EventId == 3)); } WaitForProcessorInvocations(activityProcessor, 2); } // As InstrumentationFilter threw, we continue as if the // InstrumentationFilter did not exist. Assert.Equal(2, activityProcessor.Invocations.Count); // begin and end was called var activity = (Activity)activityProcessor.Invocations[1].Arguments[0]; Assert.Equal(ActivityKind.Server, activity.Kind); Assert.Equal("/api/values", activity.GetTagValue(SpanAttributeConstants.HttpPathKey) as string); } public void Dispose() { this.openTelemetrySdk?.Dispose(); } private static void WaitForProcessorInvocations(Mock<ActivityProcessor> activityProcessor, int invocationCount) { // We need to let End callback execute as it is executed AFTER response was returned. 
// In unit tests environment there may be a lot of parallel unit tests executed, so // giving some breezing room for the End callback to complete Assert.True(SpinWait.SpinUntil( () => { Thread.Sleep(10); return activityProcessor.Invocations.Count >= invocationCount; }, TimeSpan.FromSeconds(1))); } private static void ValidateAspNetCoreActivity(Activity activityToValidate, string expectedHttpPath, Resources.Resource expectedResource) { Assert.Equal(ActivityKind.Server, activityToValidate.Kind); Assert.Equal(expectedHttpPath, activityToValidate.GetTagValue(SpanAttributeConstants.HttpPathKey) as string); Assert.Equal(expectedResource, activityToValidate.GetResource()); var request = activityToValidate.GetCustomProperty(HttpInListener.RequestCustomPropertyName); Assert.NotNull(request); Assert.True(request is HttpRequest); var response = activityToValidate.GetCustomProperty(HttpInListener.ResponseCustomPropertyName); Assert.NotNull(response); Assert.True(response is HttpResponse); } } }
1
17,178
nit: probably swap the `#if`/`#else` branches so the condition checks `NETCOREAPP2_1` directly, which reads more naturally.
open-telemetry-opentelemetry-dotnet
.cs
@@ -486,12 +486,13 @@ func (a *Account) IsExportService(service string) bool { // IsExportServiceTracking will indicate if given publish subject is an export service with tracking enabled. func (a *Account) IsExportServiceTracking(service string) bool { a.mu.RLock() - defer a.mu.RUnlock() ea, ok := a.exports.services[service] if ok && ea == nil { + a.mu.RUnlock() return false } if ok && ea != nil && ea.latency != nil { + a.mu.RUnlock() return true } // FIXME(dlc) - Might want to cache this is in the hot path checking for
1
// Copyright 2018-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "fmt" "io/ioutil" "math/rand" "net/http" "net/url" "reflect" "sort" "strings" "sync" "time" "github.com/nats-io/jwt" ) // For backwards compatibility with NATS < 2.0, users who are not explicitly defined into an // account will be grouped in the default global account. const globalAccountName = "$G" // Account are subject namespace definitions. By default no messages are shared between accounts. // You can share via Exports and Imports of Streams and Services. type Account struct { Name string Nkey string Issuer string claimJWT string updated time.Time mu sync.RWMutex sl *Sublist etmr *time.Timer ctmr *time.Timer strack map[string]sconns nrclients int32 sysclients int32 nleafs int32 nrleafs int32 clients map[*client]*client rm map[string]int32 lqws map[string]int32 usersRevoked map[string]int64 actsRevoked map[string]int64 respMap map[string][]*serviceRespEntry lleafs []*client imports importMap exports exportMap limits nae int32 pruning bool rmPruning bool expired bool signingKeys []string srv *Server // server this account is registered with (possibly nil) } // Account based limits. type limits struct { mpay int32 msubs int32 mconns int32 mleafs int32 maxnae int32 maxnrm int32 maxaettl time.Duration } // Used to track remote clients and leafnodes per remote server. type sconns struct { conns int32 leafs int32 } // Import stream mapping struct type streamImport struct { acc *Account from string prefix string claim *jwt.Import invalid bool } // Import service mapping struct type serviceImport struct { acc *Account claim *jwt.Import from string to string ts int64 rt ServiceRespType latency *serviceLatency m1 *ServiceLatency ae bool internal bool invalid bool tracking bool } // This is used to record when we create a mapping for implicit service // imports. We use this to clean up entries that are not singletons when // we detect that interest is no longer present. The key to the map will // be the actual interest. We record the mapped subject and the serviceImport type serviceRespEntry struct { acc *Account msub string } // ServiceRespType represents the types of service request response types. type ServiceRespType uint8 // Service response types. Defaults to a singleton. const ( Singleton ServiceRespType = iota Stream Chunked ) // String helper. func (rt ServiceRespType) String() string { switch rt { case Singleton: return "Singleton" case Stream: return "Stream" case Chunked: return "Chunked" } return "Unknown ServiceResType" } // exportAuth holds configured approvals or boolean indicating an // auth token is required for import. type exportAuth struct { tokenReq bool approved map[string]*Account // Only used for service types respType ServiceRespType latency *serviceLatency } // Used to track service latency. type serviceLatency struct { sampling int8 subject string } // importMap tracks the imported streams and services. 
type importMap struct { streams map[string]*streamImport services map[string]*serviceImport // TODO(dlc) sync.Map may be better. } // exportMap tracks the exported streams and services. type exportMap struct { streams map[string]*exportAuth services map[string]*exportAuth } // NewAccount creates a new unlimited account with the given name. func NewAccount(name string) *Account { a := &Account{ Name: name, limits: limits{-1, -1, -1, -1, 0, 0, 0}, } return a } // Used to create shallow copies of accounts for transfer // from opts to real accounts in server struct. func (a *Account) shallowCopy() *Account { na := NewAccount(a.Name) na.Nkey = a.Nkey na.Issuer = a.Issuer na.imports = a.imports na.exports = a.exports return na } // NumConnections returns active number of clients for this account for // all known servers. func (a *Account) NumConnections() int { a.mu.RLock() nc := len(a.clients) + int(a.nrclients) a.mu.RUnlock() return nc } // NumLocalConnections returns active number of clients for this account // on this server. func (a *Account) NumLocalConnections() int { a.mu.RLock() nlc := a.numLocalConnections() a.mu.RUnlock() return nlc } // Do not account for the system accounts. func (a *Account) numLocalConnections() int { return len(a.clients) - int(a.sysclients) - int(a.nleafs) } func (a *Account) numLocalLeafNodes() int { return int(a.nleafs) } // MaxTotalConnectionsReached returns if we have reached our limit for number of connections. func (a *Account) MaxTotalConnectionsReached() bool { a.mu.RLock() mtc := a.maxTotalConnectionsReached() a.mu.RUnlock() return mtc } func (a *Account) maxTotalConnectionsReached() bool { if a.mconns != jwt.NoLimit { return len(a.clients)-int(a.sysclients)+int(a.nrclients) >= int(a.mconns) } return false } // MaxActiveConnections return the set limit for the account system // wide for total number of active connections. func (a *Account) MaxActiveConnections() int { a.mu.RLock() mconns := int(a.mconns) a.mu.RUnlock() return mconns } // MaxTotalLeafNodesReached returns if we have reached our limit for number of leafnodes. func (a *Account) MaxTotalLeafNodesReached() bool { a.mu.RLock() mtc := a.maxTotalLeafNodesReached() a.mu.RUnlock() return mtc } func (a *Account) maxTotalLeafNodesReached() bool { if a.mleafs != jwt.NoLimit { return a.nleafs+a.nrleafs >= a.mleafs } return false } // NumLeafNodes returns the active number of local and remote // leaf node connections. func (a *Account) NumLeafNodes() int { a.mu.RLock() nln := int(a.nleafs + a.nrleafs) a.mu.RUnlock() return nln } // NumRemoteLeafNodes returns the active number of remote // leaf node connections. func (a *Account) NumRemoteLeafNodes() int { a.mu.RLock() nrn := int(a.nrleafs) a.mu.RUnlock() return nrn } // MaxActiveLeafNodes return the set limit for the account system // wide for total number of leavenode connections. // NOTE: these are tracked separately. func (a *Account) MaxActiveLeafNodes() int { a.mu.RLock() mleafs := int(a.mleafs) a.mu.RUnlock() return mleafs } // RoutedSubs returns how many subjects we would send across a route when first // connected or expressing interest. Local client subs. func (a *Account) RoutedSubs() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.rm) } // TotalSubs returns total number of Subscriptions for this account. func (a *Account) TotalSubs() int { a.mu.RLock() defer a.mu.RUnlock() return int(a.sl.Count()) } // addClient keeps our accounting of local active clients or leafnodes updated. // Returns previous total. 
func (a *Account) addClient(c *client) int { a.mu.Lock() n := len(a.clients) if a.clients != nil { a.clients[c] = c } added := n != len(a.clients) if added { if c.kind == SYSTEM { a.sysclients++ } else if c.kind == LEAF { a.nleafs++ a.lleafs = append(a.lleafs, c) } } a.mu.Unlock() if c != nil && c.srv != nil && a != c.srv.globalAccount() && added { c.srv.accConnsUpdate(a) } return n } // Helper function to remove leaf nodes. If number of leafnodes gets large // this may need to be optimized out of linear search but believe number // of active leafnodes per account scope to be small and therefore cache friendly. // Lock should be held on account. func (a *Account) removeLeafNode(c *client) { ll := len(a.lleafs) for i, l := range a.lleafs { if l == c { a.lleafs[i] = a.lleafs[ll-1] if ll == 1 { a.lleafs = nil } else { a.lleafs = a.lleafs[:ll-1] } return } } } // removeClient keeps our accounting of local active clients updated. func (a *Account) removeClient(c *client) int { a.mu.Lock() n := len(a.clients) delete(a.clients, c) removed := n != len(a.clients) if removed { if c.kind == SYSTEM { a.sysclients-- } else if c.kind == LEAF { a.nleafs-- a.removeLeafNode(c) } } a.mu.Unlock() if c != nil && c.srv != nil && a != c.srv.gacc && removed { c.srv.accConnsUpdate(a) } return n } func (a *Account) randomClient() *client { var c *client for _, c = range a.clients { break } return c } // AddServiceExport will configure the account with the defined export. func (a *Account) AddServiceExport(subject string, accounts []*Account) error { return a.AddServiceExportWithResponse(subject, Singleton, accounts) } // AddServiceExportWithresponse will configure the account with the defined export and response type. func (a *Account) AddServiceExportWithResponse(subject string, respType ServiceRespType, accounts []*Account) error { a.mu.Lock() defer a.mu.Unlock() if a == nil { return ErrMissingAccount } if a.exports.services == nil { a.exports.services = make(map[string]*exportAuth) } ea := a.exports.services[subject] if respType != Singleton { if ea == nil { ea = &exportAuth{} } ea.respType = respType } if accounts != nil { if ea == nil { ea = &exportAuth{} } // empty means auth required but will be import token. if len(accounts) == 0 { ea.tokenReq = true } else { if ea.approved == nil { ea.approved = make(map[string]*Account, len(accounts)) } for _, acc := range accounts { ea.approved[acc.Name] = acc } } } a.exports.services[subject] = ea return nil } // TrackServiceExport will enable latency tracking of the named service. // Results will be published in this account to the given results subject. func (a *Account) TrackServiceExport(service, results string) error { return a.TrackServiceExportWithSampling(service, results, 100) } // TrackServiceExportWithSampling will enable latency tracking of the named service for the given // sampling rate (1-100). Results will be published in this account to the given results subject. func (a *Account) TrackServiceExportWithSampling(service, results string, sampling int) error { if sampling < 1 || sampling > 100 { return ErrBadSampling } if !IsValidPublishSubject(results) { return ErrBadPublishSubject } // Don't loop back on outselves. 
if a.IsExportService(results) { return ErrBadPublishSubject } if a.srv == nil || !a.srv.eventsEnabled() { return ErrNoSysAccount } a.mu.Lock() defer a.mu.Unlock() if a == nil { return ErrMissingAccount } if a.exports.services == nil { return ErrMissingService } ea, ok := a.exports.services[service] if !ok { return ErrMissingService } if ea == nil { ea = &exportAuth{} a.exports.services[service] = ea } else if ea.respType != Singleton { return ErrBadServiceType } ea.latency = &serviceLatency{ sampling: int8(sampling), subject: results, } return nil } // IsExportService will indicate if this service exists. Will check wildcard scenarios. func (a *Account) IsExportService(service string) bool { a.mu.RLock() defer a.mu.RUnlock() _, ok := a.exports.services[service] if ok { return true } tokens := strings.Split(service, tsep) for subj := range a.exports.services { if isSubsetMatch(tokens, subj) { return true } } return false } // IsExportServiceTracking will indicate if given publish subject is an export service with tracking enabled. func (a *Account) IsExportServiceTracking(service string) bool { a.mu.RLock() defer a.mu.RUnlock() ea, ok := a.exports.services[service] if ok && ea == nil { return false } if ok && ea != nil && ea.latency != nil { return true } // FIXME(dlc) - Might want to cache this is in the hot path checking for // latency tracking. tokens := strings.Split(service, tsep) for subj, ea := range a.exports.services { if isSubsetMatch(tokens, subj) && ea != nil && ea.latency != nil { return true } } return false } // ServiceLatency is the JSON message sent out in response to latency tracking for // exported services. type ServiceLatency struct { AppName string `json:"app_name,omitempty"` RequestStart time.Time `json:"request_start"` ServiceLatency time.Duration `json:"service_latency"` NATSLatency time.Duration `json:"nats_latency"` TotalLatency time.Duration `json:"total_latency"` } // Used for transporting remote laytency measurements. type remoteLatency struct { Account string `json:"account"` ReqId string `json:"req_id"` M2 ServiceLatency `json:"m2"` } // sendTrackingMessage will send out the appropriate tracking information for the // service request/response latency. This is called when the requestor's server has // received the response. // TODO(dlc) - holding locks for RTTs may be too much long term. Should revisit. func (a *Account) sendTrackingLatency(si *serviceImport, requestor, responder *client) bool { now := time.Now() serviceRTT := time.Duration(now.UnixNano() - si.ts) var ( reqClientRTT = requestor.getRTTValue() natsRTT = reqClientRTT respClientRTT time.Duration appName string ) expectRemoteM2 := responder != nil && responder.kind != CLIENT if responder != nil && responder.kind == CLIENT { respClientRTT = responder.getRTTValue() natsRTT += respClientRTT appName = responder.GetName() } // We will estimate time when request left the requestor by time we received // and the client RTT for the requestor. reqStart := time.Unix(0, si.ts-int64(reqClientRTT)) sl := ServiceLatency{ AppName: appName, RequestStart: reqStart, ServiceLatency: serviceRTT - respClientRTT, NATSLatency: natsRTT, TotalLatency: reqClientRTT + serviceRTT, } // If we are expecting a remote measurement, store our sl here. // We need to account for the race between this and us receiving the // remote measurement. // FIXME(dlc) - We need to clean these up but this should happen // already with the auto-expire logic. 
if expectRemoteM2 { si.acc.mu.Lock() if si.m1 != nil { m2 := si.m1 m1 := &sl m1.AppName = m2.AppName m1.ServiceLatency = m2.ServiceLatency m1.NATSLatency = m1.TotalLatency - m1.ServiceLatency si.acc.mu.Unlock() a.srv.sendInternalAccountMsg(a, si.latency.subject, m1) return true } si.m1 = &sl si.acc.mu.Unlock() return false } else { a.srv.sendInternalAccountMsg(a, si.latency.subject, &sl) } return true } // numServiceRoutes returns the number of service routes on this account. func (a *Account) numServiceRoutes() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.services) } // AddServiceImportWithClaim will add in the service import via the jwt claim. func (a *Account) AddServiceImportWithClaim(destination *Account, from, to string, imClaim *jwt.Import) error { if destination == nil { return ErrMissingAccount } // Empty means use from. if to == "" { to = from } if !IsValidLiteralSubject(from) || !IsValidLiteralSubject(to) { return ErrInvalidSubject } // First check to see if the account has authorized us to route to the "to" subject. if !destination.checkServiceImportAuthorized(a, to, imClaim) { return ErrServiceImportAuthorization } a.addServiceImport(destination, from, to, imClaim) return nil } // AddServiceImport will add a route to an account to send published messages / requests // to the destination account. From is the local subject to map, To is the // subject that will appear on the destination account. Destination will need // to have an import rule to allow access via addService. func (a *Account) AddServiceImport(destination *Account, from, to string) error { return a.AddServiceImportWithClaim(destination, from, to, nil) } // removeServiceImport will remove the route by subject. func (a *Account) removeServiceImport(subject string) { a.mu.Lock() si, ok := a.imports.services[subject] if ok && si != nil && si.ae { a.nae-- } delete(a.imports.services, subject) a.mu.Unlock() if a.srv != nil && a.srv.gateway.enabled { a.srv.gatewayHandleServiceImport(a, []byte(subject), nil, -1) } } // This tracks responses to service requests mappings. This is used for cleanup. func (a *Account) addRespMapEntry(acc *Account, reply, from string) { a.mu.Lock() if a.respMap == nil { a.respMap = make(map[string][]*serviceRespEntry) } sre := &serviceRespEntry{acc, from} sra := a.respMap[reply] a.respMap[reply] = append(sra, sre) if len(a.respMap) > int(a.maxnrm) && !a.rmPruning { a.rmPruning = true go a.pruneNonAutoExpireResponseMaps() } a.mu.Unlock() } // This checks for any response map entries. func (a *Account) checkForRespEntry(reply string) { a.mu.RLock() if len(a.imports.services) == 0 || len(a.respMap) == 0 { a.mu.RUnlock() return } sra := a.respMap[reply] if sra == nil { a.mu.RUnlock() return } // If we are here we have an entry we should check. We will first check // if there is any interest for this subject for the entire account. If // there is we can not delete any entries yet. rr := a.sl.Match(reply) a.mu.RUnlock() // No interest. if len(rr.psubs)+len(rr.qsubs) > 0 { return } // Delete all the entries here. a.mu.Lock() delete(a.respMap, reply) a.mu.Unlock() // If we are here we no longer have interest and we have a respMap entry // that we should clean up. for _, sre := range sra { sre.acc.removeServiceImport(sre.msub) } } // Return the number of AutoExpireResponseMaps for request/reply. These are mapped to the account that // has the service import. 
func (a *Account) numAutoExpireResponseMaps() int { a.mu.RLock() defer a.mu.RUnlock() return int(a.nae) } // MaxAutoExpireResponseMaps return the maximum number of // auto expire response maps we will allow. func (a *Account) MaxAutoExpireResponseMaps() int { a.mu.RLock() defer a.mu.RUnlock() return int(a.maxnae) } // SetMaxAutoExpireResponseMaps sets the max outstanding auto expire response maps. func (a *Account) SetMaxAutoExpireResponseMaps(max int) { a.mu.Lock() defer a.mu.Unlock() a.maxnae = int32(max) } // AutoExpireTTL returns the ttl for response maps. func (a *Account) AutoExpireTTL() time.Duration { a.mu.RLock() defer a.mu.RUnlock() return a.maxaettl } // SetAutoExpireTTL sets the ttl for response maps. func (a *Account) SetAutoExpireTTL(ttl time.Duration) { a.mu.Lock() defer a.mu.Unlock() a.maxaettl = ttl } // Return a list of the current autoExpireResponseMaps. func (a *Account) autoExpireResponseMaps() []*serviceImport { a.mu.RLock() if len(a.imports.services) == 0 { a.mu.RUnlock() return nil } aesis := make([]*serviceImport, 0, len(a.imports.services)) for _, si := range a.imports.services { if si.ae { aesis = append(aesis, si) } } sort.Slice(aesis, func(i, j int) bool { return aesis[i].ts < aesis[j].ts }) a.mu.RUnlock() return aesis } // MaxResponseMaps return the maximum number of // non auto-expire response maps we will allow. func (a *Account) MaxResponseMaps() int { a.mu.RLock() defer a.mu.RUnlock() return int(a.maxnrm) } // SetMaxResponseMaps sets the max outstanding non auto-expire response maps. func (a *Account) SetMaxResponseMaps(max int) { a.mu.Lock() defer a.mu.Unlock() a.maxnrm = int32(max) } // Add a route to connect from an implicit route created for a response to a request. // This does no checks and should be only called by the msg processing code. Use // AddServiceImport from above if responding to user input or config changes, etc. func (a *Account) addServiceImport(dest *Account, from, to string, claim *jwt.Import) *serviceImport { rt := Singleton var lat *serviceLatency dest.mu.Lock() if ea := dest.getExportAuth(to); ea != nil { rt = ea.respType lat = ea.latency } dest.mu.Unlock() a.mu.Lock() if a.imports.services == nil { a.imports.services = make(map[string]*serviceImport) } si := &serviceImport{dest, claim, from, to, 0, rt, lat, nil, false, false, false, false} a.imports.services[from] = si a.mu.Unlock() return si } // Helper to detrmine when to sample. func shouldSample(l *serviceLatency) bool { if l == nil || l.sampling <= 0 { return false } if l.sampling >= 100 { return true } return rand.Int31n(100) <= int32(l.sampling) } // This is for internal responses. func (a *Account) addRespServiceImport(dest *Account, from, to string, rt ServiceRespType, lat *serviceLatency) *serviceImport { a.mu.Lock() if a.imports.services == nil { a.imports.services = make(map[string]*serviceImport) } ae := rt == Singleton si := &serviceImport{dest, nil, from, to, 0, rt, nil, nil, ae, true, false, false} a.imports.services[from] = si if ae { a.nae++ si.ts = time.Now().UnixNano() if lat != nil { si.latency = lat si.tracking = true } if a.nae > a.maxnae && !a.pruning { a.pruning = true go a.pruneAutoExpireResponseMaps() } } a.mu.Unlock() return si } // This will prune off the non auto-expire (non singleton) response maps. func (a *Account) pruneNonAutoExpireResponseMaps() { var sres []*serviceRespEntry a.mu.Lock() for subj, sra := range a.respMap { rr := a.sl.Match(subj) if len(rr.psubs)+len(rr.qsubs) == 0 { delete(a.respMap, subj) sres = append(sres, sra...) 
} } a.rmPruning = false a.mu.Unlock() for _, sre := range sres { sre.acc.removeServiceImport(sre.msub) } } // This will prune the list to below the threshold and remove all ttl'd maps. func (a *Account) pruneAutoExpireResponseMaps() { defer func() { a.mu.Lock() a.pruning = false a.mu.Unlock() }() a.mu.RLock() ttl := int64(a.maxaettl) a.mu.RUnlock() for { sis := a.autoExpireResponseMaps() // Check ttl items. now := time.Now().UnixNano() for i, si := range sis { if now-si.ts >= ttl { a.removeServiceImport(si.from) } else { sis = sis[i:] break } } a.mu.RLock() numOver := int(a.nae - a.maxnae) a.mu.RUnlock() if numOver <= 0 { return } else if numOver >= len(sis) { numOver = len(sis) - 1 } // These are in sorted order, remove at least numOver for _, si := range sis[:numOver] { a.removeServiceImport(si.from) } } } // AddStreamImportWithClaim will add in the stream import from a specific account with optional token. func (a *Account) AddStreamImportWithClaim(account *Account, from, prefix string, imClaim *jwt.Import) error { if account == nil { return ErrMissingAccount } // First check to see if the account has authorized export of the subject. if !account.checkStreamImportAuthorized(a, from, imClaim) { return ErrStreamImportAuthorization } a.mu.Lock() defer a.mu.Unlock() if a.imports.streams == nil { a.imports.streams = make(map[string]*streamImport) } if prefix != "" && prefix[len(prefix)-1] != btsep { prefix = prefix + string(btsep) } // TODO(dlc) - collisions, etc. a.imports.streams[from] = &streamImport{account, from, prefix, imClaim, false} return nil } // AddStreamImport will add in the stream import from a specific account. func (a *Account) AddStreamImport(account *Account, from, prefix string) error { return a.AddStreamImportWithClaim(account, from, prefix, nil) } // IsPublicExport is a placeholder to denote a public export. var IsPublicExport = []*Account(nil) // AddStreamExport will add an export to the account. If accounts is nil // it will signify a public export, meaning anyone can impoort. func (a *Account) AddStreamExport(subject string, accounts []*Account) error { a.mu.Lock() defer a.mu.Unlock() if a == nil { return ErrMissingAccount } if a.exports.streams == nil { a.exports.streams = make(map[string]*exportAuth) } ea := a.exports.streams[subject] if accounts != nil { if ea == nil { ea = &exportAuth{} } // empty means auth required but will be import token. if len(accounts) == 0 { ea.tokenReq = true } else { if ea.approved == nil { ea.approved = make(map[string]*Account, len(accounts)) } for _, acc := range accounts { ea.approved[acc.Name] = acc } } } a.exports.streams[subject] = ea return nil } // Check if another account is authorized to import from us. func (a *Account) checkStreamImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool { // Find the subject in the exports list. 
a.mu.RLock() auth := a.checkStreamImportAuthorizedNoLock(account, subject, imClaim) a.mu.RUnlock() return auth } func (a *Account) checkStreamImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool { if a.exports.streams == nil || !IsValidSubject(subject) { return false } return a.checkExportApproved(account, subject, imClaim, a.exports.streams) } func (a *Account) checkExportApproved(account *Account, subject string, imClaim *jwt.Import, m map[string]*exportAuth) bool { // Check direct match of subject first ea, ok := m[subject] if ok { // if ea is nil or eq.approved is nil, that denotes a public export if ea == nil || (ea.approved == nil && !ea.tokenReq) { return true } // Check if token required if ea.tokenReq { return a.checkActivation(account, imClaim, true) } // If we have a matching account we are authorized _, ok := ea.approved[account.Name] return ok } // ok if we are here we did not match directly so we need to test each one. // The import subject arg has to take precedence, meaning the export // has to be a true subset of the import claim. We already checked for // exact matches above. tokens := strings.Split(subject, tsep) for subj, ea := range m { if isSubsetMatch(tokens, subj) { if ea == nil || ea.approved == nil && !ea.tokenReq { return true } // Check if token required if ea.tokenReq { return a.checkActivation(account, imClaim, true) } _, ok := ea.approved[account.Name] return ok } } return false } // Helper function to get an exportAuth. // Lock should be held on entry. func (a *Account) getExportAuth(subj string) *exportAuth { ea, ok := a.exports.services[subj] // The export probably has a wildcard, so lookup that up. if !ok { ea = a.getWildcardExportAuth(subj) } return ea } // This helper is used when trying to match an exportAuth record that is // represented by a wildcard. // Lock should be held on entry. func (a *Account) getWildcardExportAuth(to string) *exportAuth { tokens := strings.Split(to, tsep) for subj, ea := range a.exports.services { if isSubsetMatch(tokens, subj) { return ea } } return nil } // Will fetch the activation token for an import. func fetchActivation(url string) string { // FIXME(dlc) - Make configurable. c := &http.Client{Timeout: 2 * time.Second} resp, err := c.Get(url) if err != nil || resp == nil { return "" } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "" } return string(body) } // These are import stream specific versions for when an activation expires. func (a *Account) streamActivationExpired(subject string) { a.mu.RLock() if a.expired || a.imports.streams == nil { a.mu.RUnlock() return } si := a.imports.streams[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() if si.acc.checkActivation(a, si.claim, false) { // The token has been updated most likely and we are good to go. return } a.mu.Lock() si.invalid = true clients := make([]*client, 0, len(a.clients)) for _, c := range a.clients { clients = append(clients, c) } awcsti := map[string]struct{}{a.Name: {}} a.mu.Unlock() for _, c := range clients { c.processSubsOnConfigReload(awcsti) } } // These are import service specific versions for when an activation expires. 
func (a *Account) serviceActivationExpired(subject string) { a.mu.RLock() if a.expired || a.imports.services == nil { a.mu.RUnlock() return } si := a.imports.services[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() if si.acc.checkActivation(a, si.claim, false) { // The token has been updated most likely and we are good to go. return } a.mu.Lock() si.invalid = true a.mu.Unlock() } // Fires for expired activation tokens. We could track this with timers etc. // Instead we just re-analyze where we are and if we need to act. func (a *Account) activationExpired(subject string, kind jwt.ExportType) { switch kind { case jwt.Stream: a.streamActivationExpired(subject) case jwt.Service: a.serviceActivationExpired(subject) } } // checkActivation will check the activation token for validity. func (a *Account) checkActivation(acc *Account, claim *jwt.Import, expTimer bool) bool { if claim == nil || claim.Token == "" { return false } // Create a quick clone so we can inline Token JWT. clone := *claim // We grab the token from a URL by hand here since we need expiration etc. if url, err := url.Parse(clone.Token); err == nil && url.Scheme != "" { clone.Token = fetchActivation(url.String()) } vr := jwt.CreateValidationResults() clone.Validate(a.Name, vr) if vr.IsBlocking(true) { return false } act, err := jwt.DecodeActivationClaims(clone.Token) if err != nil { return false } vr = jwt.CreateValidationResults() act.Validate(vr) if vr.IsBlocking(true) { return false } if !a.isIssuerClaimTrusted(act) { return false } if act.Expires != 0 { tn := time.Now().Unix() if act.Expires <= tn { return false } if expTimer { expiresAt := time.Duration(act.Expires - tn) time.AfterFunc(expiresAt*time.Second, func() { acc.activationExpired(string(act.ImportSubject), claim.Type) }) } } // Check for token revocation.. if a.actsRevoked != nil { if t, ok := a.actsRevoked[act.Subject]; ok && t <= time.Now().Unix() { return false } } return true } // Returns true if the activation claim is trusted. That is the issuer matches // the account or is an entry in the signing keys. func (a *Account) isIssuerClaimTrusted(claims *jwt.ActivationClaims) bool { // if no issuer account, issuer is the account if claims.IssuerAccount == "" { return true } // get the referenced account if a.srv != nil { ia, err := a.srv.lookupAccount(claims.IssuerAccount) if err != nil { return false } return ia.hasIssuer(claims.Issuer) } // couldn't verify return false } // Returns true if `a` and `b` stream imports are the same. Note that the // check is done with the account's name, not the pointer. This is used // during config reload where we are comparing current and new config // in which pointers are different. // No lock is acquired in this function, so it is assumed that the // import maps are not changed while this executes. 
func (a *Account) checkStreamImportsEqual(b *Account) bool { if len(a.imports.streams) != len(b.imports.streams) { return false } for subj, aim := range a.imports.streams { bim := b.imports.streams[subj] if bim == nil { return false } if aim.acc.Name != bim.acc.Name || aim.from != bim.from || aim.prefix != bim.prefix { return false } } return true } func (a *Account) checkStreamExportsEqual(b *Account) bool { if len(a.exports.streams) != len(b.exports.streams) { return false } for subj, aea := range a.exports.streams { bea, ok := b.exports.streams[subj] if !ok { return false } if !reflect.DeepEqual(aea, bea) { return false } } return true } func (a *Account) checkServiceExportsEqual(b *Account) bool { if len(a.exports.services) != len(b.exports.services) { return false } for subj, aea := range a.exports.services { bea, ok := b.exports.services[subj] if !ok { return false } if !reflect.DeepEqual(aea, bea) { return false } } return true } func (a *Account) checkServiceImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool { a.mu.RLock() defer a.mu.RUnlock() return a.checkServiceImportAuthorizedNoLock(account, subject, imClaim) } // Check if another account is authorized to route requests to this service. func (a *Account) checkServiceImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool { // Find the subject in the services list. if a.exports.services == nil || !IsValidLiteralSubject(subject) { return false } return a.checkExportApproved(account, subject, imClaim, a.exports.services) } // IsExpired returns expiration status. func (a *Account) IsExpired() bool { a.mu.RLock() exp := a.expired a.mu.RUnlock() return exp } // Called when an account has expired. func (a *Account) expiredTimeout() { // Mark expired first. a.mu.Lock() a.expired = true a.mu.Unlock() // Collect the clients and expire them. cs := make([]*client, 0, len(a.clients)) a.mu.RLock() for c := range a.clients { cs = append(cs, c) } a.mu.RUnlock() for _, c := range cs { c.accountAuthExpired() } } // Sets the expiration timer for an account JWT that has it set. func (a *Account) setExpirationTimer(d time.Duration) { a.etmr = time.AfterFunc(d, a.expiredTimeout) } // Lock should be held func (a *Account) clearExpirationTimer() bool { if a.etmr == nil { return true } stopped := a.etmr.Stop() a.etmr = nil return stopped } // checkUserRevoked will check if a user has been revoked. func (a *Account) checkUserRevoked(nkey string) bool { a.mu.RLock() defer a.mu.RUnlock() if a.usersRevoked == nil { return false } if t, ok := a.usersRevoked[nkey]; !ok || t > time.Now().Unix() { return false } return true } // Check expiration and set the proper state as needed. func (a *Account) checkExpiration(claims *jwt.ClaimsData) { a.mu.Lock() defer a.mu.Unlock() a.clearExpirationTimer() if claims.Expires == 0 { a.expired = false return } tn := time.Now().Unix() if claims.Expires <= tn { a.expired = true return } expiresAt := time.Duration(claims.Expires - tn) a.setExpirationTimer(expiresAt * time.Second) a.expired = false } // hasIssuer returns true if the issuer matches the account // issuer or it is a signing key for the account. func (a *Account) hasIssuer(issuer string) bool { a.mu.RLock() defer a.mu.RUnlock() // same issuer if a.Issuer == issuer { return true } for i := 0; i < len(a.signingKeys); i++ { if a.signingKeys[i] == issuer { return true } } return false } // Placeholder for signaling token auth required. 
var tokenAuthReq = []*Account{} func authAccounts(tokenReq bool) []*Account { if tokenReq { return tokenAuthReq } return nil } // SetAccountResolver will assign the account resolver. func (s *Server) SetAccountResolver(ar AccountResolver) { s.mu.Lock() s.accResolver = ar s.mu.Unlock() } // AccountResolver returns the registered account resolver. func (s *Server) AccountResolver() AccountResolver { s.mu.Lock() defer s.mu.Unlock() return s.accResolver } // UpdateAccountClaims will call updateAccountClaims. func (s *Server) UpdateAccountClaims(a *Account, ac *jwt.AccountClaims) { s.updateAccountClaims(a, ac) } // updateAccountClaims will update an existing account with new claims. // This will replace any exports or imports previously defined. func (s *Server) updateAccountClaims(a *Account, ac *jwt.AccountClaims) { if a == nil { return } s.Debugf("Updating account claims: %s", a.Name) a.checkExpiration(ac.Claims()) a.mu.Lock() // Clone to update, only select certain fields. old := &Account{Name: a.Name, imports: a.imports, exports: a.exports, limits: a.limits, signingKeys: a.signingKeys} // Reset exports and imports here. a.exports = exportMap{} a.imports = importMap{} // Reset any notion of export revocations. a.actsRevoked = nil // update account signing keys a.signingKeys = nil signersChanged := false if len(ac.SigningKeys) > 0 { // insure copy the new keys and sort a.signingKeys = append(a.signingKeys, ac.SigningKeys...) sort.Strings(a.signingKeys) } if len(a.signingKeys) != len(old.signingKeys) { signersChanged = true } else { for i := 0; i < len(old.signingKeys); i++ { if a.signingKeys[i] != old.signingKeys[i] { signersChanged = true break } } } a.mu.Unlock() gatherClients := func() []*client { a.mu.RLock() clients := make([]*client, 0, len(a.clients)) for _, c := range a.clients { clients = append(clients, c) } a.mu.RUnlock() return clients } for _, e := range ac.Exports { switch e.Type { case jwt.Stream: s.Debugf("Adding stream export %q for %s", e.Subject, a.Name) if err := a.AddStreamExport(string(e.Subject), authAccounts(e.TokenReq)); err != nil { s.Debugf("Error adding stream export to account [%s]: %v", a.Name, err.Error()) } case jwt.Service: s.Debugf("Adding service export %q for %s", e.Subject, a.Name) rt := Singleton switch e.ResponseType { case jwt.ResponseTypeStream: rt = Stream case jwt.ResponseTypeChunked: rt = Chunked } if err := a.AddServiceExportWithResponse(string(e.Subject), rt, authAccounts(e.TokenReq)); err != nil { s.Debugf("Error adding service export to account [%s]: %v", a.Name, err.Error()) } } // We will track these at the account level. Should not have any collisions. if e.Revocations != nil { a.mu.Lock() if a.actsRevoked == nil { a.actsRevoked = make(map[string]int64) } for k, t := range e.Revocations { a.actsRevoked[k] = t } a.mu.Unlock() } } for _, i := range ac.Imports { var acc *Account if v, ok := s.accounts.Load(i.Account); ok { acc = v.(*Account) } if acc == nil { // Check to see if the account referenced is not one that // we are currently building (but not yet fully registered). 
if v, ok := s.tmpAccounts.Load(i.Account); ok { acc = v.(*Account) } } if acc == nil { if acc, _ = s.fetchAccount(i.Account); acc == nil { s.Debugf("Can't locate account [%s] for import of [%v] %s", i.Account, i.Subject, i.Type) continue } } switch i.Type { case jwt.Stream: s.Debugf("Adding stream import %s:%q for %s:%q", acc.Name, i.Subject, a.Name, i.To) if err := a.AddStreamImportWithClaim(acc, string(i.Subject), string(i.To), i); err != nil { s.Debugf("Error adding stream import to account [%s]: %v", a.Name, err.Error()) } case jwt.Service: s.Debugf("Adding service import %s:%q for %s:%q", acc.Name, i.Subject, a.Name, i.To) if err := a.AddServiceImportWithClaim(acc, string(i.Subject), string(i.To), i); err != nil { s.Debugf("Error adding service import to account [%s]: %v", a.Name, err.Error()) } } } // Now let's apply any needed changes from import/export changes. if !a.checkStreamImportsEqual(old) { awcsti := map[string]struct{}{a.Name: {}} for _, c := range gatherClients() { c.processSubsOnConfigReload(awcsti) } } // Now check if stream exports have changed. if !a.checkStreamExportsEqual(old) || signersChanged { clients := make([]*client, 0, 16) // We need to check all accounts that have an import claim from this account. awcsti := map[string]struct{}{} s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.streams { if im != nil && im.acc.Name == a.Name { // Check for if we are still authorized for an import. im.invalid = !a.checkStreamImportAuthorizedNoLock(im.acc, im.from, im.claim) awcsti[acc.Name] = struct{}{} for _, c := range acc.clients { clients = append(clients, c) } } } acc.mu.Unlock() return true }) // Now walk clients. for _, c := range clients { c.processSubsOnConfigReload(awcsti) } } // Now check if service exports have changed. if !a.checkServiceExportsEqual(old) || signersChanged { s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.services { if im != nil && im.acc.Name == a.Name { // Check for if we are still authorized for an import. im.invalid = !a.checkServiceImportAuthorizedNoLock(a, im.to, im.claim) } } acc.mu.Unlock() return true }) } // Now do limits if they are present. a.mu.Lock() a.msubs = int32(ac.Limits.Subs) a.mpay = int32(ac.Limits.Payload) a.mconns = int32(ac.Limits.Conn) a.mleafs = int32(ac.Limits.LeafNodeConn) // Check for any revocations if len(ac.Revocations) > 0 { // We will always replace whatever we had with most current, so no // need to look at what we have. a.usersRevoked = make(map[string]int64, len(ac.Revocations)) for pk, t := range ac.Revocations { a.usersRevoked[pk] = t } } a.mu.Unlock() clients := gatherClients() // Sort if we are over the limit. if a.maxTotalConnectionsReached() { sort.Slice(clients, func(i, j int) bool { return clients[i].start.After(clients[j].start) }) } now := time.Now().Unix() for i, c := range clients { if a.mconns != jwt.NoLimit && i >= int(a.mconns) { c.maxAccountConnExceeded() continue } c.mu.Lock() c.applyAccountLimits() // Check for being revoked here. We use ac one to avoid // the account lock. var nkey string if c.user != nil { nkey = c.user.Nkey } c.mu.Unlock() // Check if we have been revoked. 
if ac.Revocations != nil { if t, ok := ac.Revocations[nkey]; ok && now >= t { c.sendErrAndDebug("User Authentication Revoked") c.closeConnection(Revocation) continue } } } // Check if the signing keys changed, might have to evict if signersChanged { for _, c := range clients { c.mu.Lock() sk := c.user.SigningKey c.mu.Unlock() if sk != "" && !a.hasIssuer(sk) { c.closeConnection(AuthenticationViolation) } } } } // Helper to build an internal account structure from a jwt.AccountClaims. func (s *Server) buildInternalAccount(ac *jwt.AccountClaims) *Account { acc := NewAccount(ac.Subject) acc.Issuer = ac.Issuer // We don't want to register an account that is in the process of // being built, however, to solve circular import dependencies, we // need to store it here. s.tmpAccounts.Store(ac.Subject, acc) s.updateAccountClaims(acc, ac) return acc } // Helper to build internal NKeyUser. func buildInternalNkeyUser(uc *jwt.UserClaims, acc *Account) *NkeyUser { nu := &NkeyUser{Nkey: uc.Subject, Account: acc} if uc.IssuerAccount != "" { nu.SigningKey = uc.Issuer } // Now check for permissions. var p *Permissions if len(uc.Pub.Allow) > 0 || len(uc.Pub.Deny) > 0 { if p == nil { p = &Permissions{} } p.Publish = &SubjectPermission{} p.Publish.Allow = uc.Pub.Allow p.Publish.Deny = uc.Pub.Deny } if len(uc.Sub.Allow) > 0 || len(uc.Sub.Deny) > 0 { if p == nil { p = &Permissions{} } p.Subscribe = &SubjectPermission{} p.Subscribe.Allow = uc.Sub.Allow p.Subscribe.Deny = uc.Sub.Deny } if uc.Resp != nil { if p == nil { p = &Permissions{Publish: &SubjectPermission{}} } if p.Publish.Allow == nil { // We turn off the blanket allow statement. p.Publish.Allow = []string{} } p.Response = &ResponsePermission{ MaxMsgs: uc.Resp.MaxMsgs, Expires: uc.Resp.Expires, } } nu.Permissions = p return nu } // AccountResolver interface. This is to fetch Account JWTs by public nkeys type AccountResolver interface { Fetch(name string) (string, error) Store(name, jwt string) error } // MemAccResolver is a memory only resolver. // Mostly for testing. type MemAccResolver struct { sm sync.Map } // Fetch will fetch the account jwt claims from the internal sync.Map. func (m *MemAccResolver) Fetch(name string) (string, error) { if j, ok := m.sm.Load(name); ok { return j.(string), nil } return _EMPTY_, ErrMissingAccount } // Store will store the account jwt claims in the internal sync.Map. func (m *MemAccResolver) Store(name, jwt string) error { m.sm.Store(name, jwt) return nil } // URLAccResolver implements an http fetcher. type URLAccResolver struct { url string c *http.Client } // NewURLAccResolver returns a new resolver for the given base URL. func NewURLAccResolver(url string) (*URLAccResolver, error) { if !strings.HasSuffix(url, "/") { url += "/" } // Do basic test to see if anyone is home. // FIXME(dlc) - Make timeout configurable post MVP. ur := &URLAccResolver{ url: url, c: &http.Client{Timeout: 2 * time.Second}, } if _, err := ur.Fetch(""); err != nil { return nil, err } return ur, nil } // Fetch will fetch the account jwt claims from the base url, appending the // account name onto the end. 
func (ur *URLAccResolver) Fetch(name string) (string, error) { url := ur.url + name resp, err := ur.c.Get(url) if err != nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, err) } else if resp == nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: no response", url) } else if resp.StatusCode != http.StatusOK { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", url, resp.Status) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return _EMPTY_, err } return string(body), nil } // Store is not implemented for URL Resolver. func (ur *URLAccResolver) Store(name, jwt string) error { return fmt.Errorf("Store operation not supported for URL Resolver") }
1
9,372
for the tag, should it be `requestor_rtt` since the other is `responder_rtt`?
nats-io-nats-server
go
@@ -99,6 +99,15 @@ func (rcv *Monster) InventoryBytes() []byte { return nil } +func (rcv *Monster) MutateInventory(j int, n byte) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n) + } + return false +} + func (rcv *Monster) Color() Color { o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 {
1
// Code generated by the FlatBuffers compiler. DO NOT EDIT. package Example import ( flatbuffers "github.com/google/flatbuffers/go" MyGame "MyGame" ) /// an example documentation comment: monster object type Monster struct { _tab flatbuffers.Table } func GetRootAsMonster(buf []byte, offset flatbuffers.UOffsetT) *Monster { n := flatbuffers.GetUOffsetT(buf[offset:]) x := &Monster{} x.Init(buf, n+offset) return x } func (rcv *Monster) Init(buf []byte, i flatbuffers.UOffsetT) { rcv._tab.Bytes = buf rcv._tab.Pos = i } func (rcv *Monster) Table() flatbuffers.Table { return rcv._tab } func (rcv *Monster) Pos(obj *Vec3) *Vec3 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { x := o + rcv._tab.Pos if obj == nil { obj = new(Vec3) } obj.Init(rcv._tab.Bytes, x) return obj } return nil } func (rcv *Monster) Mana() int16 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { return rcv._tab.GetInt16(o + rcv._tab.Pos) } return 150 } func (rcv *Monster) MutateMana(n int16) bool { return rcv._tab.MutateInt16Slot(6, n) } func (rcv *Monster) Hp() int16 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { return rcv._tab.GetInt16(o + rcv._tab.Pos) } return 100 } func (rcv *Monster) MutateHp(n int16) bool { return rcv._tab.MutateInt16Slot(8, n) } func (rcv *Monster) Name() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *Monster) Inventory(j int) byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *Monster) InventoryLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) InventoryBytes() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *Monster) Color() Color { o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { return rcv._tab.GetInt8(o + rcv._tab.Pos) } return 8 } func (rcv *Monster) MutateColor(n Color) bool { return rcv._tab.MutateInt8Slot(16, n) } func (rcv *Monster) TestType() byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) if o != 0 { return rcv._tab.GetByte(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTestType(n byte) bool { return rcv._tab.MutateByteSlot(18, n) } func (rcv *Monster) Test(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) if o != 0 { rcv._tab.Union(obj, o) return true } return false } func (rcv *Monster) Test4(obj *Test, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) if o != 0 { x := rcv._tab.Vector(o) x += flatbuffers.UOffsetT(j) * 4 obj.Init(rcv._tab.Bytes, x) return true } return false } func (rcv *Monster) Test4Length() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(22)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) Testarrayofstring(j int) []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) } return nil } func (rcv *Monster) TestarrayofstringLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(24)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } /// an example documentation comment: this will end up in the generated code /// multiline too func (rcv *Monster) Testarrayoftables(obj *Monster, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(26)) if o != 0 { x := rcv._tab.Vector(o) x += 
flatbuffers.UOffsetT(j) * 4 x = rcv._tab.Indirect(x) obj.Init(rcv._tab.Bytes, x) return true } return false } func (rcv *Monster) TestarrayoftablesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(26)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } /// an example documentation comment: this will end up in the generated code /// multiline too func (rcv *Monster) Enemy(obj *Monster) *Monster { o := flatbuffers.UOffsetT(rcv._tab.Offset(28)) if o != 0 { x := rcv._tab.Indirect(o + rcv._tab.Pos) if obj == nil { obj = new(Monster) } obj.Init(rcv._tab.Bytes, x) return obj } return nil } func (rcv *Monster) Testnestedflatbuffer(j int) byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(30)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *Monster) TestnestedflatbufferLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(30)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) TestnestedflatbufferBytes() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(30)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *Monster) Testempty(obj *Stat) *Stat { o := flatbuffers.UOffsetT(rcv._tab.Offset(32)) if o != 0 { x := rcv._tab.Indirect(o + rcv._tab.Pos) if obj == nil { obj = new(Stat) } obj.Init(rcv._tab.Bytes, x) return obj } return nil } func (rcv *Monster) Testbool() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(34)) if o != 0 { return rcv._tab.GetBool(o + rcv._tab.Pos) } return false } func (rcv *Monster) MutateTestbool(n bool) bool { return rcv._tab.MutateBoolSlot(34, n) } func (rcv *Monster) Testhashs32Fnv1() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(36)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTesthashs32Fnv1(n int32) bool { return rcv._tab.MutateInt32Slot(36, n) } func (rcv *Monster) Testhashu32Fnv1() uint32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(38)) if o != 0 { return rcv._tab.GetUint32(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTesthashu32Fnv1(n uint32) bool { return rcv._tab.MutateUint32Slot(38, n) } func (rcv *Monster) Testhashs64Fnv1() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(40)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTesthashs64Fnv1(n int64) bool { return rcv._tab.MutateInt64Slot(40, n) } func (rcv *Monster) Testhashu64Fnv1() uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(42)) if o != 0 { return rcv._tab.GetUint64(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTesthashu64Fnv1(n uint64) bool { return rcv._tab.MutateUint64Slot(42, n) } func (rcv *Monster) Testhashs32Fnv1a() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(44)) if o != 0 { return rcv._tab.GetInt32(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTesthashs32Fnv1a(n int32) bool { return rcv._tab.MutateInt32Slot(44, n) } func (rcv *Monster) Testhashu32Fnv1a() uint32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(46)) if o != 0 { return rcv._tab.GetUint32(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTesthashu32Fnv1a(n uint32) bool { return rcv._tab.MutateUint32Slot(46, n) } func (rcv *Monster) Testhashs64Fnv1a() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(48)) if o != 0 { return rcv._tab.GetInt64(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTesthashs64Fnv1a(n int64) bool { return rcv._tab.MutateInt64Slot(48, n) } func (rcv *Monster) Testhashu64Fnv1a() uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(50)) 
if o != 0 { return rcv._tab.GetUint64(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateTesthashu64Fnv1a(n uint64) bool { return rcv._tab.MutateUint64Slot(50, n) } func (rcv *Monster) Testarrayofbools(j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(52)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetBool(a + flatbuffers.UOffsetT(j*1)) } return false } func (rcv *Monster) TestarrayofboolsLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(52)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) Testf() float32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(54)) if o != 0 { return rcv._tab.GetFloat32(o + rcv._tab.Pos) } return 3.14159 } func (rcv *Monster) MutateTestf(n float32) bool { return rcv._tab.MutateFloat32Slot(54, n) } func (rcv *Monster) Testf2() float32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(56)) if o != 0 { return rcv._tab.GetFloat32(o + rcv._tab.Pos) } return 3.0 } func (rcv *Monster) MutateTestf2(n float32) bool { return rcv._tab.MutateFloat32Slot(56, n) } func (rcv *Monster) Testf3() float32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(58)) if o != 0 { return rcv._tab.GetFloat32(o + rcv._tab.Pos) } return 0.0 } func (rcv *Monster) MutateTestf3(n float32) bool { return rcv._tab.MutateFloat32Slot(58, n) } func (rcv *Monster) Testarrayofstring2(j int) []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(60)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4)) } return nil } func (rcv *Monster) Testarrayofstring2Length() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(60)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) Testarrayofsortedstruct(obj *Ability, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(62)) if o != 0 { x := rcv._tab.Vector(o) x += flatbuffers.UOffsetT(j) * 8 obj.Init(rcv._tab.Bytes, x) return true } return false } func (rcv *Monster) TestarrayofsortedstructLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(62)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) Flex(j int) byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(64)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *Monster) FlexLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(64)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) FlexBytes() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(64)) if o != 0 { return rcv._tab.ByteVector(o + rcv._tab.Pos) } return nil } func (rcv *Monster) Test5(obj *Test, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(66)) if o != 0 { x := rcv._tab.Vector(o) x += flatbuffers.UOffsetT(j) * 4 obj.Init(rcv._tab.Bytes, x) return true } return false } func (rcv *Monster) Test5Length() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(66)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) VectorOfLongs(j int) int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(68)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8)) } return 0 } func (rcv *Monster) VectorOfLongsLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(68)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) VectorOfDoubles(j int) float64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(70)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetFloat64(a + flatbuffers.UOffsetT(j*8)) } return 0 } func (rcv *Monster) VectorOfDoublesLength() int { o := 
flatbuffers.UOffsetT(rcv._tab.Offset(70)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) ParentNamespaceTest(obj *MyGame.InParentNamespace) *MyGame.InParentNamespace { o := flatbuffers.UOffsetT(rcv._tab.Offset(72)) if o != 0 { x := rcv._tab.Indirect(o + rcv._tab.Pos) if obj == nil { obj = new(MyGame.InParentNamespace) } obj.Init(rcv._tab.Bytes, x) return obj } return nil } func (rcv *Monster) VectorOfReferrables(obj *Referrable, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(74)) if o != 0 { x := rcv._tab.Vector(o) x += flatbuffers.UOffsetT(j) * 4 x = rcv._tab.Indirect(x) obj.Init(rcv._tab.Bytes, x) return true } return false } func (rcv *Monster) VectorOfReferrablesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(74)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) SingleWeakReference() uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(76)) if o != 0 { return rcv._tab.GetUint64(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateSingleWeakReference(n uint64) bool { return rcv._tab.MutateUint64Slot(76, n) } func (rcv *Monster) VectorOfWeakReferences(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(78)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } func (rcv *Monster) VectorOfWeakReferencesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(78)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) VectorOfStrongReferrables(obj *Referrable, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(80)) if o != 0 { x := rcv._tab.Vector(o) x += flatbuffers.UOffsetT(j) * 4 x = rcv._tab.Indirect(x) obj.Init(rcv._tab.Bytes, x) return true } return false } func (rcv *Monster) VectorOfStrongReferrablesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(80)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) CoOwningReference() uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(82)) if o != 0 { return rcv._tab.GetUint64(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateCoOwningReference(n uint64) bool { return rcv._tab.MutateUint64Slot(82, n) } func (rcv *Monster) VectorOfCoOwningReferences(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(84)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } func (rcv *Monster) VectorOfCoOwningReferencesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(84)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) NonOwningReference() uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(86)) if o != 0 { return rcv._tab.GetUint64(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateNonOwningReference(n uint64) bool { return rcv._tab.MutateUint64Slot(86, n) } func (rcv *Monster) VectorOfNonOwningReferences(j int) uint64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(88)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetUint64(a + flatbuffers.UOffsetT(j*8)) } return 0 } func (rcv *Monster) VectorOfNonOwningReferencesLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(88)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func (rcv *Monster) AnyUniqueType() byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(90)) if o != 0 { return rcv._tab.GetByte(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateAnyUniqueType(n byte) bool { return rcv._tab.MutateByteSlot(90, n) } func (rcv *Monster) AnyUnique(obj *flatbuffers.Table) bool { o := 
flatbuffers.UOffsetT(rcv._tab.Offset(92)) if o != 0 { rcv._tab.Union(obj, o) return true } return false } func (rcv *Monster) AnyAmbiguousType() byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(94)) if o != 0 { return rcv._tab.GetByte(o + rcv._tab.Pos) } return 0 } func (rcv *Monster) MutateAnyAmbiguousType(n byte) bool { return rcv._tab.MutateByteSlot(94, n) } func (rcv *Monster) AnyAmbiguous(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(96)) if o != 0 { rcv._tab.Union(obj, o) return true } return false } func (rcv *Monster) VectorOfEnums(j int) Color { o := flatbuffers.UOffsetT(rcv._tab.Offset(98)) if o != 0 { a := rcv._tab.Vector(o) return rcv._tab.GetInt8(a + flatbuffers.UOffsetT(j*1)) } return 0 } func (rcv *Monster) VectorOfEnumsLength() int { o := flatbuffers.UOffsetT(rcv._tab.Offset(98)) if o != 0 { return rcv._tab.VectorLen(o) } return 0 } func MonsterStart(builder *flatbuffers.Builder) { builder.StartObject(48) } func MonsterAddPos(builder *flatbuffers.Builder, pos flatbuffers.UOffsetT) { builder.PrependStructSlot(0, flatbuffers.UOffsetT(pos), 0) } func MonsterAddMana(builder *flatbuffers.Builder, mana int16) { builder.PrependInt16Slot(1, mana, 150) } func MonsterAddHp(builder *flatbuffers.Builder, hp int16) { builder.PrependInt16Slot(2, hp, 100) } func MonsterAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(name), 0) } func MonsterAddInventory(builder *flatbuffers.Builder, inventory flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(inventory), 0) } func MonsterStartInventoryVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func MonsterAddColor(builder *flatbuffers.Builder, color int8) { builder.PrependInt8Slot(6, color, 8) } func MonsterAddTestType(builder *flatbuffers.Builder, testType byte) { builder.PrependByteSlot(7, testType, 0) } func MonsterAddTest(builder *flatbuffers.Builder, test flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(test), 0) } func MonsterAddTest4(builder *flatbuffers.Builder, test4 flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(test4), 0) } func MonsterStartTest4Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 2) } func MonsterAddTestarrayofstring(builder *flatbuffers.Builder, testarrayofstring flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(10, flatbuffers.UOffsetT(testarrayofstring), 0) } func MonsterStartTestarrayofstringVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func MonsterAddTestarrayoftables(builder *flatbuffers.Builder, testarrayoftables flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(11, flatbuffers.UOffsetT(testarrayoftables), 0) } func MonsterStartTestarrayoftablesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func MonsterAddEnemy(builder *flatbuffers.Builder, enemy flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(12, flatbuffers.UOffsetT(enemy), 0) } func MonsterAddTestnestedflatbuffer(builder *flatbuffers.Builder, testnestedflatbuffer flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(13, flatbuffers.UOffsetT(testnestedflatbuffer), 0) } func MonsterStartTestnestedflatbufferVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, 
numElems, 1) } func MonsterAddTestempty(builder *flatbuffers.Builder, testempty flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(14, flatbuffers.UOffsetT(testempty), 0) } func MonsterAddTestbool(builder *flatbuffers.Builder, testbool bool) { builder.PrependBoolSlot(15, testbool, false) } func MonsterAddTesthashs32Fnv1(builder *flatbuffers.Builder, testhashs32Fnv1 int32) { builder.PrependInt32Slot(16, testhashs32Fnv1, 0) } func MonsterAddTesthashu32Fnv1(builder *flatbuffers.Builder, testhashu32Fnv1 uint32) { builder.PrependUint32Slot(17, testhashu32Fnv1, 0) } func MonsterAddTesthashs64Fnv1(builder *flatbuffers.Builder, testhashs64Fnv1 int64) { builder.PrependInt64Slot(18, testhashs64Fnv1, 0) } func MonsterAddTesthashu64Fnv1(builder *flatbuffers.Builder, testhashu64Fnv1 uint64) { builder.PrependUint64Slot(19, testhashu64Fnv1, 0) } func MonsterAddTesthashs32Fnv1a(builder *flatbuffers.Builder, testhashs32Fnv1a int32) { builder.PrependInt32Slot(20, testhashs32Fnv1a, 0) } func MonsterAddTesthashu32Fnv1a(builder *flatbuffers.Builder, testhashu32Fnv1a uint32) { builder.PrependUint32Slot(21, testhashu32Fnv1a, 0) } func MonsterAddTesthashs64Fnv1a(builder *flatbuffers.Builder, testhashs64Fnv1a int64) { builder.PrependInt64Slot(22, testhashs64Fnv1a, 0) } func MonsterAddTesthashu64Fnv1a(builder *flatbuffers.Builder, testhashu64Fnv1a uint64) { builder.PrependUint64Slot(23, testhashu64Fnv1a, 0) } func MonsterAddTestarrayofbools(builder *flatbuffers.Builder, testarrayofbools flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(24, flatbuffers.UOffsetT(testarrayofbools), 0) } func MonsterStartTestarrayofboolsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func MonsterAddTestf(builder *flatbuffers.Builder, testf float32) { builder.PrependFloat32Slot(25, testf, 3.14159) } func MonsterAddTestf2(builder *flatbuffers.Builder, testf2 float32) { builder.PrependFloat32Slot(26, testf2, 3.0) } func MonsterAddTestf3(builder *flatbuffers.Builder, testf3 float32) { builder.PrependFloat32Slot(27, testf3, 0.0) } func MonsterAddTestarrayofstring2(builder *flatbuffers.Builder, testarrayofstring2 flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(28, flatbuffers.UOffsetT(testarrayofstring2), 0) } func MonsterStartTestarrayofstring2Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func MonsterAddTestarrayofsortedstruct(builder *flatbuffers.Builder, testarrayofsortedstruct flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(29, flatbuffers.UOffsetT(testarrayofsortedstruct), 0) } func MonsterStartTestarrayofsortedstructVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(8, numElems, 4) } func MonsterAddFlex(builder *flatbuffers.Builder, flex flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(30, flatbuffers.UOffsetT(flex), 0) } func MonsterStartFlexVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func MonsterAddTest5(builder *flatbuffers.Builder, test5 flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(31, flatbuffers.UOffsetT(test5), 0) } func MonsterStartTest5Vector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 2) } func MonsterAddVectorOfLongs(builder *flatbuffers.Builder, vectorOfLongs flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(32, flatbuffers.UOffsetT(vectorOfLongs), 0) } func 
MonsterStartVectorOfLongsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(8, numElems, 8) } func MonsterAddVectorOfDoubles(builder *flatbuffers.Builder, vectorOfDoubles flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(33, flatbuffers.UOffsetT(vectorOfDoubles), 0) } func MonsterStartVectorOfDoublesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(8, numElems, 8) } func MonsterAddParentNamespaceTest(builder *flatbuffers.Builder, parentNamespaceTest flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(34, flatbuffers.UOffsetT(parentNamespaceTest), 0) } func MonsterAddVectorOfReferrables(builder *flatbuffers.Builder, vectorOfReferrables flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(35, flatbuffers.UOffsetT(vectorOfReferrables), 0) } func MonsterStartVectorOfReferrablesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func MonsterAddSingleWeakReference(builder *flatbuffers.Builder, singleWeakReference uint64) { builder.PrependUint64Slot(36, singleWeakReference, 0) } func MonsterAddVectorOfWeakReferences(builder *flatbuffers.Builder, vectorOfWeakReferences flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(37, flatbuffers.UOffsetT(vectorOfWeakReferences), 0) } func MonsterStartVectorOfWeakReferencesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(8, numElems, 8) } func MonsterAddVectorOfStrongReferrables(builder *flatbuffers.Builder, vectorOfStrongReferrables flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(38, flatbuffers.UOffsetT(vectorOfStrongReferrables), 0) } func MonsterStartVectorOfStrongReferrablesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(4, numElems, 4) } func MonsterAddCoOwningReference(builder *flatbuffers.Builder, coOwningReference uint64) { builder.PrependUint64Slot(39, coOwningReference, 0) } func MonsterAddVectorOfCoOwningReferences(builder *flatbuffers.Builder, vectorOfCoOwningReferences flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(40, flatbuffers.UOffsetT(vectorOfCoOwningReferences), 0) } func MonsterStartVectorOfCoOwningReferencesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(8, numElems, 8) } func MonsterAddNonOwningReference(builder *flatbuffers.Builder, nonOwningReference uint64) { builder.PrependUint64Slot(41, nonOwningReference, 0) } func MonsterAddVectorOfNonOwningReferences(builder *flatbuffers.Builder, vectorOfNonOwningReferences flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(42, flatbuffers.UOffsetT(vectorOfNonOwningReferences), 0) } func MonsterStartVectorOfNonOwningReferencesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(8, numElems, 8) } func MonsterAddAnyUniqueType(builder *flatbuffers.Builder, anyUniqueType byte) { builder.PrependByteSlot(43, anyUniqueType, 0) } func MonsterAddAnyUnique(builder *flatbuffers.Builder, anyUnique flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(44, flatbuffers.UOffsetT(anyUnique), 0) } func MonsterAddAnyAmbiguousType(builder *flatbuffers.Builder, anyAmbiguousType byte) { builder.PrependByteSlot(45, anyAmbiguousType, 0) } func MonsterAddAnyAmbiguous(builder *flatbuffers.Builder, anyAmbiguous flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(46, flatbuffers.UOffsetT(anyAmbiguous), 0) } func MonsterAddVectorOfEnums(builder 
*flatbuffers.Builder, vectorOfEnums flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(47, flatbuffers.UOffsetT(vectorOfEnums), 0) } func MonsterStartVectorOfEnumsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { return builder.StartVector(1, numElems, 1) } func MonsterEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() }
1
15,269
The one sad part of this is that it will generate a lot of overhead if you loop through a vector, since it obtains the vector every time. But with the current API there is no alternative I guess, and it is better to have the option than not.
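To make the overhead concrete, here is a minimal caller-side sketch using only the generated accessors shown above. Each `MutateInventory` call re-resolves the field offset (`Offset(14)`) and the vector position (`Vector(o)`), so nothing is cached across loop iterations. The package name, the `MyGame/Example` import path, and the buffer coming from elsewhere are assumptions made for illustration only.

package inventoryexample

import (
	Example "MyGame/Example" // generated package shown above; import path assumed
)

// bumpInventory increments every inventory byte of a Monster in place. The
// per-iteration cost the review comment describes comes from MutateInventory
// (and Inventory) re-resolving the vtable offset and vector start on each call.
func bumpInventory(buf []byte) {
	monster := Example.GetRootAsMonster(buf, 0)
	for j := 0; j < monster.InventoryLength(); j++ {
		monster.MutateInventory(j, monster.Inventory(j)+1)
	}
}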
google-flatbuffers
java
@@ -8,6 +8,7 @@ import ( "time" "github.com/filecoin-project/go-filecoin/types" + files "github.com/ipfs/go-ipfs-files" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require"
1
package commands_test import ( "context" "math/big" "strings" "testing" "time" "github.com/filecoin-project/go-filecoin/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-filecoin/fixtures" tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags" "github.com/filecoin-project/go-filecoin/tools/fast" "github.com/filecoin-project/go-filecoin/tools/fast/fastesting" "github.com/filecoin-project/go-filecoin/tools/fast/series" ) func parseInt(t *testing.T, s string) *big.Int { i := new(big.Int) i, err := i.SetString(strings.TrimSpace(s), 10) assert.True(t, err, "couldn't parse as big.Int %q", s) return i } func TestMiningGenBlock(t *testing.T) { tf.IntegrationTest(t) d := makeTestDaemonWithMinerAndStart(t) defer d.ShutdownSuccess() addr := fixtures.TestAddresses[0] s := d.RunSuccess("wallet", "balance", addr) beforeBalance := parseInt(t, s.ReadStdout()) d.RunSuccess("mining", "once") s = d.RunSuccess("wallet", "balance", addr) afterBalance := parseInt(t, s.ReadStdout()) sum := new(big.Int) assert.Equal(t, sum.Add(beforeBalance, big.NewInt(1000)), afterBalance) } func TestMiningSealNow(t *testing.T) { tf.FunctionalTest(t) ctx, env := fastesting.NewTestEnvironment(context.Background(), t, fast.FilecoinOpts{ InitOpts: []fast.ProcessInitOption{fast.POAutoSealIntervalSeconds(1)}, DaemonOpts: []fast.ProcessDaemonOption{fast.POBlockTime(50 * time.Millisecond)}, }) defer func() { require.NoError(t, env.Teardown(ctx)) }() genesisNode := env.GenesisMiner minerNode := env.RequireNewNodeWithFunds(1000) // Connect the clientNode and the minerNode require.NoError(t, series.Connect(ctx, genesisNode, minerNode)) // Calls MiningOnce on genesis (client). This also starts the Miner. pparams, err := minerNode.Protocol(ctx) require.NoError(t, err) sinfo := pparams.SupportedSectors[0] // mine the create storage message, then mine the set ask message series.CtxMiningNext(ctx, 2) _, err = series.CreateStorageMinerWithAsk(ctx, minerNode, big.NewInt(500), big.NewFloat(0.0001), big.NewInt(3000), sinfo.Size) require.NoError(t, err) // get address of miner so we can check power miningAddress, err := minerNode.MiningAddress(ctx) require.NoError(t, err) // start mining for miner node to seal and schedule PoSting require.NoError(t, minerNode.MiningStart(ctx)) defer func() { require.NoError(t, minerNode.MiningStop(ctx)) }() // Since the miner does not yet have power, we still need the genesis node to mine // the miner's commitSector and the submitPoSt messages series.CtxMiningNext(ctx, 2) // start sealing err = minerNode.SealNow(ctx) require.NoError(t, err) // We know the miner has sealed and committed a sector if their power increases on chain. // Wait up to 3 minutes for that to happen. for i := 0; i < 180; i++ { power, err := minerNode.MinerPower(ctx, miningAddress) require.NoError(t, err) if power.Power.GreaterThan(types.ZeroBytes) { // miner has gained power, so seal was successful return } time.Sleep(time.Second) } assert.Fail(t, "timed out waiting for miner to gain power from sealing") }
1
20,899
nit: there should be a newline between `types` and `go-ipfs-files`.
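Applied to the import block in the patch above, the nit would read as follows (a fragment of the import section only), with a blank line separating the project-internal `types` import from the external `go-ipfs-files` import:

import (
	"time"

	"github.com/filecoin-project/go-filecoin/types"

	files "github.com/ipfs/go-ipfs-files"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)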
filecoin-project-venus
go
@@ -0,0 +1,5 @@ +package reacher + +var ( + RetryAfter = &retryAfterDuration +)
1
1
15,710
would be nice to have this as a parameter (Options), re: technical debt discussions
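A rough sketch of what the reviewer is suggesting; all names here (`Options`, `RetryAfter`, `newReacher`, the default value) are hypothetical illustrations rather than bee's actual API. The idea is to pass the retry interval through an options struct instead of exporting a package-level pointer for tests to patch:

package reacher

import "time"

// Hypothetical default; the real value lives elsewhere in the bee codebase.
const defaultRetryAfter = 15 * time.Second

// Options carries tunables that tests (or callers) can override explicitly.
type Options struct {
	RetryAfter time.Duration
}

type reacher struct {
	retryAfter time.Duration
}

// newReacher applies the option, falling back to the default when unset.
func newReacher(o *Options) *reacher {
	r := &reacher{retryAfter: defaultRetryAfter}
	if o != nil && o.RetryAfter > 0 {
		r.retryAfter = o.RetryAfter
	}
	return r
}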
ethersphere-bee
go
@@ -10,10 +10,13 @@ import java.util.List; import java.util.Objects; import java.util.stream.Collectors; +import org.apache.commons.lang3.ArrayUtils; + import net.sourceforge.pmd.PMDVersion; import net.sourceforge.pmd.util.fxdesigner.app.DesignerRoot; import net.sourceforge.pmd.util.fxdesigner.util.DesignerUtil; +import com.sun.javafx.fxml.builder.ProxyBuilder; import javafx.application.Application; import javafx.collections.ObservableList; import javafx.fxml.FXMLLoader;
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.util.fxdesigner; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import net.sourceforge.pmd.PMDVersion; import net.sourceforge.pmd.util.fxdesigner.app.DesignerRoot; import net.sourceforge.pmd.util.fxdesigner.util.DesignerUtil; import javafx.application.Application; import javafx.collections.ObservableList; import javafx.fxml.FXMLLoader; import javafx.scene.Parent; import javafx.scene.Scene; import javafx.scene.image.Image; import javafx.stage.Stage; /** * Main class for the designer, launched only if {@link DesignerStarter} detected JavaFX support. * * @author Clément Fournier * @since 6.0.0 */ public class Designer extends Application { private boolean parseParameters(Parameters params) { List<String> raw = params.getRaw(); if (!raw.contains("-v") && !raw.contains("--verbose")) { // error output is disabled by default System.err.close(); return false; } return true; } @Override public void start(Stage stage) throws IOException { boolean isDeveloperMode = parseParameters(getParameters()); FXMLLoader loader = new FXMLLoader(DesignerUtil.getFxml("designer.fxml")); DesignerRoot owner = new DesignerRoot(stage, isDeveloperMode); MainDesignerController mainController = new MainDesignerController(owner); NodeInfoPanelController nodeInfoPanelController = new NodeInfoPanelController(mainController); XPathPanelController xpathPanelController = new XPathPanelController(mainController); SourceEditorController sourceEditorController = new SourceEditorController(mainController); loader.setControllerFactory(type -> { if (type == MainDesignerController.class) { return mainController; } else if (type == NodeInfoPanelController.class) { return nodeInfoPanelController; } else if (type == XPathPanelController.class) { return xpathPanelController; } else if (type == SourceEditorController.class) { return sourceEditorController; } else { // default behavior for controllerFactory: try { return type.newInstance(); } catch (Exception exc) { exc.printStackTrace(); throw new RuntimeException(exc); // fatal, just bail... } } }); stage.setOnCloseRequest(e -> mainController.shutdown()); Parent root = loader.load(); Scene scene = new Scene(root); stage.setTitle("PMD Rule Designer (v " + PMDVersion.VERSION + ')'); setIcons(stage); stage.setScene(scene); stage.show(); } private void setIcons(Stage primaryStage) { ObservableList<Image> icons = primaryStage.getIcons(); final String dirPrefix = "icons/app/"; List<String> imageNames = Arrays.asList("designer_logo.jpeg"); // TODO make more icon sizes List<Image> images = imageNames.stream() .map(s -> dirPrefix + s) .map(s -> getClass().getResourceAsStream(s)) .filter(Objects::nonNull) .map(Image::new) .collect(Collectors.toList()); icons.addAll(images); } public static void main(String[] args) { launch(args); } }
1
15,441
complete ignorance here but is it ok to import this from `com.sun`? can it break under non-oracle JREs?
pmd-pmd
java
@@ -61,6 +61,13 @@ func (tr *tracer) Start(ctx context.Context, name string, o ...apitrace.SpanOpti span := startSpanInternal(name, parent, remoteParent, opts) span.tracer = tr + if span.IsRecordingEvents() { + sps, _ := spanProcessors.Load().(spanProcessorMap) + for sp := range sps { + sp.OnStart(span.data) + } + } + ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end return newContext(ctx, span), span
1
// Copyright 2019, OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" "go.opentelemetry.io/api/core" "go.opentelemetry.io/api/tag" apitrace "go.opentelemetry.io/api/trace" ) type tracer struct { name string component string resources []core.KeyValue } var _ apitrace.Tracer = &tracer{} func (tr *tracer) Start(ctx context.Context, name string, o ...apitrace.SpanOption) (context.Context, apitrace.Span) { var opts apitrace.SpanOptions var parent core.SpanContext var remoteParent bool //TODO [rghetia] : Add new option for parent. If parent is configured then use that parent. for _, op := range o { op(&opts) } // TODO: [rghetia] ChildOfRelationship is used to indicate that the parent is remote // and its context is received as part of a request. There are two possibilities // 1. Remote is trusted. So continue using same trace. // tracer.Start(ctx, "some name", ChildOf(remote_span_context)) // 2. Remote is not trusted. In this case create a root span and then add the remote as link // span := tracer.Start(ctx, "some name") // span.Link(remote_span_context, ChildOfRelationship) if opts.Reference.SpanContext != core.EmptySpanContext() && opts.Reference.RelationshipType == apitrace.ChildOfRelationship { parent = opts.Reference.SpanContext remoteParent = true } else { if p := fromContext(ctx); p != nil { p.addChild() parent = p.spanContext } } span := startSpanInternal(name, parent, remoteParent, opts) span.tracer = tr ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end return newContext(ctx, span), span } func (tr *tracer) WithSpan(ctx context.Context, name string, body func(ctx context.Context) error) error { ctx, span := tr.Start(ctx, name) defer span.Finish() if err := body(ctx); err != nil { // TODO: set event with boolean attribute for error. return err } return nil } func (tr *tracer) WithService(name string) apitrace.Tracer { tr.name = name return tr } // WithResources does nothing and returns noop implementation of apitrace.Tracer. func (tr *tracer) WithResources(res ...core.KeyValue) apitrace.Tracer { tr.resources = res return tr } // WithComponent does nothing and returns noop implementation of apitrace.Tracer. func (tr *tracer) WithComponent(component string) apitrace.Tracer { tr.component = component return tr } func (tr *tracer) Inject(ctx context.Context, span apitrace.Span, injector apitrace.Injector) { injector.Inject(span.SpanContext(), tag.NewEmptyMap()) }
1
9,781
Is this some way of saying that the span is going to be sampled? I'm basing this question on the condition I saw for the `OnEnd` to be called - `mustExport := s.spanContext.IsSampled() && )`. Shouldn't the condition be `s.spanContext.IsSampled()` to match the condition for calling the `OnEnd`?
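Restated as code, the question is whether the new block in the patch should be gated like the sketch below, mirroring the sampling check the comment says guards `OnEnd`. This fragment only recombines identifiers visible in the patch and the comment; it is not the actual otel change:

	// Alternative gating the reviewer is asking about: use the sampling check
	// (as for OnEnd) rather than span.IsRecordingEvents().
	if span.spanContext.IsSampled() {
		sps, _ := spanProcessors.Load().(spanProcessorMap)
		for sp := range sps {
			sp.OnStart(span.data)
		}
	}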
open-telemetry-opentelemetry-go
go
@@ -650,6 +650,13 @@ func (c *CStorVolumeReplicaController) syncCvr(cvr *apis.CStorVolumeReplica) { } else { cvr.Status.Capacity = *capacity } + + err = volumereplica.GetAndUpdateSnapshotInfo(c.clientset, cvr) + if err != nil { + klog.Errorf( + "Unable to update snapshot list details in cvr %s status err: %v", + cvr.Name, err) + } } func (c *CStorVolumeReplicaController) reconcileVersion(cvr *apis.CStorVolumeReplica) (
1
/* Copyright 2018 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package replicacontroller import ( "encoding/json" "fmt" "os" "reflect" "strings" "github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common" "github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned" "github.com/openebs/maya/pkg/debug" errors "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" "k8s.io/klog" ) const ( v130 = "1.3.0" ) type upgradeParams struct { cvr *apis.CStorVolumeReplica client clientset.Interface } type upgradeFunc func(u *upgradeParams) (*apis.CStorVolumeReplica, error) var ( upgradeMap = map[string]upgradeFunc{ "1.0.0": setReplicaID, "1.1.0": setReplicaID, "1.2.0": setReplicaID, } ) // CVRPatch struct represent the struct used to patch // the cvr object type CVRPatch struct { // Op defines the operation Op string `json:"op"` // Path defines the key path // eg. for // { // "Name": "openebs" // Category: { // "Inclusive": "v1", // "Rank": "A" // } // } // The path of 'Inclusive' would be // "/Name/Category/Inclusive" Path string `json:"path"` Value string `json:"value"` } // syncHandler handles CVR changes based on the provided // operation. It reconciles desired state of CVR with the // actual state. // // Finally, it updates CVR Status func (c *CStorVolumeReplicaController) syncHandler( key string, operation common.QueueOperation, ) error { cvrGot, err := c.getVolumeReplicaResource(key) if err != nil { return err } if cvrGot == nil { return errors.Errorf( "failed to reconcile cvr {%s}: object not found", key, ) } cvrGot, err = c.populateVersion(cvrGot) if err != nil { klog.Errorf("failed to add versionDetails to cvr %s:%s", cvrGot.Name, err.Error()) c.recorder.Event( cvrGot, corev1.EventTypeWarning, "FailedPopulate", fmt.Sprintf("Failed to add current version: %s", err.Error()), ) return nil } cvrGot, err = c.reconcileVersion(cvrGot) if err != nil { klog.Errorf("failed to upgrade cvr %s:%s", cvrGot.Name, err.Error()) c.recorder.Event( cvrGot, corev1.EventTypeWarning, "FailedUpgrade", fmt.Sprintf("Failed to upgrade cvr to %s version: %s", cvrGot.VersionDetails.Desired, err.Error(), ), ) cvrGot.VersionDetails.Status.SetErrorStatus( "Failed to reconcile cvr version", err, ) _, err = c.clientset.OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace).Update(cvrGot) if err != nil { klog.Errorf("failed to update versionDetails status for cvr %s:%s", cvrGot.Name, err.Error()) } return nil } status, err := c.cVREventHandler(operation, cvrGot) if status == "" { // TODO // need to rethink on this logic !! 
// status holds more importance than error return nil } cvrGot.Status.LastUpdateTime = metav1.Now() if cvrGot.Status.Phase != apis.CStorVolumeReplicaPhase(status) { cvrGot.Status.LastTransitionTime = cvrGot.Status.LastUpdateTime // set phase based on received status cvrGot.Status.Phase = apis.CStorVolumeReplicaPhase(status) } // need to update cvr before returning this error if err != nil { if debug.EI.IsCVRUpdateErrorInjected() { return errors.Errorf("CVR update error via injection") } _, err1 := c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace). Update(cvrGot) if err1 != nil { return errors.Wrapf( err, "failed to reconcile cvr {%s}: failed to update cvr with phase {%s}: {%s}", key, cvrGot.Status.Phase, err1.Error(), ) } return errors.Wrapf(err, "failed to reconcile cvr {%s}", key) } // Synchronize cstor volume total allocated and // used capacity fields on CVR object. // Any kind of sync activity should be done from here. c.syncCvr(cvrGot) _, err = c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrGot.Namespace). Update(cvrGot) if err != nil { return errors.Wrapf( err, "failed to reconcile cvr {%s}: failed to update cvr with phase {%s}", key, cvrGot.Status.Phase, ) } klog.V(4).Infof( "cvr {%s} reconciled successfully with current phase being {%s}", key, cvrGot.Status.Phase, ) return nil } func (c *CStorVolumeReplicaController) cVREventHandler( operation common.QueueOperation, cvrObj *apis.CStorVolumeReplica, ) (string, error) { err := volumereplica.CheckValidVolumeReplica(cvrObj) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureValidate), string(common.MessageResourceFailValidate), ) return string(apis.CVRStatusOffline), err } // PoolNameHandler tries to get pool name and blocks for // particular number of attempts. 
var noOfAttempts = 2 if !common.PoolNameHandler(cvrObj, noOfAttempts) { return string(cvrObj.Status.Phase), errors.New("pool not found") } // cvr is created at zfs in the form poolname/volname fullVolName := volumereplica.PoolNameFromCVR(cvrObj) + "/" + cvrObj.Labels["cstorvolume.openebs.io/name"] switch operation { case common.QOpAdd: klog.Infof( "will process add event for cvr {%s} as volume {%s}", cvrObj.Name, fullVolName, ) status, err := c.cVRAddEventHandler(cvrObj, fullVolName) return status, err case common.QOpDestroy: klog.Infof( "will process delete event for cvr {%s} as volume {%s}", cvrObj.Name, fullVolName, ) err := volumereplica.DeleteVolume(fullVolName) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureDestroy), string(common.MessageResourceFailDestroy), ) return string(apis.CVRStatusDeletionFailed), err } err = c.removeFinalizer(cvrObj) if err != nil { c.recorder.Event( cvrObj, corev1.EventTypeWarning, string(common.FailureRemoveFinalizer), string(common.MessageResourceFailDestroy), ) return string(apis.CVRStatusDeletionFailed), err } return "", nil case common.QOpModify: fallthrough case common.QOpSync: klog.V(4).Infof( "will process sync event for cvr {%s} as volume {%s}", cvrObj.Name, operation, ) if isCVRCreateStatus(cvrObj) { return c.cVRAddEventHandler(cvrObj, fullVolName) } return c.getCVRStatus(cvrObj) } klog.Errorf( "failed to handle event for cvr {%s}: operation {%s} not supported", cvrObj.Name, string(operation), ) return string(apis.CVRStatusInvalid), nil } // removeFinalizer removes finalizers present in // CVR resource func (c *CStorVolumeReplicaController) removeFinalizer( cvrObj *apis.CStorVolumeReplica, ) error { cvrPatch := []CVRPatch{ CVRPatch{ Op: "remove", Path: "/metadata/finalizers", }, } cvrPatchBytes, err := json.Marshal(cvrPatch) if err != nil { return errors.Wrapf( err, "failed to remove finalizers from cvr {%s}", cvrObj.Name, ) } _, err = c.clientset. OpenebsV1alpha1(). CStorVolumeReplicas(cvrObj.Namespace). Patch(cvrObj.Name, types.JSONPatchType, cvrPatchBytes) if err != nil { return errors.Wrapf( err, "failed to remove finalizers from cvr {%s}", cvrObj.Name, ) } klog.Infof("finalizers removed successfully from cvr {%s}", cvrObj.Name) return nil } func (c *CStorVolumeReplicaController) cVRAddEventHandler( cVR *apis.CStorVolumeReplica, fullVolName string, ) (string, error) { var err error // lock is to synchronize pool and volumereplica. Until certain pool related // operations are over, the volumereplica threads will be held. common.SyncResources.Mux.Lock() if common.SyncResources.IsImported { common.SyncResources.Mux.Unlock() // To check if volume is already imported with pool. importedFlag := common.CheckForInitialImportedPoolVol( common.InitialImportedPoolVol, fullVolName, ) if importedFlag && !IsEmptyStatus(cVR) { klog.Infof( "CStorVolumeReplica %v is already imported", string(cVR.ObjectMeta.UID), ) c.recorder.Event( cVR, corev1.EventTypeNormal, string(common.SuccessImported), string(common.MessageResourceImported), ) // If the volume already present then get the status of replica from ZFS // and update it with corresponding status phase. If status gives error // then return old phase. return getVolumeReplicaStatus(cVR, fullVolName) } } else { common.SyncResources.Mux.Unlock() } // Below block will be useful when the only cstor-pool-mgmt gets restarted // then it is required to cross-check whether the volume exists or not. 
existingvol, _ := volumereplica.GetVolumes() if common.CheckIfPresent(existingvol, fullVolName) { klog.Warningf( "CStorVolumeReplica %v is already present", string(cVR.GetUID()), ) c.recorder.Event( cVR, corev1.EventTypeWarning, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent), ) // After creating zfs datasets in zpool but update to etcd might be // failed if isEmptyReplicaID(cVR) { cVR.Spec.ReplicaID, err = volumereplica.GetReplicaIDFromZFS(fullVolName) if err != nil { // If error happened then update with same as with existing CVR // phase. So, in next reconciliation it will try to update with // proper changes return string(cVR.Status.Phase), errors.Wrapf(err, "volume replica %s exists", cVR.Name) } } // If the volume already present then get the status of replica from ZFS // and update it with corresponding status return getVolumeReplicaStatus(cVR, fullVolName) } //TODO: Follow best practice while refactor reconciliation logic if isCVRCreateStatus(cVR) { return c.createVolumeReplica(cVR, fullVolName) } return string(apis.CVRStatusOffline), fmt.Errorf( "VolumeReplica offline: %v, %v", cVR.Name, cVR.Labels["cstorvolume.openebs.io/name"], ) } // createVolumeReplica will do following things // 1. If replicaID is empty and if it is new volume generate replicaID. // 2. Trigger ZFS volume dataset create command on success get the status from // ZFS and update it. If `ZFS command` fails then return with same status phase // which is currently holding by CVR. func (c *CStorVolumeReplicaController) createVolumeReplica( cVR *apis.CStorVolumeReplica, fullVolName string) (string, error) { // Setting quorum to true for newly creating Volumes. var quorum = true if IsRecreateStatus(cVR) { klog.Infof( "Pool is recreated hence creating the volumes by setting off the quorum property", ) quorum = false } // We should generate replicaID for new volume replicas only if it doesn't has // replica ID. 
	if isEmptyReplicaID(cVR) && (IsEmptyStatus(cVR) || IsInitStatus(cVR)) {
		if err := volumereplica.GenerateReplicaID(cVR); err != nil {
			klog.Errorf("cVR ReplicaID creation failure: %v", err.Error())
			return string(cVR.Status.Phase), err
		}
	}

	if len(cVR.Spec.ReplicaID) == 0 {
		return string(cVR.Status.Phase), errors.New("ReplicaID is not set")
	}

	err := volumereplica.CreateVolumeReplica(cVR, fullVolName, quorum)
	if err != nil {
		klog.Errorf("cVR creation failure: %v", err.Error())
		c.recorder.Event(
			cVR,
			corev1.EventTypeWarning,
			string(common.FailureCreate),
			fmt.Sprintf("failed to create volume replica error: %v", err.Error()),
		)
		return string(cVR.Status.Phase), err
	}

	c.recorder.Event(
		cVR,
		corev1.EventTypeNormal,
		string(common.SuccessCreated),
		string(common.MessageResourceCreated),
	)

	klog.Infof(
		"cVR creation successful: %v, %v",
		cVR.ObjectMeta.Name,
		string(cVR.GetUID()),
	)

	return getVolumeReplicaStatus(cVR, fullVolName)
}

// getVolumeReplicaStatus return the status of replica after executing ZFS
// stats command and return previous state and error if any error occured while
// getting the status from ZFS
func getVolumeReplicaStatus(
	cVR *apis.CStorVolumeReplica,
	fullVolName string) (string, error) {
	status, err := volumereplica.Status(fullVolName)
	if err != nil {
		return string(cVR.Status.Phase), err
	}
	return status, nil
}

// getVolumeReplicaResource returns object corresponding to the resource key
func (c *CStorVolumeReplicaController) getVolumeReplicaResource(
	key string,
) (*apis.CStorVolumeReplica, error) {
	// Convert the key(namespace/name) string into a distinct name
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
		return nil, nil
	}

	cStorVolumeReplicaUpdated, err := c.clientset.OpenebsV1alpha1().
		CStorVolumeReplicas(namespace).
		Get(name, metav1.GetOptions{})
	if err != nil {
		// The cStorPool resource may no longer exist, in which case we stop
		// processing.
		if k8serrors.IsNotFound(err) {
			runtime.HandleError(
				fmt.Errorf(
					"cStorVolumeReplicaUpdated '%s' in work queue no longer exists",
					key,
				),
			)
			return nil, nil
		}
		return nil, err
	}
	return cStorVolumeReplicaUpdated, nil
}

// IsRightCStorVolumeReplica is to check if the cvr
// request is for particular pod/application.
func IsRightCStorVolumeReplica(cVR *apis.CStorVolumeReplica) bool {
	if strings.TrimSpace(string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"])) != "" {
		return os.Getenv(string(common.OpenEBSIOCStorID)) ==
			string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"])
	}
	if strings.TrimSpace(string(cVR.ObjectMeta.Labels["cstorpoolinstance.openebs.io/uid"])) != "" {
		return os.Getenv(string(common.OpenEBSIOCSPIID)) ==
			string(cVR.ObjectMeta.Labels["cstorpoolinstance.openebs.io/uid"])
	}
	return false
}

// IsDestroyEvent is to check if the call is for CStorVolumeReplica destroy.
func IsDestroyEvent(cVR *apis.CStorVolumeReplica) bool {
	if cVR.ObjectMeta.DeletionTimestamp != nil {
		return true
	}
	return false
}

// IsOnlyStatusChange is to check only status change of cStorVolumeReplica object.
func IsOnlyStatusChange(oldCVR, newCVR *apis.CStorVolumeReplica) bool {
	if reflect.DeepEqual(oldCVR.Spec, newCVR.Spec) &&
		!reflect.DeepEqual(oldCVR.Status, newCVR.Status) {
		return true
	}
	return false
}

// IsDeletionFailedBefore flags if status of
// cvr is CVRStatusDeletionFailed
func IsDeletionFailedBefore(cvrObj *apis.CStorVolumeReplica) bool {
	return cvrObj.Status.Phase == apis.CVRStatusDeletionFailed
}

// IsOnlineStatus is to check if the status of cStorVolumeReplica object is
// Healthy.
func IsOnlineStatus(cVR *apis.CStorVolumeReplica) bool {
	if string(cVR.Status.Phase) == string(apis.CVRStatusOnline) {
		klog.Infof("cVR Healthy status: %v", string(cVR.ObjectMeta.UID))
		return true
	}
	klog.Infof(
		"cVR '%s': uid '%s': phase '%s': is_healthy_status: false",
		string(cVR.ObjectMeta.Name),
		string(cVR.ObjectMeta.UID),
		cVR.Status.Phase,
	)
	return false
}

// IsEmptyStatus is to check if the status of cStorVolumeReplica object is empty.
func IsEmptyStatus(cVR *apis.CStorVolumeReplica) bool {
	if string(cVR.Status.Phase) == string(apis.CVRStatusEmpty) {
		klog.Infof("cVR empty status: %v", string(cVR.ObjectMeta.UID))
		return true
	}
	klog.Infof(
		"cVR '%s': uid '%s': phase '%s': is_empty_status: false",
		string(cVR.ObjectMeta.Name),
		string(cVR.ObjectMeta.UID),
		cVR.Status.Phase,
	)
	return false
}

// IsInitStatus is to check if the status of cStorVolumeReplica object is pending.
func IsInitStatus(cVR *apis.CStorVolumeReplica) bool {
	if string(cVR.Status.Phase) == string(apis.CVRStatusInit) {
		klog.Infof("cVR pending: %v", string(cVR.ObjectMeta.UID))
		return true
	}
	klog.V(4).Infof("Not pending status: %v", string(cVR.ObjectMeta.UID))
	return false
}

// IsRecreateStatus is to check if the status of cStorVolumeReplica object is
// in recreated state.
func IsRecreateStatus(cVR *apis.CStorVolumeReplica) bool {
	if string(cVR.Status.Phase) == string(apis.CVRStatusRecreate) {
		klog.Infof("cVR Recreate: %v", string(cVR.ObjectMeta.UID))
		return true
	}
	klog.V(4).Infof("Not Recreate status: %v", string(cVR.ObjectMeta.UID))
	return false
}

// isCVRCreateStatus returns true if volume replica needs to be created else
// return false
func isCVRCreateStatus(cVR *apis.CStorVolumeReplica) bool {
	cVRStatus := string(cVR.Status.Phase)
	if strings.EqualFold(cVRStatus, string(apis.CVRStatusEmpty)) ||
		strings.EqualFold(cVRStatus, string(apis.CVRStatusRecreate)) ||
		strings.EqualFold(cVRStatus, string(apis.CVRStatusInit)) {
		return true
	}
	return false
}

func isEmptyReplicaID(cVR *apis.CStorVolumeReplica) bool {
	return cVR.Spec.ReplicaID == ""
}

// getCVRStatus is a wrapper that fetches the status of cstor volume.
func (c *CStorVolumeReplicaController) getCVRStatus(
	cVR *apis.CStorVolumeReplica,
) (string, error) {
	volumeName, err := volumereplica.GetVolumeName(cVR)
	if err != nil {
		return "", fmt.Errorf("unable to get volume name:%s", err.Error())
	}
	replicaStatus, err := volumereplica.Status(volumeName)
	if err != nil {
		// ToDO : Put error in event recorder
		c.recorder.Event(
			cVR,
			corev1.EventTypeWarning,
			string(common.FailureStatusSync),
			string(common.MessageResourceFailStatusSync),
		)
		return "", err
	}
	return replicaStatus, nil
}

// syncCvr updates field on CVR object after fetching the values from zfs utility.
func (c *CStorVolumeReplicaController) syncCvr(cvr *apis.CStorVolumeReplica) {
	// Get the zfs volume name corresponding to this cvr.
	volumeName, err := volumereplica.GetVolumeName(cvr)
	if err != nil {
		klog.Errorf("Unable to sync CVR capacity: %v", err)
		c.recorder.Event(
			cvr,
			corev1.EventTypeWarning,
			string(common.FailureCapacitySync),
			string(common.MessageResourceFailCapacitySync),
		)
	}

	// Get capacity of the volume.
	capacity, err := volumereplica.Capacity(volumeName)
	if err != nil {
		klog.Errorf("Unable to sync CVR capacity: %v", err)
		c.recorder.Event(
			cvr,
			corev1.EventTypeWarning,
			string(common.FailureCapacitySync),
			string(common.MessageResourceFailCapacitySync),
		)
	} else {
		cvr.Status.Capacity = *capacity
	}
}

func (c *CStorVolumeReplicaController) reconcileVersion(cvr *apis.CStorVolumeReplica) (
	*apis.CStorVolumeReplica, error,
) {
	var err error
	// the below code uses deep copy to have the state of object just before
	// any update call is done so that on failure the last state object can be returned
	if cvr.VersionDetails.Status.Current != cvr.VersionDetails.Desired {
		if !apis.IsCurrentVersionValid(cvr.VersionDetails.Status.Current) {
			return cvr, errors.Errorf("invalid current version %s", cvr.VersionDetails.Status.Current)
		}
		if !apis.IsDesiredVersionValid(cvr.VersionDetails.Desired) {
			return cvr, errors.Errorf("invalid desired version %s", cvr.VersionDetails.Desired)
		}
		cvrObj := cvr.DeepCopy()
		if cvrObj.VersionDetails.Status.State != apis.ReconcileInProgress {
			cvrObj.VersionDetails.Status.SetInProgressStatus()
			cvrObj, err = c.clientset.OpenebsV1alpha1().
				CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj)
			if err != nil {
				return cvr, err
			}
		}
		path := strings.Split(cvrObj.VersionDetails.Status.Current, "-")[0]
		u := &upgradeParams{
			cvr:    cvrObj,
			client: c.clientset,
		}
		// Get upgrade function for corresponding path, if path does not
		// exits then no upgrade is required and funcValue will be nil.
		funcValue := upgradeMap[path]
		if funcValue != nil {
			cvrObj, err = funcValue(u)
			if err != nil {
				return cvrObj, err
			}
		}
		cvr = cvrObj.DeepCopy()
		cvrObj.VersionDetails.SetSuccessStatus()
		cvrObj, err = c.clientset.OpenebsV1alpha1().
			CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj)
		if err != nil {
			return cvr, err
		}
		return cvrObj, nil
	}
	return cvr, nil
}

// populateVersion assigns VersionDetails for old cvr object
func (c *CStorVolumeReplicaController) populateVersion(cvr *apis.CStorVolumeReplica) (
	*apis.CStorVolumeReplica, error,
) {
	v := cvr.Labels[string(apis.OpenEBSVersionKey)]
	// 1.3.0 onwards new CVR will have the field populated during creation
	if v < v130 && cvr.VersionDetails.Status.Current == "" {
		cvrObj := cvr.DeepCopy()
		cvrObj.VersionDetails.Status.Current = v
		cvrObj.VersionDetails.Desired = v
		cvrObj, err := c.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cvrObj.Namespace).
			Update(cvrObj)
		if err != nil {
			return cvr, err
		}
		klog.Infof("Version %s added on cvr %s", v, cvrObj.Name)
		return cvrObj, nil
	}
	return cvr, nil
}

// setReplicaID sets the replica_id if not present for old cvrs when
// they are upgraded to version 1.3.0 or above.
func setReplicaID(u *upgradeParams) (*apis.CStorVolumeReplica, error) {
	cvr := u.cvr
	cvrObj := cvr.DeepCopy()
	err := volumereplica.GetAndUpdateReplicaID(cvrObj)
	if err != nil {
		return cvr, err
	}
	cvrObj, err = u.client.OpenebsV1alpha1().
		CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj)
	if err != nil {
		return cvr, err
	}
	return cvrObj, nil
}
1
18,255
just calling UpdateSnapshotInfo() should be fine
openebs-maya
go
@@ -156,8 +156,8 @@ namespace Datadog.Trace.Logging
                 return rate;
             }

-            // We don't want to rate limit messages by default when in debug mode
-            return GlobalSettings.Source.DebugEnabled ? 0 : DefaultLogMessageRateLimit;
+            // By default, we don't rate limit log messages;
+            return 0;
         }

         private static string GetLogDirectory()
1
using System;
using System.Diagnostics;
using System.IO;
using System.Runtime.InteropServices;
using Datadog.Trace.Configuration;
using Datadog.Trace.Util;
using Datadog.Trace.Vendors.Serilog;
using Datadog.Trace.Vendors.Serilog.Core;
using Datadog.Trace.Vendors.Serilog.Events;
using Datadog.Trace.Vendors.Serilog.Sinks.File;

namespace Datadog.Trace.Logging
{
    internal static class DatadogLogging
    {
        internal static readonly LoggingLevelSwitch LoggingLevelSwitch = new LoggingLevelSwitch(DefaultLogLevel);

        private const int DefaultLogMessageRateLimit = 60;
        private const LogEventLevel DefaultLogLevel = LogEventLevel.Information;

        private static readonly long? MaxLogFileSize = 10 * 1024 * 1024;

        private static readonly IDatadogLogger SharedLogger = null;
        private static readonly ILogger InternalLogger = null;

        static DatadogLogging()
        {
            // No-op for if we fail to construct the file logger
            var nullRateLimiter = new NullLogRateLimiter();
            InternalLogger = new LoggerConfiguration()
                .WriteTo.Sink<NullSink>()
                .CreateLogger();
            SharedLogger = new DatadogSerilogLogger(InternalLogger, nullRateLimiter);

            try
            {
                if (GlobalSettings.Source.DebugEnabled)
                {
                    LoggingLevelSwitch.MinimumLevel = LogEventLevel.Debug;
                }

                var maxLogSizeVar = EnvironmentHelpers.GetEnvironmentVariable(ConfigurationKeys.MaxLogFileSize);
                if (long.TryParse(maxLogSizeVar, out var maxLogSize))
                {
                    // No verbose or debug logs
                    MaxLogFileSize = maxLogSize;
                }

                string logDirectory = null;
                try
                {
                    logDirectory = GetLogDirectory();
                }
                catch
                {
                    // Do nothing when an exception is thrown for attempting to access the filesystem
                }

                // ReSharper disable once ConditionIsAlwaysTrueOrFalse
                if (logDirectory == null)
                {
                    return;
                }

                // Ends in a dash because of the date postfix
                var managedLogPath = Path.Combine(logDirectory, $"dotnet-tracer-managed-{DomainMetadata.ProcessName}-.log");

                var loggerConfiguration = new LoggerConfiguration()
                    .Enrich.FromLogContext()
                    .MinimumLevel.ControlledBy(LoggingLevelSwitch)
                    .WriteTo.File(
                        managedLogPath,
                        outputTemplate: "{Timestamp:yyyy-MM-dd HH:mm:ss.fff zzz} [{Level:u3}] {Message:lj}{NewLine}{Exception}{Properties}{NewLine}",
                        rollingInterval: RollingInterval.Day,
                        rollOnFileSizeLimit: true,
                        fileSizeLimitBytes: MaxLogFileSize);

                try
                {
                    loggerConfiguration.Enrich.WithProperty("MachineName", DomainMetadata.MachineName);
                    loggerConfiguration.Enrich.WithProperty("Process", $"[{DomainMetadata.ProcessId} {DomainMetadata.ProcessName}]");
                    loggerConfiguration.Enrich.WithProperty("AppDomain", $"[{DomainMetadata.AppDomainId} {DomainMetadata.AppDomainName}]");
                    loggerConfiguration.Enrich.WithProperty("TracerVersion", TracerConstants.AssemblyVersion);
                }
                catch
                {
                    // At all costs, make sure the logger works when possible.
                }

                InternalLogger = loggerConfiguration.CreateLogger();
                SharedLogger = new DatadogSerilogLogger(InternalLogger, nullRateLimiter);

                var rate = GetRateLimit();
                ILogRateLimiter rateLimiter = rate == 0 ? nullRateLimiter : new LogRateLimiter(rate);
                SharedLogger = new DatadogSerilogLogger(InternalLogger, rateLimiter);
            }
            catch
            {
                // Don't let this exception bubble up as this logger is for debugging and is non-critical
            }
        }

        public static IDatadogLogger GetLoggerFor(Type classType)
        {
            // Tells us which types are loaded, when, and how often.
            SharedLogger.Debug($"Logger retrieved for: {classType.AssemblyQualifiedName}");
            return SharedLogger;
        }

        public static IDatadogLogger GetLoggerFor<T>()
        {
            return GetLoggerFor(typeof(T));
        }

        [Obsolete("This method is deprecated and will be removed. Use GetLoggerFor() instead. " +
                  "Kept for backwards compatability where there is a version mismatch between manual and automatic instrumentation")]
        public static ILogger GetLogger(Type classType)
        {
            // Tells us which types are loaded, when, and how often.
            SharedLogger.Debug($"Obsolete logger retrieved for: {classType.AssemblyQualifiedName}");
            return InternalLogger;
        }

        [Obsolete("This method is deprecated and will be removed. Use GetLoggerFor() instead. " +
                  "Kept for backwards compatability where there is a version mismatch between manual and automatic instrumentation")]
        public static ILogger For<T>()
        {
            return GetLogger(typeof(T));
        }

        internal static void Reset()
        {
            LoggingLevelSwitch.MinimumLevel = DefaultLogLevel;
        }

        internal static void SetLogLevel(LogEventLevel logLevel)
        {
            LoggingLevelSwitch.MinimumLevel = logLevel;
        }

        internal static void UseDefaultLevel()
        {
            SetLogLevel(DefaultLogLevel);
        }

        private static int GetRateLimit()
        {
            string rawRateLimit = EnvironmentHelpers.GetEnvironmentVariable(ConfigurationKeys.LogRateLimit);
            if (!string.IsNullOrEmpty(rawRateLimit)
                && int.TryParse(rawRateLimit, out var rate)
                && (rate >= 0))
            {
                return rate;
            }

            // We don't want to rate limit messages by default when in debug mode
            return GlobalSettings.Source.DebugEnabled ? 0 : DefaultLogMessageRateLimit;
        }

        private static string GetLogDirectory()
        {
            string logDirectory = EnvironmentHelpers.GetEnvironmentVariable(ConfigurationKeys.LogDirectory);
            if (logDirectory == null)
            {
                var nativeLogFile = EnvironmentHelpers.GetEnvironmentVariable(ConfigurationKeys.ProfilerLogPath);

                if (!string.IsNullOrEmpty(nativeLogFile))
                {
                    logDirectory = Path.GetDirectoryName(nativeLogFile);
                }
            }

            // This entire block may throw a SecurityException if not granted the System.Security.Permissions.FileIOPermission
            // because of the following API calls
            // - Directory.Exists
            // - Environment.GetFolderPath
            // - Path.GetTempPath
            if (logDirectory == null)
            {
#if NETFRAMEWORK
                logDirectory = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData), @"Datadog .NET Tracer", "logs");
#else
                if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
                {
                    logDirectory = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData), @"Datadog .NET Tracer", "logs");
                }
                else
                {
                    // Linux
                    logDirectory = "/var/log/datadog/dotnet";
                }
#endif
            }

            if (!Directory.Exists(logDirectory))
            {
                try
                {
                    Directory.CreateDirectory(logDirectory);
                }
                catch
                {
                    // Unable to create the directory meaning that the user
                    // will have to create it on their own.
                    // Last effort at writing logs
                    logDirectory = Path.GetTempPath();
                }
            }

            return logDirectory;
        }
    }
}
1
19,586
Was `DefaultLogMessageRateLimit` not configurable anywhere?
DataDog-dd-trace-dotnet
.cs